| prompt (string, lengths 19–1.03M) | completion (string, lengths 4–2.12k) | api (string, lengths 8–90) |
|---|---|---|
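Each row pairs a `prompt` (the code context leading up to a call site) with the `completion` (the next source line) and the fully qualified `api` that the completion invokes. A minimal sketch of how the first row below decomposes, using a hypothetical `record` dict with the prompt abbreviated:

```python
# Hypothetical record mirroring the three columns above; the prompt is abbreviated
# from the pandas test excerpt that follows, the other two fields are copied verbatim.
record = {
    "prompt": "...\ns = Series(index=[1, 2, 3])\nrs = s.apply(lambda x: x)\n",
    "completion": "tm.assert_series_equal(s, rs)",
    "api": "pandas.util.testing.assert_series_equal",
}
# For this row, the completion calls the listed API through the `tm` alias.
assert record["api"].rsplit(".", 1)[-1] in record["completion"]
```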
# coding=utf-8
# pylint: disable-msg=E1101,W0612

import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm

from .common import TestData


class TestSeriesApply(TestData, tm.TestCase):

    def test_apply(self):
        with np.errstate(all='ignore'):
            assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))

            # elementwise-apply
            import math
            assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))

        # how to handle Series result, #2316
        result = self.ts.apply(lambda x: Series(
            [x, x ** 2], index=['x', 'x^2']))
        expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
        tm.assert_frame_equal(result, expected)

        # empty series
        s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)

        # check all metadata (GH 9322)
        self.assertIsNot(s, rs)
        self.assertIs(s.index, rs.index)
        self.assertEqual(s.dtype, rs.dtype)
        self.assertEqual(s.name, rs.name)

        # index but no data
        s = Series(index=[1, 2, 3])
        rs = s.apply(lambda x: x)
        tm.assert_series_equal(s, rs)  # completion; api: pandas.util.testing.assert_series_equal
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
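# Use a fixed partition count so partitioning does not depend on the test machine's core count.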
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
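# Pattern used throughout this file: run the operation on the pandas frame first; if
# pandas raises, assert that modin raises the same exception type, otherwise compare
# the modin result against the pandas result with df_equals.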
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
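# repr() forces the comparison result to materialize so an error raised only on evaluation still surfaces inside pytest.raises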
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
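# Same parity pattern as inter_df_math_helper, but only with scalar operands for the reflected (r*) operators.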
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to the other datasets once we have an efficient way of preprocessing for
# positive values. Negative integers are not handled efficiently, so the large 100x100
# dataset is skipped here.
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
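# Only exercise this func on the numeric fixtures; the fixture name is encoded in the pytest node id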
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but creating it here keeps confusing list-comprehension logic
# out of the pytest.mark.parametrize decorator
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This gets repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
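# Build a three-level MultiIndex on the tested axis and check all() with both integer levels and level names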
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this special case once we can pull the error messages from the backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas. Since there is no real guarantee which value will come first,
# if they don't match, at least make sure that the `freq` row is the same.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
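# Dropping rows by MultiIndex level is expected to emit a UserWarning here,
# consistent with Modin defaulting to pandas for this operation.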
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns APIs
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
pandas_df.drop_duplicates(keep=keep, inplace=False, subset=subset),
)
modin_results = modin_df.drop_duplicates(keep=keep, inplace=True, subset=subset)
pandas_results = pandas_df.drop_duplicates(
keep=keep, inplace=True, subset=subset
)
df_equals(modin_results, pandas_results)
def test_drop_duplicates_with_missing_index_values(self):
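# Regression-style check: drop_duplicates after sort_values should work even when
# the index has gaps (non-contiguous labels).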
data = {
"columns": ["value", "time", "id"],
"index": [
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
20,
21,
22,
23,
24,
25,
26,
27,
32,
33,
34,
35,
36,
37,
38,
39,
40,
41,
],
"data": [
["3", 1279213398000.0, 88.0],
["3", 1279204682000.0, 88.0],
["0", 1245772835000.0, 448.0],
["0", 1270564258000.0, 32.0],
["0", 1267106669000.0, 118.0],
["7", 1300621123000.0, 5.0],
["0", 1251130752000.0, 957.0],
["0", 1311683506000.0, 62.0],
["9", 1283692698000.0, 89.0],
["9", 1270234253000.0, 64.0],
["0", 1285088818000.0, 50.0],
["0", 1218212725000.0, 695.0],
["2", 1383933968000.0, 348.0],
["0", 1368227625000.0, 257.0],
["1", 1454514093000.0, 446.0],
["1", 1428497427000.0, 134.0],
["1", 1459184936000.0, 568.0],
["1", 1502293302000.0, 599.0],
["1", 1491833358000.0, 829.0],
["1", 1485431534000.0, 806.0],
["8", 1351800505000.0, 101.0],
["0", 1357247721000.0, 916.0],
["0", 1335804423000.0, 370.0],
["24", 1327547726000.0, 720.0],
["0", 1332334140000.0, 415.0],
["0", 1309543100000.0, 30.0],
["18", 1309541141000.0, 30.0],
["0", 1298979435000.0, 48.0],
["14", 1276098160000.0, 59.0],
["0", 1233936302000.0, 109.0],
],
}
pandas_df = pandas.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_df = pd.DataFrame(
data["data"], index=data["index"], columns=data["columns"]
)
modin_result = modin_df.sort_values(["id", "time"]).drop_duplicates(["id"])
pandas_result = pandas_df.sort_values(["id", "time"]).drop_duplicates(["id"])
df_equals(modin_result, pandas_result)
def test_drop_duplicates_after_sort(self):
data = [
{"value": 1, "time": 2},
{"value": 1, "time": 1},
{"value": 2, "time": 1},
{"value": 2, "time": 2},
]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
pandas_result = pandas_df.sort_values(["value", "time"]).drop_duplicates(
["value"]
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("how", ["any", "all"], ids=["any", "all"])
def test_dropna(self, data, axis, how):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.dropna(axis=axis, how="invalid")
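# Passing neither a valid `how` nor a `thresh` should raise.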
with pytest.raises(TypeError):
modin_df.dropna(axis=axis, how=None, thresh=None)
with pytest.raises(KeyError):
modin_df.dropna(axis=axis, subset=["NotExists"], how=how)
modin_result = modin_df.dropna(axis=axis, how=how)
pandas_result = pandas_df.dropna(axis=axis, how=how)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.dropna()
modin_df.dropna(inplace=True)
df_equals(modin_df, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(thresh=2, inplace=True)
modin_df.dropna(thresh=2, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_df.dropna(axis=1, how="any", inplace=True)
modin_df.dropna(axis=1, how="any", inplace=True)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.dropna(how="all", axis=[0, 1]),
pandas_df.dropna(how="all", axis=[0, 1]),
)
df_equals(
modin_df.dropna(how="all", axis=(0, 1)),
pandas_df.dropna(how="all", axis=(0, 1)),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_multiple_axes_inplace(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
pandas_df_copy.dropna(how="all", axis=[0, 1], inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
pandas_df_copy.dropna(how="all", axis=(0, 1), inplace=True)
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
column_subset = modin_df.columns[0:2]
df_equals(
modin_df.dropna(how="all", subset=column_subset),
pandas_df.dropna(how="all", subset=column_subset),
)
df_equals(
modin_df.dropna(how="any", subset=column_subset),
pandas_df.dropna(how="any", subset=column_subset),
)
row_subset = modin_df.index[0:2]
df_equals(
modin_df.dropna(how="all", axis=1, subset=row_subset),
pandas_df.dropna(how="all", axis=1, subset=row_subset),
)
df_equals(
modin_df.dropna(how="any", axis=1, subset=row_subset),
pandas_df.dropna(how="any", axis=1, subset=row_subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dropna_subset_error(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused here; it is kept so there is no confusing list-comprehension
# filtering in the pytest.mark.parametrize
with pytest.raises(KeyError):
modin_df.dropna(subset=list("EF"))
if len(modin_df.columns) < 5:
with pytest.raises(KeyError):
modin_df.dropna(axis=1, subset=[4, 5])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
col_len = len(modin_df.columns)
# Test list input
arr = np.arange(col_len)
modin_result = modin_df.dot(arr)
pandas_result = pandas_df.dot(arr)
df_equals(modin_result, pandas_result)
# Test bad dimensions
with pytest.raises(ValueError):
modin_result = modin_df.dot(np.arange(col_len + 10))
# Test series input
modin_series = pd.Series(np.arange(col_len), index=modin_df.columns)
pandas_series = pandas.Series(np.arange(col_len), index=modin_df.columns)
modin_result = modin_df.dot(modin_series)
pandas_result = pandas_df.dot(pandas_series)
df_equals(modin_result, pandas_result)
# Test when input series index doesn't line up with columns
with pytest.raises(ValueError):
modin_result = modin_df.dot(pd.Series(np.arange(col_len)))
with pytest.warns(UserWarning):
modin_df.dot(modin_df.T)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
def test_duplicated(self, data, keep):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.duplicated(keep=keep)
modin_result = modin_df.duplicated(keep=keep)
df_equals(modin_result, pandas_result)
import random
subset = random.sample(
list(pandas_df.columns), random.randint(1, len(pandas_df.columns))
)
pandas_result = pandas_df.duplicated(keep=keep, subset=subset)
modin_result = modin_df.duplicated(keep=keep, subset=subset)
df_equals(modin_result, pandas_result)
def test_empty_df(self):
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
df = pd.DataFrame(index=["a", "b"])
df_is_empty(df)
tm.assert_index_equal(df.index, pd.Index(["a", "b"]))
assert len(df.columns) == 0
df = pd.DataFrame(columns=["a", "b"])
df_is_empty(df)
assert len(df.index) == 0
tm.assert_index_equal(df.columns, pd.Index(["a", "b"]))
df = pd.DataFrame()
df_is_empty(df)
assert len(df.index) == 0
assert len(df.columns) == 0
def test_equals(self):
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 4, 1]}
modin_df1 = pd.DataFrame(frame_data)
modin_df2 = pd.DataFrame(frame_data)
assert modin_df1.equals(modin_df2)
df_equals(modin_df1, modin_df2)
df_equals(modin_df1, pd.DataFrame(modin_df1))
frame_data = {"col1": [2.9, 3, 3, 3], "col2": [2, 3, 5, 1]}
modin_df3 = pd.DataFrame(frame_data, index=list("abcd"))
assert not modin_df1.equals(modin_df3)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df1)
with pytest.raises(AssertionError):
df_equals(modin_df3, modin_df2)
assert modin_df1.equals(modin_df2._query_compiler.to_pandas())
def test_eval_df_use_case(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
# test eval for series results
tmp_pandas = df.eval("arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"arctan2(sin(a), b)", engine="python", parser="pandas"
)
assert isinstance(tmp_modin, pd.Series)
df_equals(tmp_modin, tmp_pandas)
# Test not inplace assignments
tmp_pandas = df.eval("e = arctan2(sin(a), b)", engine="python", parser="pandas")
tmp_modin = modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas"
)
df_equals(tmp_modin, tmp_pandas)
# Test inplace assignments
df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
modin_df.eval(
"e = arctan2(sin(a), b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_eval_df_arithmetic_subexpression(self):
frame_data = {"a": random_state.randn(10), "b": random_state.randn(10)}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df.eval("not_e = sin(a + b)", engine="python", parser="pandas", inplace=True)
modin_df.eval(
"not_e = sin(a + b)", engine="python", parser="pandas", inplace=True
)
# TODO: Use a series equality validator.
df_equals(modin_df, df)
def test_ewm(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.ewm(com=0.5).mean()
def test_expanding(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).expanding()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_explode(self, data):
modin_df = pd.DataFrame(data)
with pytest.warns(UserWarning):
modin_df.explode(modin_df.columns[0])
def test_ffill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.ffill(), test_data.tsframe.ffill())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"method",
["backfill", "bfill", "pad", "ffill", None],
ids=["backfill", "bfill", "pad", "ffill", "None"],
)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("limit", int_arg_values, ids=int_arg_keys)
def test_fillna(self, data, method, axis, limit):
# We are not testing when limit is not positive until pandas-27042 gets fixed.
# We are not testing when axis is over rows until pandas-17399 gets fixed.
if limit > 0 and axis != 1 and axis != "columns":
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.fillna(
0, method=method, axis=axis, limit=limit
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.fillna(0, method=method, axis=axis, limit=limit)
else:
modin_result = modin_df.fillna(0, method=method, axis=axis, limit=limit)
df_equals(modin_result, pandas_result)
def test_fillna_sanity(self):
test_data = TestData()
tf = test_data.tsframe
tf.loc[tf.index[:5], "A"] = np.nan
tf.loc[tf.index[-5:], "A"] = np.nan
zero_filled = test_data.tsframe.fillna(0)
modin_df = pd.DataFrame(test_data.tsframe).fillna(0)
df_equals(modin_df, zero_filled)
padded = test_data.tsframe.fillna(method="pad")
modin_df = pd.DataFrame(test_data.tsframe).fillna(method="pad")
df_equals(modin_df, padded)
# mixed type
mf = test_data.mixed_frame
mf.loc[mf.index[5:20], "foo"] = np.nan
mf.loc[mf.index[-10:], "A"] = np.nan
result = test_data.mixed_frame.fillna(value=0)
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(value=0)
df_equals(modin_df, result)
result = test_data.mixed_frame.fillna(method="pad")
modin_df = pd.DataFrame(test_data.mixed_frame).fillna(method="pad")
df_equals(modin_df, result)
pytest.raises(ValueError, test_data.tsframe.fillna)
pytest.raises(ValueError, pd.DataFrame(test_data.tsframe).fillna)
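# Supplying both a fill value and a fill method is invalid.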
with pytest.raises(ValueError):
pd.DataFrame(test_data.tsframe).fillna(5, method="ffill")
# mixed numeric (but no float16)
mf = test_data.mixed_float.reindex(columns=["A", "B", "D"])
mf.loc[mf.index[-10:], "A"] = np.nan
result = mf.fillna(value=0)
modin_df = pd.DataFrame(mf).fillna(value=0)
df_equals(modin_df, result)
result = mf.fillna(method="pad")
modin_df = pd.DataFrame(mf).fillna(method="pad")
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# empty frame
# df = DataFrame(columns=['x'])
# for m in ['pad', 'backfill']:
# df.x.fillna(method=m, inplace=True)
# df.x.fillna(method=m)
# with different dtype
frame_data = [
["a", "a", np.nan, "a"],
["b", "b", np.nan, "b"],
["c", "c", np.nan, "c"],
]
df = pandas.DataFrame(frame_data)
result = df.fillna({2: "foo"})
modin_df = pd.DataFrame(frame_data).fillna({2: "foo"})
df_equals(modin_df, result)
modin_df = pd.DataFrame(df)
df.fillna({2: "foo"}, inplace=True)
modin_df.fillna({2: "foo"}, inplace=True)
df_equals(modin_df, result)
frame_data = {
"Date": [pandas.NaT, pandas.Timestamp("2014-1-1")],
"Date2": [pandas.Timestamp("2013-1-1"), pandas.NaT],
}
df = pandas.DataFrame(frame_data)
result = df.fillna(value={"Date": df["Date2"]})
modin_df = pd.DataFrame(frame_data).fillna(value={"Date": df["Date2"]})
df_equals(modin_df, result)
# TODO: Use this when Arrow issue resolves:
# (https://issues.apache.org/jira/browse/ARROW-2122)
# with timezone
"""
frame_data = {'A': [pandas.Timestamp('2012-11-11 00:00:00+01:00'),
pandas.NaT]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna(method='pad'), df.fillna(method='pad'))
frame_data = {'A': [pandas.NaT,
pandas.Timestamp('2012-11-11 00:00:00+01:00')]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data).fillna(method='bfill')
df_equals(modin_df, df.fillna(method='bfill'))
"""
def test_fillna_downcast(self):
# infer int64 from float64
frame_data = {"a": [1.0, np.nan]}
df = pandas.DataFrame(frame_data)
result = df.fillna(0, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna(0, downcast="infer")
df_equals(modin_df, result)
# infer int64 from float64 when fillna value is a dict
df = pandas.DataFrame(frame_data)
result = df.fillna({"a": 0}, downcast="infer")
modin_df = pd.DataFrame(frame_data).fillna({"a": 0}, downcast="infer")
df_equals(modin_df, result)
def test_ffill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="ffill"), test_data.tsframe.fillna(method="ffill")
)
def test_bfill2(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(
modin_df.fillna(method="bfill"), test_data.tsframe.fillna(method="bfill")
)
def test_fillna_inplace(self):
frame_data = random_state.randn(10, 4)
df = pandas.DataFrame(frame_data)
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(value=0, inplace=True)
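# Only the pandas frame was filled in place, so the frames must differ until the
# same fillna is applied to the Modin copy below (the ffill case repeats this pattern).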
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(value=0, inplace=True)
df_equals(modin_df, df)
modin_df = pd.DataFrame(df).fillna(value={0: 0}, inplace=True)
assert modin_df is None
df[1][:4] = np.nan
df[3][-4:] = np.nan
modin_df = pd.DataFrame(df)
df.fillna(method="ffill", inplace=True)
try:
df_equals(modin_df, df)
except AssertionError:
pass
else:
assert False
modin_df.fillna(method="ffill", inplace=True)
df_equals(modin_df, df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_fillna_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_frame_pad_backfill_limit(self, data):
pandas_df = pandas.DataFrame(data)
index = pandas_df.index
result = pandas_df[:2].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="pad", limit=2), result.fillna(method="pad", limit=2)
)
result = pandas_df[-2:].reindex(index)
modin_df = pd.DataFrame(result)
df_equals(
modin_df.fillna(method="backfill", limit=2),
result.fillna(method="backfill", limit=2),
)
def test_fillna_dtype_conversion(self):
# make sure that fillna on an empty frame works
df = pandas.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
modin_df = pd.DataFrame(index=range(3), columns=["A", "B"], dtype="float64")
df_equals(modin_df.fillna("nan"), df.fillna("nan"))
frame_data = {"A": [1, np.nan], "B": [1.0, 2.0]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
for v in ["", 1, np.nan, 1.0]:
df_equals(modin_df.fillna(v), df.fillna(v))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_skip_certain_blocks(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# don't try to fill boolean, int blocks
df_equals(modin_df.fillna(np.nan), pandas_df.fillna(np.nan))
def test_fillna_dict_series(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.fillna({"a": 0, "b": 5}), df.fillna({"a": 0, "b": 5}))
df_equals(
modin_df.fillna({"a": 0, "b": 5, "d": 7}),
df.fillna({"a": 0, "b": 5, "d": 7}),
)
# Series treated same as dict
df_equals(modin_df.fillna(modin_df.max()), df.fillna(df.max()))
def test_fillna_dataframe(self):
frame_data = {
"a": [np.nan, 1, 2, np.nan, np.nan],
"b": [1, 2, 3, np.nan, np.nan],
"c": [np.nan, 1, 2, 3, 4],
}
df = pandas.DataFrame(frame_data, index=list("VWXYZ"))
modin_df = pd.DataFrame(frame_data, index=list("VWXYZ"))
# df2 may have different index and columns
df2 = pandas.DataFrame(
{
"a": [np.nan, 10, 20, 30, 40],
"b": [50, 60, 70, 80, 90],
"foo": ["bar"] * 5,
},
index=list("VWXuZ"),
)
modin_df2 = pd.DataFrame(df2)
# only those columns and indices which are shared get filled
df_equals(modin_df.fillna(modin_df2), df.fillna(df2))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_columns(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
df_equals(
modin_df.fillna(method="ffill", axis=1),
pandas_df.fillna(method="ffill", axis=1),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_invalid_method(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with tm.assert_raises_regex(ValueError, "ffil"):
modin_df.fillna(method="ffil")
def test_fillna_invalid_value(self):
test_data = TestData()
modin_df = pd.DataFrame(test_data.frame)
# list
pytest.raises(TypeError, modin_df.fillna, [1, 2])
# tuple
pytest.raises(TypeError, modin_df.fillna, (1, 2))
# frame with series
pytest.raises(TypeError, modin_df.iloc[:, 0].fillna, modin_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_fillna_col_reordering(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.fillna(method="ffill"), pandas_df.fillna(method="ffill"))
"""
TODO: Use this when Arrow issue resolves:
(https://issues.apache.org/jira/browse/ARROW-2122)
def test_fillna_datetime_columns(self):
frame_data = {'A': [-1, -2, np.nan],
'B': date_range('20130101', periods=3),
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
frame_data = {'A': [-1, -2, np.nan],
'B': [pandas.Timestamp('2013-01-01'),
pandas.Timestamp('2013-01-02'), pandas.NaT],
'C': ['foo', 'bar', None],
'D': ['foo2', 'bar2', None]}
df = pandas.DataFrame(frame_data, index=date_range('20130110', periods=3))
modin_df = pd.DataFrame(frame_data, index=date_range('20130110', periods=3))
df_equals(modin_df.fillna('?'), df.fillna('?'))
"""
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_filter(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
by = {"items": ["col1", "col5"], "regex": "4$|3$", "like": "col"}
df_equals(
modin_df.filter(items=by["items"]), pandas_df.filter(items=by["items"])
)
df_equals(
modin_df.filter(regex=by["regex"], axis=0),
pandas_df.filter(regex=by["regex"], axis=0),
)
df_equals(
modin_df.filter(regex=by["regex"], axis=1),
pandas_df.filter(regex=by["regex"], axis=1),
)
df_equals(modin_df.filter(like=by["like"]), pandas_df.filter(like=by["like"]))
with pytest.raises(TypeError):
modin_df.filter(items=by["items"], regex=by["regex"])
with pytest.raises(TypeError):
modin_df.filter()
def test_first(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.first("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_dict(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_dict(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_items(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_items(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_from_records(self, data):
modin_df = pd.DataFrame(data) # noqa F841
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
pd.DataFrame.from_records(None)
def test_get_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_value(0, "col1")
def test_get_values(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).get_values()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(self, data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
def test_hist(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).hist(None)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmax(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
pandas_result = pandas_df.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
pandas_result = pandas_df.T.idxmax(axis=axis, skipna=skipna)
modin_result = modin_df.T.idxmax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_idxmin(self, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.idxmin(axis=axis, skipna=skipna)
pandas_result = pandas_df.T.idxmin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
def test_infer_objects(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).infer_objects()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
def test_info(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).info(memory_usage="deep")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("loc", int_arg_values, ids=arg_keys("loc", int_arg_keys))
def test_insert(self, data, loc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df = modin_df.copy()
pandas_df = pandas_df.copy()
column = "New Column"
value = modin_df.iloc[:, 0]
try:
pandas_df.insert(loc, column, value)
except Exception as e:
with pytest.raises(type(e)):
modin_df.insert(loc, column, value)
else:
modin_df.insert(loc, column, value)
df_equals(modin_df, pandas_df)
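# Inserting a whole multi-column DataFrame as a single column should fail.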
with pytest.raises(ValueError):
modin_df.insert(0, "Bad Column", modin_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Duplicate", modin_df[modin_df.columns[0]])
pandas_df.insert(0, "Duplicate", pandas_df[pandas_df.columns[0]])
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.insert(0, "Scalar", 100)
pandas_df.insert(0, "Scalar", 100)
df_equals(modin_df, pandas_df)
with pytest.raises(ValueError):
modin_df.insert(0, "Too Short", list(modin_df[modin_df.columns[0]])[:-1])
with pytest.raises(ValueError):
modin_df.insert(0, modin_df.columns[0], modin_df[modin_df.columns[0]])
with pytest.raises(IndexError):
modin_df.insert(len(modin_df.columns) + 100, "Bad Loc", 100)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
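# insert() operates in place and returns None, so these df_equals calls only verify
# that Modin's return value matches pandas' when inserting into an empty frame.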
modin_result = pd.DataFrame(columns=list("ab")).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(columns=list("ab")).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = pd.DataFrame(index=modin_df.index).insert(
0, modin_df.columns[0], modin_df[modin_df.columns[0]]
)
pandas_result = pandas.DataFrame(index=pandas_df.index).insert(
0, pandas_df.columns[0], pandas_df[pandas_df.columns[0]]
)
df_equals(modin_result, pandas_result)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.insert(
0, "DataFrame insert", modin_df[[modin_df.columns[0]]]
)
pandas_result = pandas_df.insert(
0, "DataFrame insert", pandas_df[[pandas_df.columns[0]]]
)
df_equals(modin_result, pandas_result)
def test_interpolate(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).interpolate()
def test_is_copy(self):
data = test_data_values[0]
with pytest.warns(FutureWarning):
assert pd.DataFrame(data).is_copy == pandas.DataFrame(data).is_copy
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_items(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.items()
pandas_items = pandas_df.items()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iteritems(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_items = modin_df.iteritems()
pandas_items = pandas_df.iteritems()
for modin_item, pandas_item in zip(modin_items, pandas_items):
modin_index, modin_series = modin_item
pandas_index, pandas_series = pandas_item
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iterrows(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_iterrows = modin_df.iterrows()
pandas_iterrows = pandas_df.iterrows()
for modin_row, pandas_row in zip(modin_iterrows, pandas_iterrows):
modin_index, modin_series = modin_row
pandas_index, pandas_series = pandas_row
df_equals(pandas_series, modin_series)
assert pandas_index == modin_index
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_itertuples(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# test default
modin_it_default = modin_df.itertuples()
pandas_it_default = pandas_df.itertuples()
for modin_row, pandas_row in zip(modin_it_default, pandas_it_default):
np.testing.assert_equal(modin_row, pandas_row)
# test all combinations of custom params
indices = [True, False]
names = [None, "NotPandas", "Pandas"]
for index in indices:
for name in names:
modin_it_custom = modin_df.itertuples(index=index, name=name)
pandas_it_custom = pandas_df.itertuples(index=index, name=name)
for modin_row, pandas_row in zip(modin_it_custom, pandas_it_custom):
np.testing.assert_equal(modin_row, pandas_row)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.ix()
def test_join(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col5": [0], "col6": [1]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["left", "right", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join(modin_df2, how=how)
pandas_join = pandas_df.join(pandas_df2, how=how)
df_equals(modin_join, pandas_join)
frame_data3 = {"col7": [1, 2, 3, 5, 6, 7, 8]}
modin_df3 = pd.DataFrame(frame_data3)
pandas_df3 = pandas.DataFrame(frame_data3)
join_types = ["left", "outer", "inner"]
for how in join_types:
modin_join = modin_df.join([modin_df2, modin_df3], how=how)
pandas_join = pandas_df.join([pandas_df2, pandas_df3], how=how)
df_equals(modin_join, pandas_join)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
def test_kurt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurt()
def test_kurtosis(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).kurtosis()
def test_last(self):
i = pd.date_range("2018-04-09", periods=4, freq="2D")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.last("3D")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_last_valid_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.last_valid_index() == (pandas_df.last_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
assert modin_df.loc[0, key1] == pandas_df.loc[0, key1]
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [
True if i % 3 == 0 else False for i in range(len(modin_df.index))
]
columns = [
True if i % 5 == 0 else False for i in range(len(modin_df.columns))
]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
def test_loc_multi_index(self):
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(
modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"]
)
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert (
modin_df.loc[("bar", "one"), "col1"]
== pandas_df.loc[("bar", "one"), "col1"]
)
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
def test_lookup(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).lookup([0, 1], ["col1", "col2"])
def test_mad(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).mad()
def test_mask(self):
df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=["A", "B"])
m = df % 3 == 0
with pytest.warns(UserWarning):
try:
df.mask(~m, -df)
except ValueError:
pass
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_max(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.max(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.max(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mean(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.mean(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.mean(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_median(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.median(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.median(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
class TestDFPartTwo:
def test_melt(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).melt()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"index", bool_arg_values, ids=arg_keys("index", bool_arg_keys)
)
def test_memory_usage(self, data, index):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.memory_usage(index=index)
pandas_result = pandas_df.memory_usage(index=index)
df_equals(modin_result, pandas_result)
def test_merge(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 0, 1],
"col4": [2, 4, 5, 6],
}
modin_df = pd.DataFrame(frame_data)
pandas_df = pandas.DataFrame(frame_data)
frame_data2 = {"col1": [0, 1, 2], "col2": [1, 5, 6]}
modin_df2 = pd.DataFrame(frame_data2)
pandas_df2 = pandas.DataFrame(frame_data2)
join_types = ["outer", "inner"]
for how in join_types:
# Defaults
modin_result = modin_df.merge(modin_df2, how=how)
pandas_result = pandas_df.merge(pandas_df2, how=how)
df_equals(modin_result, pandas_result)
# left_on and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_index=True
)
df_equals(modin_result, pandas_result)
# left_index and right_on
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col1
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col1", right_on="col1"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col1", right_on="col1"
)
df_equals(modin_result, pandas_result)
# left_on and right_on col2
modin_result = modin_df.merge(
modin_df2, how=how, left_on="col2", right_on="col2"
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_on="col2", right_on="col2"
)
df_equals(modin_result, pandas_result)
# left_index and right_index
modin_result = modin_df.merge(
modin_df2, how=how, left_index=True, right_index=True
)
pandas_result = pandas_df.merge(
pandas_df2, how=how, left_index=True, right_index=True
)
df_equals(modin_result, pandas_result)
# Named Series promoted to DF
s = pd.Series(frame_data2.get("col1"))
with pytest.raises(ValueError):
modin_df.merge(s)
s = pd.Series(frame_data2.get("col1"), name="col1")
df_equals(modin_df.merge(s), modin_df.merge(modin_df2[["col1"]]))
with pytest.raises(ValueError):
modin_df.merge("Non-valid type")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_min(self, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.min(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.min(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_mode(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.mode(axis=axis, numeric_only=numeric_only)
except Exception:
with pytest.raises(TypeError):
modin_df.mode(axis=axis, numeric_only=numeric_only)
else:
modin_result = modin_df.mode(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ndim(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.ndim == pandas_df.ndim
def test_nlargest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nlargest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notna(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notna(), pandas_df.notna())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_notnull(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.notnull(), pandas_df.notnull())
def test_nsmallest(self):
df = pd.DataFrame(
{
"population": [
59000000,
65000000,
434000,
434000,
434000,
337000,
11300,
11300,
11300,
],
"GDP": [1937894, 2583560, 12011, 4520, 12128, 17036, 182, 38, 311],
"alpha-2": ["IT", "FR", "MT", "MV", "BN", "IS", "NR", "TV", "AI"],
},
index=[
"Italy",
"France",
"Malta",
"Maldives",
"Brunei",
"Iceland",
"Nauru",
"Tuvalu",
"Anguilla",
],
)
with pytest.warns(UserWarning):
df.nsmallest(3, "population")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"dropna", bool_arg_values, ids=arg_keys("dropna", bool_arg_keys)
)
def test_nunique(self, data, axis, dropna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.nunique(axis=axis, dropna=dropna)
pandas_result = pandas_df.T.nunique(axis=axis, dropna=dropna)
df_equals(modin_result, pandas_result)
def test_pct_change(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).pct_change()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pipe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
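# Derive arguments that exist in the frame regardless of its size.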
n = len(modin_df.index)
a, b, c = 2 % n, 0, 3 % n
col = modin_df.columns[3 % len(modin_df.columns)]
def h(x):
return x.drop(columns=[col])
def g(x, arg1=0):
for _ in range(arg1):
x = x.append(x)
return x
def f(x, arg2=0, arg3=0):
return x.drop([arg2, arg3])
df_equals(
f(g(h(modin_df), arg1=a), arg2=b, arg3=c),
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
df_equals(
(modin_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
(pandas_df.pipe(h).pipe(g, arg1=a).pipe(f, arg2=b, arg3=c)),
)
def test_pivot(self):
df = pd.DataFrame(
{
"foo": ["one", "one", "one", "two", "two", "two"],
"bar": ["A", "B", "C", "A", "B", "C"],
"baz": [1, 2, 3, 4, 5, 6],
"zoo": ["x", "y", "z", "q", "w", "t"],
}
)
with pytest.warns(UserWarning):
df.pivot(index="foo", columns="bar", values="baz")
def test_pivot_table(self):
df = pd.DataFrame(
{
"A": ["foo", "foo", "foo", "foo", "foo", "bar", "bar", "bar", "bar"],
"B": ["one", "one", "one", "two", "two", "one", "one", "two", "two"],
"C": [
"small",
"large",
"large",
"small",
"small",
"large",
"small",
"small",
"large",
],
"D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
"E": [2, 4, 5, 5, 6, 6, 8, 9, 9],
}
)
with pytest.warns(UserWarning):
df.pivot_table(values="D", index=["A", "B"], columns=["C"], aggfunc=np.sum)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_plot(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
# We have to compare the plotted data directly because plot equality would mean object identity.
zipped_plot_lines = zip(modin_df.plot().lines, pandas_df.plot().lines)
for l, r in zipped_plot_lines:
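# matplotlib may return MaskedArrays (e.g. when the data contains NaNs), in which
# case we compare the underlying data buffers.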
if isinstance(l.get_xdata(), np.ma.core.MaskedArray) and isinstance(
r.get_xdata(), np.ma.core.MaskedArray
):
assert all((l.get_xdata() == r.get_xdata()).data)
else:
assert np.array_equal(l.get_xdata(), r.get_xdata())
if isinstance(l.get_ydata(), np.ma.core.MaskedArray) and isinstance(
r.get_ydata(), np.ma.core.MaskedArray
):
assert all((l.get_ydata() == r.get_ydata()).data)
else:
assert np.array_equal(l.get_ydata(), r.get_ydata())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_prod(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.prod(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.T.prod(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"min_count", int_arg_values, ids=arg_keys("min_count", int_arg_keys)
)
def test_product(self, request, data, axis, skipna, numeric_only, min_count):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
except Exception:
with pytest.raises(TypeError):
modin_df.product(
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
else:
modin_result = modin_df.product(
axis=axis, skipna=skipna, numeric_only=numeric_only, min_count=min_count
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("q", quantiles_values, ids=quantiles_keys)
def test_quantile(self, request, data, q):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.quantile(q), pandas_df.quantile(q))
df_equals(modin_df.quantile(q, axis=1), pandas_df.quantile(q, axis=1))
try:
pandas_result = pandas_df.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.quantile(q)
if not name_contains(request.node.name, no_numeric_dfs):
df_equals(modin_df.T.quantile(q), pandas_df.T.quantile(q))
df_equals(modin_df.T.quantile(q, axis=1), pandas_df.T.quantile(q, axis=1))
try:
pandas_result = pandas_df.T.quantile(q, axis=1, numeric_only=False)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.quantile(q, axis=1, numeric_only=False)
else:
modin_result = modin_df.T.quantile(q, axis=1, numeric_only=False)
df_equals(modin_result, pandas_result)
else:
with pytest.raises(ValueError):
modin_df.T.quantile(q)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("funcs", query_func_values, ids=query_func_keys)
def test_query(self, data, funcs):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.query("")
with pytest.raises(NotImplementedError):
x = 2 # noqa F841
modin_df.query("col1 < @x")
try:
pandas_result = pandas_df.query(funcs)
except Exception as e:
with pytest.raises(type(e)):
modin_df.query(funcs)
else:
modin_result = modin_df.query(funcs)
df_equals(modin_result, pandas_result)
def test_query_after_insert(self):
modin_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
modin_df["z"] = modin_df.eval("x / y")
modin_df = modin_df.query("z >= 0")
modin_result = modin_df.reset_index(drop=True)
modin_result.columns = ["a", "b", "c"]
pandas_df = pd.DataFrame({"x": [-1, 0, 1, None], "y": [1, 2, None, 3]})
pandas_df["z"] = pandas_df.eval("x / y")
pandas_df = pandas_df.query("z >= 0")
pandas_result = pandas_df.reset_index(drop=True)
pandas_result.columns = ["a", "b", "c"]
df_equals(modin_result, pandas_result)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
@pytest.mark.parametrize(
"na_option", ["keep", "top", "bottom"], ids=["keep", "top", "bottom"]
)
def test_rank(self, data, axis, numeric_only, na_option):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.rank(axis=axis, numeric_only=numeric_only, na_option=na_option)
else:
modin_result = modin_df.rank(
axis=axis, numeric_only=numeric_only, na_option=na_option
)
df_equals(modin_result, pandas_result)
def test_reindex(self):
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(
index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]
),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like(self):
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity(self):
test_data = TestData()
mapping = {"A": "a", "B": "b", "C": "c", "D": "d"}
modin_df = pd.DataFrame(test_data.frame)
df_equals(
modin_df.rename(columns=mapping), test_data.frame.rename(columns=mapping)
)
renamed2 = test_data.frame.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(
modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper)
)
# index
data = {"A": {"foo": 0, "bar": 1}}
        # gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
tm.assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
tm.assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = test_data.frame.rename(columns={"C": "foo", "D": "bar"})
modin_df = pd.DataFrame(test_data.frame)
tm.assert_index_equal(
modin_df.rename(columns={"C": "foo", "D": "bar"}).index,
test_data.frame.rename(columns={"C": "foo", "D": "bar"}).index,
)
# TODO: Uncomment when transpose works
# other axis
# renamed = test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'})
# tm.assert_index_equal(
# test_data.frame.T.rename(index={'C': 'foo', 'D': 'bar'}).index,
# modin_df.T.rename(index={'C': 'foo', 'D': 'bar'}).index)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
tm.assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex(self):
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
        # without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
tm.assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
tm.assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.skip(reason="Pandas does not pass this test")
def test_rename_nocopy(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
modin_renamed = modin_df.rename(columns={"C": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["C"] == 1).all()
def test_rename_inplace(self):
test_data = TestData().frame
modin_df = pd.DataFrame(test_data)
df_equals(
modin_df.rename(columns={"C": "foo"}),
test_data.rename(columns={"C": "foo"}),
)
frame = test_data.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"C": "foo"}, inplace=True)
modin_frame.rename(columns={"C": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug(self):
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# df = df.set_index(['a', 'b'])
# df.columns = ['2001-01-01']
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
# TODO: Uncomment when set_index is implemented
# modin_df = modin_df.set_index(['a', 'b'])
# modin_df.columns = ['2001-01-01']
df_equals(modin_df, df)
def test_rename_axis(self):
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(
columns=[str.upper(o) for o in modin_df.columns.names]
),
pandas_df.rename_axis(
columns=[str.upper(o) for o in pandas_df.columns.names]
),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace(self):
test_frame = TestData().frame
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels(self):
df = pd.DataFrame(
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
)
)
df["Value"] = np.random.randint(1, 100, len(df))
with pytest.warns(UserWarning):
df.reorder_levels(["Letter", "Color", "Number"])
def test_replace(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).replace()
def test_resample(self):
d = dict(
{
"price": [10, 11, 9, 13, 14, 18, 17, 19],
"volume": [50, 60, 40, 100, 50, 100, 40, 50],
}
)
df = pd.DataFrame(d)
df["week_starting"] = pd.date_range("01/01/2018", periods=8, freq="W")
with pytest.warns(UserWarning):
df.resample("M", on="week_starting")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
def test_rolling(self):
df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
with pytest.warns(UserWarning):
df.rolling(2, win_type="triang")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_round(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.round(), pandas_df.round())
df_equals(modin_df.round(1), pandas_df.round(1))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes(self):
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
try:
pd.DataFrame().select_dtypes()
assert False
except ValueError:
assert True
def test_sem(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).sem()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_set_axis(self, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
x = pandas.DataFrame()._get_axis_number(axis)
index = modin_df.columns if x else modin_df.index
labels = ["{0}_{1}".format(index[i], i) for i in range(modin_df.shape[x])]
modin_result = modin_df.set_axis(labels, axis=axis, inplace=False)
pandas_result = pandas_df.set_axis(labels, axis=axis, inplace=False)
df_equals(modin_result, pandas_result)
with pytest.warns(FutureWarning):
modin_df.set_axis(axis, labels, inplace=False)
modin_df_copy = modin_df.copy()
modin_df.set_axis(labels, axis=axis, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_axis(labels, axis=axis, inplace=True)
df_equals(modin_df, pandas_df)
with pytest.warns(FutureWarning):
modin_df.set_axis(labels, axis=axis, inplace=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"drop", bool_arg_values, ids=arg_keys("drop", bool_arg_keys)
)
@pytest.mark.parametrize(
"append", bool_arg_values, ids=arg_keys("append", bool_arg_keys)
)
def test_set_index(self, request, data, drop, append):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.set_index(
key, drop=drop, append=append, inplace=False
)
pandas_result = pandas_df.set_index(
key, drop=drop, append=append, inplace=False
)
df_equals(modin_result, pandas_result)
modin_df_copy = modin_df.copy()
modin_df.set_index(key, drop=drop, append=append, inplace=True)
# Check that the copy and original are different
try:
df_equals(modin_df, modin_df_copy)
except AssertionError:
assert True
else:
assert False
pandas_df.set_index(key, drop=drop, append=append, inplace=True)
df_equals(modin_df, pandas_df)
def test_set_value(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).set_value(0, 0, 0)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_shape(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.shape == pandas_df.shape
def test_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_size(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.size == pandas_df.size
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_skew(self, request, data, axis, skipna, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
except Exception:
with pytest.raises(TypeError):
modin_df.T.skew(axis=axis, skipna=skipna, numeric_only=numeric_only)
else:
modin_result = modin_df.T.skew(
axis=axis, skipna=skipna, numeric_only=numeric_only
)
df_equals(modin_result, pandas_result)
def test_slice_shift(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).slice_shift()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"ascending", bool_arg_values, ids=arg_keys("ascending", bool_arg_keys)
)
@pytest.mark.parametrize("na_position", ["first", "last"], ids=["first", "last"])
@pytest.mark.parametrize(
"sort_remaining", bool_arg_values, ids=arg_keys("sort_remaining", bool_arg_keys)
)
def test_sort_index(self, data, axis, ascending, na_position, sort_remaining):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
# this script is the same as the others, but has all of the functions, etc. in one .py script in case there are issues
# using the other versions. Primary use case for this would be debugging what is going on, or understanding
# the overall pipeline.
"""
Pipeline for Zero-shot transcription of a lecture video file to text using facebook's wav2vec2 model
This script is the 'single-file' edition
<NAME>
large model link / doc from host website (huggingface)
https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self
sections in this file:
- define model parameters (pretrained model)
- basic user inputs (vid file name / directory)
- convert video to audio chunks of duration X*
- pass all X audio chunks through the wav2vec2 model, store results in a list
- write all results of the list into a text file, store various runtime metrics
- pass created text file through a spell checker and autocorrect spelling. save as new file
- run basic keyword extraction from (via YAKE) on spell-corrected file, save in the same directory as other results
- cleanup tasks (delete the X .wav files created for audio transcription, etc), report runtime, and exit
* (where X is some duration that does not overload your computer or crash your IDE)
"""
import math
import os
import pprint as pp
import re
import shutil
import sys
import time
from datetime import datetime
from io import StringIO
from os import listdir
from os.path import basename, dirname, isfile, join
import GPUtil as GPU
import humanize
import librosa
import moviepy.editor as mp
import neuspell
import pandas as pd
import pkg_resources
import plotly.express as px
import psutil
import pysbd
import torch
import wordninja
import yake
from cleantext import clean
from natsort import natsorted
from symspellpy import SymSpell
from tqdm.auto import tqdm
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
# --------------------------------------------------------------------------
# Function Definitions
# --------------------------------------------------------------------------
# General Utilities
def corr(s):
# adds space after period if there isn't one
# removes extra spaces
return re.sub(r'\.(?! )', '. ', re.sub(r' +', ' ', s))
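    # Illustrative example (assumed input):
    #   corr("First sentence.Second  sentence") -> "First sentence. Second sentence"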
def shorten_title(title_text, max_no=20):
if len(title_text) < max_no:
return title_text
else:
return title_text[:max_no] + "..."
class NullIO(StringIO):
# used to redirect system output for things that print a lot to console
def write(self, txt):
pass
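    # Illustrative usage (an assumed pattern, mirroring the comment above):
    #   _orig_stdout = sys.stdout
    #   sys.stdout = NullIO()
    #   chatty_call()              # anything printed here is silently discarded
    #   sys.stdout = _orig_stdout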
def cleantxt_wrap(ugly_text):
# a wrapper for clean text with options different than default
# https://pypi.org/project/clean-text/
cleaned_text = clean(ugly_text,
fix_unicode=True, # fix various unicode errors
to_ascii=True, # transliterate to closest ASCII representation
lower=True, # lowercase text
no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them
no_urls=True, # replace all URLs with a special token
no_emails=True, # replace all email addresses with a special token
no_phone_numbers=True, # replace all phone numbers with a special token
no_numbers=False, # replace all numbers with a special token
no_digits=False, # replace all digits with a special token
no_currency_symbols=True, # replace all currency symbols with a special token
no_punct=True, # remove punctuations
replace_with_punct="", # instead of removing punctuations you may replace them
replace_with_url="<URL>",
replace_with_email="<EMAIL>",
replace_with_phone_number="<PHONE>",
replace_with_number="<NUM>",
replace_with_digit="0",
lang="en" # set to 'de' for German special handling
)
return cleaned_text
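    # Illustrative example (approximate output, given the options above):
    #   cleantxt_wrap("Visit https://Example.com NOW!!") -> roughly "visit <url> now"
    #   (URLs are replaced with a token, punctuation is stripped, and text is lowercased)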
def beautify_filename(filename, num_words=20, start_reverse=False,
word_separator="_"):
    # takes a filename stored as text, removes the extension, splits it into at most num_words words,
    # and returns a nice filename with the words separated by word_separator
    # useful for when you are reading files, doing things to them, and making new files
filename = str(filename)
index_file_Ext = filename.rfind('.')
current_name = str(filename)[:index_file_Ext] # get rid of extension
clean_name = cleantxt_wrap(current_name) # wrapper with custom defs
file_words = wordninja.split(clean_name)
# splits concatenated text into a list of words based on common word freq
if len(file_words) <= num_words:
num_words = len(file_words)
if start_reverse:
t_file_words = file_words[-num_words:]
else:
t_file_words = file_words[:num_words]
pretty_name = word_separator.join(t_file_words) # see function argument
# NOTE IT DOES NOT RETURN THE EXTENSION
return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1
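    # Illustrative example (assumed input, approximate output):
    #   beautify_filename("LectureNotesAudio.mp4") cleans the name, lets wordninja split it
    #   into ["lecture", "notes", "audio"], and joins them with word_separator; note that the
    #   final slice above drops the last character, which assumes a trailing space is present.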
def quick_keys(filename, filepath, max_ngrams=3, num_keywords=20, save_db=False,
verbose=False, txt_lang='en', ddup_thresh=0.3):
    # uses YAKE to quickly determine keywords in a text file. Saves keywords and YAKE score (0 means very important) in a pandas DataFrame
with open(join(filepath, filename), 'r', encoding="utf-8", errors='ignore') as file:
text = file.read()
custom_kw_extractor = yake.KeywordExtractor(lan=txt_lang, n=max_ngrams, dedupLim=ddup_thresh,
top=num_keywords, features=None)
yake_keywords = custom_kw_extractor.extract_keywords(text)
phrase_db = pd.DataFrame(yake_keywords)
if len(phrase_db) == 0:
print("warning - no phrases were able to be extracted... ")
return None
if verbose:
print("YAKE keywords are: \n", yake_keywords)
print("dataframe structure: \n")
pp.pprint(phrase_db.head())
phrase_db.columns = ['key_phrase', 'YAKE_score']
# add a column for how many words the phrases contain
yake_kw_len = []
yake_kw_freq = []
for entry in yake_keywords:
entry_wordcount = len(str(entry).split(" ")) - 1
yake_kw_len.append(entry_wordcount)
for index, row in phrase_db.iterrows():
search_term = row["key_phrase"]
entry_freq = text.count(str(search_term))
yake_kw_freq.append(entry_freq)
word_len_series = pd.Series(yake_kw_len, name='No. Words in Phrase')
word_freq_series = | pd.Series(yake_kw_freq, name='Phrase Freq. in Text') | pandas.Series |
# A DataFrame is a tabular data structure: it holds an ordered collection of columns, and each column can be of a different value type (numeric, string, boolean, etc.).
# A DataFrame has both a row index and a column index; it can be viewed as a dict of Series that share a common index.
# The data in a DataFrame is stored internally as one or more two-dimensional blocks (rather than as lists, dicts, or other one-dimensional structures).
'''
Although a DataFrame stores its data in a two-dimensional structure, it can still easily represent
higher-dimensional data (a tabular structure with a hierarchical index, which is a key ingredient
of many advanced data-handling features in pandas)
'''
# There are many ways to construct a DataFrame; the most common is to pass in a dict of equal-length lists or NumPy arrays
import pandas as pd
from pandas import DataFrame
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002, 2003],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9, 3.2]}
frame = pd.DataFrame(data)
print(frame)
'''
output:
pop state year
0 1.5 Ohio 2000
1 1.7 Ohio 2001
2 3.6 Ohio 2002
3 2.4 Nevada 2001
4 2.9 Nevada 2002
5 3.2 Nevada 2003
'''
# For a very large DataFrame, the head() method selects only the first five rows
print(frame.head())
'''
output:
pop state year
0 1.5 Ohio 2000
1 1.7 Ohio 2001
2 3.6 Ohio 2002
3 2.4 Nevada 2001
4 2.9 Nevada 2002
'''
# If a sequence of columns is specified, the DataFrame's columns will be arranged in that order
print(pd.DataFrame(data, columns=['year', 'state', 'pop']))
'''
output:
year state pop
0 2000 Ohio 1.5
1 2001 Ohio 1.7
2 2002 Ohio 3.6
3 2001 Nevada 2.4
4 2002 Nevada 2.9
5 2003 Nevada 3.2
'''
# If a passed column is not found in the data, it will show up as missing values in the result
frame2 = | pd.DataFrame(data, columns=['year', 'state', 'pop', 'debt'],
index=['one', 'two', 'three', 'four', 'five', 'six']) | pandas.DataFrame |
from flask import Flask, render_template, request, jsonify
from static import Model
import json
import pandas as pd
DATA_PATH = "static/data/exam_data.csv"
FEATURES = ["sleep", "study"]
TARGET = "exam_score"
TRAIN_DATA = pd.read_csv(DATA_PATH)
mod = Model()
mod.initialize(TRAIN_DATA, FEATURES, TARGET)
app = Flask(__name__)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/predict/<string:modtype>', methods=['POST'])
def predict(modtype):
input_dict = request.get_json()
if not isinstance(input_dict[FEATURES[0]], list):
input_df = | pd.DataFrame(input_dict, index=[0]) | pandas.DataFrame |
import os
import numpy as np
import pandas as pd
from pathlib import Path
from sklearn import preprocessing
from sklearn.externals import joblib
from sklearn.preprocessing import OneHotEncoder
import librosa
class FeatureProcessor:
def __init__(self, data_dir='', output_dir='', acc_only=True, is_eval=False, silent=False):
self._data_dir = data_dir
self._output_dir = output_dir
self._acc_only = acc_only
self._is_eval = is_eval
self._fs = 4000
self._vfss_fs = 60
self._hop_len_s = 0.1
self._hop_len = int(self._fs * self._hop_len_s)
self._frame_res = self._fs / float(self._hop_len)
self._vfss_res = 1 / float(self._vfss_fs)
self._nb_frames_1s = int(self._frame_res)
self._win_len = 2 * self._hop_len
self._nfft = self._next_greater_power_of_2(self._win_len)
self._nb_channels = 3
self._unique_classes = 2
self._enc = OneHotEncoder(handle_unknown='ignore')
self._enc.fit([[0],[1]])
self._silent = silent
@staticmethod
def _next_greater_power_of_2(x):
return 2 ** (x - 1).bit_length()
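        # e.g. _next_greater_power_of_2(800) -> 1024 and _next_greater_power_of_2(1024) -> 1024,
        # so self._nfft above is the smallest power of two that is at least the window length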
def _load_signal(self, sigf_path):
sig_file = np.load(sigf_path)
if self._acc_only:
sig_data = sig_file[:,1:4]
else:
sig_data = sig_file
return sig_data
def _spectrogram(self, signal_input):
_nb_ch = signal_input.shape[1]
nb_bins = self._nfft // 2
_nb_frames = int(np.ceil(signal_input.shape[0] / float(self._hop_len)))
spectra = np.zeros((_nb_frames, nb_bins, _nb_ch), dtype=complex)
for ch_cnt in range(_nb_ch):
stft_ch = librosa.core.stft(np.asfortranarray(signal_input[:, ch_cnt]), n_fft=self._nfft, hop_length=self._hop_len,
win_length=self._win_len, window='hann')
spectra[:, :, ch_cnt] = stft_ch[1:, :_nb_frames].T
return spectra, _nb_frames
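        # Shape note: spectra is (nb_frames, self._nfft // 2, nb_channels) - one complex STFT
        # frame per hop for each signal channel.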
def _extract_spectrogram_for_file(self, signal_filename):
signal_in = self._load_signal(os.path.join(self._data_dir, 'signals', signal_filename))
signal_spec, _nb_frames = self._spectrogram(signal_in)
np.save(os.path.join(self._output_dir, 'features', '{}.npy'.format(signal_filename.split('.')[0])), signal_spec.reshape(_nb_frames, -1))
return _nb_frames
def _read_label_file(self, label_filename):
label_file = {
            'participant': list(), 'file_num': list(), 'swallow_num': list(), 'PAs': list(),
            'multiple': list(), 'age': list(), 'sex': list(), 'race': list(), 'volume': list(),
            'viscosity': list(), 'utensil': list(), 'start': list(), 'end': list()
}
fid = open(os.path.join(self._data_dir, 'labels', label_filename), 'r')
next(fid)
for line in fid:
split_line = line.strip().split(',')
label_file['participant'].append(split_line[0])
label_file['file_num'].append(split_line[1])
label_file['swallow_num'].append(split_line[2])
label_file['PAs'].append(float(split_line[3]))
label_file['multiple'].append(split_line[4])
label_file['age'].append(float(split_line[5]))
label_file['sex'].append(split_line[6])
label_file['race'].append(split_line[7])
label_file['volume'].append(split_line[8])
label_file['viscosity'].append(split_line[9])
label_file['utensil'].append(split_line[10])
label_file['start'].append(int(np.floor(float(split_line[11])*self._vfss_res*self._frame_res)))
label_file['end'].append(int(np.ceil(float(split_line[12])*self._vfss_res*self._frame_res)))
fid.close()
return label_file
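        # Illustrative example (assumed numbers): with fs=4000 and hop_len=400 (10 frames/s) and
        # VFSS video at 60 fps, an annotation spanning VFSS frames 120-180 corresponds to
        # 2.0-3.0 s, i.e. spectrogram frames 20-30 after the conversion above.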
def _get_signal_labels(self, _label_file, _nb_frames):
swe_label = np.zeros((_nb_frames, 1))
for i, swe_start in enumerate(_label_file['start']):
start_frame = swe_start
end_frame = _nb_frames if _label_file['end'][i] > _nb_frames else _label_file['end'][i]
swe_label[start_frame:end_frame + 1, :] = 1
#swe_label = self._enc.transform(swe_label)
return swe_label#swe_label.toarray()
def _featlabel_extractor(self):
Path(os.path.join(self._output_dir, 'features')).mkdir(parents=True, exist_ok=True)
Path(os.path.join(self._output_dir, 'labels')).mkdir(parents=True, exist_ok=True)
df_slices = []
print('Extracting spectrograms into:{}\n'.format(os.path.join(self._output_dir, 'features')))
for file_cnt, file_name in enumerate(os.listdir(os.path.join(self._data_dir, 'signals'))):
if not self._silent:
print('{}: {}'.format(file_cnt, file_name))
fname = file_name.split('.')[0]
_nb_frames = self._extract_spectrogram_for_file('{}.npy'.format(fname))
label_file = self._read_label_file('{}.csv'.format(fname))
swe_label = self._get_signal_labels(label_file, _nb_frames)
np.save(os.path.join(self._output_dir, 'labels', '{}.npy'.format(fname)), swe_label)
df_slices.append([fname, _nb_frames])
data_records = | pd.DataFrame(df_slices,columns=['fname', 'nb_frames']) | pandas.DataFrame |
import pandas
class PositionInfo:
def __init__(self, position):
self.m_position = position
self.m_trades = {}
self.m_records = {}
def add_trade(self, trade):
"""
Log a trade for TCA.
"""
self.m_position.add_trade(trade)
self.m_trades.update({trade.time: trade})
def log_position_status(self, time):
"""
Log position status for PNL-analysis.
"""
record = {}
record.update({'NET_POSITION': self.m_position.m_net_position})
record.update({'NET_INVESTMENT': self.m_position.m_net_investment})
record.update({'REALIZED_PNL': self.m_position.m_realized_pnl})
record.update({'UNREALIZED_PNL': self.m_position.m_unrealized_pnl})
record.update({'TOTAL_PNL': self.m_position.m_total_pnl})
record.update({'TOTAL_COM': self.m_position.m_commissions})
self.m_records.update({time: record})
def generate_pnl_report(self, formate='frame'):
"""
Returns a PNL report either as a dictionary or as a Pandas DataFrame.
"""
if formate == 'frame':
report = | pandas.DataFrame.from_dict(self.m_records, orient='index') | pandas.DataFrame.from_dict |
import datetime as dt
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal, assert_frame_equal
import pytest
from solarforecastarbiter.datamodel import Observation
from solarforecastarbiter.validation import tasks, validator
from solarforecastarbiter.validation.quality_mapping import (
LATEST_VERSION_FLAG, DESCRIPTION_MASK_MAPPING,
DAILY_VALIDATION_FLAG)
@pytest.fixture()
def make_observation(single_site):
def f(variable):
return Observation(
name='test', variable=variable, interval_value_type='mean',
interval_length=pd.Timedelta('1hr'), interval_label='beginning',
site=single_site, uncertainty=0.1, observation_id='OBSID',
provider='Organization 1', extra_parameters='')
return f
@pytest.fixture()
def default_index(single_site):
return [pd.Timestamp('2019-01-01T08:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T09:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T10:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T11:00:00', tz=single_site.timezone),
pd.Timestamp('2019-01-01T13:00:00', tz=single_site.timezone)]
@pytest.fixture()
def daily_index(single_site):
out = pd.date_range(start='2019-01-01T08:00:00',
end='2019-01-01T19:00:00',
freq='1h',
tz=single_site.timezone)
return out.append(
pd.Index([pd.Timestamp('2019-01-02T09:00:00',
tz=single_site.timezone)]))
def test_validate_ghi(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi')
data = pd.Series([10, 1000, -100, 500, 300], index=default_index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series([1, 0, 0, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series([0, 1, 1, 0, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series([0, 1, 0, 1, 0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_validate_mostly_clear(mocker, make_observation):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_ghi_limits_QCRad',
'check_ghi_clearsky',
'detect_clearsky_ghi']]
obs = make_observation('ghi').replace(interval_length=pd.Timedelta('5min'))
index = pd.date_range(start='2019-04-01T11:00', freq='5min',
tz=obs.site.timezone, periods=11)
data = pd.Series([742, 749, 756, 763, 769, 774, 779, 784, 789, 793, 700],
index=index)
flags = tasks.validate_ghi(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'],
pd.Series(0, index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'],
pd.Series([1] * 10 + [0], index=data.index) *
DESCRIPTION_MASK_MAPPING['CLEARSKY'])
for flag, exp in zip(flags, expected):
assert_series_equal(flag, exp | LATEST_VERSION_FLAG,
check_names=False)
def test_apply_immediate_validation(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
def test_apply_immediate_validation_already_validated(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 18), (100, 18), (200, 18), (-1, 19), (1500, 18)],
index=default_index,
columns=['value', 'quality_flag'])
val = tasks.apply_immediate_validation(obs, data)
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert_frame_equal(val, out)
@pytest.mark.parametrize('var', ['air_temperature', 'wind_speed', 'dni', 'dhi',
'poa_global', 'relative_humidity'])
def test_apply_immediate_validation_other(
mocker, make_observation, default_index, var):
mock = mocker.MagicMock()
mocker.patch.dict(
'solarforecastarbiter.validation.tasks.IMMEDIATE_VALIDATION_FUNCS',
{var: mock})
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
@pytest.mark.parametrize('var', ['availability', 'curtailment', 'event',
'net_load'])
def test_apply_immediate_validation_defaults(
mocker, make_observation, default_index, var):
mock = mocker.spy(tasks, 'validate_defaults')
obs = make_observation(var)
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
tasks.apply_immediate_validation(obs, data)
assert mock.called
def test_fetch_and_validate_observation_ghi(mocker, make_observation,
default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['OK'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_ghi_nones(
mocker, make_observation, default_index):
obs = make_observation('ghi')
data = pd.DataFrame(
[(None, 1)] * 5, index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
base = (
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] |
DESCRIPTION_MASK_MAPPING['LIMITS EXCEEDED'] |
DESCRIPTION_MASK_MAPPING['CLEARSKY EXCEEDED'] |
LATEST_VERSION_FLAG
)
out['quality_flag'] = [
base | DESCRIPTION_MASK_MAPPING['NIGHTTIME'],
base,
base,
base,
base | DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY']
]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_fetch_and_validate_observation_not_listed(mocker, make_observation,
default_index):
obs = make_observation('curtailment')
data = pd.DataFrame(
[(0, 0), (100, 0), (200, 0), (-1, 1), (1500, 0)],
index=default_index,
columns=['value', 'quality_flag'])
mocker.patch('solarforecastarbiter.io.api.APISession.get_observation',
return_value=obs)
mocker.patch(
'solarforecastarbiter.io.api.APISession.get_observation_values',
return_value=data)
post_mock = mocker.patch(
'solarforecastarbiter.io.api.APISession.post_observation_values')
tasks.fetch_and_validate_observation(
'', obs.observation_id, data.index[0], data.index[-1])
out = data.copy()
out['quality_flag'] = [
DESCRIPTION_MASK_MAPPING['NIGHTTIME'] | LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['USER FLAGGED'] | LATEST_VERSION_FLAG,
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'] | LATEST_VERSION_FLAG]
assert post_mock.call_count == 2
assert_frame_equal(post_mock.call_args_list[0][0][1], out[:-1])
assert_frame_equal(post_mock.call_args_list[1][0][1], out[-1:])
def test_validate_dni(mocker, make_observation, default_index):
mocks = [mocker.patch.object(validator, f,
new=mocker.MagicMock(
wraps=getattr(validator, f)))
for f in ['check_timestamp_spacing',
'check_irradiance_day_night',
'check_dni_limits_QCRad']]
obs = make_observation('dni')
data = pd.Series([10, 1000, -100, 500, 500], index=default_index)
flags = tasks.validate_dni(obs, data)
for mock in mocks:
assert mock.called
expected = (pd.Series([0, 0, 0, 0, 1], index=data.index) *
DESCRIPTION_MASK_MAPPING['UNEVEN FREQUENCY'],
| pd.Series([1, 0, 0, 0, 0], index=data.index) | pandas.Series |
import pandas as pd
file1 = r'1_search_standard_box_spacer_0_16_greedy.csv'
file2 = r'2_search_specific_box_spacer_0_16_greedy.csv'
file3 = r'3_search_Epsilonproteobacteria_box_spacer_0_16_greedy.csv'
with open(file1, 'r') as f1:
data1 = pd.read_csv(f1)
with open(file2, 'r') as f2:
data2 = | pd.read_csv(f2) | pandas.read_csv |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas.core.groupby
import pandas as pd
from pandas.core.dtypes.common import is_list_like
import ray
from .utils import _map_partitions
from .utils import _inherit_docstrings
@_inherit_docstrings(pandas.core.groupby.DataFrameGroupBy)
class DataFrameGroupBy(object):
def __init__(self, df, by, axis, level, as_index, sort, group_keys,
squeeze, **kwargs):
self._columns = df.columns
self._index = df.index
self._axis = axis
self._row_metadata = df._row_metadata
self._col_metadata = df._col_metadata
if axis == 0:
partitions = [column for column in df._block_partitions.T]
self._index_grouped = pd.Series(self._index, index=self._index)\
.groupby(by=by, sort=sort)
else:
partitions = [row for row in df._block_partitions]
self._index_grouped = pd.Series(self._columns, index=self._index)\
.groupby(by=by, sort=sort)
self._keys_and_values = [(k, v)
for k, v in self._index_grouped]
self._grouped_partitions = \
list(zip(*(groupby._submit(args=(by,
axis,
level,
as_index,
sort,
group_keys,
squeeze) + tuple(part.tolist()),
num_return_vals=len(self))
for part in partitions)))
@property
def _iter(self):
from .dataframe import DataFrame
if self._axis == 0:
return [(self._keys_and_values[i][0],
DataFrame(col_partitions=part,
columns=self._columns,
index=self._keys_and_values[i][1].index,
row_metadata=self._row_metadata[
self._keys_and_values[i][1].index],
col_metadata=self._col_metadata))
for i, part in enumerate(self._grouped_partitions)]
else:
return [(self._keys_and_values[i][0],
DataFrame(row_partitions=part,
columns=self._keys_and_values[i][1].index,
index=self._index,
row_metadata=self._row_metadata,
col_metadata=self._col_metadata[
self._keys_and_values[i][1].index]))
for i, part in enumerate(self._grouped_partitions)]
@property
def ngroups(self):
return len(self)
def skew(self, **kwargs):
return self._apply_agg_function(lambda df: df.skew(**kwargs))
def ffill(self, limit=None):
return self._apply_agg_function(lambda df: df.ffill(limit=limit))
def sem(self, ddof=1):
return self._apply_agg_function(lambda df: df.sem(ddof=ddof))
def mean(self, *args, **kwargs):
return self._apply_agg_function(lambda df: df.mean(*args, **kwargs))
def any(self):
return self._apply_agg_function(lambda df: df.any())
@property
def plot(self):
raise NotImplementedError("Not Yet implemented.")
def ohlc(self):
raise NotImplementedError("Not Yet implemented.")
def __bytes__(self):
raise NotImplementedError("Not Yet implemented.")
@property
def tshift(self):
raise NotImplementedError("Not Yet implemented.")
@property
def groups(self):
return {k: | pd.Index(v) | pandas.Index |
from PyQt5 import QtWidgets,QtCore
from PyQt5.QtWidgets import QWidget,QLabel,QCheckBox,QPushButton,QApplication,QComboBox,QMessageBox,QFileDialog
from PyQt5.QtCore import QRect
import sys
import warnings;warnings.simplefilter('ignore')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import seaborn as sabrn
import pandas as pd
from pandas import DataFrame,read_excel,concat,ExcelWriter,ExcelFile
import time,datetime,math,os
class generate_plots_with_summary_table:
def __init__(self,parent=None):
self.Parent_window=parent
def Plot_Histogram(self,Raw_Data,Grouping_column_name,Test_no_name):
        ''' This function generates the histogram plot given the mandatory items: the DataFrame, the column name for which
        the graph should be plotted, and the grouping column if there is one'''
if type(Grouping_column_name)==str: Grouping_column_name=[Grouping_column_name]
if len(Grouping_column_name)!=0:
Grouped_data=Raw_Data.groupby(Grouping_column_name[0])
List_of_variables_for_grouping=[i for i in Raw_Data[Grouping_column_name[0]].unique()]
elif len(Grouping_column_name)==0:
Grouped_data=Raw_Data;List_of_variables_for_grouping=None
Grouped_data[Test_no_name].plot(kind='hist', alpha=1, legend=True,edgecolor ='black',stacked=False)
# Limits
if self.Parent_window!=None:
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][0])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=''
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][1])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=''
elif self.Parent_window==None: Lo_Limit=''; Hi_Limit=''
Lims=[Lo_Limit,Hi_Limit]
colors=['b','r']
Labels=['Lo_Limit','Hi_Limit']
for li,c,lbl in zip(Lims,colors,Labels):
(plt.axvline(x=li, c=c, label= lbl) ) if li!='' else '' #, label= lbl +'= {}'.format(li)
#plt.title(Test_no_name)
if len(List_of_variables_for_grouping)<=9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 10})
elif len(List_of_variables_for_grouping)>9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 6})
return plt
def Plot_Line_plot(self,Raw_Data,Test_no_name,Grouping_column_name):
        ''' This function generates the line plot given the mandatory items: the DataFrame, the column name for which
        the graph should be plotted, and the grouping column if there is one'''
if type(Grouping_column_name)==str: Grouping_column_name=[Grouping_column_name]
if len(Grouping_column_name)!=0: List_of_variables_for_grouping=[i for i in Raw_Data[Grouping_column_name[0]].unique()]
elif len(Grouping_column_name)==0: List_of_variables_for_grouping=[None]
# Colour codes to the multiple plots we are using
colors=['b','g','r','c','m','y','k']
symbols=['.','*','+','s','p','D','h','v','o']
plt.xticks( rotation='vertical',fontsize=6)
# Creating the new data table with the name of unique values in the Grouping column name which will help to generate the plot clear
List_of_data_sets={}
if len(Grouping_column_name)!=0:
for i in range(len(List_of_variables_for_grouping)): List_of_data_sets['Data_'+str(List_of_variables_for_grouping[i])]=Raw_Data[Raw_Data[Grouping_column_name[0]]==List_of_variables_for_grouping[i]]
elif len(Grouping_column_name)==0: List_of_data_sets['Data']=Raw_Data
markers=[color+symbol for symbol in symbols for color in colors]
if len(Grouping_column_name)!=0:
for i in range(len(List_of_variables_for_grouping)): plt.plot(list(List_of_data_sets['Data_'+str(List_of_variables_for_grouping[i])].index),List_of_data_sets['Data_'+str(List_of_variables_for_grouping[i])][Test_no_name],markers[i],label=List_of_variables_for_grouping[i])
elif len(Grouping_column_name)==0: plt.plot(list(List_of_data_sets['Data'].index),List_of_data_sets['Data'][Test_no_name],markers[0],label=None)
plt.xticks(rotation='vertical')
plt.xlabel('Devices')
if len(List_of_variables_for_grouping)<=9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 10})
elif len(List_of_variables_for_grouping)>9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 6})
#plt.legend()
if self.Parent_window!=None:
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][0])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=''
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][1])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=''
elif self.Parent_window==None:
Lo_Limit=''; Hi_Limit=''
Lims=[Lo_Limit,Hi_Limit]
colors=['b','r']
Labels=['Lo_Limit','Hi_Limit']
for li,c,lbl in zip(Lims,colors,Labels):
(plt.axvline(x=li, c=c, label= lbl) ) if li!='' else ''
return plt
def Plot_BOX_plot(self,Raw_Data,Test_no_name,Grouping_column_name):
        ''' This function generates the box plot given the mandatory items: the DataFrame, the column name for which
        the graph should be plotted, and the grouping column if there is one'''
if type(Grouping_column_name)==str: Grouping_column_name=[Grouping_column_name]
if len(Grouping_column_name)!=0:
List_of_variables_for_grouping=[i for i in Raw_Data[Grouping_column_name[0]].unique()]
boxgroupedx= Grouping_column_name[1] if len(Grouping_column_name)>1 else Grouping_column_name[0]
elif len(Grouping_column_name)==0: List_of_variables_for_grouping=[None];boxgroupedx=None
if (len(List_of_variables_for_grouping)==1 and List_of_variables_for_grouping[0]=='') or (len(List_of_variables_for_grouping)==1 and List_of_variables_for_grouping[0]!=""): box=sabrn.boxplot(data=Raw_Data,x=boxgroupedx, y=Test_no_name,width=0.5)
elif len(List_of_variables_for_grouping)>1: box=sabrn.boxplot(data=Raw_Data,x=Grouping_column_name[0], y=Test_no_name,hue=boxgroupedx,width=0.5)
if len(List_of_variables_for_grouping)<=9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 10})
elif len(List_of_variables_for_grouping)>9: plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 6})
box.set(xlabel='',ylabel='')
#plt.title('Box plot')
plt.xticks(rotation='horizontal',fontsize=6)
#plt.legend()
if self.Parent_window!=None:
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][0])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Lo_Limit=''
if 'Test_Limit_Details' in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=float(self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]['Test_Limit_Details'][Test_no_name][1])
elif 'Test_Limit_Details' not in self.Parent_window.Loaded_Data_Files[self.Data_Table_Name]: Hi_Limit=''
elif self.Parent_window==None:
Lo_Limit=''; Hi_Limit=''
Lims=[Lo_Limit,Hi_Limit]
colors=['b','r']
Labels=['Lo_Limit','Hi_Limit']
for li,c,lbl in zip(Lims,colors,Labels):
(plt.axvline(x=li, c=c, label= lbl) ) if li!='' else ''
return plt
def rearrange(PVT_corners, data):
rearranged_data = []
data_len = len(data)
boxgroupedx=Dropdown_variable.get()
unique_count= len(np.unique(sheet_1[boxgroupedx]))
for i in range(PVT_corners):
try:
start, end = int(i*data_len/unique_count), int((i+1)*data_len/unique_count)
rearranged_data.append(data[start:end])
except IndexError:
pass
return rearranged_data
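        # Illustrative example (assumed numbers): with 400 rows and 4 unique values in the
        # grouping column, this returns 4 consecutive slices of 100 rows each; it assumes the
        # rows are already ordered by that grouping value.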
def plot_cumulatvie(TestNumber):
boxgroupedx=Dropdown_variable.get()
Table_attached=False
PVT_corners=len(set(sheet_1[boxgroupedx]))
PVTDATA=rearrange(PVT_corners,sheet_1[TestNumbers[TestNumber]])
#print(PVTDATA)
colors=['b','g','r','c','m','y','k']
symbols=['.','*','+','s','p','D','h','v','o']
LABELS=[]
for i in range(len(sheet_1[boxgroupedx])):
if len(LABELS)==0:LABELS.append(sheet_1[boxgroupedx][i])
if sheet_1[boxgroupedx][i] not in LABELS:LABELS.append(sheet_1[boxgroupedx][i])
markers=[color+symbol for symbol in symbols for color in colors]
#with open("test.txt", "a") as myfile:
for i in range(len(PVTDATA)):
#print(len(PVTDATA))
x_data = np.sort(PVTDATA[i])#;print(PVTDATA[i])
#myfile.write(PVTDATA[i])
y_data = np.arange(1, len(x_data)+1)/len(x_data)
plt.plot(x_data, y_data, markers[i], label=LABELS[i], rasterized=True)
plt.margins(0.02)
hilimt=float(sheet_2[TestNumbers[TestNumber]][5])
hilimt1=hilimt
lowlimt=float(sheet_2[TestNumbers[TestNumber]][6])
lowlimt1=lowlimt
Lowspec_min=[lowlimt,lowlimt1]
Lowspec_max=[0,1]
Highspec_min=[hilimt,hilimt1]
Highspec_max=[0,1]
Table2=FullDataSummaryTable[TestNumber-26]
if Check_box5_check.get()==1:
NewCPK=Textbox4.get();NewCPK=float(NewCPK)
if Table2[0][11]<NewCPK and Table2[0][9]!=0:
Cal_USL=round(Table2[0][7]+(3*Table2[0][9]*NewCPK),5)
Cal_LSL=round(Table2[0][7]-(3*Table2[0][9]*NewCPK),5)
new_hi_y=[float(Cal_USL),float(Cal_USL)]
new_lo_y=[float(Cal_LSL),float(Cal_LSL)]
if Table2[0][12]<NewCPK:
plt.plot(new_hi_y,Highspec_max,color='orange', linestyle='dashed')
r2='HighLim :%s' %Table2[0][3]; plt.text(0.86, 0.88, r2, fontsize=8,color='blue', transform=plt.gcf().transFigure)
r7='Cal HighLim : %s'%Cal_USL; plt.text(0.86, 0.84, r7, fontsize=8,color='blue', transform=plt.gcf().transFigure)
if Table2[0][13]<NewCPK:
plt.plot(new_lo_y,Lowspec_max,color='orange', linestyle='dashed')
r1='LowLim : %s'%Table2[0][2]; plt.text(0.86, 0.89, r1, fontsize=8,color='blue', transform=plt.gcf().transFigure)
r6='Cal LowLim : %s'%Cal_LSL; plt.text(0.86, 0.83, r6, fontsize=8,color='blue', transform=plt.gcf().transFigure)
plt.text(0.86, 0.69, "- - New Lims", fontsize=10,color='orange', transform=plt.gcf().transFigure)
r3='unit :%s'%Table2[0][4]; plt.text(0.86 ,0.87, r3, fontsize=8,color='blue', transform=plt.gcf().transFigure)
r4='CPK : %s'%Table2[0][11]; plt.text(0.86, 0.86, r4, fontsize=8,color='blue', transform=plt.gcf().transFigure)
r5='NewCPK: %s'%NewCPK; plt.text(0.86, 0.85, r5, fontsize=8,color='blue', transform=plt.gcf().transFigure)
plt.plot(Lowspec_min,Lowspec_max,color='blue')
plt.plot(Highspec_min,Highspec_max,color='red')
if len(np.unique(sheet_1[boxgroupedx]))<=9:
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 10})
elif len(np.unique(sheet_1[boxgroupedx]))>9:
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), borderaxespad=0., prop={'size': 6})
plt.rcParams["axes.edgecolor"] = "0.15"
plt.rcParams["axes.linewidth"] = 1.55
plt.margins(0.02) # decide how much margin you’d need, for data not to overlap
return plt
def Get_the_header_table(self,loop_count,test_number):
Table6=['GOURISANKAR_TOOL',"PageNumber"]
Table7=[['Characterization Report',loop_count]]
TestNumber=test_number
bbox_val= [-0.08, 1.06,1.2,0.08] if self.Numberofplotsinpdf==1 else ( [-0.08, 2.32,1.2,0.15] if self.Numberofplotsinpdf==2 else [-0.08, 3.61,1.2,0.22])
Header_table= plt.table(cellText=Table7,colLabels=Table6,cellLoc='center',colLoc='center',loc='top',bbox=bbox_val)#[-0.08,1.06,1.2,0.08]
Header_table.set_fontsize(16)
Header_table.scale(2.6,2.6)
Header_table.auto_set_font_size(False)
Header_table.auto_set_column_width(True)
#t2_data=TestNumbers[loop_count].split(';')[0]+TestNumbers[loop_count].split(';')[1]
t2='%s' %(TestNumber.split(';')[0]+'_'+TestNumber.split(';')[1])
plt.text(0.10, 0.92, t2, style='normal',fontsize=18,transform=plt.gcf().transFigure,bbox={'facecolor':'none', 'alpha':1, 'pad':5})
return plt
def convert_to_Grouped_Data(self,inputfile,Grouping_column_name):
return inputfile.groupby(Grouping_column_name)
def Merge_the_data_frames(self,File1,File2):
New_Data_with_merged=''
if len(File1.columns.difference(File2.columns))==0:
New_Data_with_merged=pd.concat([File1,File2])
print( 'Merging completed')
elif len(File1.columns.difference(File2.columns))!=0:
unmatched_column_names=File1.columns.difference(File2.columns)
            print('There are %s unmatched columns. The unmatched column names are shown below'%len(unmatched_column_names))
for i in range(len(unmatched_column_names)): print(unmatched_column_names[i])
            Still_want_to_merge_both_the_files=QMessageBox.question(self,'merge','Some columns in the 2 inputs do not match. Do you still want to proceed?',QMessageBox.Yes | QMessageBox.No ,QMessageBox.Yes)
if Still_want_to_merge_both_the_files==QMessageBox.Yes:
New_Data_with_merged=pd.concat([File1,File2])
print( 'Merging completed')
elif Still_want_to_merge_both_the_files==QMessageBox.No:
print(" Merging of the 2 files was unsucessful :-( , Please try again")
else:
print(' Please give a valid input')
return New_Data_with_merged
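    # Usage sketch (frame names are illustrative):
    #   merged = self.Merge_the_data_frames(df_reference_lot, df_new_board)
    # An empty string is returned when the user declines to merge files with mismatched columns.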
def Get_the_summary_table(self,Summary_table,Test_no_name,table_column_header):
Summary_table_=Summary_table.transpose()
c=[0.1 if i==0 else 0.05 for i in range(len(list(table_column_header)))]
bbox_val=[-0.08,-0.25,1.2,0.1] if self.Numberofplotsinpdf==1 else [-0.08,-0.6,1.2,0.3]
Data_tbl=plt.table(cellText=[Summary_table_[Test_no_name].values],colWidths=c, colLabels=list(table_column_header),cellLoc='left',colLoc='left',loc='bottom',bbox=bbox_val)
Data_tbl.auto_set_font_size(False)
Data_tbl.set_fontsize(20)
Data_tbl.auto_set_column_width(True)
return plt
def Generate_summary_table1(self,Merged_data,Grouping_column_name,Test_no_name_list):
Ref_table=Merged_data[Merged_data[Grouping_column_name].str.contains('ref',case=False)]
New_board=Merged_data[Merged_data[Grouping_column_name].str.contains('ref',case=False)==False]
Table_header=['TestNum','TestName','Lo_Limit','Hi_Limit','Unit','SD_Ref_Lot(Bin1)','Mean_Ref_Board','SD_Ref_Board','Mean_New_Board',
'SD_New_Board','Delta_Mean','Mean_Shift','Mean_Shift_Criteria','SD_Ratio','Sigma_Spread_Criteria','Passed/Failed']
Summary_table={'TestNum':[],'TestName':[],'Lo_Limit':[],'Hi_Limit':[],'Unit':[],'SD_Ref_Lot(Bin1)':[],'Mean_Ref_Board':[],'SD_Ref_Board':[],'Mean_New_Board':[],
'SD_New_Board':[],'Delta_Mean':[],'Mean_Shift':[],'Mean_Shift_Criteria':[],'SD_Ratio':[],'Sigma_Spread_Criteria':[],'Passed/Failed':[]}
try:
for i in range(len(Test_no_name_list)):
if (Merged_data[Test_no_name_list[i]].dtype=='float'or Merged_data[Test_no_name_list[i]].dtype=='int64' ) and Test_no_name_list[i].count(';')>=5 :
TestNum=Test_no_name_list[i].split(';')[0]
TestName=Test_no_name_list[i].split(';')[1]
Lo_Limit=float(Test_no_name_list[i].split(';')[2]) if Test_no_name_list[i].split(';')[2]!='' else ''
Hi_Limit=float(Test_no_name_list[i].split(';')[3]) if Test_no_name_list[i].split(';')[3]!='' else ''
Unit=Test_no_name_list[i].split(';')[4]
SD_Ref_Lot=float(Test_no_name_list[i].split(';')[5]) if Test_no_name_list[i].split(';')[5]!='' else ''
Mean_Ref_Board=round(Ref_table[Test_no_name_list[i]].mean(),3)
SD_Ref_Board=round(Ref_table[Test_no_name_list[i]].std(),3)
Mean_New_Board=round(New_board[Test_no_name_list[i]].mean(),3)
SD_New_Board=round(New_board[Test_no_name_list[i]].std(),3)
Delta_Mean=round(abs(Mean_Ref_Board-Mean_New_Board),3)
Mean_Shift=round((Delta_Mean/(Hi_Limit-Lo_Limit)*100),3) if Hi_Limit!='' and Lo_Limit!='' and type(Hi_Limit)!=str and type(Lo_Limit)!=str and Hi_Limit!=Lo_Limit else 'N/A'
Mean_Shift_Criteria=(('Passed' if Delta_Mean< SD_Ref_Lot else 'For Check' ) if SD_Ref_Lot!='' else 'N/A') if Mean_Shift=='N/A' else ('Passed' if Mean_Shift!='N/A' and Mean_Shift<5 else 'Failed')
SD_Ratio= round((SD_New_Board/SD_Ref_Board),3) if SD_Ref_Board!='' and SD_New_Board!='' and SD_Ref_Board!=0 and SD_New_Board!=0 else 0
Sigma_Spread_Criteria= 'Passed' if SD_Ratio<1.5 else 'For Check'
Passed_Failed='Passed' if Mean_Shift_Criteria=='Passed' and Sigma_Spread_Criteria=='Passed' else ('Failed' if Mean_Shift_Criteria=='Failed' else 'For Check')
Table_header_val=[TestNum,TestName,Lo_Limit,Hi_Limit,Unit,SD_Ref_Lot,Mean_Ref_Board,SD_Ref_Board,Mean_New_Board,
SD_New_Board,Delta_Mean,Mean_Shift,Mean_Shift_Criteria,SD_Ratio,Sigma_Spread_Criteria,Passed_Failed]
for j in range(len(Table_header)): Summary_table[Table_header[j]].append(Table_header_val[j])
Dataframe=pd.DataFrame(Summary_table,columns=Table_header)
return Dataframe ,Table_header
except:
            k=QMessageBox.information(self,'Generation aborted!','Sorry :( Found some issue while generating the summary table',QMessageBox.Ok,QMessageBox.Ok)
print('Line Number or Loop Number is ',i,Test_no_name_list[i])
self.close()
#def CP(self,DataTable,Test_number_list):
def Generate_summary_table(self,Data_Table,Test_number_list,Summary_Stat_list):
''' This Function takes the data frame and test number list as input and creates the statistic summary table for the same'''
Data_Table_=Data_Table
Test_number_list_=Test_number_list
Summary_Stat_list_=Summary_Stat_list
self.Stat_item_data=[]
for i in Summary_Stat_list_:
if i=="Min": Min=Data_Table_[Test_number_list_].min();self.Stat_item_data.append(Min.round(5))
elif i=="Max": Max=Data_Table_[Test_number_list_].max();self.Stat_item_data.append(Max.round(5))
elif i=="Mean": Mean=Data_Table_[Test_number_list_].mean();self.Stat_item_data.append(Mean.round(5))
elif i=="Median(P50)": Median=Data_Table_[Test_number_list_].median();self.Stat_item_data.append(Median.round(5))
elif i=="StdDev": StdDev=Data_Table_[Test_number_list_].std();self.Stat_item_data.append(StdDev.round(5))
elif i=="P1": P1=Data_Table_[Test_number_list_].quantile(0.01);self.Stat_item_data.append(P1.round(5))
elif i=="P5": P5=Data_Table_[Test_number_list_].quantile(0.05);self.Stat_item_data.append(P5.round(5))
elif i=="P10": P10=Data_Table_[Test_number_list_].quantile(0.1);self.Stat_item_data.append(P10.round(5))
elif i=="P25": P25=Data_Table_[Test_number_list_].quantile(0.25);self.Stat_item_data.append(P25.round(5))
elif i=="P75": P75=Data_Table_[Test_number_list_].quantile(0.75);self.Stat_item_data.append(P75.round(5))
elif i=="P90": P90=Data_Table_[Test_number_list_].quantile(0.9);self.Stat_item_data.append(P90.round(5))
elif i=="P95": P95=Data_Table_[Test_number_list_].quantile(0.95);self.Stat_item_data.append(P95.round(5))
elif i=="P99": P99=Data_Table_[Test_number_list_].quantile(0.99);self.Stat_item_data.append(P99.round(5))
elif i=="COUNT": Count=Data_Table_[Test_number_list_].count();self.Stat_item_data.append(Count)
elif i=="PASS": Pass=Data_Table_[Test_number_list_].min();self.Stat_item_data.append(Pass)
elif i=="FAIL": Fail=Data_Table_[Test_number_list_].min();self.Stat_item_data.append(Fail)
elif i=="TOTAL": Total=Data_Table_[Test_number_list_].min();self.Stat_item_data.append(Total)
All_Stat_Summary_Table= | DataFrame(self.Stat_item_data,index=Summary_Stat_list_) | pandas.DataFrame |
# The MIT License (MIT)
# Copyright (c) 2017 Massachusetts Institute of Technology
#
# Authors: <NAME>
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Skdaccess imports
from skdaccess.framework.data_class import DataFetcherCache, TableWrapper
from skdaccess.framework.param_class import *
from pkg_resources import resource_filename
from skdaccess.utilities.mahali_util import convert_date, parseIonoFile
from skdaccess.utilities.support import retrieveCommonDatesHDF
# Standard library imports
from urllib import parse
from collections import OrderedDict
from collections import defaultdict
from itertools import repeat
# 3rd party imports
from tqdm import tqdm
import pandas as pd
class DataFetcher(DataFetcherCache):
'''
Data Fetcher for Mahali Data
'''
def __init__(self, ap_paramList=[], start_date=None, end_date=None):
'''
Initialize Mahali Data Fetcher
@param ap_paramList[stations]: Autolist of stations (Defaults to all stations)
        @param start_date: Starting date for selecting data (Defaults to beginning of available data)
@param end_date: Ending date for selecting data (Defaults to end of available data)
'''
# Get start date
if start_date == None:
self.start_date = pd.to_datetime('2015275', format='%Y%j')
else:
self.start_date = convert_date(start_date)
# Get end date
if end_date == None:
self.end_date = | pd.to_datetime('2015307', format='%Y%j') | pandas.to_datetime |
#!/usr/bin/env python
# coding: utf-8
import pandas as pd
from fbprophet import Prophet
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
df = pd.read_pickle('all_trips.pkl')
df.head()
# Day of Week
df['dow'] = df.trip_start_time.dt.day_name()
df['hour'] = df.trip_start_time.dt.hour
sns.set_style("darkgrid")
ax = sns.FacetGrid(data=df.groupby(
['dow', 'hour']
).hour.count().to_frame(
name='day_hour_count').reset_index(), col='dow', col_order=[
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday',
'Sunday'],
col_wrap=4)
ax.map(sns.barplot, 'hour', 'day_hour_count')
# Predict
daily = df.set_index('trip_start_time').groupby(pd.Grouper(freq='D')).size()
daily = pd.DataFrame(daily)
daily = daily.reset_index()
daily.columns = ['ds', 'y']
daily.head()
m = Prophet()
m.fit(daily)
future = m.make_future_dataframe(periods=5)
future.tail()
forecast = m.predict(future)
forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].tail()
forecast.to_pickle('forecast.pkl')
fig1 = m.plot(forecast)
# Hourly
hourly = df.set_index('trip_start_time').groupby( | pd.Grouper(freq='2h') | pandas.Grouper |
import vectorbt as vbt
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.enums import range_dt
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
mask = pd.DataFrame([
[True, False, False],
[False, True, False],
[False, False, True],
[True, False, False],
[False, True, False]
], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]), columns=['a', 'b', 'c'])
ts = pd.Series([1., 2., 3., 2., 1.], index=mask.index)
price = pd.DataFrame({
'open': [10, 11, 12, 11, 10],
'high': [11, 12, 13, 12, 11],
'low': [9, 10, 11, 10, 9],
'close': [11, 12, 11, 10, 9]
})
group_by = pd.Index(['g1', 'g1', 'g2'])
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# accessors.py ############# #
class TestAccessors:
def test_indexing(self):
assert mask.vbt.signals['a'].total() == mask['a'].vbt.signals.total()
def test_freq(self):
assert mask.vbt.signals.wrapper.freq == day_dt
assert mask['a'].vbt.signals.wrapper.freq == day_dt
assert mask.vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert mask['a'].vbt.signals(freq='2D').wrapper.freq == day_dt * 2
assert pd.Series([False, True]).vbt.signals.wrapper.freq is None
assert pd.Series([False, True]).vbt.signals(freq='3D').wrapper.freq == day_dt * 3
assert pd.Series([False, True]).vbt.signals(freq=np.timedelta64(4, 'D')).wrapper.freq == day_dt * 4
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(mask['a'].vbt.signals.fshift(test_n), mask['a'].shift(test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.fshift(test_n).values,
generic_nb.fshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.fshift(test_n), mask.shift(test_n, fill_value=False))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(
mask['a'].vbt.signals.bshift(test_n),
mask['a'].shift(-test_n, fill_value=False))
np.testing.assert_array_equal(
mask['a'].vbt.signals.bshift(test_n).values,
generic_nb.bshift_1d_nb(mask['a'].values, test_n, fill_value=False)
)
pd.testing.assert_frame_equal(mask.vbt.signals.bshift(test_n), mask.shift(-test_n, fill_value=False))
def test_empty(self):
pd.testing.assert_series_equal(
pd.Series.vbt.signals.empty(5, index=np.arange(10, 15), name='a'),
pd.Series(np.full(5, False), index=np.arange(10, 15), name='a')
)
pd.testing.assert_frame_equal(
pd.DataFrame.vbt.signals.empty((5, 3), index=np.arange(10, 15), columns=['a', 'b', 'c']),
pd.DataFrame(np.full((5, 3), False), index=np.arange(10, 15), columns=['a', 'b', 'c'])
)
pd.testing.assert_series_equal(
| pd.Series.vbt.signals.empty_like(mask['a']) | pandas.Series.vbt.signals.empty_like |
import pandas as pd
import numpy as np
from random import randrange
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold
from sklearn.metrics import average_precision_score, f1_score,recall_score, accuracy_score, roc_auc_score
from catboost import CatBoostClassifier
from model.cat_custom_eval_metric import CatCustomAveragePrecisionScore
class Cat_Model:
def __init__(self, features, cat_features):
self.features = features
self.cat_features = cat_features
self.clf = None
def build_clf(self, n_estimators = 1000, learning_rate = 0.1, num_leaves = 16, reg_alpha = 10, reg_lambda = 7, **kwargs):
self.clf = CatBoostClassifier(
silent = False,
random_state = 10,
            n_estimators = n_estimators,
# max_depth = 8,
            learning_rate = learning_rate,
# reg_lambda = 20,
eval_metric = CatCustomAveragePrecisionScore(),
od_type = 'Iter',
**kwargs
)
def run(self, data, y, groups, test, n_splits = 10, early_stopping_rounds= 100):
oof_preds_LGBM = np.zeros((data.shape[0]))
sub_preds_LGBM = np.zeros((test.shape[0]))
df_sub_preds_LGBM = | pd.DataFrame() | pandas.DataFrame |
from collections import OrderedDict
from datetime import datetime, timedelta
import numpy as np
import numpy.ma as ma
import pytest
from pandas._libs import iNaT, lib
from pandas.core.dtypes.common import is_categorical_dtype, is_datetime64tz_dtype
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
Series,
Timestamp,
date_range,
isna,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray, period_array
class TestSeriesConstructors:
@pytest.mark.parametrize(
"constructor,check_index_type",
[
# NOTE: some overlap with test_constructor_empty but that test does not
# test for None or an empty generator.
# test_constructor_pass_none tests None but only with the index also
# passed.
(lambda: Series(), True),
(lambda: Series(None), True),
(lambda: Series({}), True),
(lambda: Series(()), False), # creates a RangeIndex
(lambda: Series([]), False), # creates a RangeIndex
(lambda: Series((_ for _ in [])), False), # creates a RangeIndex
(lambda: Series(data=None), True),
(lambda: Series(data={}), True),
(lambda: Series(data=()), False), # creates a RangeIndex
(lambda: Series(data=[]), False), # creates a RangeIndex
(lambda: Series(data=(_ for _ in [])), False), # creates a RangeIndex
],
)
def test_empty_constructor(self, constructor, check_index_type):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
expected = Series()
result = constructor()
assert len(result.index) == 0
tm.assert_series_equal(result, expected, check_index_type=check_index_type)
def test_invalid_dtype(self):
# GH15520
msg = "not understood"
invalid_list = [pd.Timestamp, "pd.Timestamp", list]
for dtype in invalid_list:
with pytest.raises(TypeError, match=msg):
Series([], name="time", dtype=dtype)
def test_invalid_compound_dtype(self):
# GH#13296
c_dtype = np.dtype([("a", "i8"), ("b", "f4")])
cdt_arr = np.array([(1, 0.4), (256, -13)], dtype=c_dtype)
with pytest.raises(ValueError, match="Use DataFrame instead"):
Series(cdt_arr, index=["A", "B"])
def test_scalar_conversion(self):
# Pass in scalar is disabled
scalar = Series(0.5)
assert not isinstance(scalar, float)
# Coercion
assert float(Series([1.0])) == 1.0
assert int(Series([1.0])) == 1
def test_constructor(self, datetime_series):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty_series = Series()
assert datetime_series.index.is_all_dates
# Pass in Series
derived = Series(datetime_series)
assert derived.index.is_all_dates
assert tm.equalContents(derived.index, datetime_series.index)
# Ensure new index is not created
assert id(datetime_series.index) == id(derived.index)
# Mixed type Series
mixed = Series(["hello", np.NaN], index=[0, 1])
assert mixed.dtype == np.object_
assert mixed[1] is np.NaN
assert not empty_series.index.is_all_dates
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
assert not Series().index.is_all_dates
# exception raised is of type Exception
with pytest.raises(Exception, match="Data must be 1-dimensional"):
Series(np.random.randn(3, 3), index=np.arange(3))
mixed.name = "Series"
rs = Series(mixed).name
xp = "Series"
assert rs == xp
# raise on MultiIndex GH4187
m = MultiIndex.from_arrays([[1, 2], [3, 4]])
msg = "initializing a Series from a MultiIndex is not supported"
with pytest.raises(NotImplementedError, match=msg):
Series(m)
@pytest.mark.parametrize("input_class", [list, dict, OrderedDict])
def test_constructor_empty(self, input_class):
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series()
empty2 = Series(input_class())
# these are Index() and RangeIndex() which don't compare type equal
# but are just .equals
tm.assert_series_equal(empty, empty2, check_index_type=False)
# With explicit dtype:
empty = Series(dtype="float64")
empty2 = Series(input_class(), dtype="float64")
tm.assert_series_equal(empty, empty2, check_index_type=False)
# GH 18515 : with dtype=category:
empty = Series(dtype="category")
empty2 = Series(input_class(), dtype="category")
tm.assert_series_equal(empty, empty2, check_index_type=False)
if input_class is not list:
# With index:
with tm.assert_produces_warning(DeprecationWarning, check_stacklevel=False):
empty = Series(index=range(10))
empty2 = Series(input_class(), index=range(10))
tm.assert_series_equal(empty, empty2)
# With index and dtype float64:
empty = Series(np.nan, index=range(10))
empty2 = Series(input_class(), index=range(10), dtype="float64")
tm.assert_series_equal(empty, empty2)
# GH 19853 : with empty string, index and dtype str
empty = Series("", dtype=str, index=range(3))
empty2 = Series("", index=range(3))
| tm.assert_series_equal(empty, empty2) | pandas._testing.assert_series_equal |
import pandas as pd
import numpy as np
import pyrolite.geochem
from ..util.log import Handle
logger = Handle(__name__)
def phasename(phaseID):
"""
Take a phase ID and return the name of the phase.
Parameters
------------
phaseID : :class:`str`
ID for the particular phase (e.g. 'olivine_0')
Returns
--------
:class:`str`
Name of the phase.
"""
if phaseID.find("_") > 0:
n = phaseID[: phaseID.find("_")]
else:
n = phaseID
return n
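# Examples (phase IDs are illustrative):
# >>> phasename("olivine_0")
# 'olivine'
# >>> phasename("liquid")
# 'liquid'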
def tuple_reindex(df, columns=["pressure", "temperature"]):
"""
Create an index based on tuples from multiple columns.
Parameters
-----------
df: :class:`pandas.DataFrame`
Table DataFrame to reindex.
columns : :class:`list`
List of columns to incorporate into the tuple index.
Returns
-------
:class:`pandas.DataFrame`
Reindexed DataFrame.
"""
df.index = df.loc[:, columns].astype(int).itertuples(index=False)
return df
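# Example (assumes integer-valued 'pressure' and 'temperature' columns):
# >>> tuple_reindex(df).index[0]
# Pandas(pressure=1000, temperature=1200)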
def integrate_solid_composition(df, frac=True):
"""
Integrate solid compositions to return a 'cumulate' like
composition. Note that in the case of non-fractional crystallisation
this will correspond to the solid composition.
Parameters
-----------
df : :class:`pandas.DataFrame`
DataFrame to integrate.
frac : :class:`bool`
Whether the experiment is a fractional crystallisation experiment.
Returns
-----------
df : :class:`pandas.DataFrame`
DataFrame containing an integrated solid composition.
"""
assert not "experiment" in df.columns, "Designed for single tables."
slds = df.loc[df.phase == "solid", :]
idx = (
df.loc[:, ["pressure", "temperature", "step"]]
.dropna()
.drop_duplicates()
.sort_values("step")
)
if frac:
cumulate = pd.DataFrame(columns=slds.columns, index=idx.index)
# solids typically don't exist for part of the history, so we need reindex here
# rather than .loc[<index list>, :]
cumulate["mass"] = np.nancumsum(slds["mass"].reindex(index=idx.index).values)
chem = slds.reindex(
index=idx.index,
columns=[
i for i in slds.pyrochem.list_compositional if i not in ["S", "H", "V"]
],
)
chem = chem.apply(pd.to_numeric, errors="coerce")
increments = (
slds["mass"].reindex(index=idx.index).values[:, np.newaxis] * chem.values
)
        cumulate[chem.columns] = np.nancumsum(increments, axis=0)
cumulate[["pressure", "temperature", "step"]] = slds.loc[
:, ["pressure", "temperature", "step"]
]
else:
cumulate = slds.reindex(index=idx.index)
cumulate.pyrochem.add_MgNo()
return cumulate
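# Usage sketch (table name is illustrative): for a fractional-crystallisation table,
#   cumulate = integrate_solid_composition(melts_table, frac=True)
# returns the running solid mass and mass-weighted element totals; with frac=False it
# simply mirrors the solid composition reported at each (pressure, temperature) step.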
def integrate_solid_proportions(df, frac=True):
"""
Integrate solid proportions to return a 'cumulate' split by integrated phase
masses. Note that in the case of non-fractional crystallisation
this will correspond to the overall solid phase abundances.
Parameters
-----------
df : :class:`pandas.DataFrame`
DataFrame to integrate.
frac : :class:`bool`
Whether the experiment is a fractional crystallisation experiment.
Returns
-----------
df : :class:`pandas.DataFrame`
DataFrame containing integrated solid phase proportions.
"""
assert not "experiment" in df.columns, "Designed for single tables."
# another dataframe for integrated minerals
phaseIDs = sorted(
[
pID
for pID in df.phaseID.unique()
if (not | pd.isnull(pID) | pandas.isnull |
import numpy as np
import pandas as pd
import us
import requests
from datetime import timedelta
def data_preparation(df):
#df.drop(columns=["pollster.url", "source.url", "question.text", "question.iteration", "entry.date.time..et.", "partisan", "affiliation", "Unnamed: 0"], inplace=True)
# #adding states to the df
# for state in state_name:
# state2 = state.replace(" ", "-").lower()
# df_state = pd.read_csv(f"temporary_{state2}.csv")
# df_state["State"] = state
# df_state.drop(columns=["Pollster URL", "Source URL", "Question Text", "Entry Date/Time (ET)",
# "Partisan", "Affiliation", "Question Iteration"], inplace=True)
# df = df.append(df_state)
### Cleaning up the data ###
#filtering data
run_date = pd.to_datetime("2016-11-08")
election_day = pd.to_datetime("2016-11-08")
start_date = pd.to_datetime("2016-03-01")
df = df.rename(columns={"number.of.observations": "n"})
df = df.rename(columns={"start.date": "start"})
df = df.rename(columns={"end.date": "end"})
df["start"] = pd.to_datetime(df["start"], format="%Y-%m-%d")
df["end"] = pd.to_datetime(df["end"], format="%Y-%m-%d")
df["t"] = df["end"] - ((timedelta(days=1) + (df["end"] - df["start"])) / 2).dt.ceil("d")
df = df[(df["t"] >= start_date) & ((df["population"] == "Likely Voters") | (df["population"] == "Registered Voters")
| (df["population"] == "Adults")) & (df["n"] > 1)]
#pollster arrangements
characters= "'!^-%&/()=?_.,<$>£#½§{[]}\}|;`"
for pollster in df["pollster"].unique():
ch_index_list = []
for ch in characters:
ch_index = [i for i, x in enumerate(pollster) if x == ch]
if ch_index:
ch_index_list.append(ch_index[0])
if not ch_index_list:
continue
first_ch = min(ch_index_list)
new_pollster = pollster.split(pollster[first_ch])[0]
if new_pollster[-1] == " ":
new_pollster = new_pollster[:-1]
df.replace(pollster, new_pollster, inplace=True)
df.replace(["Fox News", "WashPost", "ABC News", "DHM Research", "Public Opinion Strategies"],
["FOX", "Washington Post", "ABC", "DHM", "POS"], inplace=True)
df["mode"].replace(["Internet", "Live Phone", 'IVR/Online', 'Live Phone/Online', 'Automated Phone', 'IVR/Live Phone', 'Mixed', 'Mail'],
["Online Poll", "Live Phone Component", *["Other"]*6], inplace=True)
#dropping NAs
df["undecided"][df["undecided"].isna()] = 0
df["other"][df["other"].isna()] = 0
df["johnson"][df["johnson"].isna()] = 0
df["mcmullin"][df["mcmullin"].isna()] = 0
#calculating two party poll shares
df["twoparty"] = df["clinton"] + df["trump"]
df["polltype"] = df["population"]
#calculating Clinton vote shares
df["n_clinton"] = round(df["n"] * df["clinton"] / 100)
df["pct_clinton"] = df["clinton"] / df["twoparty"]
#calculating Trump vote shares
df["n_trump"] = round(df["n"] * df["trump"] / 100)
df["pct_trump"] = df["trump"] / df["twoparty"]
# # importing abbrevetions
# state_abbr = {x.abbr: x.name for x in us.states.STATES_CONTIGUOUS}
# state_abbr_items = state_abbr.items()
# state_abbr_list = list(state_abbr_items)
# state_abbr =pd.DataFrame(state_abbr_list, columns=["Abbr", "State"])
#
# a = pd.read_csv("abbr_list.csv")
# #combining with df
# df["Abbr"] = np.where(df["state"] == "--", "General", df["state"].map(state_abbr.set_index("State")["Abbr"]))
df["poll_day"] = df["t"] - min(df["t"]) + timedelta(days=1)
#creating indexes
columns = ["state", "pollster", "polltype", "mode"]
index_i = ["s", "p", "pop", "m"]
for i, col in enumerate(columns):
reindex = False
for ii, x in enumerate(df[col].sort_values().unique(), start=1):
if reindex:
ii-=1
if x=="--":
ii=51
reindex=True
df.loc[df[col] == x, f"index_{index_i[i]}"] = ii
df[f"index_{index_i[i]}"] = df[f"index_{index_i[i]}"].astype(int)
df["index_t"] = df["poll_day"].dt.days
#sorting and dropping duplicates
df = df.sort_values(by=["state", "t", "polltype", "twoparty" ])
df.drop_duplicates(['state','t','pollster'], inplace=True)
return df
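# Usage sketch (file name is an assumption):
#   polls = data_preparation(pd.read_csv("all_polls_2016.csv"))
#   polls[["state", "t", "pct_clinton", "pct_trump"]].head()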
def adjuster_terms(df):
all_polled_states = df["state"].unique()
all_polled_states = np.delete(all_polled_states, 0)
#getting states info from 2012
state2012 = pd.read_csv("2012.csv")
state2012["score"] = state2012["obama_count"] / (state2012["obama_count"] + state2012["romney_count"])
state2012["national score"] = sum(state2012["obama_count"]) / sum(state2012["obama_count"] + state2012["romney_count"])
state2012["delta"] = state2012["score"] - state2012["national score"]
state2012["share_national_vote"] = (state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"]))\
/ sum(state2012["total_count"] * (1 + state2012["adult_pop_growth_2011_15"]))
state2012 = state2012.sort_values("state")
state_abb = state2012["state"]
state_name = state2012["state_name"]
prior_diff_score = pd.DataFrame(state2012["delta"])
prior_diff_score.set_index(state_abb, inplace=True)
state_weights = pd.DataFrame(state2012["share_national_vote"] / sum(state2012["share_national_vote"]))
state_weights.set_index(state_abb.sort_values(), inplace=True)
##creating covariance matrices
#preparing data
state_data = pd.read_csv("abbr_list.csv")
state_data = state_data[["year", "state", "dem"]]
state_data = state_data[state_data["year"] == 2016]
state_data.rename(columns={"year": "variable", "dem": "value"}, inplace=True)
state_data= state_data[["state", "variable", "value"]]
census = pd.read_csv("acs_2013_variables.csv")
census.dropna(inplace=True)
census.drop(columns=["state_fips", "pop_total", "pop_density"], inplace=True)
census = census.melt(id_vars="state")
state_data = state_data.append(census)
#adding urbanicity
urbanicity = | pd.read_csv("urbanicity_index.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
IB API - Store Historical Data of multiple stocks in dataframe
@author: <NAME> (http://rasuquant.com/wp/)
"""
# Import libraries
from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
import pandas as pd
import openpyxl
import threading
import time
import os
class TradeApp(EWrapper, EClient):
def __init__(self):
EClient.__init__(self, self)
self.data = {}
def historicalData(self, reqId, bar):
if reqId not in self.data:
self.data[reqId] = [{"Date":bar.date,"Open":bar.open,"High":bar.high,"Low":bar.low,"Close":bar.close,"Volume":bar.volume}]
else:
self.data[reqId].append({"Date":bar.date,"Open":bar.open,"High":bar.high,"Low":bar.low,"Close":bar.close,"Volume":bar.volume})
print("reqID:{}, date:{}, open:{}, high:{}, low:{}, close:{}, volume:{}".format(reqId,bar.date,bar.open,bar.high,bar.low,bar.close,bar.volume))
def usTechStk(symbol,sec_type="STK",currency="USD",exchange="ISLAND"):
contract = Contract()
contract.symbol = symbol
contract.secType = sec_type
contract.currency = currency
contract.exchange = exchange
return contract
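# Example contracts (tickers are illustrative):
#   aapl = usTechStk("AAPL")                      # STK / USD / ISLAND
#   msft = usTechStk("MSFT", exchange="SMART")    # route via SMART instead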
def histData(req_num,contract,duration,candle_size, whattoshow):
"""extracts historical data"""
app.reqHistoricalData(reqId=req_num,
contract=contract,
endDateTime='',
durationStr=duration,
barSizeSetting=candle_size,
whatToShow=whattoshow,
useRTH=1,
formatDate=1,
keepUpToDate=0,
                          chartOptions=[])  # EClient function to request historical bar data
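# Example request (reqId, ticker and bar settings are illustrative):
#   histData(1, usTechStk("AAPL"), duration="1 M", candle_size="5 mins", whattoshow="TRADES")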
def websocket_con():
app.run()
event.wait()
if event.is_set():
app.close()
###################storing trade app object in dataframe#######################
def dataDataframe(TradeApp_obj, reqId):
"returns extracted historical data in dataframe format"
df_data = {}
df_data = pd.DataFrame(TradeApp_obj.data[reqId])
df_data.set_index("Date",inplace=True)
return df_data
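# Example: once bars for reqId 1 have been received by the wrapper,
#   df = dataDataframe(app, 1)   # OHLCV frame indexed by the bar date string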
event = threading.Event()
app = TradeApp()
app.connect(host='127.0.0.1', port=7497, clientId=23) #port 4002 for ib gateway paper trading/7497 for TWS paper trading
con_thread = threading.Thread(target=websocket_con)
con_thread.start()
time.sleep(1) # some latency added to ensure that the connection is established
isleep = 20
tickers = {}
xlsx = | pd.ExcelFile("..\\input\\Tickers.xlsx") | pandas.ExcelFile |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
from pandas import Timestamp
def create_dataframe(tuple_data):
"""Create pandas df from tuple data with a header."""
return pd.DataFrame.from_records(tuple_data[1:], columns=tuple_data[0])
### REUSABLE FIXTURES --------------------------------------------------------
@pytest.fixture()
def indices_3years():
"""Three indices over 3 years."""
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0, 100.0, 100.0),
(Timestamp('2012-02-01 00:00:00'), 101.239553643, 96.60525323799999, 97.776838217),
(Timestamp('2012-03-01 00:00:00'), 102.03030533, 101.450821724, 96.59101862),
(Timestamp('2012-04-01 00:00:00'), 104.432402661, 98.000263617, 94.491213369),
(Timestamp('2012-05-01 00:00:00'), 105.122830333, 95.946873831, 93.731891785),
(Timestamp('2012-06-01 00:00:00'), 103.976692567, 97.45914568100001, 90.131064035),
(Timestamp('2012-07-01 00:00:00'), 106.56768678200001, 94.788761174, 94.53487522),
(Timestamp('2012-08-01 00:00:00'), 106.652151036, 98.478217946, 92.56165627700001),
(Timestamp('2012-09-01 00:00:00'), 108.97290730799999, 99.986521241, 89.647230903),
(Timestamp('2012-10-01 00:00:00'), 106.20124385700001, 99.237117891, 92.27819603799999),
(Timestamp('2012-11-01 00:00:00'), 104.11913898700001, 100.993436318, 95.758970985),
(Timestamp('2012-12-01 00:00:00'), 107.76600978, 99.60424011299999, 95.697091336),
(Timestamp('2013-01-01 00:00:00'), 98.74350698299999, 100.357120656, 100.24073830200001),
(Timestamp('2013-02-01 00:00:00'), 100.46305431100001, 99.98213513200001, 99.499007278),
(Timestamp('2013-03-01 00:00:00'), 101.943121499, 102.034291064, 96.043392231),
(Timestamp('2013-04-01 00:00:00'), 99.358987741, 106.513055039, 97.332012817),
(Timestamp('2013-05-01 00:00:00'), 97.128074038, 106.132168479, 96.799806436),
(Timestamp('2013-06-01 00:00:00'), 94.42944162, 106.615734964, 93.72086654600001),
(Timestamp('2013-07-01 00:00:00'), 94.872365481, 103.069773446, 94.490515359),
(Timestamp('2013-08-01 00:00:00'), 98.239415397, 105.458081805, 93.57271149299999),
(Timestamp('2013-09-01 00:00:00'), 100.36774827100001, 106.144579258, 90.314524375),
(Timestamp('2013-10-01 00:00:00'), 100.660205114, 101.844838294, 88.35136848399999),
(Timestamp('2013-11-01 00:00:00'), 101.33948384799999, 100.592230114, 93.02874928899999),
(Timestamp('2013-12-01 00:00:00'), 101.74876982299999, 102.709038791, 93.38277933200001),
(Timestamp('2014-01-01 00:00:00'), 101.73439491, 99.579700011, 104.755837919),
(Timestamp('2014-02-01 00:00:00'), 100.247760523, 100.76732961, 100.197855834),
(Timestamp('2014-03-01 00:00:00'), 102.82080245600001, 99.763171909, 100.252537549),
(Timestamp('2014-04-01 00:00:00'), 104.469889684, 96.207920184, 98.719797067),
(Timestamp('2014-05-01 00:00:00'), 105.268899775, 99.357641836, 99.99786671),
(Timestamp('2014-06-01 00:00:00'), 107.41649204299999, 100.844974811, 96.463821506),
(Timestamp('2014-07-01 00:00:00'), 110.146087435, 102.01075029799999, 94.332755083),
(Timestamp('2014-08-01 00:00:00'), 109.17068484100001, 101.562418115, 91.15410351700001),
(Timestamp('2014-09-01 00:00:00'), 109.872892919, 101.471759564, 90.502291475),
(Timestamp('2014-10-01 00:00:00'), 108.508436998, 98.801947543, 93.97423224399999),
(Timestamp('2014-11-01 00:00:00'), 109.91248118, 97.730489099, 90.50638234200001),
(Timestamp('2014-12-01 00:00:00'), 111.19756703600001, 99.734704555, 90.470418612),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 5.1869643839999995, 2.263444179, 3.145244219),
(Timestamp('2013-01-01 00:00:00'), 6.74500585, 1.8588606330000002, 3.992369584),
(Timestamp('2014-01-01 00:00:00'), 6.23115844, 2.361303832, 3.5764532489999996),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_3years_start_feb(weights_3years):
return weights_3years.shift(1, freq='MS')
@pytest.fixture()
def weight_shares_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 0.489537029, 0.21362007800000002, 0.29684289199999997),
(Timestamp('2013-01-01 00:00:00'), 0.535477885, 0.147572705, 0.31694941),
(Timestamp('2014-01-01 00:00:00'), 0.512055362, 0.1940439, 0.293900738),
],
).set_index(0, drop=True)
@pytest.fixture()
def weights_shares_start_feb(weight_shares_3years):
return weight_shares_3years.shift(1, freq='MS')
@pytest.fixture()
def indices_1year(indices_3years):
return indices_3years.loc['2012', :]
@pytest.fixture()
def weights_1year(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_6months(indices_3years):
return indices_3years.loc['2012-Jan':'2012-Jun', :]
@pytest.fixture()
def weights_6months(weights_3years):
return weights_3years.loc['2012', :]
@pytest.fixture()
def indices_transposed(indices_3years):
return indices_3years.T
@pytest.fixture()
def weights_transposed(weights_3years):
return weights_3years.T
@pytest.fixture()
def indices_missing(indices_3years):
indices_missing = indices_3years.copy()
change_to_nans = [
('2012-06', 2),
('2012-12', 3),
('2013-10', 2),
('2014-07', 1),
]
for sl in change_to_nans:
indices_missing.loc[sl] = np.nan
return indices_missing
@pytest.fixture()
def indices_missing_transposed(indices_missing):
return indices_missing.T
### AGGREGATION FIXTURES -----------------------------------------------------
@pytest.fixture()
def aggregate_outcome_3years():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.47443727),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 102.4399192),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.93374613),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
(Timestamp('2013-12-01 00:00:00'), 99.23888357),
(Timestamp('2014-01-01 00:00:00'), 102.2042938),
(Timestamp('2014-02-01 00:00:00'), 100.3339127),
(Timestamp('2014-03-01 00:00:00'), 101.4726729),
(Timestamp('2014-04-01 00:00:00'), 101.17674840000001),
(Timestamp('2014-05-01 00:00:00'), 102.57269570000001),
(Timestamp('2014-06-01 00:00:00'), 102.9223313),
(Timestamp('2014-07-01 00:00:00'), 103.9199248),
(Timestamp('2014-08-01 00:00:00'), 102.3992605),
(Timestamp('2014-09-01 00:00:00'), 102.54967020000001),
(Timestamp('2014-10-01 00:00:00'), 102.35333840000001),
(Timestamp('2014-11-01 00:00:00'), 101.8451732),
(Timestamp('2014-12-01 00:00:00'), 102.8815443),
],
).set_index(0, drop=True).squeeze()
@pytest.fixture()
def aggregate_outcome_1year(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012']
@pytest.fixture()
def aggregate_outcome_6months(aggregate_outcome_3years):
return aggregate_outcome_3years.loc['2012-Jan':'2012-Jun']
@pytest.fixture()
def aggregate_outcome_missing():
return pd.DataFrame.from_records(
[
(Timestamp('2012-01-01 00:00:00'), 100.0),
(Timestamp('2012-02-01 00:00:00'), 99.22169156),
(Timestamp('2012-03-01 00:00:00'), 100.29190240000001),
(Timestamp('2012-04-01 00:00:00'), 100.10739720000001),
(Timestamp('2012-05-01 00:00:00'), 99.78134264),
(Timestamp('2012-06-01 00:00:00'), 98.75024119),
(Timestamp('2012-07-01 00:00:00'), 100.4796172),
(Timestamp('2012-08-01 00:00:00'), 100.7233716),
(Timestamp('2012-09-01 00:00:00'), 101.31654509999998),
(Timestamp('2012-10-01 00:00:00'), 100.5806089),
(Timestamp('2012-11-01 00:00:00'), 100.9697697),
(Timestamp('2012-12-01 00:00:00'), 105.2864531),
(Timestamp('2013-01-01 00:00:00'), 99.45617890000001),
(Timestamp('2013-02-01 00:00:00'), 100.08652959999999),
(Timestamp('2013-03-01 00:00:00'), 100.0866599),
(Timestamp('2013-04-01 00:00:00'), 99.7722843),
(Timestamp('2013-05-01 00:00:00'), 98.35278839),
(Timestamp('2013-06-01 00:00:00'), 96.00322344),
(Timestamp('2013-07-01 00:00:00'), 95.96105198),
(Timestamp('2013-08-01 00:00:00'), 97.82558448),
(Timestamp('2013-09-01 00:00:00'), 98.03388747),
(Timestamp('2013-10-01 00:00:00'), 96.08353503),
(Timestamp('2013-11-01 00:00:00'), 98.59512718),
( | Timestamp('2013-12-01 00:00:00') | pandas.Timestamp |
import re
import time
import pandas as pd
import pytest
import sys
from unittest import mock
from openbrewerydb.constants import dtypes
from openbrewerydb.core import (_validate_state, _validate_brewery_type, _validate_postal_code,
_format_request_params, _get_data, load, timer)
from .example_data import test_json_data
@pytest.mark.parametrize('state', [
'texas',
'Wisconsin',
])
def test__validate_state(state):
result = _validate_state(state)
assert result is None
def test__validate_state_raises():
with pytest.raises(ValueError) as err:
_validate_state('tejas')
assert 'Invalid state entered' in str(err.value)
@pytest.mark.parametrize('brewery_type', [
'micro',
'regional',
'brewpub',
'large',
'planning',
'bar',
'contract',
'proprietor',
])
def test_format_brewery_type(brewery_type):
result = _validate_brewery_type(brewery_type)
assert result is None
@pytest.mark.parametrize('brewery_type', [
'invalid',
'Micro',
])
def test__validate_brewery_type(brewery_type):
with pytest.raises(ValueError) as err:
_validate_brewery_type(brewery_type=brewery_type)
assert 'Invalid brewery_type entered' in str(err.value)
@pytest.mark.parametrize('postal_code', [
'12345',
'12345-6789',
])
def test__validate_postal_code(postal_code):
result = _validate_postal_code(postal_code)
assert result is None
def test__validate_postal_code_raises():
with pytest.raises(ValueError) as err:
_validate_postal_code('1234')
assert 'Invalid postal_code entered' in str(err.value)
@pytest.mark.parametrize('city, state, brewery_type, postal_code, page', [
('portland', 'Maine', 'micro', '04103-1270', None),
('portland', 'Maine', 'micro', '04103-1270', 7),
(None, 'Maine', 'micro', None, None),
('portland', None, 'micro', None, None),
('portland', 'Maine', None, '04103-1270', None),
(None, None, None, None, None),
])
def test__format_request_params(city, state, brewery_type, postal_code, page):
result = _format_request_params(city=city,
state=state,
brewery_type=brewery_type,
postal_code=postal_code,
page=page)
expected = {'by_state': state,
'by_city': city,
'by_type': brewery_type,
'by_postal': postal_code,
}
if page is not None:
expected['page'] = str(page)
expected['per_page'] = '50'
assert result == expected
@pytest.mark.parametrize('page', [None, 9])
def test__format_request_params_keys(page):
result = _format_request_params(page=page)
expected = {'by_state',
'by_city',
'by_type',
'by_postal'
}
if page is not None:
expected.update(['page', 'per_page'])
assert set(result.keys()) == expected
@pytest.mark.parametrize('return_value, expected', [
(test_json_data, pd.DataFrame(test_json_data).astype(dtypes)),
((), pd.DataFrame()),
])
@mock.patch('openbrewerydb.core._get_request')
def test__get_data(mock_get_request, return_value, expected):
mock_get_request.return_value = mock.Mock()
mock_get_request.return_value.json.return_value = return_value
result = _get_data()
pd.testing.assert_frame_equal(result, expected)
@mock.patch('openbrewerydb.core._get_data')
def test_load_is_concat(mock_get_data):
test_data = [pd.DataFrame([1, 4, 5]),
pd.DataFrame([7, 2]),
pd.DataFrame([4.2, 4]),
pd.DataFrame(),
]
mock_get_data.side_effect = test_data
result = load()
expected = pd.concat(test_data, ignore_index=True)
| pd.testing.assert_frame_equal(result, expected) | pandas.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 17:07:00 2020
@author: hexx
This code do the following:
(1)saves policy, COVID, and Projection data downloaded online to local folder
(2)process and saved data to be usded to project mobility
"""
import pandas as pd
import numpy as np
import os
from sklearn import datasets, linear_model
import matplotlib.pyplot as plt
from myFunctions import def_add_datashift, createFolder
import warnings
warnings.filterwarnings("ignore")
createFolder('./Mobility projection')
scenario_cases = ['lower', 'mean', 'upper'] #'upper', 'lower',
startDate = '2020-02-24'
today_x = pd.to_datetime('today')
today =today_x.strftime("%Y-%m-%d")
# today ='2020-07-08'
PODA_Model = np.load("./PODA_Model_"+today+".npy",allow_pickle='TRUE').item()
YYG_Date = PODA_Model['YYG_File_Date']
moving_avg = PODA_Model['Moving_Average']
#create folder to save YYG Projection
createFolder('./YYG Data/'+YYG_Date)
# createFolder('./COVID/'+today)
df_StateName_Code = PODA_Model['StateName_StateCode']
ML_Data = PODA_Model['ML_Data']
# load Policy Data
df_Policy = pd.read_csv('https://raw.githubusercontent.com/COVID19StatePolicy/SocialDistancing/master/data/USstatesCov19distancingpolicy.csv', encoding= 'unicode_escape')
createFolder('./Policy File')
df_Policy.to_excel('./Policy File/Policy'+today+'.xlsx') # save policy data
# Read Population Data
df_Population = PODA_Model['State Population']
#Read County Area
df_Area = PODA_Model['State Area']
#Employment
df_Employee = PODA_Model['State Employment']
confirmed = ML_Data[ML_Data['State Name']=='Michigan']
confirmed = confirmed[['US Total Confirmed', 'US Daily Confirmed', 'US Daily Death']]
confirmed = confirmed.rename(columns={"US Total Confirmed":"ML US Total Confirmed", "US Daily Confirmed":"ML US Daily Confirmed",
"US Daily Death":"ML US Daily Death"})
infected_to_Confirmed = pd.DataFrame(columns = ['Country Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
infected_to_Confirmed_State = pd.DataFrame(columns = ['State Name', 'scenario', 'shiftDay', 'regr_coef', 'regr_interp'])
for zz, scenario in enumerate(scenario_cases):
'''
Calculate the new infected to confirmed correlation
'''
df_US_Projection = pd.read_csv('https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+YYG_Date+'/US.csv')
df_US_Projection.to_csv('./YYG Data/'+YYG_Date+'/US.csv') # save US Projection data
df_US_Projection['date'] = pd.to_datetime(df_US_Projection['date'])
df_US_Projection.set_index('date', inplace=True)
YYG_Daily_Infected = df_US_Projection[['predicted_new_infected_'+scenario]]
YYG_Daily_Infected = YYG_Daily_Infected[(YYG_Daily_Infected.index < today_x) & (YYG_Daily_Infected.index > pd.to_datetime('2020-05-01'))]
R2_old=0
for j in range(0, 20):
YYG_Data_shifted = YYG_Daily_Infected['predicted_new_infected_'+scenario].shift(j).to_frame()
YYG_Data_shifted['date']=YYG_Data_shifted.index
YYG_Data_shifted=YYG_Data_shifted.set_index('date')
# merged = pd.merge_asof(YYG_Data_shifted, confirmed, left_index=True, right_index=True).dropna()
merged = confirmed.join(YYG_Data_shifted).dropna()
x_conv=merged['predicted_new_infected_'+scenario].to_numpy()
y_conv = merged['ML US Daily Confirmed'].to_numpy()
x_length = len(x_conv)
x_conv = x_conv.reshape(x_length, 1)
y_conv = y_conv.reshape(x_length, 1)
regr = linear_model.LinearRegression(fit_intercept = False)
regr.fit(x_conv, y_conv)
R2_new = regr.score(x_conv, y_conv)
if R2_new > R2_old:
new_row = {'Country Name': 'US', 'scenario': scenario, 'shiftDay': j,
'regr_coef': regr.coef_[0][0], 'regr_interp':regr.intercept_, 'R2': R2_new}
merged_select = merged
regr_selected = regr
R2_old = R2_new
infected_to_Confirmed=infected_to_Confirmed.append(new_row, ignore_index =True)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
# normalized scale
ax.plot(merged_select.index, merged_select['predicted_new_infected_'+scenario]*new_row['regr_coef'] + new_row['regr_interp'], 'o', label='YYG Predicted')
# ax.plot(merged_select.index, merged_select['predicted_total_infected_mean'], 'o', label='YYG Predicted')
ax.plot(merged_select.index, merged_select['ML US Daily Confirmed'], label='confirmed')
ax.set_xlabel('Label')
ax.set_ylabel('Prediction')
ax.set_xlim(pd.to_datetime('2020-05-01'), pd.to_datetime('today'))
fig.autofmt_xdate(rotation=45)
ax.legend()
ax.set_title('US'+scenario)
'''
'''
all_Data=pd.DataFrame()
#YYG State level projection
df_US_Projection['State Name']='US'
df_US_Projection['country_region_code'] = 'US'
df_US_Projection['country_region'] = 'United States'
df_US_Projection['retail_and_recreation'] =1
df_US_Projection['grocery_and_pharmacy'] =1
df_US_Projection['parks'] = 1
df_US_Projection['transit_stations'] = 1
df_US_Projection['workplaces'] = 1
df_US_Projection['residential'] = 1
df_US_Projection['US Daily Confirmed'] = df_US_Projection['predicted_new_infected_'+scenario].shift(new_row['shiftDay'])*new_row['regr_coef'] + new_row['regr_interp']
df_US_Projection['US Daily Confirmed'] = df_US_Projection['US Daily Confirmed'].rolling(window=moving_avg).mean()
# df_US_Projection['US Daily Confirmed'] = df_US_Projection['US Total Confirmed'].diff().rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed.index):
df_US_Projection.loc[da,'US Total Confirmed']= confirmed.loc[da, 'ML US Total Confirmed']
df_US_Projection.loc[da,'US Daily Confirmed']= confirmed.loc[da, 'ML US Daily Confirmed']
# df_US_Projection['US Daily Confirmed'] = (df_US_Projection['predicted_new_infected_'+scenario].shift(shiftDay))/infected_Confirmed_Ratio
df_US_Projection['US Daily Confirmed Dfdt'] = df_US_Projection['US Daily Confirmed'].diff()
# df_US_Projection = def_add_datashift (df_US_Projection, 'US Total Confirmed', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Confirmed', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Confirmed Dfdt', [1, 3, 7, 10])
df_US_Projection['US Total Death'] = df_US_Projection['predicted_total_deaths_'+scenario].fillna(0) + df_US_Projection['total_deaths'].fillna(0)
df_US_Projection['US Daily Death'] = (df_US_Projection['predicted_deaths_'+scenario].fillna(0) + df_US_Projection['actual_deaths'].fillna(0)).rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed.index):
df_US_Projection.loc[da,'US Daily Death']= confirmed.loc[da, 'ML US Daily Death']
df_US_Projection['US Daily Death Dfdt'] = df_US_Projection['US Daily Death'].diff()
# df_US_Projection = def_add_datashift (df_US_Projection, 'US Total Death', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Death', [1, 3, 7, 10])
df_US_Projection = def_add_datashift (df_US_Projection, 'US Daily Death Dfdt', [1, 3, 7, 10])
df_US_Projection = df_US_Projection.iloc[:, 18:100]
df_US_Projection = df_US_Projection[df_US_Projection.index > pd.to_datetime(startDate)]
stateNameList = df_StateName_Code['State Name'].drop_duplicates().dropna().tolist()
ML_Data_StateDailyDeath=pd.DataFrame()
for stateName in stateNameList:
if stateName == 'District of Columbia':
continue
state_Code = df_StateName_Code.loc[df_StateName_Code['State Name'] == stateName, 'State Code'].iloc[0]
print (scenario +': '+ stateName)
YYG_State_Proj_Location ='https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+ YYG_Date +'/US_'+ state_Code+'.csv'
df_State_Projection = pd.read_csv(YYG_State_Proj_Location, header=0)
# save YYG State Projection data
if zz==0:
df_State_Projection.to_csv('./YYG Data/'+YYG_Date+'/US_'+state_Code+'.csv')
df_State_Projection['date'] = pd.to_datetime(df_State_Projection['date'])
df_State_Projection.set_index('date', inplace=True)
ML_Data_State = ML_Data[ML_Data['State Name'] == stateName]
ML_Data_StateDailyDeath = ML_Data_State[['State Daily Death']]
ML_Data_StateDailyDeath.rename(columns={'State Daily Death': 'ML State Daily Death'},inplace=True)
ML_Data_StateDailyDeath = ML_Data_StateDailyDeath[ML_Data_StateDailyDeath.index > df_State_Projection.index[0]]
'''
Calculate the new infected to confirmed correlation
'''
# df_State_Projection = pd.read_csv('https://raw.githubusercontent.com/youyanggu/covid19_projections/master/projections/'+YYG_Date+'/US.csv')
# df_US_Projection.to_csv('./YYG Data/'+YYG_Date+'/US.csv') # save US Projection data
# df_US_Projection['date'] = pd.to_datetime(df_US_Projection['date'])
# df_US_Projection.set_index('date', inplace=True)
YYG_Total_Infected = df_State_Projection[['predicted_total_infected_'+scenario]]
YYG_Total_Infected = YYG_Total_Infected[(YYG_Total_Infected.index < today_x) & (YYG_Total_Infected.index > pd.to_datetime('2020-05-01'))]
confirmed_State = ML_Data_State[['State Total Confirmed', 'State Daily Confirmed']]
confirmed_State = confirmed_State.rename(columns={"State Total Confirmed":"ML State Total Confirmed", "State Daily Confirmed":"ML State Daily Confirmed"})
R2_old=0
for j in range(0, 20):
YYG_Data_shifted = YYG_Total_Infected['predicted_total_infected_'+scenario].shift(j)
YYG_Data_shifted = YYG_Total_Infected['predicted_total_infected_'+scenario].shift(j).to_frame()
YYG_Data_shifted['date']=YYG_Data_shifted.index
YYG_Data_shifted=YYG_Data_shifted.set_index('date')
merged = confirmed_State.join(YYG_Data_shifted).dropna()
# merged = pd.merge_asof(YYG_Data_shifted, confirmed_State, left_index=True, right_index=True).dropna()
x_conv=merged['predicted_total_infected_'+scenario].to_numpy()
y_conv = merged['ML State Total Confirmed'].to_numpy()
x_length = len(x_conv)
x_conv = x_conv.reshape(x_length, 1)
y_conv = y_conv.reshape(x_length, 1)
regr = linear_model.LinearRegression(fit_intercept = True)
regr.fit(x_conv, y_conv)
R2_new = regr.score(x_conv, y_conv)
if R2_new > R2_old:
new_row_State = {'State Name': stateName, 'scenario': scenario, 'shiftDay': j,
'regr_coef': regr.coef_[0][0], 'regr_interp':regr.intercept_, 'R2': R2_new}
merged_select = merged
regr_selected = regr
R2_old = R2_new
infected_to_Confirmed_State=infected_to_Confirmed_State.append(new_row_State, ignore_index =True)
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
# normalized scale
ax.plot(merged_select.index, merged_select['predicted_total_infected_'+scenario]*new_row_State['regr_coef'] + new_row_State['regr_interp'], 'o', label='YYG Predicted')
# ax.plot(merged_select.index, merged_select['predicted_total_infected_mean'], 'o', label='YYG Predicted')
ax.plot(merged_select.index, merged_select['ML State Total Confirmed'], label='confirmed')
ax.set_xlabel('Label')
ax.set_ylabel('Prediction')
ax.set_xlim(pd.to_datetime('2020-05-01'), pd.to_datetime('today'))
fig.autofmt_xdate(rotation=45)
ax.legend()
ax.set_title(stateName+scenario)
'''
'''
df_State_Projection['State Total Confirmed'] = df_State_Projection['predicted_total_infected_'+scenario].shift(new_row_State['shiftDay'])*new_row_State['regr_coef'] + new_row_State['regr_interp']
df_State_Projection['State Daily Confirmed'] = df_State_Projection['State Total Confirmed'].diff().rolling(window=moving_avg).mean()
for i, da in enumerate(confirmed_State.index):
df_State_Projection.loc[da,'State Total Confirmed']= confirmed_State.loc[da, 'ML State Total Confirmed']
df_State_Projection.loc[da,'State Daily Confirmed']= confirmed_State.loc[da, 'ML State Daily Confirmed']
df_State_Projection=df_State_Projection[df_State_Projection.index >= | pd.to_datetime('2020-03-01') | pandas.to_datetime |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
        # Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
            # Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused but there so there won't be confusing list comprehension
# stuff in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
        pandas_df = pandas.DataFrame(data)
from pathlib import Path
from scipy.stats import hmean
import pandas as pd
from tabulate import tabulate
# specify the root of a directory tree containing pickled coreference resolution scores here -> this script generates LaTeX
# tables for the main paper (and appendix)
scores_root = Path("...")
output_path = scores_root / "meta-aggregated-results"
output_path.mkdir(exist_ok=True, parents=True)
def walk(root: Path):
for p in root.iterdir():
yield p
if p.is_dir():
yield from walk(p)
PATH = "path"
SEED = "seed"
TOPICS = "topics"
TEST_CORPUS = "test-corpus"
DEV_CORPUS = "dev-corpus"
EXPERIMENT = "experiment"
SCENARIO = "scenario"
MEASURE = "measure"
METRIC = "metric"
all_scores_list = []
for p in walk(scores_root):
if p.suffix == ".pkl":
        scores = pd.read_pickle(p)
import pytest
import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker
T_START = pd.to_datetime('2019/07/10-09:30:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
T_END = pd.to_datetime('2019/07/10-10:00:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
TIMEINDEX = pd.date_range(start=T_START,end=T_END,freq='1min')
DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX)) ,
'tick1':np.arange(len(TIMEINDEX)-1,-1,-1)},
index=TIMEINDEX)
# DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX))},
# index=TIMEINDEX)
class TestPaperBroker:
def test_init(self):
testseries = pd.Series(np.arange(10))
with pytest.raises(TypeError):
broker = PaperBroker(testseries)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,slippage_time=1.0)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,transaction_cost=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,transaction_cost=-0.5)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,spread_pct=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=-0.5)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=200)
def test_next_extant_time(self):
broker = PaperBroker(DATA_DF)
t_get = pd.to_datetime('2019/07/10-09:35:05:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t_out = broker.next_extant_time(t_get)
t_expect = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
assert t_out==t_expect
t_get = pd.to_datetime('2019/07/10-11:35:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
t_out = broker.next_extant_time(t_get)
def test_get_timeindex_subset(self):
broker = PaperBroker(DATA_DF)
t0 = pd.to_datetime('2019/07/10-09:29:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
t0 = pd.to_datetime('2019/07/10-09:34:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-11:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(t0,1)
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
tind = broker.get_timeindex_subset(t0,t1)
print(tind)
print(pd.date_range(t0,t1,freq='1min'))
assert np.array_equal(tind.values,pd.date_range(t0,t1,freq='1min').values)
def test_get_firstlast_times(self):
broker = PaperBroker(DATA_DF)
t0,t1 = broker.get_firstlast_times()
assert t0==T_START
assert t1==T_END
def test_get_tick_list(self):
broker = PaperBroker(DATA_DF)
ticks = broker.get_tick_list()
assert ticks == ['tick0','tick1']
def test_get_price_list(self):
broker = PaperBroker(DATA_DF)
t0 = T_START
t1 = T_START + pd.DateOffset(minutes=5)
with pytest.raises(ValueError):
prices = broker.get_price_list('badtick',t0,t1)
with pytest.raises(ValueError):
prices = broker.get_price_list(['badtick'],t0,t1)
prices = broker.get_price_list('tick0',t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
prices = broker.get_price_list(['tick0','tick1'],t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
assert np.array_equal(prices['tick1'].values ,
np.arange(len(TIMEINDEX)-1,len(TIMEINDEX)-7,-1) )
def test_get_unslipped_price(self):
broker = PaperBroker(DATA_DF)
        t_get = T_START + pd.DateOffset(minutes=5)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
#Data Discovery
# Libraries for handling numeric computation and dataframes
import pandas as pd
import numpy as np
# Libraries for statistical plotting
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().run_line_magic('matplotlib', 'inline')
# My personal data storaged in my Github repository
rides = pd.read_csv('https://raw.githubusercontent.com/sameerakhtari/Exploratory-Data-Analysis-on-Uber-Rides-Dataset/main/raw-data/My%20Uber%20Drives%20-%202016.csv')
# encoding=utf-8
from nltk.corpus import stopwords
from sklearn.preprocessing import LabelEncoder
from sklearn.pipeline import FeatureUnion
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.cross_validation import KFold
from sklearn.linear_model import Ridge
from scipy.sparse import hstack, csr_matrix
import pandas as pd
import numpy as np
import lightgbm as lgb
import matplotlib.pyplot as plt
import gc, re
from sklearn.utils import shuffle
from contextlib import contextmanager
from sklearn.externals import joblib
import time
start_time=time.time()
print("Starting job at time:", time.time())
debug = True
print("loading data ...")
used_cols = ["item_id", "user_id"]
if debug == False:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", parse_dates=["activation_date"])
# suppl
train_active = pd.read_csv("../input/train_active.csv", usecols=used_cols)
test_active = pd.read_csv("../input/test_active.csv", usecols=used_cols)
train_periods = pd.read_csv("../input/periods_train.csv", parse_dates=["date_from", "date_to"])
test_periods = pd.read_csv("../input/periods_test.csv", parse_dates=["date_from", "date_to"])
else:
train_df = pd.read_csv("../input/train.csv", parse_dates=["activation_date"])
train_df = shuffle(train_df, random_state=1234);
train_df = train_df.iloc[:100000]
y = train_df["deal_probability"]
test_df = pd.read_csv("../input/test.csv", nrows=1000, parse_dates=["activation_date"])
# suppl
    train_active = pd.read_csv("../input/train_active.csv", nrows=1000, usecols=used_cols)
from pandas.testing import assert_frame_equal
import pandas as pd
import pytest
from speed_daemon import data
@pytest.fixture
def default_input():
return {
"download": 1000000,
"ping": 1000000,
"timestamp": "2020-10-12T03:09:18.231187Z",
"upload": 1000000,
}
@pytest.fixture
def default_expected_response():
return {
"_timestamp_string": "2020-10-12T03:09:18.231187Z",
"date": pd.to_datetime("2020-10-12").date(),
"day_of_week": "Monday",
"download": 1000000,
"download_mbps": 1.0,
"hour_of_day": 3,
"ping": 1000000,
"upload": 1000000,
"upload_mbps": 1.0,
}
def test_parse_data_with_null_values(default_input, default_expected_response):
"""Zero value for null data (no connection)"""
default_input["download"] = None
default_input["upload"] = None
default_input["ping"] = None
test_result = data.parse_data(pd.DataFrame([default_input]))
default_expected_response["download"] = 0.0
default_expected_response["download_mbps"] = 0.0
default_expected_response["upload"] = 0.0
default_expected_response["upload_mbps"] = 0.0
default_expected_response["ping"] = 0.0
expected_result = pd.DataFrame([default_expected_response])
expected_result = expected_result.set_index(
pd.DatetimeIndex([pd.to_datetime("2020-10-12T03:09:18.231187Z")])
)
assert_frame_equal(test_result, expected_result, check_like=True)
def test_parse_data_localization_off(default_input, default_expected_response):
"""Test with no localization"""
test_data = pd.DataFrame([default_input])
test_result = data.parse_data(test_data)
expected_result = pd.DataFrame([default_expected_response])
expected_result = expected_result.set_index(
pd.DatetimeIndex([pd.to_datetime("2020-10-12T03:09:18.231187Z")])
)
assert_frame_equal(expected_result, test_result, check_like=True)
def test_parse_data_localization_on(default_input, default_expected_response):
"""Test localization with CDT (UTC-0500)"""
test_data = pd.DataFrame([default_input])
test_result = data.parse_data(test_data, localization="US/Central")
default_expected_response["date"] = pd.to_datetime("2020-10-11").date()
default_expected_response["day_of_week"] = "Sunday"
default_expected_response["hour_of_day"] = 22
expected_result = pd.DataFrame([default_expected_response])
expected_result = expected_result.set_index(
pd.DatetimeIndex([pd.to_datetime("2020-10-12T03:09:18.231187Z")])
)
expected_result = expected_result.set_index(
expected_result.index.tz_convert("US/Central")
)
assert_frame_equal(expected_result, test_result, check_like=True)
def test_parse_data_localization_on_cst(default_input, default_expected_response):
"""Test localization with CST (UTC-0600)"""
default_input["timestamp"] = "2020-11-12T03:09:18.231187Z"
test_data = pd.DataFrame([default_input])
test_result = data.parse_data(test_data, localization="US/Central")
default_expected_response["_timestamp_string"] = "2020-11-12T03:09:18.231187Z"
default_expected_response["date"] = pd.to_datetime("2020-11-11").date()
default_expected_response["day_of_week"] = "Wednesday"
default_expected_response["hour_of_day"] = 21
    expected_result = pd.DataFrame([default_expected_response])
import os
import pickle
import sys
from pathlib import Path
from typing import Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from thoipapy.utils import convert_truelike_to_bool, convert_falselike_to_bool
import thoipapy
def fig_plot_BOcurve_mult_train_datasets(s):
"""Plot the BO-curve for multiple training datasets.
Takes the datasets listed in settings under "train_datasets" and "test_datasets"
and plots the BO-curve of each combination in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
NEW METHOD
----------
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
OLD METHOD
----------
Performance = overlap between experiment and predicted DIVIDED BY the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# plt.rcParams.update({'font.size': 7})
test_set_list, train_set_list = thoipapy.utils.get_test_and_train_set_lists(s)
test_dataset_str = "-".join([str(n) for n in test_set_list])
train_dataset_str = "-".join([str(n) for n in train_set_list])
mult_testname = "testsets({})_trainsets({})".format(test_dataset_str, train_dataset_str)
sys.stdout.write(mult_testname)
mult_THOIPA_dir = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "summaries", mult_testname)
thoipapy.utils.make_sure_path_exists(mult_THOIPA_dir)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname)
plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_over_r", suffix="_BO_curve_old_method")
def plot_BOcurve(s, train_set_list, test_set_list, mult_THOIPA_dir, mult_testname, sheet_name="df_o_minus_r", suffix="_BO_curve"):
""" Separate function allowing a toggle of the OLD or NEW performance methods
Parameters
----------
s : dict
Settings dictionary for figures.
train_set_list : list
List of training datasets in selection
E.g. ["set02", "set04"]
test_set_list : list
List of test datasets in selection
E.g. ["set03", "set31"]
mult_THOIPA_dir : str
Path to folder containing results for multiple THOIPA comparisons.
mult_testname : str
String denoting this combination of test and training datasets
E.g. testsets(2)_trainsets(2)
sheet_name : str
Excel sheet_name
This is the toggle deciding whether the OLD or NEW performance measure is used
Default = new method ("df_o_minus_r"), where the overlap MINUS random_overlap is used
suffix : str
Suffix for figure
E.g. "" or "_old_method_o_over_r"
"""
BO_curve_png = os.path.join(mult_THOIPA_dir, "{}{}.png".format(mult_testname, suffix))
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
for train_set in train_set_list:
trainsetname = "set{:02d}".format(int(train_set))
for test_set in test_set_list:
testsetname = "set{:02d}".format(int(test_set))
# /media/mark/sindy/m_data/THOIPA_data/results/Bo_Curve/Testset03_Trainset01.THOIPA.validation/bocurve_data.xlsx
bocurve_data_xlsx = os.path.join(s["data_dir"], "results", "compare_testset_trainset", "data", "Test{}_Train{}.THOIPA".format(testsetname, trainsetname), "data", "bocurve_data.xlsx")
df = pd.read_excel(bocurve_data_xlsx, sheet_name=sheet_name, index_col=0)
df["mean_"] = df.mean(axis=1)
# apply cutoff (e.g. 5 residues for AUBOC5)
auboc_ser = df["mean_"].iloc[:s["n_residues_AUBOC_validation"]]
# use the composite trapezoidal rule to get the area under the curve
# https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.trapz.html
AUBOC = np.trapz(y=auboc_ser, x=auboc_ser.index)
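            # Illustration with made-up numbers: if n_residues_AUBOC_validation is 5 and the mean
            # overlaps at sample sizes 1..5 are [0.2, 0.35, 0.5, 0.55, 0.6], np.trapz yields
            # AUBOC = 0.275 + 0.425 + 0.525 + 0.575 = 1.8 (sum of the four trapezoid areas).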
df["mean_"].plot(ax=ax, label="Test{}_Train{}(AUBOC={:0.1f})".format(testsetname, trainsetname, AUBOC))
ax.set_xlabel("sample size")
ax.set_ylabel("performance\n(observed overlap - random overlap)")
ax.set_xticks(range(1, df.shape[0] + 1))
ax.set_xticklabels(df.index)
ax.legend()
fig.tight_layout()
fig.savefig(BO_curve_png, dpi=240)
# fig.savefig(thoipapy.utils.pdf_subpath(BO_curve_png))
sys.stdout.write("\nfig_plot_BO_curve_mult_train_datasets finished ({})".format(BO_curve_png))
def compare_selected_predictors(s, logging):
"""Plot the BO-curve for multiple prediction methods
Takes the datasets listed in settings under the "selected_predictors" tab
(e.g. ["Testset03_Trainset04.THOIPA","Testset03.LIPS"])
and plots the BO-curves in a single figure.
The Area Under the BO Curve for a sample size of 0 to 10 (AUBOC) is shown in the legend.
Currently plots both the new and old performance method.
Performance is measured with the NEW METHOD:
Performance = overlap between experiment and predicted MINUS the overlap expected in random selections
Parameters
----------
s : dict
Settings dictionary for figures.
"""
# if s["set_number"] != s["test_datasets"]:
# raise Exception("set_number and test_datasets are not identical in settings file. This is recommended for test/train validation.")
# plt.rcParams.update({'font.size': 7})
logging.info("\n--------------- starting compare_selected_predictors ---------------\n")
BO_curve_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_BO_curve.png"
AUBOC_bar_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_AUBOC_barchart.png"
ROC_png: Union[Path, str] = Path(s["data_dir"]) / f"results/{s['setname']}/blindvalidation/compare_selected_predictors_ROC.png"
thoipapy.utils.make_sure_path_exists(BO_curve_png, isfile=True)
figsize = np.array([3.42, 3.42]) * 2 # DOUBLE the real size, due to problems on Bo computer with fontsizes
fig, ax = plt.subplots(figsize=figsize)
predictors_df = pd.read_excel(s["settings_path"], sheet_name="selected_predictors")
predictors_df["include"] = predictors_df["include"].apply(convert_truelike_to_bool, convert_nontrue=False)
predictors_df["include"] = predictors_df["include"].apply(convert_falselike_to_bool)
predictors_df = predictors_df.loc[predictors_df.include == True]
predictor_list = predictors_df.predictor.tolist()
area_under_curve_dict = {}
# create an empty dataframe to keep the pycharm IDE happy
    df = pd.DataFrame()
import os, glob, sys
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import string
import re
def load_data(path):
"""Load training and testing datasets based on their path
Parameters
----------
path : relative path to location of data, should be always the same (string)
Returns
-------
Training and testing Dataframes
"""
train = pd.read_csv(os.path.join(path,'train.csv'))
test = pd.read_csv(os.path.join(path,'test.csv'))
return train, test
def modify_fare(df, n: int = 4):
"""Introduce n new intervals (based on quantiles) for the feature fare, such that it is modified from
being continuous to being discrete
Parameters
----------
df : panda dataframe
n: number of new intervals (int)
Returns
-------
Original dataframe with discretized version of the feature 'Fare', categories
"""
df['Fare'] = df['Fare'].fillna(df['Fare'].median())
df['Fare'] = pd.qcut(df['Fare'], n, labels = list(string.ascii_uppercase)[:n])
return df
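# Rough usage sketch (toy fares, not from the dataset): with n=4 the call
# pd.qcut(pd.Series([5, 10, 20, 40, 80, 160, 320, 640]), 4, labels=list("ABCD"))
# yields the quartile labels [A, A, B, B, C, C, D, D], which is what modify_fare stores in 'Fare'.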
def get_size_family(df, mod: bool = False):
"""Defines family relations based on the features 'SibSp' (the # of siblings / spouses aboard the Titanic)
and 'Parch' (the # of parents / children aboard the Titanic)
Parameters
----------
df : panda dataframe
Returns
-------
Original dataframe with a new feature called 'FamilySize'
"""
df['FamilySize'] = df['SibSp'] + df['Parch'] + 1
if mod:
bins_ = [0,1,2,12]
df['FamilySize'] = pd.cut(df["FamilySize"], bins = bins_, labels = list(string.ascii_uppercase)[:len(bins_)-1])
return df
def get_title(name):
    '''Search for individual title in a string by considering it to have an ASCII format from A-Z
Parameters
----------
name : The name from which a title wants to be extracted (string)
Returns
-------
String associated to a found title
"""
    title_search = re.search(r' ([A-Za-z]+)\.', name)
# If the title exists, extract and return it.
if title_search:
return title_search.group(1)
return ""
def get_titles(df, mod: bool = True):
"""Search for all titles inside a dataframe, given the feature 'Name'
Parameters
----------
df : panda dataframe
mod : simplify the extend of titles available (boolean)
Returns
-------
Original dataframe with a new feature called 'Title'
"""
df['Title'] = df['Name'].apply(get_title)
if mod:
# perform modifications
df['Title'] = df['Title'].replace('Mlle', 'Miss')
df['Title'] = df['Title'].replace('Ms', 'Miss')
df['Title'] = df['Title'].replace('Mme', 'Mrs')
return df
def get_all_ages(df, n: int = 5):
"""Fills in empty Ages based on the Title of a person, and then introduces n intervals for the feature 'Ages',
such that it is modified from being continuous to be discrete
Parameters
----------
df : panda dataframe
n: number of new intervals (int)
Returns
-------
Discretized version of the feature 'Age', categories
"""
emb = []
for i, row in df.iterrows():
if pd.isnull(row['Age']):
title = row['Title']
age_avg = df['Age'][df['Title'] == title].mean()
age_std = df['Age'][df['Title'] == title].std()
emb.append(np.random.randint(age_avg - age_std, age_avg + age_std, size=1)[0])
else:
emb.append(row['Age'])
# Update column
df['Age'] = emb
# Create new column
df["Age"] = pd.cut(df["Age"], n, labels = list(string.ascii_uppercase)[:n])
return df
def get_age2(df):
"""Fills in empty Ages based on the Title of a person. DR
Parameters
----------
df : panda dataframe
Returns
-------
Dataframe with missing values for age filled.
"""
ages_mean = df[['Title', 'Age']].groupby(['Title'],
as_index=False).mean().set_index('Title').rename(columns={'Age': 'mean'})
ages_std = df[['Title', 'Age']].groupby(['Title'], as_index=False).std().set_index('Title').rename(columns={'Age': 'std'})
    ages_title = pd.merge(ages_mean, ages_std, how='inner', left_index=True, right_index=True)
import logging
import sys
import os
import json
import numpy as np
import pandas as pd
import asyncio
from aiohttp import web
from threading import Thread
from timeflux.helpers import clock
from timeflux.core.node import Node
from timeflux.core.exceptions import WorkerLoadError
class UI(Node):
"""Interact with Timeflux from the browser.
This node provides a web interface, available at ``http://localhost:8000`` by
default. Bi-directional communication is available through the WebSocket protocol.
A real-time data stream visualization application is provided at
``http://localhost:8000/monitor/``. Other example applications (such as P300 and
EEG signal quality) are provided in the ``apps`` directory of this package.
This node accepts any number of named input ports. Streams received from the browser
are forwarded to output ports.
Attributes:
i_* (Port): Dynamic inputs, expect DataFrame.
o_* (Port): Dynamic outputs, provide DataFrame.
Example:
.. literalinclude:: /../examples/ui.yaml
:language: yaml
"""
def __init__(
self, host="localhost", port=8000, routes={}, settings={}, debug=False
):
"""
Args:
host (string): The host to bind to.
port (int): The port to listen to.
routes (dict): A dictionary of custom web apps. Key is the name, value is the path.
settings (dict): An arbitrary configuration file that will be exposed to web apps.
debug (bool): Show dependencies debug information.
"""
self._root = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "www")
)
self._clients = {}
self._subscriptions = {}
self._streams = {}
self._buffer = {}
# Debug
if not debug:
logging.getLogger("asyncio").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
# HTTP
app = web.Application()
app.router.add_static("/common/assets/", self._root + "/common/assets")
app.router.add_static("/monitor/assets/", self._root + "/monitor/assets")
app.add_routes(
[
web.get("/", self._route_index),
web.get("/ws", self._route_ws),
web.get("/settings.json", self._route_settings),
web.get("/{default}", self._route_default),
]
)
# Apps
self._routes = {"monitor": self._root + "/monitor"}
for name, path in routes.items():
self._routes[name] = self._find_path(path)
for name, path in self._routes.items():
try:
app.router.add_static(f"/{name}/assets/", f"{path}/assets")
except ValueError:
pass
app.add_routes([web.get(f"/{name}/", self._route_app)])
# Settings
self._settings = json.dumps(settings)
# Do not block
# https://stackoverflow.com/questions/51610074/how-to-run-an-aiohttp-server-in-a-thread
handler = app.make_handler()
self._loop = asyncio.get_event_loop()
server = self._loop.create_server(handler, host=host, port=port)
Thread(target=self._run, args=(server,)).start()
self.logger.info("UI available at http://%s:%d" % (host, port))
def _find_path(self, path):
path = os.path.normpath(path)
if os.path.isabs(path):
if os.path.isdir(path):
return path
else:
for base in sys.path:
full_path = os.path.join(base, path)
if os.path.isdir(full_path):
return full_path
raise WorkerLoadError(
f"Directory `{path}` could not be found in the search path."
)
def _run(self, server):
self._loop.run_until_complete(server)
self._loop.run_forever()
async def _route_index(self, request):
raise web.HTTPFound("/monitor/")
async def _route_ws(self, request):
ws = web.WebSocketResponse()
if "uuid" not in request.rel_url.query:
return ws
uuid = request.rel_url.query["uuid"]
self._clients[uuid] = {"socket": ws, "subscriptions": set()}
await ws.prepare(request)
await self._on_connect(uuid)
async for msg in ws:
if msg.type == web.WSMsgType.TEXT:
await self._on_message(uuid, msg)
self._on_disconnect(uuid)
return ws
async def _route_settings(self, request):
return web.Response(text=self._settings)
async def _route_default(self, request):
raise web.HTTPFound(request.path + "/")
async def _route_app(self, request):
name = request.path.strip("/")
try:
with open(self._routes[name] + "/index.html") as f:
return web.Response(text=f.read(), content_type="text/html")
except:
raise web.HTTPNotFound()
async def _on_connect(self, uuid):
self.logger.debug("Connect: %s", uuid)
await self._send("streams", self._streams, uuid=uuid)
def _on_disconnect(self, uuid):
for subscription in self._clients[uuid]["subscriptions"].copy():
self._on_unsubscribe(uuid, subscription)
del self._clients[uuid]
self.logger.debug("Disconnect: %s", uuid)
async def _on_message(self, uuid, message):
try:
message = message.json()
if "command" not in message or "payload" not in message:
message = False
except json.decoder.JSONDecodeError:
message = False
if not message:
self.logger.warn("Received an invalid JSON message from %s", uuid)
return
if "ack" in message and message["ack"]:
await self._send("ack", message["ack"], uuid=uuid)
if message["command"] == "subscribe":
self._on_subscribe(uuid, message["payload"])
elif message["command"] == "unsubscribe":
self._on_unsubscribe(uuid, message["payload"])
elif message["command"] == "publish":
await self._on_publish(
message["payload"]["name"],
message["payload"]["data"],
message["payload"]["meta"],
)
await self._send(
"stream", message["payload"], topic=message["payload"]["name"]
)
elif message["command"] == "sync":
# TODO
pass
def _on_subscribe(self, uuid, topic):
self.logger.debug("Subscribe: %s to %s", uuid, topic)
self._clients[uuid]["subscriptions"].add(topic)
if topic not in self._subscriptions:
self._subscriptions[topic] = {uuid}
else:
self._subscriptions[topic].add(uuid)
def _on_unsubscribe(self, uuid, topic):
self.logger.debug("Unsubscribe: %s from %s", uuid, topic)
self._clients[uuid]["subscriptions"].discard(topic)
if topic not in self._subscriptions:
return
if uuid not in self._subscriptions[topic]:
return
self._subscriptions[topic].discard(uuid)
if len(self._subscriptions[topic]) == 0:
del self._subscriptions[topic]
async def _on_publish(self, name, data, meta):
if name not in self._streams:
channels = list(list(data.values())[0].keys()) if data else []
await self._add_stream(name, channels)
self._buffer[name] = {"data": {}, "meta": None}
if data:
self._buffer[name]["data"].update(data)
if meta:
self._buffer[name]["meta"] = meta
async def _send(self, command, payload, uuid=None, topic=None):
message = json.dumps({"command": command, "payload": payload})
if not uuid and not topic:
# Broadcast to all clients
for uuid in self._clients:
if self._is_alive(uuid):
await self._clients[uuid]["socket"].send_str(message)
if uuid:
# Send to one client
if self._is_alive(uuid):
await self._clients[uuid]["socket"].send_str(message)
if topic:
# Send to all this topic's subscribers
if topic in self._subscriptions:
for uuid in self._subscriptions[topic]:
if self._is_alive(uuid):
await self._clients[uuid]["socket"].send_str(message)
self._dispose()
def _dispose(self):
"""Get rid of dead connections"""
for uuid in self._clients.copy():
if not self._is_alive(uuid):
self._on_disconnect(uuid)
def _is_alive(self, uuid):
"""Check if a socket is alive"""
# On Linux, client disconnections are not properly detected.
# This method should be used before each send_* to avoid (uncatchable) exceptions,
# memory leaks, and catastrophic failure.
# This is a hotfix while waiting for the issue to be resolved upstream.
return False if self._clients[uuid]["socket"]._req.transport is None else True
def _to_dict(self, data):
# Some users report a warning ("A value is trying to be set on a copy of a slice
# from a DataFrame"). I was not able to reproduce the behavior, but the
# data.copy() instruction seems to fix the issue, although it is somewhat
# probably impacting memory. This should be investigated further.
# See: https://stackoverflow.com/questions/20625582/how-to-deal-with-settingwithcopywarning-in-pandas
data = data.copy() # Remove?
data["index"] = (data.index.values.astype(np.float64) / 1e6).astype(
np.int64
) # from ns to ms
data.drop_duplicates(
subset="index", keep="last", inplace=True
) # remove duplicate indices
data.set_index("index", inplace=True) # replace index
data = data.to_dict(orient="index") # export to dict
return data
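        # Illustrative output (hypothetical values): a one-row frame indexed at
        # 2020-10-12T03:09:18.231Z with a column "A" becomes {1602472158231: {"A": ...}},
        # i.e. millisecond-epoch keys mapping to per-column dicts.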
def _from_dict(self, data):
try:
data = pd.DataFrame.from_dict(data, orient="index")
            data.index = pd.to_datetime(data.index, unit="ms")
import collections
import json
import os
from datetime import time
import random
from tqdm import tqdm
from main import cvtCsvDataframe
import pickle
import numpy as np
import pandas as pd
import networkx as nx
import time
from main import FPGrowth
from shopping import Shopping, Cell
import main
# QoL for display
pd.set_option('display.max_columns', 30)
def encodeData():
df = pd.read_csv('products.txt', delimiter="\t")
dataHere = df['Nome'].str.strip()
indexes = [x for x in range(0,len(dataHere))]
df['ID'] = indexes
#print(data.to_numpy())
return df
products = encodeData()
'''
It simulates N shopping trips given test wishlists and staminas.
1 - Create a shopping layout with the given configuration
2 - Generate N random wishlists and their stamina
3 - Simulate each one and save the results
4 - Analyse the supermarket profit
'''
class SoS:
def __init__(self, configuration, staminaDistr,explanations):
self.shoppingClass = Shopping([23,21],configuration)
#self.shoppingClass.changeShoppingConfig(configuration)
self.shopping = self.shoppingClass.shopping
self.staminaDistr = staminaDistr
self.explanations = explanations
self.auxNeighbors = self.getAuxNeighbors()
self.auxNeighborsPrimary = self.getAuxNeighborsPrimary()
data, explanations = cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
mergedReceiptExplanations = pd.merge(data, explanations, on='receiptID', how='outer')
self.boughtAndWishlist = mergedReceiptExplanations[['PRODUCTS', 'WISHLIST']].to_numpy()
def generateCustomers(self, samples):
'''
:return: Returns a sample of random customers with stamina and wishlist
'''
customers = []
wishlists = list(self.explanations['WISHLIST'].to_numpy())
randomWishlists = random.sample(wishlists,samples)
staminas = self.staminaDistr.sample(samples)
for i, j in zip(randomWishlists,staminas):
customers.append((i,int(j)))
return customers
def findNeighbors(self, currentCell, typeSearch):
'''
:param currentCell: Current cell to search
:param typeSearch: Type of search 1 - Halls 2- Shelves
:return: Return the neighbors
'''
neighbors = []
try:
#If there are neighbors in the top
if currentCell[0] > 0:
#Get the top neighbor
neighbors.append(self.shopping[currentCell[0] - 1][currentCell[1]].id)
#If there are neighbors on the left
if currentCell[1] > 0:
neighbors.append(self.shopping[currentCell[0]][currentCell[1] - 1].id)
            #If there are neighbors on the right
            if currentCell[1] < self.shopping.shape[1] - 1:
                neighbors.append(self.shopping[currentCell[0]][currentCell[1] + 1].id)
            #If there are neighbors on the bottom
            if currentCell[0] < self.shopping.shape[0] - 1:
                neighbors.append(self.shopping[currentCell[0] + 1][currentCell[1]].id)
except:
pass
aux = []
if typeSearch == 1:
notToAdd = [1,461,483,23]
for i in neighbors:
if i not in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
else:
notToAdd = [1, 461, 483, 23]
for i in neighbors:
if i in self.shoppingClass.config and i not in notToAdd:
aux.append(i)
return aux
def findClosestProduct(self, item):
'''
:param item: Receives an item to search for
:return: Returns the closest product path there is
'''
size = self.shopping.shape
allPathsToItem = []
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].product == item:
pathsToThisCell = self.auxNeighborsPrimary[f"[{i},{j}]"]
for s in pathsToThisCell: allPathsToItem.append(s)
pathsLenght = []
paths = []
for possiblePath in allPathsToItem:
paths.append(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath))
pathsLenght.append(len(nx.dijkstra_path(self.shoppingClass.graphShopping, self.shoppingClass.entrance, possiblePath)))
#Return the minimium path
return paths[np.argmin(pathsLenght)]
def getAuxNeighborsPrimary(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 1)
return aux
def getAuxNeighbors(self):
aux = {}
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
aux[f"[{i},{j}]"] = self.findNeighbors([i, j], 2)
return aux
def getCellProducts(self, cell):
size = self.shopping.shape
for j in range(size[1]):
for i in range(size[0]):
if self.shopping[i][j].id == cell:
cells = self.auxNeighbors[f"[{i},{j}]"]
products = []
for c in cells:
products.append(self.shoppingClass.productsAux[c])
return products
def getProbabilityOfPicking(self, product):
#Check if the file already exists
if os.path.exists("probabilityBuy.p"): probToBuy = pickle.load(open("probabilityBuy.p","rb"))
#Otherwise write it
else:
# organize_data()
# Read the csv file and convert it to a well formatted dataframe
aux = {}
#For each receipt
for p in tqdm(self.boughtAndWishlist):
#go through the products bought
for i in p[0]:
if i not in list(aux.keys()):
aux[i] = {'NotIn': 0, 'Counter':0}
#Increase counter
aux[i]['Counter'] += 1
#If the product bought is not in the wishlist
if i not in p[1]:
#Increase counter of times that the product was bought and was not in the wishlist
aux[i]['NotIn'] += 1
probToBuy = {}
for k in aux:
probToBuy[k] = aux[k]['NotIn'] / aux[k]['Counter']
pickle.dump(probToBuy,open("probabilityBuy.p","wb"))
        #Return the respective probability
return probToBuy[product]
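        # Worked example (hypothetical counts): a product seen in 100 receipts, 30 of which did not
        # list it in the wishlist, gets an impulse-buy probability of 30 / 100 = 0.3.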
def simulateCustomers(self,customers):
'''
:param customers: Receives a list of customers
:return: Returns the simulation results
'''
sales = []
#For each customer
for customer in tqdm(customers):
currentWishlist = customer[0]
currentWishlist.reverse()
currentStamina = customer[1]
productsBought = []
#print(f"Customer wishlist: {currentWishlist}")
            #While the customer still has products they want and still has stamina, keep simulating
while len(currentWishlist) > 0 and currentStamina > 0:
item = currentWishlist[0]
#print(f"Looking for {products.loc[products['ID'] == item, 'Nome'].iloc[0]}")
closest = self.findClosestProduct(item)
#print(f"Found {products.loc[products['ID'] == item, 'Nome'].iloc[0]} on cell {closest[-1]}")
for cell in range(len(closest)):
#print(f"I am on cell {closest[cell]}")
prodcutsCloseToCell = self.getCellProducts(closest[cell])
for prod in prodcutsCloseToCell:
#If the product is in the wishlist then buy it
if prod in currentWishlist:
#print(f"Found {products.loc[products['ID'] == prod, 'Nome'].iloc[0]} which was in my wishlist, so I bought it.")
#Remove it from the wishlist
currentWishlist.remove(prod)
productsBought.append(prod)
#Otherwise calculate the probability of buying it
else:
#Probability of this product being picked without being in the wishlist
prob = self.getProbabilityOfPicking(prod)
#Random probability
randomProb = random.uniform(0,1)
#If it is bought
if randomProb <= prob:
productsBought.append(prod)
#print(f"Felt like buying {products.loc[products['ID'] == prod, 'Nome'].iloc[0]}, so I bought it.")
currentStamina -= 1
#print(f"Current stamina : {currentStamina}")
#Scenarios that the person leaves the shopping
if currentStamina <= 0:
#print("I got tired!")
break
elif len(currentWishlist) <= 0:
#print("Bought everything!")
break
sales.append(productsBought)
return sales
def evaluateShoppingCost(self, sales):
'''
:param sales: Receives a list of sales from customers
        :return: Returns the calculated profit for those sales
'''
totalProfit = 0
for sale in tqdm(sales):
for product in sale:
totalProfit += (products.loc[products['ID'] == product, 'Preço'].iloc[0] / products.loc[products['ID'] == product, '<NAME>'].iloc[0])
return totalProfit
def generateSimulator(config):
#QoL for display
pd.set_option('display.max_columns', 30)
data, explanations = main.cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
mergedReceiptExplanations = pd.merge(data, explanations, on='receiptID', how='outer')
simulator = SoS(config,main.obtainStaminaDistribution(mergedReceiptExplanations['DISTANCE'].to_numpy()), explanations)
return simulator
def orderProductsPerImportanceAndProfit(shop):
#Order products
ordered = products.sort_values(by=['<NAME>'], ascending=True)
ordered = ordered['ID'].to_numpy()
aux = []
for p in ordered:
for _ in range(products.loc[products['ID'] == p,'Total Prateleiras'].iloc[0]):
aux.append(p)
size = [23,21]
ranksShelves = {}
#Order importance cells
for j in range(size[1]):
for i in range(size[0]):
ranksShelves[shop.shopping[i][j].id] = shop.shopping[i][j].rank
ranksShelves = dict(sorted(ranksShelves.items(), key=lambda item: item[1]))
indice = 0
for i in ranksShelves.keys():
if i in shop.shoppingClass.config:
ranksShelves[i] = int(aux[indice])
indice += 1
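    # Added note: ranksShelves now maps each shelf-cell id to a product id. Cells are
    # visited in ascending rank order, so the cells listed in the shop config are filled
    # with products in the order produced by the sort above (by the '<NAME>' column,
    # presumably a profit/importance metric given the function name).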
with open("profitImportance.json","w") as f:
json.dump(ranksShelves,f)
return ranksShelves
def orderProductsPerPair(shop):
    data, explanations = cvtCsvDataframe(pd.read_csv("data.csv"), pd.read_csv("explanations.csv"))
# -*- coding: utf-8 -*-
import numpy as np
import pytest
from numpy.random import RandomState
from numpy import nan
from datetime import datetime
from itertools import permutations
from pandas import (Series, Categorical, CategoricalIndex,
Timestamp, DatetimeIndex, Index, IntervalIndex)
import pandas as pd
from pandas import compat
from pandas._libs import (groupby as libgroupby, algos as libalgos,
hashtable as ht)
from pandas._libs.hashtable import unique_label_indices
from pandas.compat import lrange, range
import pandas.core.algorithms as algos
import pandas.core.common as com
import pandas.util.testing as tm
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import CategoricalDtype as CDT
from pandas.compat.numpy import np_array_datetime64_compat
from pandas.util.testing import assert_almost_equal
class TestMatch(object):
def test_ints(self):
values = np.array([0, 2, 1])
to_match = np.array([0, 1, 2, 2, 0, 1, 3, 0])
result = algos.match(to_match, values)
expected = np.array([0, 2, 1, 1, 0, 2, -1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([0, 2, 1, 1, 0, 2, np.nan, 0]))
tm.assert_series_equal(result, expected)
s = Series(np.arange(5), dtype=np.float32)
result = algos.match(s, [2, 4])
expected = np.array([-1, -1, 0, -1, 1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(s, [2, 4], np.nan))
expected = Series(np.array([np.nan, np.nan, 0, np.nan, 1]))
tm.assert_series_equal(result, expected)
def test_strings(self):
values = ['foo', 'bar', 'baz']
to_match = ['bar', 'foo', 'qux', 'foo', 'bar', 'baz', 'qux']
result = algos.match(to_match, values)
expected = np.array([1, 0, -1, 0, 1, 2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
result = Series(algos.match(to_match, values, np.nan))
expected = Series(np.array([1, 0, np.nan, 0, 1, 2, np.nan]))
tm.assert_series_equal(result, expected)
class TestFactorize(object):
def test_basic(self):
labels, uniques = algos.factorize(['a', 'b', 'b', 'a', 'a', 'c', 'c',
'c'])
tm.assert_numpy_array_equal(
uniques, np.array(['a', 'b', 'c'], dtype=object))
labels, uniques = algos.factorize(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], sort=True)
exp = np.array([0, 1, 1, 0, 0, 2, 2, 2], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4, 3, 2, 1, 0], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(range(5))), sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0, 1, 2, 3, 4], dtype=np.int64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))))
exp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([4., 3., 2., 1., 0.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
labels, uniques = algos.factorize(list(reversed(np.arange(5.))),
sort=True)
exp = np.array([4, 3, 2, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = np.array([0., 1., 2., 3., 4.], dtype=np.float64)
tm.assert_numpy_array_equal(uniques, exp)
def test_mixed(self):
# doc example reshaping.rst
x = Series(['A', 'A', np.nan, 'B', 3.14, np.inf])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, -1, 1, 2, 3], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index(['A', 'B', 3.14, np.inf])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([2, 2, -1, 3, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = Index([3.14, np.inf, 'A', 'B'])
tm.assert_index_equal(uniques, exp)
def test_datelike(self):
# M8
v1 = Timestamp('20130101 09:00:00.00004')
v2 = Timestamp('20130101')
x = Series([v1, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v1, v2])
tm.assert_index_equal(uniques, exp)
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
exp = DatetimeIndex([v2, v1])
tm.assert_index_equal(uniques, exp)
# period
v1 = pd.Period('201302', freq='M')
v2 = pd.Period('201303', freq='M')
x = Series([v1, v1, v1, v2, v2, v1])
# periods are not 'sorted' as they are converted back into an index
labels, uniques = algos.factorize(x)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([0, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.PeriodIndex([v1, v2]))
# GH 5986
v1 = pd.to_timedelta('1 day 1 min')
v2 = pd.to_timedelta('1 day')
x = Series([v1, v2, v1, v1, v2, v2, v1])
labels, uniques = algos.factorize(x)
exp = np.array([0, 1, 0, 0, 1, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v1, v2]))
labels, uniques = algos.factorize(x, sort=True)
exp = np.array([1, 0, 1, 1, 0, 0, 1], dtype=np.intp)
tm.assert_numpy_array_equal(labels, exp)
tm.assert_index_equal(uniques, pd.to_timedelta([v2, v1]))
def test_factorize_nan(self):
# nan should map to na_sentinel, not reverse_indexer[na_sentinel]
# rizer.factorize should not raise an exception if na_sentinel indexes
# outside of reverse_indexer
key = np.array([1, 2, 1, np.nan], dtype='O')
rizer = ht.Factorizer(len(key))
for na_sentinel in (-1, 20):
ids = rizer.factorize(key, sort=True, na_sentinel=na_sentinel)
expected = np.array([0, 1, 0, na_sentinel], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key),
expected == na_sentinel)
# nan still maps to na_sentinel when sort=False
key = np.array([0, np.nan, 1], dtype='O')
na_sentinel = -1
# TODO(wesm): unused?
ids = rizer.factorize(key, sort=False, na_sentinel=na_sentinel) # noqa
expected = np.array([2, -1, 0], dtype='int32')
assert len(set(key)) == len(set(expected))
tm.assert_numpy_array_equal(pd.isna(key), expected == na_sentinel)
@pytest.mark.parametrize("data,expected_label,expected_level", [
(
[(1, 1), (1, 2), (0, 0), (1, 2), 'nonsense'],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), 'nonsense']
),
(
[(1, 1), (1, 2), (0, 0), (1, 2), (1, 2, 3)],
[0, 1, 2, 1, 3],
[(1, 1), (1, 2), (0, 0), (1, 2, 3)]
),
(
[(1, 1), (1, 2), (0, 0), (1, 2)],
[0, 1, 2, 1],
[(1, 1), (1, 2), (0, 0)]
)
])
def test_factorize_tuple_list(self, data, expected_label, expected_level):
# GH9454
result = pd.factorize(data)
tm.assert_numpy_array_equal(result[0],
np.array(expected_label, dtype=np.intp))
expected_level_array = com._asarray_tuplesafe(expected_level,
dtype=object)
tm.assert_numpy_array_equal(result[1], expected_level_array)
def test_complex_sorting(self):
# gh 12666 - check no segfault
# Test not valid numpy versions older than 1.11
if pd._np_version_under1p11:
pytest.skip("Test valid only for numpy 1.11+")
x17 = np.array([complex(i) for i in range(17)], dtype=object)
pytest.raises(TypeError, algos.factorize, x17[::-1], sort=True)
def test_uint64_factorize(self):
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, 1], dtype=np.uint64)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
data = np.array([2**63, -1, 2**63], dtype=object)
exp_labels = np.array([0, 1, 0], dtype=np.intp)
exp_uniques = np.array([2**63, -1], dtype=object)
labels, uniques = algos.factorize(data)
tm.assert_numpy_array_equal(labels, exp_labels)
tm.assert_numpy_array_equal(uniques, exp_uniques)
def test_deprecate_order(self):
# gh 19727 - check warning is raised for deprecated keyword, order.
# Test not valid once order keyword is removed.
data = np.array([2**63, 1, 2**63], dtype=np.uint64)
with tm.assert_produces_warning(expected_warning=FutureWarning):
algos.factorize(data, order=True)
with tm.assert_produces_warning(False):
algos.factorize(data)
@pytest.mark.parametrize('data', [
np.array([0, 1, 0], dtype='u8'),
np.array([-2**63, 1, -2**63], dtype='i8'),
np.array(['__nan__', 'foo', '__nan__'], dtype='object'),
])
def test_parametrized_factorize_na_value_default(self, data):
# arrays that include the NA default for that type, but isn't used.
l, u = algos.factorize(data)
expected_uniques = data[[0, 1]]
expected_labels = np.array([0, 1, 0], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
@pytest.mark.parametrize('data, na_value', [
(np.array([0, 1, 0, 2], dtype='u8'), 0),
(np.array([1, 0, 1, 2], dtype='u8'), 1),
(np.array([-2**63, 1, -2**63, 0], dtype='i8'), -2**63),
(np.array([1, -2**63, 1, 0], dtype='i8'), 1),
(np.array(['a', '', 'a', 'b'], dtype=object), 'a'),
(np.array([(), ('a', 1), (), ('a', 2)], dtype=object), ()),
(np.array([('a', 1), (), ('a', 1), ('a', 2)], dtype=object),
('a', 1)),
])
def test_parametrized_factorize_na_value(self, data, na_value):
l, u = algos._factorize_array(data, na_value=na_value)
expected_uniques = data[[1, 3]]
expected_labels = np.array([-1, 0, -1, 1], dtype=np.intp)
tm.assert_numpy_array_equal(l, expected_labels)
tm.assert_numpy_array_equal(u, expected_uniques)
class TestUnique(object):
def test_ints(self):
arr = np.random.randint(0, 100, size=50)
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_objects(self):
arr = np.random.randint(0, 100, size=50).astype('O')
result = algos.unique(arr)
assert isinstance(result, np.ndarray)
def test_object_refcount_bug(self):
lst = ['A', 'B', 'C', 'D', 'E']
for i in range(1000):
len(algos.unique(lst))
def test_on_index_object(self):
mindex = pd.MultiIndex.from_arrays([np.arange(5).repeat(5), np.tile(
np.arange(5), 5)])
expected = mindex.values
expected.sort()
mindex = mindex.repeat(2)
result = pd.unique(mindex)
result.sort()
tm.assert_almost_equal(result, expected)
def test_datetime64_dtype_array_returned(self):
# GH 9431
expected = np_array_datetime64_compat(
['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'],
dtype='M8[ns]')
dt_index = pd.to_datetime(['2015-01-03T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000',
'2015-01-01T00:00:00.000000000+0000'])
result = algos.unique(dt_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(dt_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_timedelta64_dtype_array_returned(self):
# GH 9431
expected = np.array([31200, 45678, 10000], dtype='m8[ns]')
td_index = pd.to_timedelta([31200, 45678, 31200, 10000, 45678])
result = algos.unique(td_index)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
s = Series(td_index)
result = algos.unique(s)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
arr = s.values
result = algos.unique(arr)
tm.assert_numpy_array_equal(result, expected)
assert result.dtype == expected.dtype
def test_uint64_overflow(self):
s = Series([1, 2, 2**63, 2**63], dtype=np.uint64)
exp = np.array([1, 2, 2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(algos.unique(s), exp)
def test_nan_in_object_array(self):
l = ['a', np.nan, 'c', 'c']
result = pd.unique(l)
expected = np.array(['a', np.nan, 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
def test_categorical(self):
# we are expecting to return in the order
# of appearance
expected = Categorical(list('bac'), categories=list('bac'))
# we are expecting to return in the order
# of the categories
expected_o = Categorical(
list('bac'), categories=list('abc'), ordered=True)
# GH 15939
c = Categorical(list('baabc'))
result = c.unique()
tm.assert_categorical_equal(result, expected)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected)
c = Categorical(list('baabc'), ordered=True)
result = c.unique()
tm.assert_categorical_equal(result, expected_o)
result = algos.unique(c)
tm.assert_categorical_equal(result, expected_o)
# Series of categorical dtype
s = Series(Categorical(list('baabc')), name='foo')
result = s.unique()
tm.assert_categorical_equal(result, expected)
result = pd.unique(s)
tm.assert_categorical_equal(result, expected)
# CI -> return CI
ci = CategoricalIndex(Categorical(list('baabc'),
categories=list('bac')))
expected = CategoricalIndex(expected)
result = ci.unique()
tm.assert_index_equal(result, expected)
result = pd.unique(ci)
tm.assert_index_equal(result, expected)
def test_datetime64tz_aware(self):
# GH 15939
result = Series(
Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])).unique()
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]).unique()
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(
Series(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')])))
expected = np.array([Timestamp('2016-01-01 00:00:00-0500',
tz='US/Eastern')], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index([Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]', freq=None)
tm.assert_index_equal(result, expected)
def test_order_of_appearance(self):
# 9346
# light testing of guarantee of order of appearance
# these also are the doc-examples
result = pd.unique(Series([2, 1, 3, 3]))
tm.assert_numpy_array_equal(result,
np.array([2, 1, 3], dtype='int64'))
result = pd.unique(Series([2] + [1] * 5))
tm.assert_numpy_array_equal(result,
np.array([2, 1], dtype='int64'))
result = pd.unique(Series([Timestamp('20160101'),
Timestamp('20160101')]))
expected = np.array(['2016-01-01T00:00:00.000000000'],
dtype='datetime64[ns]')
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Index(
[Timestamp('20160101', tz='US/Eastern'),
Timestamp('20160101', tz='US/Eastern')]))
expected = DatetimeIndex(['2016-01-01 00:00:00'],
dtype='datetime64[ns, US/Eastern]',
freq=None)
tm.assert_index_equal(result, expected)
result = pd.unique(list('aabc'))
expected = np.array(['a', 'b', 'c'], dtype=object)
tm.assert_numpy_array_equal(result, expected)
result = pd.unique(Series(Categorical(list('aabc'))))
expected = Categorical(list('abc'))
tm.assert_categorical_equal(result, expected)
@pytest.mark.parametrize("arg ,expected", [
(('1', '1', '2'), np.array(['1', '2'], dtype=object)),
(('foo',), np.array(['foo'], dtype=object))
])
def test_tuple_with_strings(self, arg, expected):
# see GH 17108
result = pd.unique(arg)
tm.assert_numpy_array_equal(result, expected)
class TestIsin(object):
def test_invalid(self):
pytest.raises(TypeError, lambda: algos.isin(1, 1))
pytest.raises(TypeError, lambda: algos.isin(1, [1]))
pytest.raises(TypeError, lambda: algos.isin([1], 1))
def test_basic(self):
result = algos.isin([1, 2], [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(np.array([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), [1])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), Series([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series([1, 2]), set([1]))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], ['a'])
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), Series(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(Series(['a', 'b']), set(['a']))
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(['a', 'b'], [1])
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
def test_i8(self):
arr = pd.date_range('20130101', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
arr = pd.timedelta_range('1 day', periods=3).values
result = algos.isin(arr, [arr[0]])
expected = np.array([True, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, arr[0:2])
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.isin(arr, set(arr[0:2]))
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
def test_large(self):
s = pd.date_range('20000101', periods=2000000, freq='s').values
result = algos.isin(s, s[0:2])
expected = np.zeros(len(s), dtype=bool)
expected[0] = True
expected[1] = True
tm.assert_numpy_array_equal(result, expected)
def test_categorical_from_codes(self):
# GH 16639
vals = np.array([0, 1, 2, 0])
cats = ['a', 'b', 'c']
Sd = Series(Categorical(1).from_codes(vals, cats))
St = Series(Categorical(1).from_codes(np.array([0, 1]), cats))
expected = np.array([True, True, False, True])
result = algos.isin(Sd, St)
tm.assert_numpy_array_equal(expected, result)
@pytest.mark.parametrize("empty", [[], Series(), np.array([])])
def test_empty(self, empty):
# see gh-16991
vals = Index(["a", "b"])
expected = np.array([False, False])
result = algos.isin(vals, empty)
tm.assert_numpy_array_equal(expected, result)
class TestValueCounts(object):
def test_value_counts(self):
np.random.seed(1234)
from pandas.core.reshape.tile import cut
arr = np.random.randn(4)
factor = cut(arr, 4)
# assert isinstance(factor, n)
result = algos.value_counts(factor)
breaks = [-1.194, -0.535, 0.121, 0.777, 1.433]
index = IntervalIndex.from_breaks(breaks).astype(CDT(ordered=True))
expected = Series([1, 1, 1, 1], index=index)
tm.assert_series_equal(result.sort_index(), expected.sort_index())
def test_value_counts_bins(self):
s = [1, 2, 3, 4]
result = algos.value_counts(s, bins=1)
expected = Series([4],
index=IntervalIndex.from_tuples([(0.996, 4.0)]))
tm.assert_series_equal(result, expected)
result = algos.value_counts(s, bins=2, sort=False)
expected = Series([2, 2],
index=IntervalIndex.from_tuples([(0.996, 2.5),
(2.5, 4.0)]))
tm.assert_series_equal(result, expected)
def test_value_counts_dtypes(self):
result = algos.value_counts([1, 1.])
assert len(result) == 1
result = algos.value_counts([1, 1.], bins=1)
assert len(result) == 1
result = algos.value_counts(Series([1, 1., '1'])) # object
assert len(result) == 2
pytest.raises(TypeError, lambda s: algos.value_counts(s, bins=1),
['1', 1])
def test_value_counts_nat(self):
td = Series([np.timedelta64(10000), pd.NaT], dtype='timedelta64[ns]')
dt = pd.to_datetime(['NaT', '2014-01-01'])
for s in [td, dt]:
vc = algos.value_counts(s)
vc_with_na = algos.value_counts(s, dropna=False)
assert len(vc) == 1
assert len(vc_with_na) == 2
exp_dt = Series({Timestamp('2014-01-01 00:00:00'): 1})
tm.assert_series_equal(algos.value_counts(dt), exp_dt)
# TODO same for (timedelta)
def test_value_counts_datetime_outofbounds(self):
# GH 13663
s = Series([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1),
datetime(3000, 1, 1), datetime(3000, 1, 1)])
res = s.value_counts()
exp_index = Index([datetime(3000, 1, 1), datetime(5000, 1, 1),
datetime(6000, 1, 1)], dtype=object)
exp = Series([3, 2, 1], index=exp_index)
tm.assert_series_equal(res, exp)
# GH 12424
res = pd.to_datetime(Series(['2362-01-01', np.nan]),
errors='ignore')
exp = Series(['2362-01-01', np.nan], dtype=object)
tm.assert_series_equal(res, exp)
def test_categorical(self):
s = Series(Categorical(list('aaabbc')))
result = s.value_counts()
expected = Series([3, 2, 1], index=CategoricalIndex(['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
# preserve order?
s = s.cat.as_ordered()
result = s.value_counts()
expected.index = expected.index.as_ordered()
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_nans(self):
s = Series(Categorical(list('aaaaabbbcc'))) # 4,3,2,1 (nan)
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['a', 'b', 'c']))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([
4, 3, 2, 1
], index=CategoricalIndex(['a', 'b', 'c', np.nan]))
tm.assert_series_equal(result, expected, check_index_type=True)
# out of order
s = Series(Categorical(
list('aaaaabbbcc'), ordered=True, categories=['b', 'a', 'c']))
s.iloc[1] = np.nan
result = s.value_counts()
expected = Series([4, 3, 2], index=CategoricalIndex(
['a', 'b', 'c'], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
result = s.value_counts(dropna=False)
expected = Series([4, 3, 2, 1], index=CategoricalIndex(
['a', 'b', 'c', np.nan], categories=['b', 'a', 'c'], ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_categorical_zeroes(self):
# keep the `d` category with 0
s = Series(Categorical(
list('bbbaac'), categories=list('abcd'), ordered=True))
result = s.value_counts()
expected = Series([3, 2, 1, 0], index=Categorical(
['b', 'a', 'c', 'd'], categories=list('abcd'), ordered=True))
tm.assert_series_equal(result, expected, check_index_type=True)
def test_dropna(self):
# https://github.com/pandas-dev/pandas/issues/9443#issuecomment-73719328
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False]).value_counts(dropna=False),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=True),
Series([2, 1], index=[True, False]))
tm.assert_series_equal(
Series([True, True, False, None]).value_counts(dropna=False),
Series([2, 1, 1], index=[True, False, np.nan]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5.]).value_counts(dropna=False),
Series([2, 1], index=[5., 10.3]))
tm.assert_series_equal(
Series([10.3, 5., 5., None]).value_counts(dropna=True),
Series([2, 1], index=[5., 10.3]))
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
result = Series([10.3, 5., 5., None]).value_counts(dropna=False)
expected = Series([2, 1, 1], index=[5., 10.3, np.nan])
tm.assert_series_equal(result, expected)
def test_value_counts_normalized(self):
# GH12558
s = Series([1, 2, np.nan, np.nan, np.nan])
dtypes = (np.float64, np.object, 'M8[ns]')
for t in dtypes:
s_typed = s.astype(t)
result = s_typed.value_counts(normalize=True, dropna=False)
expected = Series([0.6, 0.2, 0.2],
index=Series([np.nan, 2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
result = s_typed.value_counts(normalize=True, dropna=True)
expected = Series([0.5, 0.5],
index=Series([2.0, 1.0], dtype=t))
tm.assert_series_equal(result, expected)
def test_value_counts_uint64(self):
arr = np.array([2**63], dtype=np.uint64)
expected = Series([1], index=[2**63])
result = algos.value_counts(arr)
tm.assert_series_equal(result, expected)
arr = np.array([-1, 2**63], dtype=object)
expected = Series([1, 1], index=[-1, 2**63])
result = algos.value_counts(arr)
# 32-bit linux has a different ordering
if not compat.is_platform_32bit():
tm.assert_series_equal(result, expected)
class TestDuplicated(object):
def test_duplicated_with_nas(self):
keys = np.array([0, 1, np.nan, 0, 2, np.nan], dtype=object)
result = algos.duplicated(keys)
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='first')
expected = np.array([False, False, False, True, False, True])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep=False)
expected = np.array([True, False, True, True, False, True])
tm.assert_numpy_array_equal(result, expected)
keys = np.empty(8, dtype=object)
for i, t in enumerate(zip([0, 0, np.nan, np.nan] * 2,
[0, np.nan, 0, np.nan] * 2)):
keys[i] = t
result = algos.duplicated(keys)
falses = [False] * 4
trues = [True] * 4
expected = np.array(falses + trues)
tm.assert_numpy_array_equal(result, expected)
result = algos.duplicated(keys, keep='last')
expected = np.array(trues + falses)
tm.assert_numpy_array_equal(result, expected)
        result = algos.duplicated(keys, keep=False)
import geopandas
import pandas as pd
import click
import numpy as np
@click.command()
@click.option("--anthropogenic",
type=click.Path(exists=True),
default='ancientmetagenome-anthropogenic.tsv',
show_default=True,
help='ancientmetagenome-anthropogenic TSV file')
@click.option("--environmental",
type=click.Path(exists=True),
default='ancientmetagenome-environmental.tsv',
show_default=True,
help='ancientmetagenome-environmental TSV file')
@click.option("--meta_host",
type=click.Path(exists=True),
default='ancientmetagenome-hostassociated.tsv',
show_default=True,
help='ancientmetagenome-hostassociated TSV file')
@click.option("--single_host",
type=click.Path(exists=True),
default='ancientsinglegenome-hostassociated.tsv',
show_default=True,
help='ancientsinglegenome-hostassociated TSV file')
@click.option(
"--geojson",
type=click.Path(writable=True, dir_okay=False),
default="map.geo.json",
show_default=True,
help="Output geo.json file",
)
def convert(anthropogenic, environmental, meta_host, single_host, geojson):
"""\b
Converts csv file to geojson file
Author: <NAME>
Contact: <borry[at]shh.mpg.de>
CSV: path to CSV containing Latitude/Longitude columns
"""
sigma = 0.0001
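    # Added note (assumption, not verified here): `sigma` is presumably a small jitter
    # amplitude applied to Latitude/Longitude so that records with identical coordinates
    # do not overlap exactly on the map.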
anthro = pd.read_csv(anthropogenic, sep='\t', decimal=".")
anthro['dir_type'] = ['Ancient Metagenome - Anthropogenic']*anthro.shape[0]
    envi = pd.read_csv(environmental, sep='\t', decimal=".")
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
import os
base_dir = "../input/"
train_dir = os.path.join(base_dir,"train/train")
testing_dir = os.path.join(base_dir, "test")
train = pd.read_csv("../input/train.csv")
train_dataframe = pd.read_csv("../input/train.csv")
from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_run_fdr_ranking():
    target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
'''
MIT License
Copyright (c) 2020 Minciencia
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import sys
import tweepy
import pandas as pd
import datetime
def tweeting(consumer_key, consumer_secret, my_access_token, my_access_token_secret, carrier):
# Authentication
my_auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
my_auth.set_access_token(my_access_token, my_access_token_secret)
my_api = tweepy.API(my_auth)
# tweet
if carrier == 'reportediario':
my_positividad = pd.read_csv('../output/producto49/Positividad_Diaria_Media_T.csv')
my_positividad_ag = pd.read_csv('../output/producto49/Positividad_Diaria_Media_Ag_T.csv')
my_mediamovil = pd.read_csv('../output/producto75/MediaMovil_casos_nuevos_T.csv')
my_casos_nuevos_totales = pd.read_csv('../output/producto5/TotalesNacionales_T.csv')
casos_nuevos_totales = int(pd.to_numeric(my_casos_nuevos_totales.iloc[my_casos_nuevos_totales.index.max()][7]))
casos_nuevos_antigeno = int(pd.to_numeric(my_casos_nuevos_totales.iloc[my_casos_nuevos_totales.index.max()][19]))
mediamovil_nacional = int(pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]))
variacion_nacional = float(100*(pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]) - pd.to_numeric(
my_mediamovil.iloc[my_mediamovil.index.max() - 7][17]))/pd.to_numeric(my_mediamovil.iloc[my_mediamovil.index.max()][17]))
positividad_nacional = float(100*pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]))
variacion_positividad = float(100*(pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]) - pd.to_numeric(
my_positividad.iloc[my_positividad.index.max() - 7][5]))/pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][5]))
positividad_nacional = ("%.2f" % positividad_nacional)
positividad = float(100*pd.to_numeric(my_positividad.iloc[my_positividad.index.max()][4]))
positividad_hoy = ("%.2f" % positividad)
casos_nuevos = str(int(my_positividad.iloc[my_positividad.index.max()][2]))
muestras = str(int(my_positividad.iloc[my_positividad.index.max()][1]))
tests_antigeno = str(int(my_positividad_ag.iloc[my_positividad_ag.index.max()][1]))
positividad_ag = float(100 * pd.to_numeric(my_positividad_ag.iloc[my_positividad_ag.index.max()][4]))
positividad_ag_hoy = ("%.2f" % positividad_ag)
# create update elements
tweet_text = '🤖Actualicé el reporte diario del @ministeriosalud de hoy 💫 Gracias a la Subsecretaría de Salud Pública y de Redes Asistenciales. Hay '+str(mediamovil_nacional)+' casos nuevos promedio en los últimos 7 días, con positividad de '+str(positividad_nacional)+'%. Más detalles en los productos en la imagen. https://github.com/MinCiencia/Datos-COVID19'
reply2_text = '🤖El total de casos confirmados hoy es '+str(casos_nuevos_totales)+', de los cuales '+str(casos_nuevos_antigeno)+' fueron confirmados con test de antígeno y '+casos_nuevos+' con PCR+. De las '+muestras+' muestras que se analizaron en las últimas 24 horas en laboratorios nacionales, un '+positividad_hoy+'% resultó positivo.'
reply3_text = '🤖Además, de los '+str(tests_antigeno)+ ' tests de antígeno realizados en el territorio nacional durante las últimas 24h, un '+positividad_ag_hoy+'% resultó positivo.'
if variacion_nacional >= 0 and variacion_positividad >= 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos creció en '+str(variacion_nacional)+'% y la positividad en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional >= 0 and variacion_positividad < 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos creció en '+str(variacion_nacional)+'% y la positividad bajó en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional < 0 and variacion_positividad < 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos bajó en '+str(variacion_nacional)+'% y la positividad en '+str(variacion_positividad)+'% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
elif variacion_nacional < 0 and variacion_positividad >= 0:
variacion_nacional = ("%.2f" % variacion_nacional)
variacion_positividad = ("%.2f" % variacion_positividad)
reply1_text = '🤖 En comparación con la semana anterior, la media móvil de los últimos 7 días para casos nuevos bajó en ' + str(
variacion_nacional) + '% y la positividad aumentó en ' + str(
variacion_positividad) + '% a nivel nacional. Detalles a nivel regional en: https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto75 y https://github.com/MinCiencia/Datos-COVID19/tree/master/output/producto49'
# Generate text tweet with media (image)
media1= my_api.media_upload('./img/Datos covid_Bot_A_g1.png')
media2= my_api.media_upload('./img/Datos covid_Bot_A_g2.png')
media3= my_api.media_upload('./img/Datos covid_Bot_A_g3.png')
media4= my_api.media_upload('./img/Datos covid_Bot_A_g4.png')
try:
tweet = my_api.update_status(status=tweet_text,
media_ids=[media1.media_id, media2.media_id, media3.media_id, media4.media_id])
tweet2 = my_api.update_status(status=reply1_text, in_reply_to_status_id=tweet.id)
tweet3 = my_api.update_status(status=reply2_text, in_reply_to_status_id=tweet2.id)
tweet3 = my_api.update_status(status=reply3_text, in_reply_to_status_id=tweet3.id)
except tweepy.TweepError as error:
if error.api_code == 187:
# Do something special
print('duplicate message')
elif carrier == 'mmamp':
# create update elements
tweet_text = '🤖Actualicé los datos de calidad del aire en todo el territorio nacional, desde las estaciones del SINCA del @MMAChile 💫. Mira específicamente qué actualicé en la imagen y clona el GitHub https://github.com/MinCiencia/Datos-COVID19'
media1= my_api.media_upload('./img/Datos covid_Bot_G_g1.png')
# media2= my_api.media_upload('./img/Datos covid_Bot_A_g2.png')
# media3= my_api.media_upload('./img/Datos covid_Bot_A_g3.png')
# media4= my_api.media_upload('./img/Datos covid_Bot_A_g4.png')
# Generate text tweet with media (image)
my_api.update_status(status=tweet_text, media_ids=[media1.media_id])
elif carrier == 'informeepi':
my_epi= pd.read_csv('../output/producto1/Covid-19_T.csv')
fecha_informe = my_epi.iloc[my_epi.index.max()-1][0]
# create update elements
tweet_text = '🤖Actualicé los datos del Informe Epidemiológico publicado por @ministeriosalud de hoy 💫, con los datos correspondientes al '+fecha_informe+'. Gracias al equipo de especialistas en epidemiología. Mira qué actualicé en la imagen y clona el GitHub https://github.com/MinCiencia/Datos-COVID19'
reply1_text = '🤖A partir de este momento, todas mis respuestas sobre comunas del país 🇨🇱, corresponden al último informe. Más detalles en https://github.com/MinCiencia/Datos-COVID19'
media1= my_api.media_upload('./img/Datos covid_Bot_B_g1.png')
media2= my_api.media_upload('./img/Datos covid_Bot_B_g2.png')
media3= my_api.media_upload('./img/Datos covid_Bot_B_g3.png')
media4= my_api.media_upload('./img/Datos covid_Bot_B_g4.png')
# Generate text tweet with media (image)
tweet = my_api.update_status(status=tweet_text, media_ids=[media1.media_id,media2.media_id,media3.media_id,media4.media_id])
my_api.update_status(status=reply1_text, in_reply_to_status_id=tweet.id)
elif carrier == 'vacunacion':
now = datetime.datetime.now()
my_vacunacion = pd.read_csv('../output/producto76/vacunacion_t.csv')
vacunados = int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][1]))
vacunados_pauta_completa = int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][2])) + int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][3]))
my_vacunacion_avance = 100*vacunados/16696002
my_vacunacion_avance_pauta_completa = 100*vacunados_pauta_completa/16696002
my_vacunacion_avance = ("%.2f" % my_vacunacion_avance)
my_vacunacion_avance_pauta_completa = ("%.2f" % my_vacunacion_avance_pauta_completa)
dosis_dia = vacunados + vacunados_pauta_completa + int(pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()][4])) - (pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][1]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][2]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][3]) + pd.to_numeric(my_vacunacion.iloc[my_vacunacion.index.max()-1][4]))
my_vacunacion = my_vacunacion[1:]
        my_vacunacion['total_dosis'] = pd.to_numeric(my_vacunacion['Total'])
#!/usr/bin/env python
# coding=utf-8
# Created by max on 17-10-10
from __future__ import division # for divide operation in python 2
from __future__ import print_function
import os
import sys
import time
import random
import numpy as np
import pandas as pd
from scipy import stats
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.font_manager
from pandas import DataFrame
import sklearn.metrics as metrics
from sklearn.preprocessing import LabelEncoder
from sklearn import model_selection
from sklearn.svm import OneClassSVM
from sklearn.neighbors import LocalOutlierFactor
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import IsolationForest
# Classification
from sklearn.linear_model import LogisticRegression
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from keras.layers import LSTM
from keras.layers import Dense, Activation, Dropout, Bidirectional
from keras.models import Sequential, load_model
from keras.callbacks import EarlyStopping, ModelCheckpoint
from sklearn.base import BaseEstimator, ClassifierMixin
from keras.wrappers.scikit_learn import KerasClassifier
__all__ = ["DataProcessor", "ModelProcessor"]
TAG_POSITIVE = "o"
TIMESTEPS = 1
TRAIN_PERCENTAGE = 0.8
EPOCHS = 60
BATCH_SIZE = 50
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
"""convert series to supervised learning
    :param data: array-like sequence of observations (list or 2-D ndarray)
    :param n_in: number of lag observations (t-n_in, ..., t-1) to use as input columns
    :param n_out: number of forward observations (t, ..., t+n_out-1) to use as output columns
    :param dropnan: whether to drop rows containing the NaN values introduced by shifting
    :return: pandas DataFrame framed for supervised learning
"""
n_vars = 1 if type(data) is list else data.shape[1]
df = DataFrame(data)
cols, names = list(), list()
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df.shift(i))
names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df.shift(-i))
if i == 0:
names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
else:
names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
# put it all together
    agg = pd.concat(cols, axis=1)
"""
Contains useful graphic generators. Currently, effect measure plots and functional form assessment plots
are implemented. Uses matplotlib to generate graphics. Future inclusions include forest plots
Contents:
Functional form assessment- func_form_plot()
Forest plot/ effect measure plot- EffectMeasurePlot()
P-value distribution plot- pvalue_plot()
Spaghetti plot- spaghetti_plot()
Receiver-Operator Curve- roc()
Dynamic risk plot- dynamic_risk_plot()
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.genmod.families import links
from statsmodels.nonparametric.smoothers_lowess import lowess
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker as mticker
class EffectMeasurePlot:
"""Used to generate effect measure plots. effectmeasure plot accepts four list type objects.
effectmeasure_plot is initialized with the associated names for each line, the point estimate,
the lower confidence limit, and the upper confidence limit.
Plots will resemble the following form:
_____________________________________________ Measure % CI
| |
1 | --------o------- | x n, 2n
| |
2 | ----o---- | w m, 2m
| |
|___________________________________________|
# # # #
The following functions (and their purposes) live within effectmeasure_plot
labels(**kwargs)
Used to change the labels in the plot, as well as the center and scale. Inputs are
keyword arguments
KEYWORDS:
-effectmeasure + changes the effect measure label
-conf_int + changes the confidence interval label
-scale + changes the scale to either log or linear
-center + changes the reference line for the center
colors(**kwargs)
Used to change the color of points and lines. Also can change the shape of points.
Valid colors and shapes for matplotlib are required. Inputs are keyword arguments
KEYWORDS:
-errorbarcolor + changes the error bar colors
-linecolor + changes the color of the reference line
-pointcolor + changes the color of the points
-pointshape + changes the shape of points
plot(t_adjuster=0.01,decimal=3,size=3)
Generates the effect measure plot of the input lists according to the pre-specified
colors, shapes, and labels of the class object
Arguments:
-t_adjuster + used to refine alignment of the table with the line graphs.
                              When generating plots, trial and error for this value is usually
                              necessary
-decimal + number of decimal places to display in the table
-size + size of the plot to generate
Example)
>>>lab = ['One','Two'] #generating lists of data to plot
>>>emm = [1.01,1.31]
>>>lcl = ['0.90',1.01]
>>>ucl = [1.11,1.53]
>>>
>>>x = zepid.graphics.effectmeasure_plot(lab,emm,lcl,ucl) #initializing effectmeasure_plot with the above lists
>>>x.labels(effectmeasure='RR') #changing the table label to 'RR'
>>>x.colors(pointcolor='r') #changing the point colors to red
>>>x.plot(t_adjuster=0.13) #generating the effect measure plot
"""
def __init__(self, label, effect_measure, lcl, ucl):
"""Initializes effectmeasure_plot with desired data to plot. All lists should be the same
length. If a blank space is desired in the plot, add an empty character object (' ') to
each list at the desired point.
Inputs:
label
-list of labels to use for y-axis
effect_measure
-list of numbers for point estimates to plot. If point estimate has trailing zeroes,
input as a character object rather than a float
lcl
-list of numbers for upper confidence limits to plot. If point estimate has trailing
zeroes, input as a character object rather than a float
ucl
-list of numbers for upper confidence limits to plot. If point estimate has
trailing zeroes, input as a character object rather than a float
"""
self.df = pd.DataFrame()
self.df['study'] = label
self.df['OR'] = effect_measure
self.df['LCL'] = lcl
self.df['UCL'] = ucl
self.df['OR2'] = self.df['OR'].astype(str).astype(float)
if (all(isinstance(item, float) for item in lcl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['LCL_dif'] = self.df['OR'] - self.df['LCL']
else:
self.df['LCL_dif'] = (pd.to_numeric(self.df['OR'])) - (pd.to_numeric(self.df['LCL']))
if (all(isinstance(item, float) for item in ucl)) & (all(isinstance(item, float) for item in effect_measure)):
self.df['UCL_dif'] = self.df['UCL'] - self.df['OR']
else:
self.df['UCL_dif'] = (pd.to_numeric(self.df['UCL'])) - (pd.to_numeric(self.df['OR']))
self.em = 'OR'
self.ci = '95% CI'
self.scale = 'linear'
self.center = 1
self.errc = 'dimgrey'
self.shape = 'd'
self.pc = 'k'
self.linec = 'gray'
def labels(self, **kwargs):
"""Function to change the labels of the outputted table. Additionally, the scale and reference
value can be changed.
Accepts the following keyword arguments:
effectmeasure
-changes the effect measure label
conf_int
-changes the confidence interval label
scale
-changes the scale to either log or linear
center
-changes the reference line for the center
"""
if 'effectmeasure' in kwargs:
self.em = kwargs['effectmeasure']
        if 'conf_int' in kwargs:
            self.ci = kwargs['conf_int']
if 'scale' in kwargs:
self.scale = kwargs['scale']
if 'center' in kwargs:
self.center = kwargs['center']
def colors(self, **kwargs):
"""Function to change colors and shapes.
Accepts the following keyword arguments:
errorbarcolor
-changes the error bar colors
linecolor
-changes the color of the reference line
pointcolor
-changes the color of the points
pointshape
-changes the shape of points
"""
if 'errorbarcolor' in kwargs:
self.errc = kwargs['errorbarcolor']
if 'pointshape' in kwargs:
self.shape = kwargs['pointshape']
if 'linecolor' in kwargs:
self.linec = kwargs['linecolor']
if 'pointcolor' in kwargs:
self.pc = kwargs['pointcolor']
def plot(self, figsize=(3, 3), t_adjuster=0.01, decimal=3, size=3, max_value=None, min_value=None):
"""Generates the matplotlib effect measure plot with the default or specified attributes.
The following variables can be used to further fine-tune the effect measure plot
t_adjuster
-used to refine alignment of the table with the line graphs. When generate plots, trial
and error for this value are usually necessary. I haven't come up with an algorithm to
determine this yet...
decimal
-number of decimal places to display in the table
size
-size of the plot to generate
max_value
-maximum value of x-axis scale. Default is None, which automatically determines max value
min_value
-minimum value of x-axis scale. Default is None, which automatically determines min value
"""
tval = []
ytick = []
for i in range(len(self.df)):
if (np.isnan(self.df['OR2'][i]) == False):
if ((isinstance(self.df['OR'][i], float)) & (isinstance(self.df['LCL'][i], float)) & (
isinstance(self.df['UCL'][i], float))):
tval.append([round(self.df['OR2'][i], decimal), (
'(' + str(round(self.df['LCL'][i], decimal)) + ', ' + str(
round(self.df['UCL'][i], decimal)) + ')')])
else:
tval.append(
[self.df['OR'][i], ('(' + str(self.df['LCL'][i]) + ', ' + str(self.df['UCL'][i]) + ')')])
ytick.append(i)
else:
tval.append([' ', ' '])
ytick.append(i)
if max_value is None:
if pd.to_numeric(self.df['UCL']).max() < 1:
maxi = round(((pd.to_numeric(self.df['UCL'])).max() + 0.05),
2) # setting x-axis maximum for UCL less than 1
            if (pd.to_numeric(self.df['UCL']).max() < 9) and (pd.to_numeric(self.df['UCL']).max() >= 1):
import datetime
from typing import List
import numpy as np
import pandas as pd
import scipy.stats as sts
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from distributions import ContinousDistributionEstimator
from distributions import DiscreteDistributionEstimator
def array_drop_nan(y, axis: int = 0) -> np.ndarray:
"""
Returns numpy-ndarray without NaN.
Parameters
----------
y : array_like or pandas DataFrame/Series with nan-values
The array which contains or does not contain nan-values.
axis : integer (default = 0)
This parameter determines by which axis nan-values are dropped.
If axis = 0 than columns which contain nan-values will be dropped.
If axis = 1 rows which contain nan-values will be dropped,
Returns
-------
y : ndarray with float32 dtype
An array of the same shape as `y` without nan-values.
If all columns/rows contain nan-values then an
empty array will be returned.
Examples
--------
>>> array_drop_nan(np.array([1,2,3,4, None]), axis=0)
array([1., 2., 3., 4.], dtype=float32)
>>> array_drop_nan(np.array([[1,2, None], [4,5,6]]), axis=1)
array([[4., 5., 6.]], dtype=float32)
>>> array_drop_nan(np.array([[1,2, None], [4,5,6]]), axis=0)
array([[1., 2.],
[4., 5.]], dtype=float32)
"""
y = np.array(y, dtype=np.float32)
if len(y.shape) == 1:
y = y[~np.isnan(y)]
elif axis == 0:
y = y[:, ~np.any(np.isnan(y), axis=0)]
elif axis == 1:
y = y[~np.any(np.isnan(y), axis=1), :]
return y
def array_fill_nan(y, fill, **_) -> np.ndarray:
y = np.array(y, dtype=np.float32)
if len(y.shape) == 1:
y = y.reshape(-1, 1)
if callable(fill):
for j in range(y.shape[1]):
value = fill(array_drop_nan(y[:, j]))
y[:, j] = array_fill_nan(y[:, j], value).ravel()
else:
y[np.isnan(y)] = fill
return y
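# Worked example (added, not in the original docstring); behaviour follows from the code above:
# array_fill_nan([1, np.nan, 3], fill=np.mean) -> array([[1.], [2.], [3.]], dtype=float32)
# i.e. a 1-D input is reshaped to a single column and, when `fill` is callable, each column's
# NaNs are replaced by fill(<that column's non-NaN values>) (here the mean of [1, 3] = 2).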
def share_missing(y, axis: int = 0):
if axis not in (0, 1):
        raise ValueError('Axis must be 0 or 1 in share_missing')
y = np.array(y, dtype=np.float32)
return np.isnan(y).sum(axis=axis) / y.shape[axis]
def timer(func):
"""Decorator for speed measurement of function
This simple decorator adds print of
spent time on execution of functions
with args and kwargs
Example:
@timer
def amount(a: float, b: float):
return a + b
------------
Return wrapper of function
"""
def wrapper(*args, **kwargs):
start = datetime.datetime.now()
result = func(*args, **kwargs)
stop = datetime.datetime.now()
print(f'Time on function is: {stop - start}')
return result
return wrapper
def mean_absolute_percentage_error(y_true: np.ndarray, y_fit: np.ndarray) -> float:
return np.mean(np.abs((y_true - y_fit) / y_true)) * 100
def sample_entropy(x, bins: int = 10):
"""Calculate sample entropy
using frequency distribution of data x
"""
return sts.entropy(sts.relfreq(x, numbins=bins)[0])
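# Added note: this is the Shannon entropy of the relative-frequency histogram of x,
# built with `bins` equal-width bins via scipy.stats.relfreq.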
def correlation_tolerance(matrix, tol: float = 0.5, labels: list = None):
matrix = np.array(matrix, dtype=np.float32)
if matrix.shape[0] != matrix.shape[1]:
        raise ValueError('Matrix must be symmetric')
tol = -np.inf if tol is None else tol
upper_triu = np.triu_indices(matrix.shape[0])
labels = labels if labels is not None else upper_triu[1]
correlation = dict.fromkeys(labels)
for i, j in zip(upper_triu[0], upper_triu[1]):
if i == j:
continue
current_value = matrix[i, j]
if correlation[labels[i]] is None:
correlation[labels[i]] = dict()
if np.abs(current_value) >= tol:
correlation[labels[i]][labels[j]] = current_value
return correlation
def top_correlation(array, count: int = 10, labels: list = None):
array = np.array(array, dtype=np.float32)
labels = labels if labels is not None else np.arange(len(array))
indexies = np.argsort(array)
    return array[indexies][-count:], labels[indexies][-count:]
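# Worked example (added): top_correlation([0.1, 0.9, 0.5], count=2) sorts ascending and keeps
# the last two entries -> (array([0.5, 0.9]), array([2, 1])); labels default to positional
# indices when `labels` is not supplied.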
def sort_dictionary_by_value(dictionary: dict):
return dict(sorted(dictionary.items(), key=lambda x: x[1], reverse=True))
def sample_data(x, sample_size=30):
uniq = set(x)
indeses = []
for i in uniq:
indeses.extend(np.random.choice(
list(np.where(x == i)[0]), int(sample_size / len(uniq))))
np.random.shuffle(indeses)
return indeses
def dataframe_col_values_by_func(df, to_replace, function: callable) -> pd.DataFrame:
data = np.array(df)
cols = df.columns
for col in range(data.shape[1]):
data_col = data[:, col]
value = function(data_col[data_col != to_replace].astype(float))
data_col[data_col == to_replace] = value
return pd.DataFrame(data, columns=cols)
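def _col_values_by_func_demo():
    # Illustrative sketch only (hypothetical frame): replace the sentinel value
    # -1 in every column with the mean of that column's remaining values.
    demo = pd.DataFrame({'a': [1, -1, 3], 'b': [-1, 2, 4]})
    return dataframe_col_values_by_func(demo, to_replace=-1, function=np.mean)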
def dataframe_label_encoder(dataframe: pd.DataFrame) -> tuple:
cols = dataframe.columns
encoder_function = LabelEncoder()
encoded_data = np.array(dataframe)
for i in range(encoded_data.shape[1]):
encoder_function.fit(encoded_data[:, i])
encoded_data[:, i] = encoder_function.transform(encoded_data[:, i])
# note: the single LabelEncoder is re-fitted per column, so the returned
# encoder only reflects the fit of the last column
return pd.DataFrame(encoded_data, columns=cols), encoder_function
def dataframe_onehot_encoder(dataframe: pd.DataFrame, sparse: bool = False) -> tuple:
cols = dataframe.columns
encoder_function = OneHotEncoder(sparse=sparse)
encoder = encoder_function.fit(dataframe)
encoder_cols = []
encoder_data = encoder.transform(dataframe)
for i in range(len(encoder.categories_)):
for j in encoder.categories_[i]:
encoder_cols.append(cols[i] + '.' + j)
return pd.DataFrame(encoder_data, columns=encoder_cols), encoder
def dataframe_drop_by_row_value(dataframe, check, return_index: bool = False):
cols = dataframe.columns
data = np.ravel(dataframe.values)
index = []
for i in range(len(data)):
if check != data[i]:
index.append(i)
to_ret = | pd.DataFrame(data[index], columns=cols) | pandas.DataFrame |
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import pytest
import numpy as np
import pandas
from pandas.testing import assert_index_equal
import matplotlib
import modin.pandas as pd
import sys
from modin.pandas.test.utils import (
NROWS,
RAND_LOW,
RAND_HIGH,
df_equals,
arg_keys,
name_contains,
test_data,
test_data_values,
test_data_keys,
axis_keys,
axis_values,
int_arg_keys,
int_arg_values,
create_test_dfs,
eval_general,
generate_multiindex,
extra_test_parameters,
)
from modin.config import NPartitions
NPartitions.put(4)
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
def eval_setitem(md_df, pd_df, value, col=None, loc=None):
if loc is not None:
col = pd_df.columns[loc]
value_getter = value if callable(value) else (lambda *args, **kwargs: value)
eval_general(
md_df, pd_df, lambda df: df.__setitem__(col, value_getter(df)), __inplace__=True
)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_with_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"dates",
[
["2018-02-27 09:03:30", "2018-02-27 09:04:30"],
["2018-02-27 09:03:00", "2018-02-27 09:05:00"],
],
)
@pytest.mark.parametrize("subset", ["a", "b", ["a", "b"], None])
def test_asof_without_nan(dates, subset):
data = {"a": [10, 20, 30, 40, 50], "b": [70, 600, 30, -200, 500]}
index = pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
)
modin_where = pd.DatetimeIndex(dates)
pandas_where = pandas.DatetimeIndex(dates)
compare_asof(data, index, modin_where, pandas_where, subset)
@pytest.mark.parametrize(
"lookup",
[
[60, 70, 90],
[60.5, 70.5, 100],
],
)
@pytest.mark.parametrize("subset", ["col2", "col1", ["col1", "col2"], None])
def test_asof_large(lookup, subset):
data = test_data["float_nan_data"]
index = list(range(NROWS))
modin_where = pd.Index(lookup)
pandas_where = pandas.Index(lookup)
compare_asof(data, index, modin_where, pandas_where, subset)
def compare_asof(
data, index, modin_where: pd.Index, pandas_where: pandas.Index, subset
):
modin_df = pd.DataFrame(data, index=index)
pandas_df = pandas.DataFrame(data, index=index)
df_equals(
modin_df.asof(modin_where, subset=subset),
pandas_df.asof(pandas_where, subset=subset),
)
df_equals(
modin_df.asof(modin_where.values, subset=subset),
pandas_df.asof(pandas_where.values, subset=subset),
)
df_equals(
modin_df.asof(list(modin_where.values), subset=subset),
pandas_df.asof(list(pandas_where.values), subset=subset),
)
df_equals(
modin_df.asof(modin_where.values[0], subset=subset),
pandas_df.asof(pandas_where.values[0], subset=subset),
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_first_valid_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
assert modin_df.first_valid_index() == (pandas_df.first_valid_index())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_head(data, n):
# Test normal dataframe head
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.head(n), pandas_df.head(n))
df_equals(modin_df.head(len(modin_df) + 1), pandas_df.head(len(pandas_df) + 1))
# Test head when we call it from a QueryCompilerView
modin_result = modin_df.loc[:, ["col1", "col3", "col3"]].head(n)
pandas_result = pandas_df.loc[:, ["col1", "col3", "col3"]].head(n)
df_equals(modin_result, pandas_result)
@pytest.mark.skip(reason="Defaulting to Pandas")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iat(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(NotImplementedError):
modin_df.iat()
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if not name_contains(request.node.name, ["empty_data"]):
# Scalar
np.testing.assert_equal(modin_df.iloc[0, 1], pandas_df.iloc[0, 1])
# Series
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.iloc[1:, 0], pandas_df.iloc[1:, 0])
df_equals(modin_df.iloc[1:2, 0], pandas_df.iloc[1:2, 0])
# DataFrame
df_equals(modin_df.iloc[[1, 2]], pandas_df.iloc[[1, 2]])
# See issue #80
# df_equals(modin_df.iloc[[1, 2], [1, 0]], pandas_df.iloc[[1, 2], [1, 0]])
df_equals(modin_df.iloc[1:2, 0:2], pandas_df.iloc[1:2, 0:2])
# Issue #43
modin_df.iloc[0:3, :]
# Write Item
modin_df.iloc[[1, 2]] = 42
pandas_df.iloc[[1, 2]] = 42
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[0] = modin_df.iloc[1]
pandas_df.iloc[0] = pandas_df.iloc[1]
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.iloc[:, 0] = modin_df.iloc[:, 1]
pandas_df.iloc[:, 0] = pandas_df.iloc[:, 1]
df_equals(modin_df, pandas_df)
# From issue #1775
df_equals(
modin_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
pandas_df.iloc[lambda df: df.index.get_indexer_for(df.index[:5])],
)
else:
with pytest.raises(IndexError):
modin_df.iloc[0, 1]
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.index, pandas_df.index)
modin_df_cp = modin_df.copy()
pandas_df_cp = pandas_df.copy()
modin_df_cp.index = [str(i) for i in modin_df_cp.index]
pandas_df_cp.index = [str(i) for i in pandas_df_cp.index]
df_equals(modin_df_cp.index, pandas_df_cp.index)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_indexing_duplicate_axis(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df.index = pandas_df.index = [i // 3 for i in range(len(modin_df))]
assert any(modin_df.index.duplicated())
assert any(pandas_df.index.duplicated())
df_equals(modin_df.iloc[0], pandas_df.iloc[0])
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.iloc[0, 0:4], pandas_df.iloc[0, 0:4])
df_equals(
modin_df.loc[0, modin_df.columns[0:4]],
pandas_df.loc[0, pandas_df.columns[0:4]],
)
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_keys(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.keys(), pandas_df.keys())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
# Scalar
df_equals(modin_df.loc[0, key1], pandas_df.loc[0, key1])
# Series
df_equals(modin_df.loc[0], pandas_df.loc[0])
df_equals(modin_df.loc[1:, key1], pandas_df.loc[1:, key1])
df_equals(modin_df.loc[1:2, key1], pandas_df.loc[1:2, key1])
# DataFrame
df_equals(modin_df.loc[[1, 2]], pandas_df.loc[[1, 2]])
# List-like of booleans
indices = [i % 3 == 0 for i in range(len(modin_df.index))]
columns = [i % 5 == 0 for i in range(len(modin_df.columns))]
modin_result = modin_df.loc[indices, columns]
pandas_result = pandas_df.loc[indices, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[:, columns]
pandas_result = pandas_df.loc[:, columns]
df_equals(modin_result, pandas_result)
modin_result = modin_df.loc[indices]
pandas_result = pandas_df.loc[indices]
df_equals(modin_result, pandas_result)
# See issue #80
# df_equals(modin_df.loc[[1, 2], ['col1']], pandas_df.loc[[1, 2], ['col1']])
df_equals(modin_df.loc[1:2, key1:key2], pandas_df.loc[1:2, key1:key2])
# From issue #421
df_equals(modin_df.loc[:, [key2, key1]], pandas_df.loc[:, [key2, key1]])
df_equals(modin_df.loc[[2, 1], :], pandas_df.loc[[2, 1], :])
# From issue #1023
key1 = modin_df.columns[0]
key2 = modin_df.columns[-2]
df_equals(modin_df.loc[:, key1:key2], pandas_df.loc[:, key1:key2])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.loc[[1, 2]] = 42
pandas_df_copy.loc[[1, 2]] = 42
df_equals(modin_df_copy, pandas_df_copy)
# From issue #1775
df_equals(
modin_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
pandas_df.loc[lambda df: df.iloc[:, 0].isin(list(range(1000)))],
)
# From issue #1374
with pytest.raises(KeyError):
modin_df.loc["NO_EXIST"]
def test_loc_multi_index():
modin_df = pd.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
pandas_df = pandas.read_csv(
"modin/pandas/test/data/blah.csv", header=[0, 1, 2, 3], index_col=0
)
df_equals(modin_df.loc[1], pandas_df.loc[1])
df_equals(modin_df.loc[1, "Presidents"], pandas_df.loc[1, "Presidents"])
df_equals(
modin_df.loc[1, ("Presidents", "Pure mentions")],
pandas_df.loc[1, ("Presidents", "Pure mentions")],
)
assert (
modin_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
== pandas_df.loc[1, ("Presidents", "Pure mentions", "IND", "all")]
)
df_equals(modin_df.loc[(1, 2), "Presidents"], pandas_df.loc[(1, 2), "Presidents"])
tuples = [
("bar", "one"),
("bar", "two"),
("bar", "three"),
("bar", "four"),
("baz", "one"),
("baz", "two"),
("baz", "three"),
("baz", "four"),
("foo", "one"),
("foo", "two"),
("foo", "three"),
("foo", "four"),
("qux", "one"),
("qux", "two"),
("qux", "three"),
("qux", "four"),
]
modin_index = pd.MultiIndex.from_tuples(tuples, names=["first", "second"])
pandas_index = pandas.MultiIndex.from_tuples(tuples, names=["first", "second"])
frame_data = np.random.randint(0, 100, size=(16, 100))
modin_df = pd.DataFrame(
frame_data,
index=modin_index,
columns=["col{}".format(i) for i in range(100)],
)
pandas_df = pandas.DataFrame(
frame_data,
index=pandas_index,
columns=["col{}".format(i) for i in range(100)],
)
df_equals(modin_df.loc["bar", "col1"], pandas_df.loc["bar", "col1"])
assert modin_df.loc[("bar", "one"), "col1"] == pandas_df.loc[("bar", "one"), "col1"]
df_equals(
modin_df.loc["bar", ("col1", "col2")],
pandas_df.loc["bar", ("col1", "col2")],
)
# From issue #1456
transposed_modin = modin_df.T
transposed_pandas = pandas_df.T
df_equals(
transposed_modin.loc[transposed_modin.index[:-2], :],
transposed_pandas.loc[transposed_pandas.index[:-2], :],
)
# From issue #1610
df_equals(modin_df.loc[modin_df.index], pandas_df.loc[pandas_df.index])
df_equals(modin_df.loc[modin_df.index[:7]], pandas_df.loc[pandas_df.index[:7]])
@pytest.mark.parametrize("index", [["row1", "row2", "row3"]])
@pytest.mark.parametrize("columns", [["col1", "col2"]])
def test_loc_assignment(index, columns):
md_df, pd_df = create_test_dfs(index=index, columns=columns)
for i, ind in enumerate(index):
for j, col in enumerate(columns):
value_to_assign = int(str(i) + str(j))
md_df.loc[ind][col] = value_to_assign
pd_df.loc[ind][col] = value_to_assign
df_equals(md_df, pd_df)
@pytest.fixture
def loc_iter_dfs():
columns = ["col1", "col2", "col3"]
index = ["row1", "row2", "row3"]
return create_test_dfs(
{col: ([idx] * len(index)) for idx, col in enumerate(columns)},
columns=columns,
index=index,
)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_iter_assignment(loc_iter_dfs, reverse_order, axis):
if reverse_order and axis:
pytest.xfail(
"Due to internal sorting of lookup values assignment order is lost, see GH-#2552"
)
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
pd_df.loc[select] = pd_df.loc[select] + pd_df.loc[select]
md_df.loc[select] = md_df.loc[select] + md_df.loc[select]
df_equals(md_df, pd_df)
@pytest.mark.parametrize("reverse_order", [False, True])
@pytest.mark.parametrize("axis", [0, 1])
def test_loc_order(loc_iter_dfs, reverse_order, axis):
md_df, pd_df = loc_iter_dfs
select = [slice(None), slice(None)]
select[axis] = sorted(pd_df.axes[axis][:-1], reverse=reverse_order)
select = tuple(select)
df_equals(pd_df.loc[select], md_df.loc[select])
@pytest.mark.gpu
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_loc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].loc[0] = 500
pandas_df[key1].loc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].loc[0] = None
pandas_df[key2].loc[0] = None
df_equals(modin_df, pandas_df)
def test_iloc_assignment():
modin_df = pd.DataFrame(index=["row1", "row2", "row3"], columns=["col1", "col2"])
pandas_df = pandas.DataFrame(
index=["row1", "row2", "row3"], columns=["col1", "col2"]
)
modin_df.iloc[0]["col1"] = 11
modin_df.iloc[1]["col1"] = 21
modin_df.iloc[2]["col1"] = 31
modin_df.iloc[0]["col2"] = 12
modin_df.iloc[1]["col2"] = 22
modin_df.iloc[2]["col2"] = 32
pandas_df.iloc[0]["col1"] = 11
pandas_df.iloc[1]["col1"] = 21
pandas_df.iloc[2]["col1"] = 31
pandas_df.iloc[0]["col2"] = 12
pandas_df.iloc[1]["col2"] = 22
pandas_df.iloc[2]["col2"] = 32
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_iloc_nested_assignment(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key1 = modin_df.columns[0]
key2 = modin_df.columns[1]
modin_df[key1].iloc[0] = 500
pandas_df[key1].iloc[0] = 500
df_equals(modin_df, pandas_df)
modin_df[key2].iloc[0] = None
pandas_df[key2].iloc[0] = None
df_equals(modin_df, pandas_df)
def test_loc_series():
md_df, pd_df = create_test_dfs({"a": [1, 2], "b": [3, 4]})
pd_df.loc[pd_df["a"] > 1, "b"] = np.log(pd_df["b"])
md_df.loc[md_df["a"] > 1, "b"] = np.log(md_df["b"])
df_equals(pd_df, md_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pop(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
temp_modin_df = modin_df.copy()
temp_pandas_df = pandas_df.copy()
modin_popped = temp_modin_df.pop(key)
pandas_popped = temp_pandas_df.pop(key)
df_equals(modin_popped, pandas_popped)
df_equals(temp_modin_df, temp_pandas_df)
def test_reindex():
frame_data = {
"col1": [0, 1, 2, 3],
"col2": [4, 5, 6, 7],
"col3": [8, 9, 10, 11],
"col4": [12, 13, 14, 15],
"col5": [0, 0, 0, 0],
}
pandas_df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df_equals(modin_df.reindex([0, 3, 2, 1]), pandas_df.reindex([0, 3, 2, 1]))
df_equals(modin_df.reindex([0, 6, 2]), pandas_df.reindex([0, 6, 2]))
df_equals(
modin_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
pandas_df.reindex(["col1", "col3", "col4", "col2"], axis=1),
)
df_equals(
modin_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
pandas_df.reindex(["col1", "col7", "col4", "col8"], axis=1),
)
df_equals(
modin_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
pandas_df.reindex(index=[0, 1, 5], columns=["col1", "col7", "col4", "col8"]),
)
df_equals(
modin_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
pandas_df.T.reindex(["col1", "col7", "col4", "col8"], axis=0),
)
def test_reindex_like():
df1 = pd.DataFrame(
[
[24.3, 75.7, "high"],
[31, 87.8, "high"],
[22, 71.6, "medium"],
[35, 95, "medium"],
],
columns=["temp_celsius", "temp_fahrenheit", "windspeed"],
index=pd.date_range(start="2014-02-12", end="2014-02-15", freq="D"),
)
df2 = pd.DataFrame(
[[28, "low"], [30, "low"], [35.1, "medium"]],
columns=["temp_celsius", "windspeed"],
index=pd.DatetimeIndex(["2014-02-12", "2014-02-13", "2014-02-15"]),
)
with pytest.warns(UserWarning):
df2.reindex_like(df1)
def test_rename_sanity():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
mapping = {"col1": "a", "index": "b", "col3": "c", "col4": "d"}
modin_df = pd.DataFrame(source_df)
df_equals(modin_df.rename(columns=mapping), source_df.rename(columns=mapping))
renamed2 = source_df.rename(columns=str.lower)
df_equals(modin_df.rename(columns=str.lower), renamed2)
modin_df = pd.DataFrame(renamed2)
df_equals(modin_df.rename(columns=str.upper), renamed2.rename(columns=str.upper))
# index
data = {"A": {"foo": 0, "bar": 1}}
# gets sorted alphabetically
df = pandas.DataFrame(data)
modin_df = pd.DataFrame(data)
assert_index_equal(
modin_df.rename(index={"foo": "bar", "bar": "foo"}).index,
df.rename(index={"foo": "bar", "bar": "foo"}).index,
)
assert_index_equal(
modin_df.rename(index=str.upper).index, df.rename(index=str.upper).index
)
# Using the `mapper` functionality with `axis`
assert_index_equal(
modin_df.rename(str.upper, axis=0).index, df.rename(str.upper, axis=0).index
)
assert_index_equal(
modin_df.rename(str.upper, axis=1).columns,
df.rename(str.upper, axis=1).columns,
)
# have to pass something
with pytest.raises(TypeError):
modin_df.rename()
# partial columns
renamed = source_df.rename(columns={"col3": "foo", "col4": "bar"})
modin_df = pd.DataFrame(source_df)
assert_index_equal(
modin_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
source_df.rename(columns={"col3": "foo", "col4": "bar"}).index,
)
# other axis
renamed = source_df.T.rename(index={"col3": "foo", "col4": "bar"})
assert_index_equal(
source_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
modin_df.T.rename(index={"col3": "foo", "col4": "bar"}).index,
)
# index with name
index = pandas.Index(["foo", "bar"], name="name")
renamer = pandas.DataFrame(data, index=index)
modin_df = pd.DataFrame(data, index=index)
renamed = renamer.rename(index={"foo": "bar", "bar": "foo"})
modin_renamed = modin_df.rename(index={"foo": "bar", "bar": "foo"})
assert_index_equal(renamed.index, modin_renamed.index)
assert renamed.index.name == modin_renamed.index.name
def test_rename_multiindex():
tuples_index = [("foo1", "bar1"), ("foo2", "bar2")]
tuples_columns = [("fizz1", "buzz1"), ("fizz2", "buzz2")]
index = pandas.MultiIndex.from_tuples(tuples_index, names=["foo", "bar"])
columns = pandas.MultiIndex.from_tuples(tuples_columns, names=["fizz", "buzz"])
frame_data = [(0, 0), (1, 1)]
df = pandas.DataFrame(frame_data, index=index, columns=columns)
modin_df = pd.DataFrame(frame_data, index=index, columns=columns)
#
# without specifying level -> across all levels
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
modin_renamed = modin_df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.index, modin_renamed.index)
renamed = df.rename(
index={"foo1": "foo3", "bar2": "bar3"},
columns={"fizz1": "fizz3", "buzz2": "buzz3"},
)
assert_index_equal(renamed.columns, modin_renamed.columns)
assert renamed.index.names == modin_renamed.index.names
assert renamed.columns.names == modin_renamed.columns.names
#
# with specifying a level
# dict
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=0
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="fizz"
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1)
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level=1
)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz")
modin_renamed = modin_df.rename(
columns={"fizz1": "fizz3", "buzz2": "buzz3"}, level="buzz"
)
assert_index_equal(renamed.columns, modin_renamed.columns)
# function
func = str.upper
renamed = df.rename(columns=func, level=0)
modin_renamed = modin_df.rename(columns=func, level=0)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="fizz")
modin_renamed = modin_df.rename(columns=func, level="fizz")
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level=1)
modin_renamed = modin_df.rename(columns=func, level=1)
assert_index_equal(renamed.columns, modin_renamed.columns)
renamed = df.rename(columns=func, level="buzz")
modin_renamed = modin_df.rename(columns=func, level="buzz")
assert_index_equal(renamed.columns, modin_renamed.columns)
# index
renamed = df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
modin_renamed = modin_df.rename(index={"foo1": "foo3", "bar2": "bar3"}, level=0)
assert_index_equal(modin_renamed.index, renamed.index)
@pytest.mark.xfail(reason="Pandas does not pass this test")
def test_rename_nocopy():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
modin_df = pd.DataFrame(source_df)
modin_renamed = modin_df.rename(columns={"col3": "foo"}, copy=False)
modin_renamed["foo"] = 1
assert (modin_df["col3"] == 1).all()
def test_rename_inplace():
source_df = pandas.DataFrame(test_data["int_data"])[
["col1", "index", "col3", "col4"]
]
modin_df = pd.DataFrame(source_df)
df_equals(
modin_df.rename(columns={"col3": "foo"}),
source_df.rename(columns={"col3": "foo"}),
)
frame = source_df.copy()
modin_frame = modin_df.copy()
frame.rename(columns={"col3": "foo"}, inplace=True)
modin_frame.rename(columns={"col3": "foo"}, inplace=True)
df_equals(modin_frame, frame)
def test_rename_bug():
# rename set ref_locs, and set_index was not resetting
frame_data = {0: ["foo", "bar"], 1: ["bah", "bas"], 2: [1, 2]}
df = pandas.DataFrame(frame_data)
modin_df = pd.DataFrame(frame_data)
df = df.rename(columns={0: "a"})
df = df.rename(columns={1: "b"})
df = df.set_index(["a", "b"])
df.columns = ["2001-01-01"]
modin_df = modin_df.rename(columns={0: "a"})
modin_df = modin_df.rename(columns={1: "b"})
modin_df = modin_df.set_index(["a", "b"])
modin_df.columns = ["2001-01-01"]
df_equals(modin_df, df)
def test_rename_axis():
data = {"num_legs": [4, 4, 2], "num_arms": [0, 0, 2]}
index = ["dog", "cat", "monkey"]
modin_df = pd.DataFrame(data, index)
pandas_df = pandas.DataFrame(data, index)
df_equals(modin_df.rename_axis("animal"), pandas_df.rename_axis("animal"))
df_equals(
modin_df.rename_axis("limbs", axis="columns"),
pandas_df.rename_axis("limbs", axis="columns"),
)
modin_df.rename_axis("limbs", axis="columns", inplace=True)
pandas_df.rename_axis("limbs", axis="columns", inplace=True)
df_equals(modin_df, pandas_df)
new_index = pd.MultiIndex.from_product(
[["mammal"], ["dog", "cat", "monkey"]], names=["type", "name"]
)
modin_df.index = new_index
pandas_df.index = new_index
df_equals(
modin_df.rename_axis(index={"type": "class"}),
pandas_df.rename_axis(index={"type": "class"}),
)
df_equals(
modin_df.rename_axis(columns=str.upper),
pandas_df.rename_axis(columns=str.upper),
)
df_equals(
modin_df.rename_axis(columns=[str.upper(o) for o in modin_df.columns.names]),
pandas_df.rename_axis(columns=[str.upper(o) for o in pandas_df.columns.names]),
)
with pytest.raises(ValueError):
df_equals(
modin_df.rename_axis(str.upper, axis=1),
pandas_df.rename_axis(str.upper, axis=1),
)
def test_rename_axis_inplace():
test_frame = pandas.DataFrame(test_data["int_data"])
modin_df = pd.DataFrame(test_frame)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("foo", inplace=True)
modin_no_return = modin_result.rename_axis("foo", inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
result = test_frame.copy()
modin_result = modin_df.copy()
no_return = result.rename_axis("bar", axis=1, inplace=True)
modin_no_return = modin_result.rename_axis("bar", axis=1, inplace=True)
assert no_return is modin_no_return
df_equals(modin_result, result)
def test_reorder_levels():
data = np.random.randint(1, 100, 12)
modin_df = pd.DataFrame(
data,
index=pd.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
),
)
pandas_df = pandas.DataFrame(
data,
index=pandas.MultiIndex.from_tuples(
[
(num, letter, color)
for num in range(1, 3)
for letter in ["a", "b", "c"]
for color in ["Red", "Green"]
],
names=["Number", "Letter", "Color"],
),
)
df_equals(
modin_df.reorder_levels(["Letter", "Color", "Number"]),
pandas_df.reorder_levels(["Letter", "Color", "Number"]),
)
def test_reindex_multiindex():
data1, data2 = np.random.randint(1, 20, (5, 5)), np.random.randint(10, 25, 6)
index = np.array(["AUD", "BRL", "CAD", "EUR", "INR"])
modin_midx = pd.MultiIndex.from_product(
[["Bank_1", "Bank_2"], ["AUD", "CAD", "EUR"]], names=["Bank", "Curency"]
)
pandas_midx = pandas.MultiIndex.from_product(
[["Bank_1", "Bank_2"], ["AUD", "CAD", "EUR"]], names=["Bank", "Curency"]
)
modin_df1, modin_df2 = (
pd.DataFrame(data=data1, index=index, columns=index),
pd.DataFrame(data2, modin_midx),
)
pandas_df1, pandas_df2 = (
pandas.DataFrame(data=data1, index=index, columns=index),
pandas.DataFrame(data2, pandas_midx),
)
modin_df2.columns, pandas_df2.columns = ["Notional"], ["Notional"]
md_midx = pd.MultiIndex.from_product([modin_df2.index.levels[0], modin_df1.index])
pd_midx = pandas.MultiIndex.from_product(
[pandas_df2.index.levels[0], pandas_df1.index]
)
# reindex without axis, index, or columns
modin_result = modin_df1.reindex(md_midx, fill_value=0)
pandas_result = pandas_df1.reindex(pd_midx, fill_value=0)
df_equals(modin_result, pandas_result)
# reindex with only axis
modin_result = modin_df1.reindex(md_midx, fill_value=0, axis=0)
pandas_result = pandas_df1.reindex(pd_midx, fill_value=0, axis=0)
df_equals(modin_result, pandas_result)
# reindex with axis and level
modin_result = modin_df1.reindex(md_midx, fill_value=0, axis=0, level=0)
pandas_result = pandas_df1.reindex(pd_midx, fill_value=0, axis=0, level=0)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_reset_index(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.reset_index(inplace=False)
pandas_result = pandas_df.reset_index(inplace=False)
df_equals(modin_result, pandas_result)
modin_df_cp = modin_df.copy()
pd_df_cp = pandas_df.copy()
modin_df_cp.reset_index(inplace=True)
pd_df_cp.reset_index(inplace=True)
df_equals(modin_df_cp, pd_df_cp)
@pytest.mark.parametrize(
"data",
[
pytest.param(
test_data["int_data"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
test_data["float_nan_data"],
],
ids=["int_data", "float_nan_data"],
)
@pytest.mark.parametrize("nlevels", [3])
@pytest.mark.parametrize("columns_multiindex", [True, False])
@pytest.mark.parametrize(
"level",
[
"no_level",
None,
0,
1,
2,
[2, 0],
[2, 1],
[1, 0],
pytest.param(
[2, 1, 2],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
[0, 0, 0, 0],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
["level_name_1"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
["level_name_2", "level_name_1"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
[2, "level_name_0"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
],
)
@pytest.mark.parametrize("col_level", ["no_col_level", 0, 1, 2])
@pytest.mark.parametrize("col_fill", ["no_col_fill", None, 0, "new"])
@pytest.mark.parametrize("drop", [False])
@pytest.mark.parametrize(
"multiindex_levels_names_max_levels",
[
0,
1,
2,
pytest.param(
3, marks=pytest.mark.skipif(not extra_test_parameters, reason="extra")
),
pytest.param(
4, marks=pytest.mark.skipif(not extra_test_parameters, reason="extra")
),
],
)
@pytest.mark.parametrize(
"none_in_index_names",
[
pytest.param(
False,
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
True,
"mixed_1st_None",
pytest.param(
"mixed_2nd_None",
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
],
)
def test_reset_index_with_multi_index_no_drop(
data,
nlevels,
columns_multiindex,
level,
col_level,
col_fill,
drop,
multiindex_levels_names_max_levels,
none_in_index_names,
):
data_rows = len(data[list(data.keys())[0]])
index = generate_multiindex(data_rows, nlevels=nlevels)
data_columns = len(data.keys())
columns = (
generate_multiindex(data_columns, nlevels=nlevels)
if columns_multiindex
else pandas.RangeIndex(0, data_columns)
)
# Replace original data columns with generated
data = {columns[ind]: data[key] for ind, key in enumerate(data)}
index.names = (
[f"level_{i}" for i in range(index.nlevels)]
if multiindex_levels_names_max_levels == 0
else [
tuple(
[
f"level_{i}_name_{j}"
for j in range(
0,
max(multiindex_levels_names_max_levels + 1 - index.nlevels, 0)
+ i,
)
]
)
if max(multiindex_levels_names_max_levels + 1 - index.nlevels, 0) + i > 0
else f"level_{i}"
for i in range(index.nlevels)
]
)
if none_in_index_names is True:
index.names = [None] * len(index.names)
elif none_in_index_names:
names_list = list(index.names)
start_index = 0 if none_in_index_names == "mixed_1st_None" else 1
names_list[start_index::2] = [None] * len(names_list[start_index::2])
index.names = names_list
modin_df = pd.DataFrame(data, index=index, columns=columns)
pandas_df = pandas.DataFrame(data, index=index, columns=columns)
if isinstance(level, list):
level = [
index.names[int(x[len("level_name_") :])]
if isinstance(x, str) and x.startswith("level_name_")
else x
for x in level
]
kwargs = {"drop": drop}
if level != "no_level":
kwargs["level"] = level
if col_level != "no_col_level":
kwargs["col_level"] = col_level
if col_fill != "no_col_fill":
kwargs["col_fill"] = col_fill
eval_general(modin_df, pandas_df, lambda df: df.reset_index(**kwargs))
@pytest.mark.parametrize(
"data",
[
pytest.param(
test_data["int_data"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
test_data["float_nan_data"],
],
ids=["int_data", "float_nan_data"],
)
@pytest.mark.parametrize("nlevels", [3])
@pytest.mark.parametrize(
"level",
[
"no_level",
None,
0,
1,
2,
[2, 0],
[2, 1],
[1, 0],
pytest.param(
[2, 1, 2],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
[0, 0, 0, 0],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
["level_name_1"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
["level_name_2", "level_name_1"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
pytest.param(
[2, "level_name_0"],
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
],
)
@pytest.mark.parametrize(
"multiindex_levels_names_max_levels",
[
0,
1,
2,
pytest.param(
3, marks=pytest.mark.skipif(not extra_test_parameters, reason="extra")
),
pytest.param(
4, marks=pytest.mark.skipif(not extra_test_parameters, reason="extra")
),
],
)
@pytest.mark.parametrize(
"none_in_index_names",
[
pytest.param(
False,
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
True,
"mixed_1st_None",
pytest.param(
"mixed_2nd_None",
marks=pytest.mark.skipif(not extra_test_parameters, reason="extra"),
),
],
)
def test_reset_index_with_multi_index_drop(
data, nlevels, level, multiindex_levels_names_max_levels, none_in_index_names
):
test_reset_index_with_multi_index_no_drop(
data,
nlevels,
True,
level,
"no_col_level",
"no_col_fill",
True,
multiindex_levels_names_max_levels,
none_in_index_names,
)
@pytest.mark.parametrize("index_levels_names_max_levels", [0, 1, 2])
def test_reset_index_with_named_index(index_levels_names_max_levels):
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
index_name = (
tuple([f"name_{j}" for j in range(0, index_levels_names_max_levels)])
if index_levels_names_max_levels > 0
else "NAME_OF_INDEX"
)
modin_df.index.name = pandas_df.index.name = index_name
df_equals(modin_df, pandas_df)
df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))
modin_df.reset_index(drop=True, inplace=True)
pandas_df.reset_index(drop=True, inplace=True)
df_equals(modin_df, pandas_df)
modin_df = pd.DataFrame(test_data_values[0])
pandas_df = pandas.DataFrame(test_data_values[0])
modin_df.index.name = pandas_df.index.name = index_name
df_equals(modin_df.reset_index(drop=False), pandas_df.reset_index(drop=False))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_sample(data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
modin_df.sample(n=3, frac=0.4, axis=axis)
with pytest.raises(KeyError):
modin_df.sample(frac=0.5, weights="CoLuMn_No_ExIsT", axis=0)
with pytest.raises(ValueError):
modin_df.sample(frac=0.5, weights=modin_df.columns[0], axis=1)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5, weights=[0.5 for _ in range(len(modin_df.index[:-1]))], axis=0
)
with pytest.raises(ValueError):
modin_df.sample(
frac=0.5,
weights=[0.5 for _ in range(len(modin_df.columns[:-1]))],
axis=1,
)
with pytest.raises(ValueError):
modin_df.sample(n=-3, axis=axis)
with pytest.raises(ValueError):
modin_df.sample(frac=0.2, weights=pandas.Series(), axis=axis)
if isinstance(axis, str):
num_axis = pandas.DataFrame()._get_axis_number(axis)
else:
num_axis = axis
# weights that sum to 1
sums = sum(i % 2 for i in range(len(modin_df.axes[num_axis])))
weights = [i % 2 / sums for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
# weights that don't sum to 1
weights = [i % 2 for i in range(len(modin_df.axes[num_axis]))]
modin_result = modin_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
pandas_result = pandas_df.sample(
frac=0.5, random_state=42, weights=weights, axis=axis
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=0, axis=axis)
pandas_result = pandas_df.sample(n=0, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(frac=0.5, random_state=42, axis=axis)
pandas_result = pandas_df.sample(frac=0.5, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
modin_result = modin_df.sample(n=2, random_state=42, axis=axis)
pandas_result = pandas_df.sample(n=2, random_state=42, axis=axis)
df_equals(modin_result, pandas_result)
# issue #1692, numpy RandomState object
# We must create a new random state for each iteration because the values that
# are selected will be impacted if the object has already been used.
random_state = np.random.RandomState(42)
modin_result = modin_df.sample(frac=0.5, random_state=random_state, axis=axis)
random_state = np.random.RandomState(42)
pandas_result = pandas_df.sample(frac=0.5, random_state=random_state, axis=axis)
df_equals(modin_result, pandas_result)
def test_select_dtypes():
frame_data = {
"test1": list("abc"),
"test2": np.arange(3, 6).astype("u1"),
"test3": np.arange(8.0, 11.0, dtype="float64"),
"test4": [True, False, True],
"test5": pandas.date_range("now", periods=3).values,
"test6": list(range(5, 8)),
}
df = pandas.DataFrame(frame_data)
rd = pd.DataFrame(frame_data)
include = np.float, "integer"
exclude = (np.bool_,)
r = rd.select_dtypes(include=include, exclude=exclude)
e = df[["test2", "test3", "test6"]]
df_equals(r, e)
r = rd.select_dtypes(include=np.bool_)
e = df[["test4"]]
df_equals(r, e)
r = rd.select_dtypes(exclude=np.bool_)
e = df[["test1", "test2", "test3", "test5", "test6"]]
df_equals(r, e)
try:
pd.DataFrame().select_dtypes()
assert False
except ValueError:
assert True
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("n", int_arg_values, ids=arg_keys("n", int_arg_keys))
def test_tail(data, n):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.tail(n), pandas_df.tail(n))
df_equals(modin_df.tail(len(modin_df)), pandas_df.tail(len(pandas_df)))
def test_xs():
d = {
"num_legs": [4, 4, 2, 2],
"num_wings": [0, 0, 2, 2],
"class": ["mammal", "mammal", "mammal", "bird"],
"animal": ["cat", "dog", "bat", "penguin"],
"locomotion": ["walks", "walks", "flies", "walks"],
}
df = pd.DataFrame(data=d)
df = df.set_index(["class", "animal", "locomotion"])
with pytest.warns(UserWarning):
df.xs("mammal")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getitem__(data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
key = modin_df.columns[0]
modin_col = modin_df.__getitem__(key)
assert isinstance(modin_col, pd.Series)
pd_col = pandas_df[key]
df_equals(pd_col, modin_col)
slices = [
(None, -1),
(-1, None),
(1, 2),
(1, None),
(None, 1),
(1, -1),
(-3, -1),
(1, -1, 2),
]
# slice test
for slice_param in slices:
s = slice(*slice_param)
df_equals(modin_df[s], pandas_df[s])
# Test empty
df_equals(pd.DataFrame([])[:10], pandas.DataFrame([])[:10])
def test_getitem_empty_mask():
# modin-project/modin#517
modin_frames = []
pandas_frames = []
data1 = np.random.randint(0, 100, size=(100, 4))
mdf1 = pd.DataFrame(data1, columns=list("ABCD"))
pdf1 = pandas.DataFrame(data1, columns=list("ABCD"))
modin_frames.append(mdf1)
pandas_frames.append(pdf1)
data2 = np.random.randint(0, 100, size=(100, 4))
mdf2 = pd.DataFrame(data2, columns=list("ABCD"))
pdf2 = pandas.DataFrame(data2, columns=list("ABCD"))
modin_frames.append(mdf2)
pandas_frames.append(pdf2)
data3 = np.random.randint(0, 100, size=(100, 4))
mdf3 = pd.DataFrame(data3, columns=list("ABCD"))
pdf3 = pandas.DataFrame(data3, columns=list("ABCD"))
modin_frames.append(mdf3)
pandas_frames.append(pdf3)
modin_data = pd.concat(modin_frames)
pandas_data = pandas.concat(pandas_frames)
df_equals(
modin_data[[False for _ in modin_data.index]],
pandas_data[[False for _ in modin_data.index]],
)
def test_getitem_datetime_slice():
data = {"data": range(1000)}
index = pd.date_range("2017/1/4", periods=1000)
modin_df = pd.DataFrame(data=data, index=index)
pandas_df = pandas.DataFrame(data=data, index=index)
s = slice("2017-01-06", "2017-01-09")
df_equals(modin_df[s], pandas_df[s])
def test_getitem_same_name():
data = [
[1, 2, 3, 4],
[5, 6, 7, 8],
[9, 10, 11, 12],
[13, 14, 15, 16],
[17, 18, 19, 20],
]
columns = ["c1", "c2", "c1", "c3"]
modin_df = pd.DataFrame(data, columns=columns)
pandas_df = pandas.DataFrame(data, columns=columns)
df_equals(modin_df["c1"], pandas_df["c1"])
df_equals(modin_df["c2"], pandas_df["c2"])
df_equals(modin_df[["c1", "c2"]], pandas_df[["c1", "c2"]])
df_equals(modin_df["c3"], pandas_df["c3"])
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___getattr__(request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
col = modin_df.__getattr__(key)
col = modin_df.__getattr__("col1")
assert isinstance(col, pd.Series)
col = getattr(modin_df, "col1")
assert isinstance(col, pd.Series)
# Check that lookup in column doesn't override other attributes
df2 = modin_df.rename(index=str, columns={key: "columns"})
assert isinstance(df2.columns, pandas.Index)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___setitem__(data):
eval_setitem(*create_test_dfs(data), loc=-1, value=1)
eval_setitem(
*create_test_dfs(data), loc=-1, value=lambda df: type(df)(df[df.columns[0]])
)
nrows = len(data[list(data.keys())[0]])
arr = np.arange(nrows * 2).reshape(-1, 2)
eval_setitem(*create_test_dfs(data), loc=-1, value=arr)
eval_setitem(*create_test_dfs(data), col="___NON EXISTENT COLUMN", value=arr)
eval_setitem(*create_test_dfs(data), loc=0, value=np.arange(nrows))
modin_df = pd.DataFrame(columns=data.keys())
pandas_df = pandas.DataFrame(columns=data.keys())
for col in modin_df.columns:
modin_df[col] = np.arange(1000)
for col in pandas_df.columns:
pandas_df[col] = np.arange(1000)
df_equals(modin_df, pandas_df)
# Test series assignment to column
modin_df = pd.DataFrame(columns=modin_df.columns)
pandas_df = pandas.DataFrame(columns=pandas_df.columns)
modin_df[modin_df.columns[-1]] = modin_df[modin_df.columns[0]]
pandas_df[pandas_df.columns[-1]] = pandas_df[pandas_df.columns[0]]
df_equals(modin_df, pandas_df)
if sys.version_info.major == 3 and sys.version_info.minor > 6:
# This test doesn't work correctly on Python 3.6
# Test 2d ndarray assignment to column
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_df["new_col"] = modin_df[[modin_df.columns[0]]].values
pandas_df["new_col"] = pandas_df[[pandas_df.columns[0]]].values
df_equals(modin_df, pandas_df)
assert isinstance(modin_df["new_col"][0], type(pandas_df["new_col"][0]))
modin_df[1:5] = 10
pandas_df[1:5] = 10
df_equals(modin_df, pandas_df)
# Transpose test
modin_df = pd.DataFrame(data).T
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
import semantic_bert_config as config
import pandas as pd
from tqdm import tqdm
"""
Among other things, the sentences contain placeholders, e.g. for countries or sports,
so that the dataset can easily be varied and substantially enlarged.
For each placeholder, a new sentence is built with each replacement word.
(Two placeholders never occur in the same sentence.)
"""
def augment_data(row):
data = []
for dic in config.REPLACE_ARRAY:
if dic["indicator"] in row["TEXT"]:
for word in dic["word_list"]:
s = row["TEXT"].replace(dic["indicator"], word)
data.append({
"TEXT": s,
"LABEL1": row["LABEL1"],
"LABEL2": row["LABEL2"]
})
if not data:
return row.to_frame().T.fillna('none')
return pd.DataFrame.from_records(data).fillna('none')
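# Illustrative sketch of the structure augment_data expects from
# config.REPLACE_ARRAY; the indicator strings and word lists below are
# hypothetical, the real entries live in semantic_bert_config.
_EXAMPLE_REPLACE_ARRAY = [
    {"indicator": "[LAND]", "word_list": ["Deutschland", "Frankreich", "Spanien"]},
    {"indicator": "[SPORTART]", "word_list": ["Fußball", "Tennis"]},
]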
"""
Combines every question of the question catalogue with the others and
checks for thematic similarity.
If the sentences are thematically similar => label = entailment
otherwise => label = neutral  TODO: what is the correct label here? It is not really a contradiction.
Examples from standard datasets:
neutral "A woman with a green headscarf, blue shirt and a very big grin." , The woman is young.
entailment "A woman with a green headscarf, blue shirt and a very big grin." , The woman is very happy.
contradiction "A woman with a green headscarf, blue shirt and a very big grin." , The woman has been shot.
"""
def combine_sentences(df):
combined_list = []
for i, outer_row in tqdm(df.iterrows(), total=len(df)):
for j, inner_row in df.iterrows():
# We do not need to compare a sentence with itself
# TODO: are duplicates needed? See the example in remove_dublicates(); if so, the <= below must become ==
# With == there are n * (n - 1) ordered combinations
# With <= there are (n choose 2) combinations
if i <= j:
continue
# LABEL1 is never none
label_list_outer = [outer_row["LABEL1"]]
label_list_inner = [inner_row["LABEL1"]]
if outer_row["LABEL2"] != "none":
label_list_outer.append(outer_row["LABEL2"])
if inner_row["LABEL2"] != "none":
label_list_inner.append(inner_row["LABEL2"])
sim = "neutral"
if any(label in label_list_outer for label in label_list_inner):
sim = "entailment"
combined_list.append({
"sentence1": outer_row["TEXT"],
"sentence2": inner_row["TEXT"],
"similarity": sim
})
return pd.DataFrame.from_records(combined_list)
"""
Since I am not sure whether the reversed ordering makes a difference,
I kept this option open for testing purposes.
Example:
sentence1 sentence2 similarity
0 Auf nach Darmstadt Auf nach Braunschweig neutral
1 Auf nach Braunschweig Auf nach Darmstadt neutral
"""
def remove_dublicates(df):
df_2 = df
for i, outer_row in tqdm(df.iterrows(), total=len(df)):
for j, inner_row in df.iterrows():
if i == j:
continue
if outer_row["sentence1"] == inner_row["sentence2"] and outer_row["sentence2"] == inner_row["sentence1"]:
df_2.drop(index=i,inplace=True)
return df_2.drop("index", axis="columns")
"""
Combines all questions from the dataset and assigns an appropriate label.
The standard labels are: ["contradiction", "entailment", "neutral"]
contradiction is not used here.
The label becomes positive/entailment when the sentences contain the same category.
Afterwards the combined and processed data are saved to a .csv file.
"""
def run():
# Load the csv database
df = | pd.read_csv("../input/fragen.csv", delimiter=",") | pandas.read_csv |
# Copyright 2020, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
from typing import Dict, List
import yaml
import os
def _read_IMARIS_cell_migration_data(filename):
'''
Read an IMARIS cell migration export file; if the file does not exist,
an empty DataFrame is returned.
'''
if (not os.path.isfile(filename)):
return | pd.DataFrame() | pandas.DataFrame |
import os
import pandas as pd
import numpy as np
import pickle
import logging, shutil, glob
import pymongo, joblib
from joblib import Parallel, delayed
from Fuzzy_clustering.ver_tf2.Clusterer_optimize_deep import cluster_optimize, clusterer
from sklearn.preprocessing import MinMaxScaler
from Fuzzy_clustering.ver_tf2.Cluster_train_regressors import cluster_train
from Fuzzy_clustering.ver_tf2.Global_train_regressor import global_train
from Fuzzy_clustering.ver_tf2.Cluster_train_regressor_TL import cluster_train_tl
from Fuzzy_clustering.ver_tf2.Global_train_regressor_TL import global_train_tl
from Fuzzy_clustering.ver_tf2.NWP_sampler import nwp_sampler
from Fuzzy_clustering.ver_tf2.Global_predict_regressor import global_predict
from Fuzzy_clustering.ver_tf2.Cluster_predict_regressors import cluster_predict
from Fuzzy_clustering.ver_tf2.Combine_train_model import Combine_train
import time
# for timing
from contextlib import contextmanager
from timeit import default_timer
@contextmanager
def elapsed_timer():
start = default_timer()
elapser = lambda: default_timer() - start
yield lambda: elapser()
end = default_timer()
elapser = lambda: end-start
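def _elapsed_timer_demo():
    # Illustrative sketch only, not part of the training pipeline: time a
    # trivial computation; after the block exits, elapsed() returns the final duration.
    with elapsed_timer() as elapsed:
        total = sum(range(10000))
    print('demo sum=%d took %.6f s' % (total, elapsed()))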
class ModelTrainManager(object):
def __init__(self, path_model):
self.istrained = False
self.path_model = path_model
try:
self.load()
except:
pass
def init(self, static_data, data_variables, use_db=False):
self.data_variables = data_variables
self.static_data = static_data
self.thres_split = static_data['clustering']['thres_split']
self.thres_act = static_data['clustering']['thres_act']
self.n_clusters = static_data['clustering']['n_clusters']
self.rated = static_data['rated']
self.var_imp = static_data['clustering']['var_imp']
self.var_lin = static_data['clustering']['var_lin']
self.var_nonreg = static_data['clustering']['var_nonreg']
self.create_logger()
self.use_db = use_db
if use_db:
self.db = self.open_db()
def open_db(self):
try:
myclient = pymongo.MongoClient("mongodb://" + self.static_data['url'] + ":" + self.static_data['port'] + "/")
project_db = myclient[self.static_data['_id']]
except:
self.logger.info('Cannot open Database')
self.use_db=False
project_db=None
raise ConnectionError('Cannot open Database')
self.logger.info('Open Database successfully')
return project_db
def create_logger(self):
self.logger = logging.getLogger(self.static_data['_id'])
self.logger.setLevel(logging.INFO)
handler = logging.FileHandler(os.path.join(self.path_model, 'log_model.log'), 'a')
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.logger.addHandler(handler)
def merge_old_data(self,X, y, X_cnn=np.array([]), X_lstm=np.array([])):
data_path=self.static_data['path_data']
if os.path.exists(os.path.join(data_path,'dataset_X.csv')):
X1 = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y1 = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
try:
X=X.append(X1)
y=y.append(y1)
X=X.round(4)
y=y.round(4)
X['target'] = y
X=X.drop_duplicates()
y = X['target'].copy(deep=True)
y = y.to_frame()
y.columns=['target']
X = X.drop(columns='target')
except Exception:
raise AssertionError('Cannot merge the historical data with the new ones')
X.to_csv(os.path.join(data_path, 'dataset_X.csv'))
y.to_csv(os.path.join(data_path, 'dataset_y.csv'))
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_3d = joblib.load(os.path.join(self.static_data['path_data'], 'dataset_cnn.pickle'))
X_cnn = np.vstack([X_cnn, X_3d])
joblib.dump(X_cnn, os.path.join(self.static_data['path_data'], 'dataset_cnn.pickle'))
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_2d = joblib.load(os.path.join(self.static_data['path_data'], 'dataset_lstm.pickle'))
X_lstm = np.vstack([X_lstm, X_2d])
joblib.dump(X_lstm, os.path.join(self.static_data['path_data'], 'dataset_lstm.pickle'))
self.logger.info('Data merged successfully')
return X, y, X_cnn, X_lstm
def load_data(self):
data_path = self.static_data['path_data']
X = pd.read_csv(os.path.join(data_path, 'dataset_X.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
y = pd.read_csv(os.path.join(data_path, 'dataset_y.csv'), index_col=0, header=0, parse_dates=True, dayfirst=True)
if os.path.exists(os.path.join(data_path, 'dataset_cnn.pickle')):
X_cnn = joblib.load(os.path.join(data_path, 'dataset_cnn.pickle'))
X_cnn = X_cnn.transpose([0, 2, 3, 1])
else:
X_cnn = np.array([])
if os.path.exists(os.path.join(data_path, 'dataset_lstm.pickle')):
X_lstm = joblib.load(os.path.join(data_path, 'dataset_lstm.pickle'))
else:
X_lstm = np.array([])
self.logger.info('Data loaded successfully')
return X, y, X_cnn, X_lstm
def backup(self,hor=None):
#TODO write to backup checking the version of the model (if there are previous versions, write current model in different folder)
if hor is None:
for filename in glob.glob(os.path.join(self.path_model, '*.*')):
shutil.copy(filename, self.static_data['path_backup'])
else:
for filename in glob.glob(os.path.join(self.path_model, '*.*')):
shutil.copy(filename, os.path.join(self.static_data['path_backup'],'hor_'+str(hor)))
def scale(self,X):
self.sc = MinMaxScaler(feature_range=(0, 1)).fit(X.values)
self.save()
return pd.DataFrame(self.sc.transform(X.values),columns=X.columns,index=X.index)
def train_fuzzy_clustering(self, X, y1):
N, D = X.shape
n_split = int(np.round(N * 0.7))
X_test = X.iloc[n_split + 1:]
y_test = y1.iloc[n_split + 1:]
X_train = X.iloc[:n_split]
y_train = y1.iloc[:n_split]
optimizer = cluster_optimize(self.static_data)
if self.rated is None:
rated = None
else:
rated = 20
if self.static_data['type'] == 'fa':
optimizer.run(X_train, y_train, X_test, y_test, rated, num_samples=300)
else:
optimizer.run(X_train, y_train, X_test, y_test, rated)
self.save()
def find_clusters_for_training(self, X_new, train):
act_new = self.clusterer.compute_activations(X_new)
if len(self.var_nonreg) > 0:
X_new = X_new.drop(columns=self.var_nonreg).copy(deep=True)
train_clust = []
if not len(train) == 0:
for clust in train:
indices = act_new[clust].index[act_new[clust] >= self.thres_act].tolist()
if len(indices) > 0:
inputs = X_new.loc[act_new[clust] >= self.thres_act]
cluster_dir = os.path.join(self.path_model, 'Regressor_layer/' + clust)
if not os.path.exists(cluster_dir):
os.makedirs(cluster_dir)
if not os.path.exists(os.path.join(cluster_dir, 'data')):
os.makedirs(os.path.join(cluster_dir, 'data'))
if not inputs.shape[0] == 0:
train_clust.append(clust)
else:
for clust in act_new.columns:
indices = act_new[clust].index[act_new[clust] >= self.thres_act].tolist()
if len(indices) > 0:
inputs = X_new.loc[act_new[clust] >= self.thres_act]
cluster_dir = os.path.join(self.path_model, 'Regressor_layer/' + clust)
if not os.path.exists(cluster_dir):
os.makedirs(cluster_dir)
if not os.path.exists(os.path.join(cluster_dir, 'data')):
os.makedirs(os.path.join(cluster_dir, 'data'))
if not inputs.shape[0] == 0:
train_clust.append(clust)
return train_clust
def split_test_data(self, activations, X1, y1, X_cnn, X_lstm):
split_indices = []
for clust in activations.columns:
indices = activations[clust].index[activations[clust] >= self.thres_act].tolist()
if len(indices) > 0:
if len(indices) > 1000:
n_split = int(np.round(len(indices) * 0.75))
split_indices.append(indices[n_split + 1])
else:
n_split = int(np.round(len(indices) * 0.85))
split_indices.append(indices[n_split + 1])
split_test = pd.Series(split_indices).min()
X_test = X1.loc[split_test:]
if X_test.shape[0] > 0.35 * X1.shape[0]:
split_test = None
self.split_test = split_test
return split_test
def save_global_data(self, activations, X1, y1, X_cnn, X_lstm):
# VARIABLES USED ONLY FOR CLUSTERING
if len(self.var_nonreg) > 0:
X1 = X1.drop(columns=self.var_nonreg).copy(deep=True)
split_test = self.split_test
self.logger.info('Save datasets for global model')
cluster_dir=os.path.join(self.static_data['path_model'], 'Global_regressor')
cluster_data_dir = os.path.join(cluster_dir, 'data')
if not os.path.exists(cluster_data_dir):
os.makedirs(cluster_data_dir)
act = activations
inputs = X1
targets = y1
inputs = inputs.drop(targets.index[pd.isnull(targets).values.ravel()])
targets = targets.drop(targets.index[pd.isnull(targets).values.ravel()])
targets = targets.drop(inputs.index[pd.isnull(inputs).any(1).values.ravel()])
inputs = inputs.drop(inputs.index[ | pd.isnull(inputs) | pandas.isnull |
# -*- coding: utf-8 -*-
# author: ysoftman
# python version : 3.x
# desc : pandas test
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# pandas provides the Timestamp, DatetimeIndex, Period, and PeriodIndex classes.
# Timestamp formats
print(pd.Timestamp('2/15/2019 07:20PM'))
print(pd.Timestamp('2019-02-15 07:20PM'))
# compute one month later
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(months=1))
# set the month to January
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(month=1))
# compute 10 days later
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(days=10))
# set the day to the 10th
print(pd.Timestamp('2/15/2019 07:20PM') + pd.DateOffset(day=10))
# convert to a datetime object
dt = pd.Timestamp('2019-02-15 07:20PM').to_pydatetime()
print(dt.year)
print(dt.month)
print(dt.day)
print(dt.hour)
print(dt.minute)
print(dt.second)
print()
# Period represents a span of time, such as a particular day or month.
# 'M' denotes a month.
print(pd.Period('02/2019'))
# 'D' denotes a day.
print(pd.Period('02/15/2019'))
print()
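# (illustrative addition, not in the original script) adding an integer to a
# Period moves it by that many periods of its frequency, e.g. one month for 'M':
# prints 2019-03
print(pd.Period('02/2019') + 1)
print()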
# create a Series indexed by Timestamps
t1 = pd.Series(list('abc'), [pd.Timestamp(
'2016-09-01'), pd.Timestamp('2016-09-02'), pd.Timestamp('2016-09-03')])
print(t1)
# a Timestamp index is stored as a DatetimeIndex
print(type(t1.index))
print()
# a Period index is stored as a PeriodIndex
t2 = pd.Series(list('def'), [pd.Period('2016-09'),
pd.Period('2016-10'), pd.Period('2016-11')])
print(t2)
print(type(t2.index))
print()
d1 = ['2 June 2013', 'Aug 29, 2014', '2015-06-26', '7/12/16']
ts3 = pd.DataFrame(np.random.randint(10, 100, (4, 2)),
index=d1, columns=list('ab'))
print(ts3)
print()
# the index can be converted to datetime format
ts3.index = pd.to_datetime(ts3.index)
print(ts3)
print()
# difference between day-first and month-first parsing
# 2012-04-07 00:00:00
print(pd.to_datetime('4.7.12'))
# 2012-07-04 00:00:00
print(pd.to_datetime('4.7.12', dayfirst=True))
print()
# difference between two Timestamps
print(pd.Timestamp('9/3/2016') - | pd.Timestamp('9/1/2016') | pandas.Timestamp |
# @author <NAME>
#to merge the 3 different year ED visit files to single file
import pandas as pd
import os
import glob
import numpy as np
def seriesToTypes(series):
try:
series=series.astype("Int64")
except (TypeError,ValueError):
try:
series=pd.to_numeric(series,downcast='unsigned')
except (TypeError,ValueError): pass
#series.loc[pd.isna(series)]=pd.NA
# try:
# series=series.apply(lambda x: pd.NA if pd.isna(x) else str(x)).astype('string')
# series=series.astype('str')
# except:
# pass
return series
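# Illustrative usage sketch (not part of the original pipeline): the helper is
# written to be applied column-wise so a whole DataFrame can be downcast, e.g.
# df_example = pd.DataFrame({'a': [1.0, 2.0, None], 'b': ['x', 'y', 'z']})
# df_example = df_example.apply(seriesToTypes)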
folder=r'\\vetmed2.vetmed.w2k.vt.edu\Blitzer\NASA project\Balaji\DSHS ED visit data\Dataset 3_13_2020'
IP_files = glob.glob(folder+'\\IP_*.{}'.format('txt'))
ip_df=pd.DataFrame()
for f in IP_files:
df= | pd.read_csv(f,sep='\t') | pandas.read_csv |
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
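        # Enforce the naming convention: every concrete subclass must define a
        # test_<method>_<klass>_<dtype> method for each klass/dtype combination.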
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = | pd.Series([5, 6, 7, 8]) | pandas.Series |
import re
import json
import datetime
from datetime import datetime
from datetime import timedelta
import pandas as pd
from pandas.io.json import json_normalize
import numpy as np
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import argparse
import os
import csv
class ProcessTweets(object):
def __init__(self, filename, outname):
self.filename = filename
self.outname = outname
json_file = open(filename)
json_str = json_file.read()
self.json = json.loads(json_str)
self.sid = SentimentIntensityAnalyzer()
def clean_tweet(self, tweet):
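        # strip @mentions, URLs and non-alphanumeric characters, then collapse the
        # remaining whitespace into single spaces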
return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def get_sentiment(self, tweet):
polarity_scores = self.sid.polarity_scores(tweet)
return polarity_scores['neg'], polarity_scores['pos'], polarity_scores['neu']
def get_tweets(self):
df = pd.DataFrame.from_dict(self.json)
df['timestamp'] = pd.to_datetime(df['timestamp'])
df.sort_values(by=['timestamp'], inplace=True, ascending=True)
df.reset_index(inplace=True)
self.json = df.to_dict()
timestamps = self.json['timestamp']
start_date = pd.to_datetime(timestamps[0])
end_date = start_date + timedelta(hours=1)
sentiments = dict()
temp = []
tweets = self.json['text']
for count, tweet in enumerate(tweets, start=0):
tweet = tweets[tweet]
curr_time = timestamps[count]
if isinstance(tweet, int):
print(tweet)
if curr_time >= start_date and curr_time < end_date:
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
else:
means = np.mean(np.asarray(temp), axis=0)
obj = {'neg': means[0], 'pos': means[1], 'neu': means[2]}
sentiments[start_date.strftime("%Y-%m-%d %H:%M:%S")] = obj
temp = []
start_date = end_date
end_date = start_date + timedelta(hours=1)
neg, pos, neu = self.get_sentiment(self.clean_tweet(tweet))
temp.append([neg, pos, neu])
tmp_df = pd.DataFrame.from_dict(sentiments)
neg = tmp_df.loc['neg', :]
pos = tmp_df.loc['pos', :]
neu = tmp_df.loc['neu', :]
df = | pd.DataFrame() | pandas.DataFrame |
# %% imports
import numpy as np
import pandas as pd
import config as cfg
from src.utils.data_processing import hours_in_year, medea_path
# --------------------------------------------------------------------------- #
# %% settings and initializing
# --------------------------------------------------------------------------- #
STATIC_FNAME = medea_path('data', 'processed', 'data_static.xlsx')
idx = pd.IndexSlice
# --------------------------------------------------------------------------- #
# %% read in data
# --------------------------------------------------------------------------- #
static_data = {
'CAP_R': pd.read_excel(STATIC_FNAME, 'INITIAL_CAP_R', header=[0], index_col=[0, 1]),
'CAPCOST_R': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_R', header=[0], index_col=[0, 1]),
'potentials': pd.read_excel(STATIC_FNAME, 'potentials', header=[0], index_col=[0]),
'tec': pd.read_excel(STATIC_FNAME, 'parameters_G'),
'feasops': pd.read_excel(STATIC_FNAME, 'FEASIBLE_INPUT-OUTPUT'),
'cost_transport': pd.read_excel(STATIC_FNAME, 'COST_TRANSPORT', header=[0], index_col=[0]),
'CAPCOST_K': pd.read_excel(STATIC_FNAME, 'CAPITALCOST_S', header=[0], index_col=[0, 1]),
'CAP_X': pd.read_excel(STATIC_FNAME, 'ATC', index_col=[0]),
'DISTANCE': pd.read_excel(STATIC_FNAME, 'KM', index_col=[0]),
'AIR_POLLUTION': pd.read_excel(STATIC_FNAME, 'AIR_POLLUTION', index_col=[0])
}
# --------------------------------------------------------------------------------------------------------------------
plant_data = {
'hydro': pd.read_excel(medea_path('data', 'processed', 'plant-list_hydro.xlsx'), 'opsd_hydro'),
'conventional': pd.read_excel(medea_path('data', 'processed', 'power_plant_db.xlsx'))
}
ts_data = {
'timeseries': pd.read_csv(medea_path('data', 'processed', 'medea_regional_timeseries.csv'))
}
# --------------------------------------------------------------------------- #
# %% prepare set data
# --------------------------------------------------------------------------- #
dict_sets = {
'f': {
'Nuclear': [10],
'Lignite': [20],
'Coal': [30],
'Gas': [40],
'Oil': [50],
'Hydro': [60],
'Biomass': [70],
'Solar': [80],
'Wind': [90],
'Power': [100],
'Heat': [110],
'Syngas': [120]
},
'l': {f'l{x}': [True] for x in range(1, 5)},
'm': {
'el': True,
'ht': True
},
'n': {
'pv': [True],
'ror': [True],
'wind_on': [True],
'wind_off': [True]
},
'k': {
'psp_day': [True],
'psp_week': [True],
'psp_season': [True],
'res_day': [True],
'res_week': [True],
'res_season': [True],
'battery': [True]
},
't': {f't{hour}': [True] for hour in range(1, hours_in_year(cfg.year) + 1)},
'z': {zone: [True] for zone in cfg.zones}
}
# convert to DataFrames
for key, value in dict_sets.items():
dict_sets.update({key: pd.DataFrame.from_dict(dict_sets[key], orient='index', columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% prepare static data
# --------------------------------------------------------------------------- #
# Source 'CO2_INTENSITY': CO2 Emission Factors for Fossil Fuels, UBA, 2016
dict_static = {
'CO2_INTENSITY': {
'Nuclear': [0],
'Lignite': [0.399],
'Coal': [0.337],
'Gas': [0.201],
'Oil': [0.266],
'Hydro': [0],
'Biomass': [0],
'Solar': [0],
'Wind': [0],
'Power': [0],
'Heat': [0],
'Syngas': [0]
},
'eta': {
'nuc': [0.34],
'lig_stm': [0.31], 'lig_stm_chp': [0.31],
'lig_boa': [0.43], 'lig_boa_chp': [0.43],
'coal_sub': [0.32], 'coal_sub_chp': [0.32],
'coal_sc': [0.41], 'coal_sc_chp': [0.41],
'coal_usc': [0.44], 'coal_usc_chp': [0.44],
'coal_igcc': [0.55],
'ng_stm': [0.40], 'ng_stm_chp': [0.40],
'ng_cbt_lo': [0.34], 'ng_cbt_lo_chp': [0.34],
'ng_cbt_hi': [0.40], 'ng_cbt_hi_chp': [0.40],
'ng_cc_lo': [0.38], 'ng_cc_lo_chp': [0.38],
'ng_cc_hi': [0.55], 'ng_cc_hi_chp': [0.55],
'ng_mtr': [0.40], 'ng_mtr_chp': [0.40],
'ng_boiler_chp': [0.90],
'oil_stm': [0.31], 'oil_stm_chp': [0.31],
'oil_cbt': [0.35], 'oil_cbt_chp': [0.35],
'oil_cc': [0.42], 'oil_cc_chp': [0.42],
'bio': [0.35], 'bio_chp': [0.35],
'heatpump_pth': [3.0]
},
'map_name2fuel': {
'nuc': 'Nuclear',
'lig': 'Lignite',
'coal': 'Coal',
'ng': 'Gas',
'oil': 'Oil',
'bio': 'Biomass',
'heatpump': 'Power'
},
'CAPCOST_X': {
'AT': [1250],
'DE': [1250]
},
'VALUE_NSE': {
'AT': [12500],
'DE': [12500]
},
'LAMBDA': [0.125],
'SIGMA': [0.175]
}
dict_additions = {
'boilers': {
# 'medea_type': [49.5],
'set_element': 'ng_boiler_chp',
('cap', 'AT'): [4.5],
('cap', 'DE'): [25.5],
('eta', 'AT'): [0.9],
('eta', 'DE'): [0.9]
# ('count', 'AT'): [15],
# ('count', 'DE'): [85],
# ('num', 'AT'): [85],
# ('num', 'DE'): [255]
},
'heatpumps': {
# 'medea_type': [100],
'set_element': 'heatpump_pth',
('cap', 'AT'): [0.1],
('cap', 'DE'): [0.1],
('eta', 'AT'): [3.0],
('eta', 'DE'): [3.0]
# ('count', 'AT'): [1],
# ('count', 'DE'): [1],
# ('num', 'AT'): [1],
# ('num', 'DE'): [1]
},
'batteries': {
'power_in': [0],
'power_out': [0],
'energy_max': [0],
'efficiency_in': [0.96],
'efficiency_out': [0.96],
'cost_power': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-power'].round(4)],
'cost_energy': [static_data['CAPCOST_K'].loc[('AT', 'battery'), 'annuity-energy'].round(4)],
'inflow_factor': [0]
}
}
dict_instantiate = {'CO2_INTENSITY': pd.DataFrame.from_dict(dict_static['CO2_INTENSITY'],
orient='index', columns=['Value'])}
dict_instantiate.update({'efficiency': pd.DataFrame.from_dict(dict_static['eta'], orient='index', columns=['l1'])})
dict_instantiate['efficiency']['product'] = 'el'
dict_instantiate['efficiency'].loc[dict_instantiate['efficiency'].index.str.contains('pth'), 'product'] = 'ht'
dict_instantiate['efficiency'].loc['ng_boiler_chp', 'product'] = 'ht'
dict_instantiate['efficiency']['fuel'] = dict_instantiate['efficiency'].index.to_series().str.split('_').str.get(
0).replace(dict_static['map_name2fuel'])
dict_instantiate['efficiency'].set_index(['product', 'fuel'], append=True, inplace=True)
dict_instantiate['efficiency'].index.set_names(['medea_type', 'product', 'fuel_name'], inplace=True)
for i in range(1, 6):
dict_instantiate['efficiency'][f'l{i}'] = dict_instantiate['efficiency']['l1']
dict_instantiate.update({'CAP_R': static_data['CAP_R'].loc[idx[:, cfg.year], :]})
dict_instantiate.update({'CAP_X': static_data['CAP_X'].loc[
static_data['CAP_X'].index.str.contains('|'.join(cfg.zones)),
static_data['CAP_X'].columns.str.contains('|'.join(cfg.zones))] / 1000})
dict_instantiate.update({'DISTANCE': static_data['DISTANCE'].loc[static_data['DISTANCE'].index.str.contains(
'|'.join(cfg.zones)), static_data['DISTANCE'].columns.str.contains('|'.join(cfg.zones))]})
static_data.update({'CAPCOST_X': pd.DataFrame.from_dict(dict_static['CAPCOST_X'], orient='index', columns=['Value'])})
static_data.update({'VALUE_NSE': pd.DataFrame.from_dict(dict_static['VALUE_NSE'], orient='index', columns=['Value'])})
static_data.update({'LAMBDA': pd.DataFrame(dict_static['LAMBDA'], columns=['Value'])})
static_data.update({'SIGMA': pd.DataFrame(dict_static['SIGMA'], columns=['Value'])})
# --------------------------------------------------------------------------- #
# %% preprocessing plant data
# --------------------------------------------------------------------------- #
# dispatchable (thermal) plants
# filter active thermal plants
plant_data.update({'active': plant_data['conventional'].loc[
(plant_data['conventional']['UnitOperOnlineDate'] < pd.Timestamp(cfg.year, 1, 1)) &
(plant_data['conventional']['UnitOperRetireDate'] > pd.Timestamp(cfg.year, 12, 31)) |
np.isnat(plant_data['conventional']['UnitOperRetireDate'])]})
# exclude hydro power plant
plant_data['active'] = plant_data['active'].loc[(plant_data['active']['MedeaType'] < 60) |
(plant_data['active']['MedeaType'] >= 70)]
# capacities by country in GW
prop_g = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['UnitNameplate'].sum().to_frame() / 1000
prop_g['eta'] = plant_data['active'].groupby(['MedeaType', 'PlantCountry'])['Eta'].mean().to_frame()
# prop_g['count'] = plant_data['active'].groupby(['MedeaType'])['PlantCountry'].value_counts().to_frame(name='count')
# prop_g['num'] = (prop_g['UnitNameplate'].round(decimals=1) * 10).astype(int)
prop_g.rename(index={'Germany': 'DE', 'Austria': 'AT'}, columns={'UnitNameplate': 'cap'}, inplace=True)
prop_g = prop_g.unstack(-1)
prop_g.drop(0.0, axis=0, inplace=True)
# index by plant element names instead of medea_type-numbers
prop_g.index = prop_g.index.map(pd.Series(static_data['tec']['set_element'].values,
index=static_data['tec']['medea_type'].values).to_dict())
# update 'empirical' efficiencies with generic efficiencies
for zone in cfg.zones:
prop_g.loc[:, idx['eta', zone]].update(pd.DataFrame.from_dict(dict_static['eta'],
orient='index', columns=['eta']).iloc[:, 0])
# add data for heat boilers
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['boilers']).set_index('set_element'))
# add data for heatpumps
prop_g = prop_g.append(pd.DataFrame.from_dict(dict_additions['heatpumps']).set_index('set_element'))
# remove non-existent plant
prop_g = prop_g.stack(-1).swaplevel(axis=0)
prop_g = prop_g.dropna()
# update instantiation dictionary
dict_instantiate.update({'tec_props': prop_g})
# add 'tec'-set to dict_sets
dict_sets.update({'i': pd.DataFrame(data=True, index=prop_g.index.get_level_values(1).unique().values,
columns=['Value'])})
static_data['feasops']['fuel_name'] = (static_data['feasops']['medea_type'] / 10).apply(np.floor) * 10
static_data['feasops']['fuel_name'].replace({y: x for x, y in dict_sets['f'].itertuples()}, inplace=True)
static_data['feasops']['set_element'] = static_data['feasops']['medea_type']
static_data['feasops']['set_element'].replace(
{x: y for x, y in static_data['tec'][['medea_type', 'set_element']].values}, inplace=True)
static_data['feasops'].dropna(inplace=True)
static_data['feasops'].set_index(['set_element', 'l', 'fuel_name'], inplace=True)
# following line produces memory error (0xC00000FD) --> workaround with element-wise division
# df_feasops['fuel_need'] = df_feasops['fuel']/ df_eff
# TODO: PerformanceWarning: indexing past lexsort depth may impact performance (3 times)
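# the loop below fills fuel_need per (technology, load level) as the mean fuel
# input divided by the technology's generic efficiency from dict_static['eta']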
static_data['feasops']['fuel_need'] = np.nan
for typ in static_data['feasops'].index.get_level_values(0).unique():
for lim in static_data['feasops'].index.get_level_values(1).unique():
static_data['feasops'].loc[idx[typ, lim], 'fuel_need'] = static_data['feasops'].loc[
idx[typ, lim], 'fuel'].mean() / \
dict_static['eta'][typ][0]
# adjust static_data['tec'] to reflect modelled power plants
static_data['tec'].set_index('set_element', inplace=True)
static_data['tec'] = static_data['tec'].loc[static_data['tec'].index.isin(dict_sets['i'].index), :]
dict_instantiate['efficiency'] = \
dict_instantiate['efficiency'].loc[
dict_instantiate['efficiency'].index.get_level_values(0).isin(dict_sets['i'].index), :]
static_data['feasops'] = \
static_data['feasops'].loc[static_data['feasops'].index.get_level_values(0).isin(dict_sets['i'].index), :]
# --------------------------------------------------------------------------- #
# hydro storage data
# drop all ror data
plant_data['hydro'].drop(plant_data['hydro'][plant_data['hydro'].technology == 'Run-of-river'].index, inplace=True)
# filter out data without reservoir size in GWh
plant_data['hydro'].dropna(subset=['energy_max', 'power_in'], inplace=True)
# calculate duration of generation from full reservoir
plant_data['hydro']['max_duration'] = plant_data['hydro']['energy_max'] / plant_data['hydro']['power_out'] * 1000 / 24
plant_data['hydro']['count'] = 1
plant_data.update({'hydro_clusters': plant_data['hydro'].groupby(['technology', 'country',
pd.cut(plant_data['hydro']['max_duration'],
[0, 2, 7, 75])]).sum()})
plant_data['hydro_clusters']['efficiency_in'] = plant_data['hydro_clusters']['efficiency_in'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['efficiency_out'] = plant_data['hydro_clusters']['efficiency_out'] / \
plant_data['hydro_clusters']['count']
plant_data['hydro_clusters']['cost_power'] = np.nan
plant_data['hydro_clusters']['cost_energy'] = np.nan
# assign technology and zone index to rows
plant_data['hydro_clusters']['country'] = plant_data['hydro_clusters'].index.get_level_values(1)
plant_data['hydro_clusters']['category'] = plant_data['hydro_clusters'].index.get_level_values(2).rename_categories(
['day', 'week', 'season']).astype(str)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters'].index.get_level_values(0)
plant_data['hydro_clusters']['tech'] = plant_data['hydro_clusters']['tech'].replace(['Pumped Storage', 'Reservoir'],
['psp', 'res'])
plant_data['hydro_clusters']['set_elem'] = plant_data['hydro_clusters']['tech'] + '_' + plant_data['hydro_clusters'][
'category']
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].set_index(['set_elem', 'country'])
plant_data['hydro_clusters'].fillna(0, inplace=True)
plant_data['hydro_clusters']['power_out'] = plant_data['hydro_clusters']['power_out'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['power_in'] = plant_data['hydro_clusters']['power_in'] / 1000 # conversion from MW to GW
plant_data['hydro_clusters']['inflow_factor'] = (
plant_data['hydro_clusters']['energy_max'] / plant_data['hydro_clusters']['energy_max'].sum())
plant_data['hydro_clusters'] = plant_data['hydro_clusters'].loc[:, ['power_in', 'power_out', 'energy_max',
'efficiency_in', 'efficiency_out', 'cost_power',
'cost_energy', 'inflow_factor']].copy()
# append battery data
bat_idx = pd.MultiIndex.from_product([['battery'], list(cfg.zones)])
df_battery = pd.DataFrame(np.nan, bat_idx, dict_additions['batteries'].keys())
for zone in list(cfg.zones):
for key in dict_additions['batteries'].keys():
df_battery.loc[('battery', zone), key] = dict_additions['batteries'][key][0]
plant_data['storage_clusters'] = plant_data['hydro_clusters'].append(df_battery)
# --------------------------------------------------------------------------- #
# %% process time series data
# --------------------------------------------------------------------------- #
ts_data['timeseries']['DateTime'] = pd.to_datetime(ts_data['timeseries']['DateTime'])
ts_data['timeseries'].set_index('DateTime', inplace=True)
# constrain data to scenario year
ts_data['timeseries'] = ts_data['timeseries'].loc[
(pd.Timestamp(cfg.year, 1, 1, 0, 0).tz_localize('UTC') <= ts_data['timeseries'].index) & (
ts_data['timeseries'].index <= pd.Timestamp(cfg.year, 12, 31, 23, 0).tz_localize('UTC'))]
# drop index and set index of df_time instead
if len(ts_data['timeseries']) == len(dict_sets['t']):
ts_data['timeseries'].set_index(dict_sets['t'].index, inplace=True)
else:
raise ValueError('Mismatch of time series data and model time resolution. Is cfg.year wrong?')
ts_data['timeseries']['DE-power-load'] = ts_data['timeseries']['DE-power-load'] / 0.91
# for 0.91 scaling factor see
# https://www.entsoe.eu/fileadmin/user_upload/_library/publications/ce/Load_and_Consumption_Data.pdf
# create price time series incl transport cost
ts_data['timeseries']['Nuclear'] = 3.5
ts_data['timeseries']['Lignite'] = 4.5
ts_data['timeseries']['Biomass'] = 6.5
# subset of zonal time series
ts_data['zonal'] = ts_data['timeseries'].loc[:, ts_data['timeseries'].columns.str.startswith(('AT', 'DE'))].copy()
ts_data['zonal'].columns = ts_data['zonal'].columns.str.split('-', expand=True)
# adjust column naming to reflect proper product names ('el' and 'ht')
ts_data['zonal'] = ts_data['zonal'].rename(columns={'power': 'el', 'heat': 'ht'})
model_prices = ['Coal', 'Oil', 'Gas', 'EUA', 'Nuclear', 'Lignite', 'Biomass', 'price_day_ahead']
ts_data['price'] = pd.DataFrame(index=ts_data['timeseries'].index,
columns=pd.MultiIndex.from_product([model_prices, cfg.zones]))
for zone in cfg.zones:
for fuel in model_prices:
if fuel in static_data['cost_transport'].index:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel] + static_data['cost_transport'].loc[fuel, zone]
else:
ts_data['price'][(fuel, zone)] = ts_data['timeseries'][fuel]
ts_inflows = pd.DataFrame(index=list(ts_data['zonal'].index),
columns=pd.MultiIndex.from_product([cfg.zones, dict_sets['k'].index]))
for zone in list(cfg.zones):
for strg in dict_sets['k'].index:
if 'battery' not in strg:
ts_inflows.loc[:, (zone, strg)] = ts_data['zonal'].loc[:, idx[zone, 'inflows', 'reservoir']] * \
plant_data['storage_clusters'].loc[(strg, zone), 'inflow_factor']
ts_data.update({'inflows': ts_inflows})
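# ancillary-services requirement: 12.5% of the zonal peak electric load plus
# 7.5% of installed intermittent capacity (excluding run-of-river)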
dict_instantiate.update({'ancil': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze() * 0.125
+ dict_instantiate['CAP_R'].unstack(1).drop('ror', axis=1).sum(axis=1) * 0.075})
dict_instantiate.update({'PEAK_LOAD': ts_data['zonal'].loc[:, idx[:, 'el', 'load']].max().unstack((1, 2)).squeeze()})
dict_instantiate.update({'PEAK_PROFILE': ts_data['zonal'].loc[:, idx[:, :, 'profile']].max().unstack(2).drop(
'ror', axis=0, level=1)})
# drop rows with all zeros
plant_data['storage_clusters'] = \
plant_data['storage_clusters'].loc[~(plant_data['storage_clusters'] == 0).all(axis=1), :].copy()
# --------------------------------------------------------------------------- #
# %% limits on investment - long-run vs short-run & # TODO: potentials
# --------------------------------------------------------------------------- #
invest_limits = {}
lim_invest_thermal = | pd.DataFrame([0]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
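# Illustrative sketch (not in the original file): these factories build the jitted
# test functions from source text, e.g.
# add_impl = _make_func_use_binop1('+')       # def test_impl(A, B): return A + B
# eq_impl = _make_func_use_method_arg1('eq')  # def test_impl(A, B): return A.eq(B)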
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
        handles a string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
        handles a string series without changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
        '''Verifies Series.dropna() works in distributed mode for a series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([True, False, False, True])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_int_no_index1(self):
'''Verifies Series.dropna() implementation for integer series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
n = 11
S1 = pd.Series(np.arange(n, dtype=np.int64))
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('numba.errors.TypingError - fix needed\n'
'Failed in hpat mode pipeline'
'(step: convert to distributed)\n'
'Invalid use of Function(<built-in function len>)'
'with argument(s) of type(s): (none)\n')
def test_series_rename1(self):
def test_impl(A):
return A.rename('B')
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A), test_impl(df.A))
def test_series_sum_default(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1., 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_sum_nan(self):
def test_impl(S):
return S.sum()
hpat_func = hpat.jit(test_impl)
# column with NA
S = pd.Series([np.nan, 2., 3.])
self.assertEqual(hpat_func(S), test_impl(S))
# all NA case should produce 0
S = | pd.Series([np.nan, np.nan]) | pandas.Series |
import dask.dataframe as dd
import numpy as np
import pandas as pd
from pandas.api.types import is_categorical_dtype
DEFAULT_WINDOW = 7
DEFAULT_TAKE_LOGS = True
DEFAULT_CENTER = False
DEFAULT_MIN_PERIODS = 1
def calculate_weekly_incidences_from_results(
results,
outcome,
groupby=None,
):
"""Create the weekly incidences from a list of simulation runs.
Args:
        results (list): list of dask DataFrames with the time series data from sid
            simulations.
        outcome (str): name of the outcome column for which the incidence is computed.
        groupby (str, optional): column by which the incidence is additionally grouped
            (e.g. an age-group column). Defaults to None.
    Returns:
        weekly_incidences (pandas.DataFrame): every column is the
            weekly incidence over time for one simulation run.
            The index contains the dates of the simulation period if groupby is None;
            otherwise it is a MultiIndex with the dates and the groups.
"""
weekly_incidences = []
for res in results:
daily_smoothed = smoothed_outcome_per_hundred_thousand_sim(
df=res,
outcome=outcome,
take_logs=False,
window=7,
center=False,
groupby=groupby,
)
weekly_smoothed = daily_smoothed * 7
if groupby is None:
full_index = pd.date_range(
weekly_smoothed.index.min(), weekly_smoothed.index.max()
)
else:
groups = weekly_smoothed.index.get_level_values(groupby).unique()
dates = weekly_smoothed.index.get_level_values("date").unique()
full_index = pd.MultiIndex.from_product(iterables=[dates, groups])
expanded = weekly_smoothed.reindex(full_index).fillna(0)
weekly_incidences.append(expanded)
df = | pd.concat(weekly_incidences, axis=1) | pandas.concat |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import sys
import numpy as np
import pandas.compat as compat
from pandas.compat import lrange, range, u
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, Series, date_range, option_context,
period_range, timedelta_range)
from pandas.core.base import StringMixin
from pandas.core.index import MultiIndex
import pandas.util.testing as tm
from .common import TestData
class TestSeriesRepr(TestData):
def test_multilevel_name_print(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(lrange(0, len(index)), index=index, name='sth')
expected = ["first second", "foo one 0",
" two 1", " three 2",
"bar one 3", " two 4",
"baz two 5", " three 6",
"qux one 7", " two 8",
" three 9", "Name: sth, dtype: int64"]
expected = "\n".join(expected)
assert repr(s) == expected
def test_name_printing(self):
# Test small Series.
s = Series([0, 1, 2])
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
# Test big Series (diff code path).
s = Series(lrange(0, 1000))
s.name = "test"
assert "Name: test" in repr(s)
s.name = None
assert "Name:" not in repr(s)
s = Series(index=date_range('20010101', '20020101'), name='test')
assert "Name: test" in repr(s)
def test_repr(self):
str(self.ts)
str(self.series)
str(self.series.astype(int))
str(self.objSeries)
str(Series(tm.randn(1000), index=np.arange(1000)))
str(Series(tm.randn(1000), index=np.arange(1000, 0, step=-1)))
# empty
str(self.empty)
# with NaNs
self.series[5:7] = np.NaN
str(self.series)
# with Nones
ots = self.ts.astype('O')
ots[::2] = None
repr(ots)
# various names
for name in ['', 1, 1.2, 'foo', u('\u03B1\u03B2\u03B3'),
'loooooooooooooooooooooooooooooooooooooooooooooooooooong',
('foo', 'bar', 'baz'), (1, 2), ('foo', 1, 2.3),
(u('\u03B1'), u('\u03B2'), u('\u03B3')),
(u('\u03B1'), 'bar')]:
self.series.name = name
repr(self.series)
biggie = Series(tm.randn(1000), index=np.arange(1000),
name=('foo', 'bar', 'baz'))
repr(biggie)
# 0 as name
ser = Series(np.random.randn(100), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
# tidy repr
ser = Series(np.random.randn(1001), name=0)
rep_str = repr(ser)
assert "Name: 0" in rep_str
ser = Series(["a\n\r\tb"], name="a\n\r\td", index=["a\n\r\tf"])
assert "\t" not in repr(ser)
assert "\r" not in repr(ser)
assert "a\n" not in repr(ser)
# with empty series (#4651)
s = Series([], dtype=np.int64, name='foo')
assert repr(s) == 'Series([], Name: foo, dtype: int64)'
s = Series([], dtype=np.int64, name=None)
assert repr(s) == 'Series([], dtype: int64)'
def test_tidy_repr(self):
a = Series([u("\u05d0")] * 1000)
a.name = 'title1'
repr(a) # should not raise exception
@tm.capture_stderr
def test_repr_bool_fails(self):
s = Series([DataFrame(np.random.randn(2, 2)) for i in range(5)])
# It works (with no Cython exception barf)!
repr(s)
output = sys.stderr.getvalue()
assert output == ''
def test_repr_name_iterable_indexable(self):
s = Series([1, 2, 3], name=np.int64(3))
# it works!
repr(s)
s.name = (u("\u05d0"), ) * 2
repr(s)
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# ...The return value must be a string object.
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = [u("\u03c3"), u("\u03c4"), u("\u03c5"), u("\u03c6")]
df = Series(data, index=index1)
        assert type(df.__repr__()) == str  # both py2 / 3
def test_repr_max_rows(self):
# GH 6863
with pd.option_context('max_rows', None):
str(Series(range(1001))) # should not raise exception
def test_unicode_string_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
str(df)
else:
compat.text_type(df)
def test_bytestring_with_unicode(self):
df = Series([u("\u05d0")], name=u("\u05d1"))
if compat.PY3:
bytes(df)
else:
str(df)
def test_timeseries_repr_object_dtype(self):
index = Index([datetime(2000, 1, 1) + timedelta(i)
for i in range(1000)], dtype=object)
ts = Series(np.random.randn(len(index)), index)
repr(ts)
ts = tm.makeTimeSeries(1000)
assert repr(ts).splitlines()[-1].startswith('Freq:')
ts2 = ts.iloc[np.random.randint(0, len(ts) - 1, 400)]
repr(ts2).splitlines()[-1]
def test_latex_repr(self):
result = r"""\begin{tabular}{ll}
\toprule
{} & 0 \\
\midrule
0 & $\alpha$ \\
1 & b \\
2 & c \\
\bottomrule
\end{tabular}
"""
with option_context('display.latex.escape', False,
'display.latex.repr', True):
s = Series([r'$\alpha$', 'b', 'c'])
assert result == s._repr_latex_()
assert s._repr_latex_() is None
class TestCategoricalRepr(object):
def test_categorical_repr_unicode(self):
# GH#21002 if len(index) > 60, sys.getdefaultencoding()=='ascii',
# and we are working in PY2, then rendering a Categorical could raise
# UnicodeDecodeError by trying to decode when it shouldn't
class County(StringMixin):
name = u'<NAME>'
state = u'PR'
def __unicode__(self):
return self.name + u', ' + self.state
cat = pd.Categorical([County() for n in range(61)])
idx = | pd.Index(cat) | pandas.Index |
import numpy as np
import pandas as pd
import pytest
from pathlib import Path
from compare_df import foos # noqa
# from compare_df.__main__ import main # noqa TODO, main test fails
@pytest.mark.parametrize(
"frame_1, frame_2, expected",
[
( | pd.DataFrame() | pandas.DataFrame |
import sys
import numpy as np
import pandas as pd
from helicalc.coil import CoilIntegrator
from helicalc.geometry import read_solenoid_geom_combined
from tqdm import tqdm
# output info
output_dir = '/home/ckampa/data/pickles/helicalc/testing/'
save_name = 'Loop_Current_Test_Stream2.pkl'
x0 = 1.06055
lx2 = 0.005
Nx = 21
z0 = 0
lz2 = 0.002
Nz = 9
y0 = 0
xs = np.linspace(x0-lx2, x0+lx2, Nx)
zs = np.linspace(z0-lz2, z0+lz2, Nz)
X, Z = np.meshgrid(xs, zs)
X = X.flatten()
Z = Z.flatten()
df = | pd.DataFrame({'X':X, 'Z':Z}) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.feature_selection import (
f_regression,
SelectKBest,
SelectFromModel,
)
from sklearn.linear_model import Lasso
from sklearn.datasets import load_boston
from feature_engine.wrappers import SklearnTransformerWrapper
def test_sklearn_imputer_numeric_with_constant(df_na):
variables_to_impute = ["Age", "Marks"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value=-999, strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna(-999)
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_object_with_constant(df_na):
variables_to_impute = ["Name", "City"]
na_variables_left_after_imputation = [
col
for col in df_na.loc[:, df_na.isna().any()].columns
if col not in variables_to_impute
]
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant"),
variables=variables_to_impute,
)
# transformed dataframe
ref = df_na.copy()
ref[variables_to_impute] = ref[variables_to_impute].fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
assert transformer.variables == variables_to_impute
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(
dataframe_na_transformed[na_variables_left_after_imputation].isna().sum() != 0
)
assert all(dataframe_na_transformed[variables_to_impute].isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_imputer_allfeatures_with_constant(df_na):
transformer = SklearnTransformerWrapper(
transformer=SimpleImputer(fill_value="missing", strategy="constant")
)
# transformed dataframe
ref = df_na.copy()
ref = ref.fillna("missing")
dataframe_na_transformed = transformer.fit_transform(df_na)
# init params
assert isinstance(transformer.transformer, SimpleImputer)
# fit params
assert transformer.input_shape_ == (8, 6)
# transformed output
assert all(dataframe_na_transformed.isna().sum() == 0)
pd.testing.assert_frame_equal(ref, dataframe_na_transformed)
def test_sklearn_standardscaler_numeric(df_vartypes):
variables_to_scale = ["Age", "Marks"]
transformer = SklearnTransformerWrapper(
transformer=StandardScaler(), variables=variables_to_scale
)
ref = df_vartypes.copy()
ref[variables_to_scale] = (
ref[variables_to_scale] - ref[variables_to_scale].mean()
) / ref[variables_to_scale].std(ddof=0)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
# fit params
assert transformer.input_shape_ == (4, 5)
assert (transformer.transformer.mean_.round(6) == np.array([19.5, 0.75])).all()
assert all(transformer.transformer.scale_.round(6) == [1.118034, 0.111803])
pd.testing.assert_frame_equal(ref, transformed_df)
def test_sklearn_standardscaler_object(df_vartypes):
variables_to_scale = ["Name"]
transformer = SklearnTransformerWrapper(
transformer=StandardScaler(), variables=variables_to_scale
)
with pytest.raises(TypeError):
transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
def test_sklearn_standardscaler_allfeatures(df_vartypes):
transformer = SklearnTransformerWrapper(transformer=StandardScaler())
ref = df_vartypes.copy()
variables_to_scale = list(ref.select_dtypes(include="number").columns)
ref[variables_to_scale] = (
ref[variables_to_scale] - ref[variables_to_scale].mean()
) / ref[variables_to_scale].std(ddof=0)
transformed_df = transformer.fit_transform(df_vartypes)
# init params
assert isinstance(transformer.transformer, StandardScaler)
assert transformer.variables == variables_to_scale
# fit params
assert transformer.input_shape_ == (4, 5)
assert (transformer.transformer.mean_.round(6) == np.array([19.5, 0.75])).all()
assert all(transformer.transformer.scale_.round(6) == [1.118034, 0.111803])
| pd.testing.assert_frame_equal(ref, transformed_df) | pandas.testing.assert_frame_equal |
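# A further illustrative check in the same style as the tests above (not part of the
# original suite): wrapping SimpleImputer with the "mean" strategy on a single numeric
# variable. It reuses the df_na fixture assumed by the tests above.
def test_sklearn_imputer_numeric_with_mean_sketch(df_na):
    transformer = SklearnTransformerWrapper(
        transformer=SimpleImputer(strategy="mean"), variables=["Age"]
    )
    transformed = transformer.fit_transform(df_na)
    # only the requested column is imputed; other NaN columns stay untouched
    assert transformed["Age"].isna().sum() == 0
    assert transformed["Marks"].isna().sum() == df_na["Marks"].isna().sum()
    # imputed entries equal the column mean learned during fit
    filled = transformed.loc[df_na["Age"].isna(), "Age"]
    assert np.allclose(filled, df_na["Age"].mean())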
import os
import time
import numpy as np
import pandas
import pandas as pd
from scripts.MADDPG.maddpg import MADDPG
from scripts.MADDPG.buffer import MultiAgentReplayBuffer
# from scripts.MADDPG_original.maddpg import MADDPG
# from scripts.MADDPG_original.buffer import MultiAgentReplayBuffer
from make_env import make_env
from scripts.MADDPG.edge_env import EdgeEnv
| pandas.set_option('display.max_columns', None) | pandas.set_option |
import pandas as pd
import numpy as np
class DataParser:
@staticmethod
def _parse_companies(cmp_list):
"""
Создает DataFrame компаний по списку словарей из запроса
:param cmp_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=['ID', 'TITLE', 'CMP_TYPE_CUSTOMER', 'CMP_TYPE_PARTNER'])
if cmp_list:
cmp_df = pd.DataFrame(cmp_list)
cmp_df['CMP_TYPE_CUSTOMER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'CUSTOMER') else 0)
cmp_df['CMP_TYPE_PARTNER'] = cmp_df['COMPANY_TYPE'].apply(lambda x: 1 if (x == 'PARTNER') else 0)
cmp_df = cmp_df.drop(columns=['COMPANY_TYPE'], axis=1)
ret_df = pd.concat([ret_df, cmp_df])
return ret_df
@staticmethod
def _parse_deals(deal_list):
"""
Создает DataFrame сделок по списку словарей из запроса
:param deal_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'OPPORTUNITY_DEAL_Q01', 'PROBABILITY_DEAL_Q01', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q01',
'OPPORTUNITY_DEAL_Q09', 'PROBABILITY_DEAL_Q09', 'TIME_DIFF_BEGIN_CLOSE_DEAL_Q09',
'OPPORTUNITY_DEAL_MEAN', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEAN', 'CLOSED',
'OPPORTUNITY_DEAL_MEDIAN', 'TIME_DIFF_BEGIN_CLOSE_DEAL_MEDIAN', 'DEAL_BY_YEAR'])
ret_df.index.name = 'COMPANY_ID'
if deal_list:
deal_df = pd.DataFrame(deal_list)
deal_df['CLOSED'] = deal_df['CLOSED'].apply(lambda x: 1 if (x == 'Y') else 0)
deal_df['OPPORTUNITY'] = pd.to_numeric(deal_df['OPPORTUNITY'])
deal_df['PROBABILITY'] = pd.to_numeric(deal_df['PROBABILITY'])
deal_df['BEGINDATE'] = pd.to_datetime(deal_df['BEGINDATE'])
deal_df['CLOSEDATE'] = pd.to_datetime(deal_df['CLOSEDATE'])
deal_df['TIME_DIFF_BEGIN_CLOSE'] = (deal_df['CLOSEDATE'] - deal_df['BEGINDATE']).astype(
'timedelta64[h]') / 24
deal_group = deal_df.groupby(by='COMPANY_ID')
deal_count = pd.DataFrame(deal_group['CLOSED'].count())
deal_date_max = deal_group['CLOSEDATE'].max()
deal_date_min = deal_group['BEGINDATE'].min()
d = {'YEAR': (deal_date_max - deal_date_min).astype('timedelta64[h]') / (24 * 365)}
deal_date_max_min_diff = pd.DataFrame(data=d)
deal_by_year = pd.DataFrame()
deal_by_year['DEAL_BY_YEAR'] = (deal_count['CLOSED'] / deal_date_max_min_diff['YEAR']).astype(np.float32)
deal_quantile01 = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE'].quantile(0.1)
deal_quantile09 = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE'].quantile(0.9)
deal_mean = deal_group['OPPORTUNITY', 'PROBABILITY', 'TIME_DIFF_BEGIN_CLOSE', 'CLOSED'].mean()
deal_median = deal_group['OPPORTUNITY', 'TIME_DIFF_BEGIN_CLOSE'].median()
deal_result = pd.merge(deal_quantile01, deal_quantile09, on='COMPANY_ID',
suffixes=['_DEAL_Q01', '_DEAL_Q09'])
deal_result1 = pd.merge(deal_mean, deal_median, on='COMPANY_ID', suffixes=['_DEAL_MEAN', '_DEAL_MEDIAN'])
deal_result = pd.merge(deal_result, deal_result1, on='COMPANY_ID')
deal_result = pd.merge(deal_result, deal_by_year, on='COMPANY_ID')
deal_result = deal_result.mask(np.isinf(deal_result))
ret_df = pd.concat([ret_df, deal_result])
return ret_df
@staticmethod
def _parse_invoices(inv_list):
"""
Создает DataFrame счетов по списку словарей из запроса
:param inv_list: list of dicts
:return: pandas.DataFrame
"""
ret_df = pd.DataFrame(columns=[
'PRICE_INV_Q01', 'TIME_DIFF_PAYED_BILL_INV_Q01', 'TIME_DIFF_PAYBEF_PAYED_INV_Q01',
'PRICE_INV_Q09', 'TIME_DIFF_PAYED_BILL_INV_Q09', 'TIME_DIFF_PAYBEF_PAYED_INV_Q09', 'PRICE_INV_MEAN',
'TIME_DIFF_PAYED_BILL_INV_MEAN', 'TIME_DIFF_PAYBEF_PAYED_INV_MEAN', 'PAYED', 'STATUS_ID_P',
'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T', 'PRICE_INV_MEDIAN', 'TIME_DIFF_PAYED_BILL_INV_MEDIAN',
'TIME_DIFF_PAYBEF_PAYED_INV_MEDIAN', 'MONTH_TOGETHER_INV', 'DEAL_BY_YEAR'])
ret_df.index.name = 'UF_COMPANY_ID'
if inv_list:
inv_df = pd.DataFrame(inv_list)
inv_df['PRICE'] = pd.to_numeric(inv_df['PRICE'])
inv_df['DATE_BILL'] = pd.to_datetime(inv_df['DATE_BILL'])
inv_df['DATE_PAYED'] = pd.to_datetime(inv_df['DATE_PAYED'])
inv_df['DATE_PAY_BEFORE'] = pd.to_datetime(inv_df['DATE_PAY_BEFORE'])
inv_df['TIME_DIFF_PAYED_BILL'] = (inv_df['DATE_PAYED'] - inv_df['DATE_BILL']).astype('timedelta64[h]') / 24
inv_df['TIME_DIFF_PAYBEF_PAYED'] = (inv_df['DATE_PAY_BEFORE'] - inv_df['DATE_PAYED']).astype('timedelta64[h]') / 24
inv_df['PAYED'] = inv_df['PAYED'].apply(lambda x: 1 if (x == 'Y') else 0)
inv_df['STATUS_ID_P'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'P') else 0)
inv_df['STATUS_ID_D'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'D') else 0)
inv_df['STATUS_ID_N'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'N') else 0)
inv_df['STATUS_ID_T'] = inv_df['STATUS_ID'].apply(lambda x: 1 if (x == 'T') else 0)
inv_group = inv_df.groupby(by='UF_COMPANY_ID')
inv_date_max = inv_group['DATE_PAYED'].max()
inv_date_min = inv_group['DATE_PAYED'].min()
inv_month_together = pd.DataFrame()
inv_month_together['MONTH_TOGETHER_INV'] = (inv_date_max - inv_date_min).astype('timedelta64[h]') / (
24 * 30)
inv_count = pd.DataFrame(inv_group['PAYED'].count())
inv_by_year = pd.DataFrame(
data={'DEAL_BY_YEAR': (inv_count['PAYED'] / inv_month_together['MONTH_TOGETHER_INV']) * 12})
inv_quantile01 = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].quantile(0.1)
inv_quantile09 = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].quantile(0.9)
inv_mean = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED', 'PAYED',
'STATUS_ID_P', 'STATUS_ID_D', 'STATUS_ID_N', 'STATUS_ID_T'].mean()
inv_median = inv_group['PRICE', 'TIME_DIFF_PAYED_BILL', 'TIME_DIFF_PAYBEF_PAYED'].median()
inv_result = pd.merge(inv_quantile01, inv_quantile09, on='UF_COMPANY_ID', suffixes=['_INV_Q01', '_INV_Q09'])
inv_result1 = pd.merge(inv_mean, inv_median, on='UF_COMPANY_ID', suffixes=['_INV_MEAN', '_INV_MEDIAN'])
inv_result = pd.merge(inv_result, inv_result1, on='UF_COMPANY_ID')
inv_result = pd.merge(inv_result, inv_month_together, on='UF_COMPANY_ID')
inv_result = | pd.merge(inv_result, inv_by_year, on='UF_COMPANY_ID') | pandas.merge |
import pandas as pd
import numpy as np
import yfinance as yf
from sklearn.linear_model import LinearRegression
import statsmodels
import statsmodels.api as sm
import statsmodels.tsa.stattools as ts
import datetime
import scipy.stats
import math
import openpyxl as pyxl
from scipy import signal
from scipy import stats as ss
import statistics
from finta import TA
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
import pandas_ta as ta
from pingouin import gzscore
from .Statsmodels_Regression_All_OneValueIndicators import *
from .Statsmodels_FittedValues import *
from .Statsmodels_LR_Residuals import *
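# Before the main entry point below, a standalone sketch of the basic statsmodels OLS
# fit that the "OLS" indicator presumably wraps. Variable names are illustrative and
# this is not the module's actual implementation.
def _ols_sketch():
    rs = np.random.RandomState(0)
    explanatory = pd.DataFrame({'x': rs.normal(size=100)})
    independent = 2.0 * explanatory['x'] + rs.normal(scale=0.1, size=100)
    model = sm.OLS(independent, sm.add_constant(explanatory)).fit()
    # single-value indicators (e.g. the F-test) and series indicators (fitted values, residuals)
    return model.fvalue, model.fittedvalues, model.resid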
def RegressionAnalysis(df, Independent, Explanatory, Indicators, prefix=None):
"""
    This function fits regression models and computes comparisons between series
Arguments:
----------
- df: Pandas DataFrame
Contains the data to be analyzed
- Independent: str
        The name of the column in df for the Independent variable data
    - Explanatory: str or list
        The name of the column in df for the Explanatory variable data. For a multivariate analysis, pass a list of all column names.
    - Indicators: list
        The list of the indicator/model names to compute
    Return:
    ----------
    - df: Pandas DataFrame
        - Contains the initial df plus all the series indicators that were added, such as the Residuals or the Fitted Values
    - OneValueIndicators: Pandas DataFrame
        - Contains all the indicators computed as a single value, such as the F-Test or the T-Test
"""
if Indicators == None:
        Indicators = ["OLS", "GLSAR", "RecursiveLS", "Yule Walker Order 1", "Yule Walker Order 2",
                      "Yule Walker Order 3", "Burg Order 1", "Burg Order 2", "Burg Order 3",
                      "QuantReg", "GLM Binomial", "GLM Gamma", "GLM Gaussian", "GLM Inverse Gaussian",
                      "GLM Negative Binomial", "GLM Poisson", "GLM Tweedie",
                      "AR", "ARMA", "ARIMA", "Granger Causality",
                      "<NAME>", "Cointegration"]
# Pre-processing
Independent = df[Independent]
Independent = pd.DataFrame(Independent)
Explanatory = df[Explanatory]
Explanatory = | pd.DataFrame(Explanatory) | pandas.DataFrame |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import os
import io
import re
import json
import glob
import tempfile
import pysam
import pandas as pd
class Cutadapt_log(object):
"""Wrapper cutadapt log file"""
def __init__(self, log):
self.log = log
# stat
        if isinstance(log, Cutadapt_log):
            self.stat = log.stat
        elif isinstance(log, dict):
            self.stat = log
elif isinstance(log, io.TextIOWrapper):
self.stat = self._log_parser()
elif os.path.isfile(log):
self.stat = self._log_parser()
else:
raise ValueError('not supported file')
    def _log_parser(self):
        """Parse the cutadapt log file into a dict of statistics"""
dd = {}
with open(self.log, 'rt') as ff:
for line in ff:
if line.startswith('This is cutadapt'):
sep = line.strip().split(' ')
dd['version'] = sep[3]
dd['python'] = sep[6]
elif 'Command line parameters' in line:
dd['cmd'] = line.strip().split(':')[1]
elif 'Total reads processed' in line:
value = line.strip().split(':')[1]
value = re.sub(',', '', value.strip())
dd['total'] = int(value)
elif 'Reads written (passing filters)' in line:
value = line.strip().split(':')[1]
value = value.strip().split(' ')[0]
value = re.sub(',', '', value)
dd['clean'] = int(value)
else:
continue
pct = float(dd['clean']) / float(dd['total']) * 100
dd['pct'] = '%.1f%%' % pct
return dd
def _tmp(self):
"""Create a temp file"""
tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',
suffix='.json',
delete=False)
return tmpfn.name
def saveas(self, _out=None):
"""Make a copy of statistics of mapping results"""
if _out is None:
_out = os.path.splitext(self.log)[0] + '.json'
# _out = self._tmp()
dd = self.stat
with open(_out, 'wt') as fo:
json.dump(dd, fo, indent=4)
return _out
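# Minimal usage sketch for Cutadapt_log. A dict is used here so the example runs without
# a real log file; for an actual run you would pass the path of a *.cutadapt.log file
# (the file name in the comment below is hypothetical).
def _cutadapt_log_example():
    demo_stat = {'version': '1.18', 'python': '3.7.3', 'cmd': ' -a AGATCGGAAGAGC demo.fq',
                 'total': 1000, 'clean': 900, 'pct': '90.0%'}
    log = Cutadapt_log(demo_stat)  # also accepts a file path or an open handle
    # Cutadapt_log('demo.cutadapt.log').saveas() would parse a real log and dump JSON
    return log.stat['pct']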
class Json_file(object):
    """Wrapper around a JSON file or a dict"""
def __init__(self, fn):
self.fn = fn
if isinstance(fn, Json_file):
self.stat = fn.stat
elif isinstance(fn, dict):
self.stat = fn
elif os.path.exists(fn):
self.stat = self.json_reader()
else:
raise ValueError('unknown file format:')
def _tmp(self):
"""Create a temp file"""
tmpfn = tempfile.NamedTemporaryFile(prefix='tmp',
suffix='.json',
delete=False)
return tmpfn.name
def json_reader(self):
"""Load json file as dict"""
fn = self.fn
if os.path.isfile(fn) and os.path.getsize(fn) > 0:
with open(fn, 'rt') as ff:
return json.load(ff)
def json_writer(self, to=None):
"""Write dict to file in json format"""
fn = self.fn
if to is None:
to = self._tmp()
if isinstance(fn, Json_file):
fn = fn.fn
elif isinstance(fn, dict):
fn = fn
with open(to, 'wt') as ff:
json.dump(fn, ff, indent=4, sort_keys=True)
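# Minimal usage sketch for Json_file: round-trip a dict through a JSON file on disk.
# The temporary file name is generated on the fly, so the example is self-contained.
def _json_file_roundtrip_example():
    src = {'total': 1000, 'clean': 900, 'pct': '90.0%'}
    out = tempfile.NamedTemporaryFile(prefix='tmp', suffix='.json', delete=False).name
    Json_file(src).json_writer(to=out)  # dict -> JSON file
    return Json_file(out).stat          # JSON file -> dict again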
##--------------------##
## figure 1
def trim_wrapper(path, smp_name='demo'):
"""
    Summarize trimming and duplicate-removal statistics per sample
input: /path_out/input_reads/
"""
json_files = sorted(glob.glob(path + '/*.cutadapt.json'))
da = []
for j in json_files:
id = re.sub(r'.cutadapt.json', '', os.path.basename(j))
nodup = os.path.join(os.path.dirname(j), id + '.reads.txt') # total reads, nodup
d = json_reader(j)
with open(nodup) as f:
d['nodup'] = next(f).rstrip()
tooshort = int(d['raw']) - int(d['clean'])
dup = int(d['clean']) - int(d['nodup'])
dn = pd.DataFrame({'group': ['raw', 'too_short', 'PCR_dup', 'no_dup'],
id: [d['raw'], tooshort, dup, d['nodup']]})
dn.set_index('group', inplace = True)
da.append(dn)
df = pd.concat(da, axis = 1)
df = df.apply(pd.to_numeric)
# add merge data
df.insert(0, smp_name, df.sum(axis = 1))
return df
# path_trim = os.path.join(path_out, 'input_reads')
# df = trim_wrapper(path_trim, smp_name)
# print(df)
## mapping pct
def map_wrapper(path, smp_name='demo'):
"""
    Summarize mapping statistics against the various genomes
input: /path_out/genome_mapping/
"""
m_files = glob.glob(os.path.join(path, '*.mapping_stat.csv'))
m_files = sorted(m_files, key=len)
ma = []
for m in m_files:
# skip merge stat
m_prefix = re.sub(r'.mapping_stat.csv', '', os.path.basename(m))
if m_prefix == smp_name:
continue
dm = pd.read_csv(m, ',').filter(items=['group', 'read'])
dm.set_index('group', inplace=True)
dm2 = dm.rename(columns={'read': m_prefix})
ma.append(dm2)
df = | pd.concat(ma, axis=1) | pandas.concat |
"""
Functions about routes.
"""
from collections import OrderedDict
from typing import Optional, Iterable, List, Dict, TYPE_CHECKING
import json
import geopandas as gp
import pandas as pd
import numpy as np
import shapely.geometry as sg
import shapely.ops as so
import folium as fl
from . import constants as cs
from . import helpers as hp
# Help mypy but avoid circular imports
if TYPE_CHECKING:
from .feed import Feed
def compute_route_stats_0(
trip_stats_subset: pd.DataFrame,
headway_start_time: str = "07:00:00",
headway_end_time: str = "19:00:00",
*,
split_directions: bool = False,
) -> pd.DataFrame:
"""
Compute stats for the given subset of trips stats (of the form output by the
function :func:`.trips.compute_trip_stats`).
If ``split_directions``, then separate the stats by trip direction (0 or 1).
Use the headway start and end times to specify the time period for computing
headway stats.
Return a DataFrame with the columns
- ``'route_id'``
- ``'route_short_name'``
- ``'route_type'``
- ``'direction_id'``
- ``'num_trips'``: number of trips on the route in the subset
- ``'num_trip_starts'``: number of trips on the route with
nonnull start times
- ``'num_trip_ends'``: number of trips on the route with nonnull
end times that end before 23:59:59
- ``'is_loop'``: 1 if at least one of the trips on the route has
its ``is_loop`` field equal to 1; 0 otherwise
- ``'is_bidirectional'``: 1 if the route has trips in both
directions; 0 otherwise
- ``'start_time'``: start time of the earliest trip on the route
- ``'end_time'``: end time of latest trip on the route
- ``'max_headway'``: maximum of the durations (in minutes)
between trip starts on the route between
``headway_start_time`` and ``headway_end_time`` on the given
dates
- ``'min_headway'``: minimum of the durations (in minutes)
mentioned above
- ``'mean_headway'``: mean of the durations (in minutes)
mentioned above
- ``'peak_num_trips'``: maximum number of simultaneous trips in
service (for the given direction, or for both directions when
``split_directions==False``)
- ``'peak_start_time'``: start time of first longest period
during which the peak number of trips occurs
- ``'peak_end_time'``: end time of first longest period during
which the peak number of trips occurs
- ``'service_duration'``: total of the duration of each trip on
the route in the given subset of trips; measured in hours
- ``'service_distance'``: total of the distance traveled by each
trip on the route in the given subset of trips; measured in
whatever distance units are present in ``trip_stats_subset``;
contains all ``np.nan`` entries if ``feed.shapes is None``
- ``'service_speed'``: service_distance/service_duration;
measured in distance units per hour
- ``'mean_trip_distance'``: service_distance/num_trips
- ``'mean_trip_duration'``: service_duration/num_trips
If not ``split_directions``, then remove the
direction_id column and compute each route's stats,
except for headways, using its trips running in both directions.
In this case, (1) compute max headway by taking the max of the
max headways in both directions; (2) compute mean headway by
taking the weighted mean of the mean headways in both
directions.
If ``trip_stats_subset`` is empty, return an empty DataFrame.
Raise a ValueError if ``split_directions`` and no non-NaN
direction ID values present
"""
if trip_stats_subset.empty:
return pd.DataFrame()
# Convert trip start and end times to seconds to ease calculations below
f = trip_stats_subset.copy()
f[["start_time", "end_time"]] = f[["start_time", "end_time"]].applymap(
hp.timestr_to_seconds
)
headway_start = hp.timestr_to_seconds(headway_start_time)
headway_end = hp.timestr_to_seconds(headway_end_time)
def compute_route_stats_split_directions(group):
# Take this group of all trips stats for a single route
# and compute route-level stats.
d = OrderedDict()
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["num_trips"] = group.shape[0]
d["num_trip_starts"] = group["start_time"].count()
d["num_trip_ends"] = group.loc[
group["end_time"] < 24 * 3600, "end_time"
].count()
d["is_loop"] = int(group["is_loop"].any())
d["start_time"] = group["start_time"].min()
d["end_time"] = group["end_time"].max()
# Compute max and mean headway
stimes = group["start_time"].values
stimes = sorted(
[stime for stime in stimes if headway_start <= stime <= headway_end]
)
headways = np.diff(stimes)
if headways.size:
d["max_headway"] = np.max(headways) / 60 # minutes
d["min_headway"] = np.min(headways) / 60 # minutes
d["mean_headway"] = np.mean(headways) / 60 # minutes
else:
d["max_headway"] = np.nan
d["min_headway"] = np.nan
d["mean_headway"] = np.nan
# Compute peak num trips
active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
times, counts = active_trips.index.values, active_trips.values
start, end = hp.get_peak_indices(times, counts)
d["peak_num_trips"] = counts[start]
d["peak_start_time"] = times[start]
d["peak_end_time"] = times[end]
d["service_distance"] = group["distance"].sum()
d["service_duration"] = group["duration"].sum()
return pd.Series(d)
def compute_route_stats(group):
d = OrderedDict()
d["route_short_name"] = group["route_short_name"].iat[0]
d["route_type"] = group["route_type"].iat[0]
d["num_trips"] = group.shape[0]
d["num_trip_starts"] = group["start_time"].count()
d["num_trip_ends"] = group.loc[
group["end_time"] < 24 * 3600, "end_time"
].count()
d["is_loop"] = int(group["is_loop"].any())
d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
d["start_time"] = group["start_time"].min()
d["end_time"] = group["end_time"].max()
# Compute headway stats
headways = np.array([])
for direction in [0, 1]:
stimes = group[group["direction_id"] == direction]["start_time"].values
stimes = sorted(
[stime for stime in stimes if headway_start <= stime <= headway_end]
)
headways = np.concatenate([headways, np.diff(stimes)])
if headways.size:
d["max_headway"] = np.max(headways) / 60 # minutes
d["min_headway"] = np.min(headways) / 60 # minutes
d["mean_headway"] = np.mean(headways) / 60 # minutes
else:
d["max_headway"] = np.nan
d["min_headway"] = np.nan
d["mean_headway"] = np.nan
# Compute peak num trips
active_trips = hp.get_active_trips_df(group[["start_time", "end_time"]])
times, counts = active_trips.index.values, active_trips.values
start, end = hp.get_peak_indices(times, counts)
d["peak_num_trips"] = counts[start]
d["peak_start_time"] = times[start]
d["peak_end_time"] = times[end]
d["service_distance"] = group["distance"].sum()
d["service_duration"] = group["duration"].sum()
return pd.Series(d)
if split_directions:
f = f.loc[lambda x: x.direction_id.notnull()].assign(
direction_id=lambda x: x.direction_id.astype(int)
)
if f.empty:
raise ValueError(
"At least one trip stats direction ID value " "must be non-NaN."
)
g = (
f.groupby(["route_id", "direction_id"])
.apply(compute_route_stats_split_directions)
.reset_index()
)
# Add the is_bidirectional column
def is_bidirectional(group):
d = {}
d["is_bidirectional"] = int(group["direction_id"].unique().size > 1)
return pd.Series(d)
gg = g.groupby("route_id").apply(is_bidirectional).reset_index()
g = g.merge(gg)
else:
g = f.groupby("route_id").apply(compute_route_stats).reset_index()
# Compute a few more stats
g["service_speed"] = (g["service_distance"] / g["service_duration"]).fillna(
g["service_distance"]
)
g["mean_trip_distance"] = g["service_distance"] / g["num_trips"]
g["mean_trip_duration"] = g["service_duration"] / g["num_trips"]
# Convert route times to time strings
g[["start_time", "end_time", "peak_start_time", "peak_end_time"]] = g[
["start_time", "end_time", "peak_start_time", "peak_end_time"]
].applymap(lambda x: hp.timestr_to_seconds(x, inverse=True))
return g
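# Minimal usage sketch of compute_route_stats_0 on a tiny hand-made trip-stats frame
# (normally produced by trips.compute_trip_stats). Values are illustrative; running it
# assumes this package's helpers module (hp) is importable as above.
def _route_stats_example():
    trip_stats = pd.DataFrame(
        {
            "route_id": ["r1", "r1"],
            "route_short_name": ["1", "1"],
            "route_type": [3, 3],
            "direction_id": [0, 1],
            "is_loop": [0, 0],
            "start_time": ["08:00:00", "08:30:00"],
            "end_time": ["08:50:00", "09:20:00"],
            "duration": [50 / 60, 50 / 60],  # hours
            "distance": [12.0, 12.0],        # kilometers
        }
    )
    return compute_route_stats_0(trip_stats, split_directions=False)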
def compute_route_time_series_0(
trip_stats_subset: pd.DataFrame,
date_label: str = "20010101",
freq: str = "5Min",
*,
split_directions: bool = False,
) -> pd.DataFrame:
"""
Compute stats in a 24-hour time series form for the given subset of trips (of the
form output by the function :func:`.trips.compute_trip_stats`).
If ``split_directions``, then separate each routes's stats by trip direction.
Set the time series frequency according to the given frequency string;
max frequency is one minute ('Min').
Use the given YYYYMMDD date label as the date in the time series index.
Return a DataFrame time series version the following route stats for each route.
- ``num_trips``: number of trips in service on the route
at any time within the time bin
- ``num_trip_starts``: number of trips that start within
the time bin
- ``num_trip_ends``: number of trips that end within the
time bin, ignoring trips that end past midnight
    - ``service_duration``: sum of the service duration accrued
during the time bin across all trips on the route;
measured in hours
- ``service_distance``: sum of the service distance accrued
during the time bin across all trips on the route; measured
in kilometers
- ``service_speed``: ``service_distance/service_duration``
for the route
The columns are hierarchical (multi-indexed) with
- top level: name is ``'indicator'``; values are
``'num_trip_starts'``, ``'num_trip_ends'``, ``'num_trips'``,
``'service_distance'``, ``'service_duration'``, and
``'service_speed'``
- middle level: name is ``'route_id'``;
values are the active routes
- bottom level: name is ``'direction_id'``; values are 0s and 1s
If not ``split_directions``, then don't include the bottom level.
The time series has a timestamp index for a 24-hour period
sampled at the given frequency.
The maximum allowable frequency is 1 minute.
If ``trip_stats_subset`` is empty, then return an empty
DataFrame with the columns ``'num_trip_starts'``,
``'num_trip_ends'``, ``'num_trips'``, ``'service_distance'``,
``'service_duration'``, and ``'service_speed'``.
Notes
-----
- The time series is computed at a one-minute frequency, then
resampled at the end to the given frequency
    - Trips that lack start or end times are ignored, so the
aggregate ``num_trips`` across the day could be less than the
``num_trips`` column of :func:`compute_route_stats_0`
- All trip departure times are taken modulo 24 hours.
So routes with trips that end past 23:59:59 will have all
their stats wrap around to the early morning of the time series,
except for their ``num_trip_ends`` indicator.
      Trip endings past 23:59:59 are not binned so that resampling the
``num_trips`` indicator works efficiently.
- Note that the total number of trips for two consecutive time bins
t1 < t2 is the sum of the number of trips in bin t2 plus the
number of trip endings in bin t1.
Thus we can downsample the ``num_trips`` indicator by keeping
track of only one extra count, ``num_trip_ends``, and can avoid
recording individual trip IDs.
- All other indicators are downsampled by summing.
- Raise a ValueError if ``split_directions`` and no non-NaN
direction ID values present
"""
if trip_stats_subset.empty:
return pd.DataFrame()
tss = trip_stats_subset.copy()
if split_directions:
tss = tss.loc[lambda x: x.direction_id.notnull()].assign(
direction_id=lambda x: x.direction_id.astype(int)
)
if tss.empty:
raise ValueError(
"At least one trip stats direction ID value " "must be non-NaN."
)
# Alter route IDs to encode direction:
# <route ID>-0 and <route ID>-1 or <route ID>-NA
tss["route_id"] = (
tss["route_id"] + "-" + tss["direction_id"].map(lambda x: str(int(x)))
)
routes = tss["route_id"].unique()
# Build a dictionary of time series and then merge them all
# at the end.
# Assign a uniform generic date for the index
date_str = date_label
day_start = pd.to_datetime(date_str + " 00:00:00")
day_end = pd.to_datetime(date_str + " 23:59:00")
rng = | pd.period_range(day_start, day_end, freq="Min") | pandas.period_range |
from sklearn.ensemble import RandomForestRegressor
from sklearn.datasets import make_regression
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.preprocessing import OneHotEncoder
import os
import sys
import csv
import gc
from sklearn.metrics import mean_squared_error
import math
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF
from sklearn.gaussian_process.kernels import RationalQuadratic
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RepeatedKFold
from sklearn import linear_model
from xgboost.sklearn import XGBRegressor
from sklearn.decomposition import PCA
import copy
import pyflux as pf
import datetime
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
PRICED_BITCOIN_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/pricedBitcoin2009-2018.csv"
DAILY_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/dailyOccmatrices/"
betti0_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_0(100).csv"
betti1_input_path = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/betti_1(100).csv"
DAILY_FILTERED_OCCURRENCE_FILE_PATH = "C:/Users/wang.yuhao/Documents/ChainNet/data/original_data/filteredDailyOccMatrices/"
ROW = -1
COLUMN = -1
TEST_SPLIT = 0.01
ALL_YEAR_INPUT_ALLOWED = False
YEAR = 2017
# Baseline
from sklearn.metrics import mean_squared_error
from sklearn import metrics
import matplotlib.pyplot as plt
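# exclude_days: each feature matrix carries the day index in its last column;
# split that off so the models are trained only on the actual features
# (the target price then remains as the new last column).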
def exclude_days(train, test):
row, column = train.shape
train_days = np.asarray(train[:, -1]).reshape(-1, 1)
x_train = train[:, 0:column - 1]
test_days = np.asarray(test[:, -1]).reshape(-1, 1)
x_test = test[:, 0:column - 1]
return x_train, x_test, train_days, test_days
def merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed):
if(aggregation_of_previous_days_allowed):
if(occurrence_data.size==0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.add(occurrence_data, daily_occurrence_normalized_matrix)
else:
if(occurrence_data.size == 0):
occurrence_data = daily_occurrence_normalized_matrix
else:
occurrence_data = np.concatenate((occurrence_data, daily_occurrence_normalized_matrix), axis=0)
#print("merge_data shape: {} occurrence_data: {} ".format(occurrence_data.shape, occurrence_data))
return occurrence_data
def get_normalized_matrix_from_file(day, year, totaltx):
daily_occurrence_matrix_path_name = DAILY_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + '.csv'
daily_occurence_matrix = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
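# fl_get_normalized_matrix_from_file: loads the filtered daily occurrence
# matrices (filter thresholds 0, 10, ..., 40), concatenates them column-wise,
# and compresses the result with PCA before flattening it to a single row.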
def fl_get_normalized_matrix_from_file(day, year, totaltx, n_components):
daily_occurence_matrix = np.asarray([],dtype=np.float32)
for filter_number in range(0, 50, 10):
daily_occurrence_matrix_path_name = DAILY_FILTERED_OCCURRENCE_FILE_PATH + "occ" + str(year) + '{:03}'.format(day) + "_" + str(filter_number) +'.csv'
daily_occurence_matrix_read = pd.read_csv(daily_occurrence_matrix_path_name, sep=",", header=None).values
if(daily_occurence_matrix.size == 0):
daily_occurence_matrix = daily_occurence_matrix_read
else:
daily_occurence_matrix = np.concatenate((daily_occurence_matrix, daily_occurence_matrix_read), axis = 1)
    pca = PCA(n_components=n_components)
pca.fit(daily_occurence_matrix)
daily_occurence_matrix = pca.transform(daily_occurence_matrix)
#print("daily_occurence_matrix: ", daily_occurence_matrix, daily_occurence_matrix.shape)
#return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)/totaltx
return np.asarray(daily_occurence_matrix).reshape(1, daily_occurence_matrix.size)
def get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
#print("occurrence_data: ", occurrence_data)
if(is_price_of_previous_days_allowed):
#print("previous_price_data: ", np.asarray(previous_price_data).reshape(1, -1), np.asarray(previous_price_data).reshape(1, -1).shape)
occurrence_data = np.asarray(previous_price_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
#print("current_row: ", current_row, current_row.shape)
#print(" price occurrence_input: ", np.asarray(current_row['price']).reshape(1,1), (np.asarray(current_row['price']).reshape(1,1)).shape)
#print("concatenate with price occurrence_input: ", occurrence_input, occurrence_input.shape)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print(" price occurrence_input: ", np.asarray(current_row['day']).reshape(1,1), (np.asarray(current_row['day']).reshape(1,1)).shape)
#print("concatenate with day occurrence_input: ", occurrence_input, occurrence_input.shape)
return occurrence_input
def betti_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.array([], dtype=np.float32)
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.row_stack((occurrence_data,previous_price_data))
#print(occurrence_data, occurrence_data.shape)
#print(previous_price_data, previous_price_data.shape)
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
#betti0_50 = read_betti(betti0_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_data, np.asarray(betti0_50).reshape(1,-1)), axis=1)
#betti1_50 = read_betti(betti1_input_path, current_row['day'])
#occurrence_input = np.concatenate((occurrence_input, np.asarray(betti1_50).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def betti_der_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
#print("priced_bitcoin: ", priced_bitcoin, priced_bitcoin.shape)
#print("current_row: ", current_row, current_row.shape)
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
betti0_50 = read_betti(betti0_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti0_50).reshape(1,-1))
betti1_50 = read_betti(betti1_input_path, row['day'])
previous_price_data = np.append(previous_price_data, np.asarray(betti1_50).reshape(1,-1))
betti0_50_diff1 = betti0_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data.reshape(1,-1), np.asarray(betti0_50_diff1).reshape(1,-1)), axis=1)
betti1_50_diff1 = betti1_50.diff(1).dropna()
previous_price_data = np.concatenate((previous_price_data, np.asarray(betti1_50_diff1).reshape(1,-1)), axis=1)
if occurrence_data.size == 0:
occurrence_data = previous_price_data
else:
occurrence_data = np.concatenate((occurrence_data, previous_price_data.reshape(1,-1)), axis=1)
#print(occurrence_data, occurrence_data.shape)
#print("previous_price_data: ", previous_price_data,row['day'], row['year'], row['totaltx'])
occurrence_data = np.asarray(occurrence_data).reshape(1, -1)
occurrence_input = np.concatenate((occurrence_data, np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
return occurrence_input
def fl_get_daily_occurrence_matrices(priced_bitcoin, current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
previous_price_data = np.array([], dtype=np.float32)
occurrence_data = np.array([], dtype=np.float32)
for index, row in priced_bitcoin.iterrows():
if not ((row.values == current_row.values).all()):
previous_price_data = np.append(previous_price_data, row['price'])
previous_price_data = np.append(previous_price_data, row['totaltx'])
daily_occurrence_normalized_matrix = fl_get_normalized_matrix_from_file(row['day'], row['year'], row['totaltx'], 20)
occurrence_data = merge_data(occurrence_data, daily_occurrence_normalized_matrix, aggregation_of_previous_days_allowed)
#print("occurrence_data: ",occurrence_data, occurrence_data.shape)
if(is_price_of_previous_days_allowed):
occurrence_data = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(previous_price_data).reshape(1,-1)), axis=1)
occurrence_input = np.concatenate((occurrence_data.reshape(1,-1), np.asarray(current_row['price']).reshape(1,1)), axis=1)
occurrence_input = np.concatenate((occurrence_input, np.asarray(current_row['day']).reshape(1,1)), axis=1)
#print("occurrence_input: ",occurrence_input, occurrence_input.shape)
return occurrence_input
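# read_betti: returns the first 50 Betti numbers for the given day (1-indexed)
# from the precomputed CSV, which is assumed to hold one row per day.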
def read_betti(file_path, day):
day = day - 1
betti = pd.read_csv(file_path, index_col=0)
try:
betti_50 = betti.iloc[day, 0:50]
except:
print("day:", day)
return betti_50
def rf_base_rmse_mode(train_input, train_target, test_input, test_target):
rf_regression = RandomForestRegressor(max_depth=2, random_state=0)
rf_regression.fit(train_input, train_target.ravel() )
predicted = rf_regression.predict(test_input)
rf_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
return rf_base_rmse
def gp_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'kernel': RationalQuadratic(alpha=0.01, length_scale=1),
'n_restarts_optimizer': 2
}
adj_params = {'kernel': [RationalQuadratic(alpha=0.01,length_scale=1)],
'n_restarts_optimizer': [2]}
gpr = GaussianProcessRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(gpr, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input,train_target)
#print("cv_results_:",cscv.cv_results_)
print("best_params_: ",cscv.best_params_)
gpr = GaussianProcessRegressor(**cscv.best_params_)
gpr.fit(train_input, train_target)
mu, cov = gpr.predict(test_input, return_cov=True)
test_y = mu.ravel()
#uncertainty = 1.96 * np.sqrt(np.diag(cov))
gp_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, test_y))
print(gp_base_rmse)
return gp_base_rmse
def enet_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'alpha': 10,
'l1_ratio': 1,
}
elastic = linear_model.ElasticNet(**param)
adj_params = {'alpha': [10],
'l1_ratio': [ 1]}
#'max_iter': [100000]}
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(elastic, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ",cscv.best_params_)
elastic= linear_model.ElasticNet(**cscv.best_params_)
elastic.fit(train_input,train_target.ravel())
predicted = elastic.predict(test_input)
enet_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("enet_base_rmse: ", enet_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return enet_base_rmse
def xgbt_base_rmse_mode(train_input, train_target, test_input, test_target):
param = {
'n_estimators':1000,
'learning_rate': 0.01,
}
adj_params = {
'n_estimators':[1000],
'learning_rate': [0.01]
}
xgbt = XGBRegressor(**param)
cv = RepeatedKFold(n_splits=10, n_repeats=3, random_state=1)
cscv = GridSearchCV(xgbt, adj_params, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1)
cscv.fit(train_input, train_target)
print("best_params_: ", cscv.best_params_)
xgbt= XGBRegressor(**cscv.best_params_)
xgbt.fit(train_input,train_target.ravel())
predicted = xgbt.predict(test_input)
xgbt_base_rmse = np.sqrt(metrics.mean_squared_error(test_target, predicted))
print("xgbt_base_rmse: ", xgbt_base_rmse)
#print ("RMSE:", np.sqrt(metrics.mean_squared_error(test_target, predicted)))
return xgbt_base_rmse
def arimax_initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100:100+prediction_horizon, :]
x_train, x_test, train_days, test_days = exclude_days(train, test)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
def arimax_base_rmse_mode(train_input, train_target, test_input, test_target):
train_input_diff_arr = np.array([])
train_columns_name = []
train_input_column = int(train_input.shape[1])
for i in range(train_input_column):
if(i%2==0):
train_columns_name.append('price_' + str(i))
else:
train_columns_name.append('totaltx_' + str(i))
train_input_diff = np.diff(train_input[:,i] )
if i == 0:
train_input_diff_arr = train_input_diff
else:
train_input_diff_arr = np.dstack((train_input_diff_arr, train_input_diff))
columns_name = copy.deepcopy(train_columns_name)
columns_name.append('current_price')
train_target_diff = np.diff(train_target )
train_input_diff_arr = np.dstack((train_input_diff_arr, train_target_diff))
train_input_diff_arr = pd.DataFrame(train_input_diff_arr[0], columns = columns_name)
model = pf.ARIMAX(data=train_input_diff_arr,formula="current_price~totaltx_5",ar=1,ma=2,integ=0)
model_1 = model.fit("MLE")
model_1.summary()
test_input_pd = pd.DataFrame(test_input, columns = train_columns_name)
test_target_pd = pd.DataFrame(test_target, columns = ['current_price'])
test_input_target = pd.concat([test_input_pd, test_target_pd], axis=1)
pred = model.predict(h=test_input_target.shape[0],
oos_data=test_input_target,
intervals=True, )
    arimax_base_rmse = np.sqrt(mean_squared_error([test_input_target.iloc[0, 6]], [(train_target[99]) + pred.current_price[99]]))  # RMSE, consistent with the other *_rmse metrics
print("arimax_base_rmse:",arimax_base_rmse)
return arimax_base_rmse
def run_print_model(train_input, train_target, test_input, test_target, train_days, test_days):
rf_base_rmse = rf_base_rmse_mode(train_input, train_target, test_input, test_target)
xgbt_base_rmse = xgbt_base_rmse_mode(train_input, train_target, test_input, test_target)
gp_base_rmse = gp_base_rmse_mode(train_input, train_target, test_input, test_target)
enet_base_rmse = enet_base_rmse_mode(train_input, train_target, test_input, test_target)
return rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse
#print_results(predicted, test_target, original_log_return, predicted_log_return, cost, test_days, rmse)
#return rf_base_rmse
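# preprocess_data builds a sliding-window dataset: for every day beyond
# window_size + prediction_horizon, the feature row is assembled from the
# preceding window (price/totaltx, Betti numbers, or filtered-occurrence
# features, depending on dataset_model) and ends with the target price and
# the day index.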
def preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
priced_bitcoin = pd.read_csv(PRICED_BITCOIN_FILE_PATH, sep=",")
if(ALL_YEAR_INPUT_ALLOWED):
pass
else:
priced_bitcoin = priced_bitcoin[priced_bitcoin['year']==YEAR].reset_index(drop=True)
# get normalized occurence matrix in a flat format and merge with totaltx
daily_occurrence_input = np.array([],dtype=np.float32)
temp = np.array([], dtype=np.float32)
for current_index, current_row in priced_bitcoin.iterrows():
if(current_index<(window_size+prediction_horizon-1)):
pass
else:
start_index = current_index - (window_size + prediction_horizon) + 1
end_index = current_index - prediction_horizon
if(dataset_model=="base"):
temp = get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti"):
temp = betti_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="fl"):
temp = fl_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
elif(dataset_model=="betti_der"):
temp = betti_der_get_daily_occurrence_matrices(priced_bitcoin[start_index:end_index+1], current_row, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
else:
sys.exit("Dataset model support only baseline, betti, fl and betti_der!")
if(daily_occurrence_input.size == 0):
daily_occurrence_input = temp
else:
daily_occurrence_input = np.concatenate((daily_occurrence_input, temp), axis=0)
return daily_occurrence_input
def initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed):
data = preprocess_data(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
train = data[0:100, :]
test = data[100, :].reshape(1, -1)
x_train, x_test, train_days, test_days = exclude_days(train, test)
#print("x_train:", x_train)
row, column = x_train.shape
train_target = np.asarray(x_train[:, -1]).reshape(-1)
train_input = x_train[:, 0:column - 1]
#x_test = x_test.reshape(-1,1)
test_target = x_test[: , -1]
test_input = x_test[ : , 0:column - 1]
return train_input, train_target, test_input, test_target, train_days, test_days
parameter_dict = {
    # 0: dict({'is_price_of_previous_days_allowed': True, 'aggregation_of_previous_days_allowed': True}),
    1: dict({'is_price_of_previous_days_allowed': True, 'aggregation_of_previous_days_allowed': False})}
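# Main experiment loop: for each dataset model, window size and prediction
# horizon, rebuild the dataset, fit the four regressors and collect their
# RMSEs; one summary DataFrame per dataset model is accumulated in `names`
# (rows indexed by horizon, columns per model/window combination).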
for step in parameter_dict:
names = locals()
gc.collect()
evalParameter = parameter_dict.get(step)
is_price_of_previous_days_allowed = evalParameter.get('is_price_of_previous_days_allowed')
aggregation_of_previous_days_allowed = evalParameter.get('aggregation_of_previous_days_allowed')
print("IS_PRICE_OF_PREVIOUS_DAYS_ALLOWED: ", is_price_of_previous_days_allowed)
print("AGGREGATION_OF_PREVIOUS_DAYS_ALLOWED: ", aggregation_of_previous_days_allowed)
window_size_array = [3, 5, 7]
horizon_size_array = [1, 2, 5, 7, 10, 15, 20, 25, 30]
dataset_model_array = ["base", "betti", "fl","betti_der"]
for dataset_model in dataset_model_array:
print('dataset_model: ', dataset_model)
for window_size in window_size_array:
print('WINDOW_SIZE: ', window_size)
for prediction_horizon in horizon_size_array:
print("PREDICTION_HORIZON: ", prediction_horizon)
train_input, train_target, test_input, test_target, train_days, test_days = initialize_setting(dataset_model, window_size, prediction_horizon, is_price_of_previous_days_allowed, aggregation_of_previous_days_allowed)
rf_base_rmse, xgbt_base_rmse, gp_base_rmse, enet_base_rmse = run_print_model(train_input, train_target, test_input, test_target, train_days, test_days)
rmse = pd.DataFrame({'rf_' + dataset_model + '_rmse_'+str(window_size): [rf_base_rmse], 'xgbt_' + dataset_model + '_rmse_'+str(window_size): [xgbt_base_rmse], 'gp_' + dataset_model + '_rmse_'+str(window_size): [gp_base_rmse], 'enet_' + dataset_model + '_rmse_'+str(window_size): [enet_base_rmse]})
if(prediction_horizon==1):
rmse_total = rmse
else:
rmse_total = [rmse_total, rmse]
rmse_total = pd.concat(rmse_total)
if(window_size==3):
names['rmse_' + dataset_model + '_total'] = rmse_total
else:
names['rmse_' + dataset_model + '_total'] = pd.concat([names.get('rmse_' + dataset_model + '_total') , rmse_total], axis=1)
names['rmse_' + dataset_model + '_total'].index = | pd.Series(horizon_size_array) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 10 08:48:34 2021
@author: PatCa
"""
import numpy as np
import pandas as pd
import joblib
from pickle import dump
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import RobustScaler, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
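# PCA_Data: loads the raw feature/label CSVs, cleans and recodes the
# categorical columns, makes a stratified train/test split, then fits a
# StandardScaler and a PCA (95% explained variance) on the continuous
# training features only and persists both to model_artifacts/ for reuse.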
def PCA_Data():
#Import raw data
source_feature = pd.read_csv('data/features.csv')
source_label = pd.read_csv('data/labels.csv')
#Combine features and labels and copy
source_data = pd.merge(source_feature, source_label, on="trackID")
clean_data = source_data.copy()
#Remove na and duplicates
clean_data = clean_data.dropna()
clean_data = clean_data.drop_duplicates()
#Check type
clean_data = clean_data.astype({'time_signature':int,'key':int,'mode':int})
#Rename categorical values
mode_dict = {0:'minor', 1:'major'}
key_dict = {0:'C', 1:'D', 2:'E',3:'F', 4:'G', 5:'H', 6:'I', 7:'J', 8:'K', 9:'L',
10:'M', 11:'N'}
label_dict = {'soul and reggae':1, 'pop':2, 'punk':3, 'jazz and blues':4,
'dance and electronica':5,'folk':6, 'classic pop and rock':7, 'metal':8}
clean_data['mode'] = clean_data['mode'].replace(mode_dict)
clean_data['key'] = clean_data['key'].replace(key_dict)
clean_data['genre'] = clean_data['genre'].replace(label_dict)
#Remove small categories
clean_data = clean_data[clean_data.time_signature != 0]
    #Copy the text feature "tags" (plus labels) into word_df and drop identifier columns
word_df = pd.DataFrame(data=clean_data[['tags','genre']].to_numpy(), columns=['tags','genre'])
clean_data = clean_data.drop(columns=['title','trackID'])
#%%Split data for training and testing
train_data = clean_data
y = train_data[['genre']] #Make Dataframe
training_data = train_data.loc[:,train_data.columns != 'genre']
(X_train, X_test, Y_train, Y_test) = train_test_split(training_data, y,
test_size=0.2,
random_state=42,stratify=y)
#Separate out text data
word_df_train = pd.concat((X_train['tags'],Y_train), axis=1)
word_df_test = pd.concat((X_test['tags'],Y_test), axis=1)
X_train = X_train.drop(columns='tags')
X_test = X_test.drop(columns='tags')
#%%Check feature correlation
nc_cols = ['loudness','tempo','time_signature','key','mode','duration']
cat_feat = ['time_signature','key','mode']
cont_data = X_train.drop(columns=nc_cols)
    #%% PCA on cont_data
pca_scaler = StandardScaler()
pca_scaler.fit(cont_data)
dump(pca_scaler, open('model_artifacts/pca_scaler.pkl', 'wb'))
cont_data_norm = pca_scaler.transform(cont_data)
pca = PCA(0.95).fit(cont_data_norm)
dump(pca, open('model_artifacts/pca.pkl', 'wb'))
num_pca_cols = pca.n_components_
data_pca_array = pca.transform(cont_data_norm)
cont_data_pca = pd.DataFrame(data=data_pca_array)
col_names = ['PCA_'+str(i) for i in range(num_pca_cols)]
cont_data_pca.columns = col_names
X_train2 = X_train[nc_cols]
X_train3 = pd.concat([X_train2.reset_index(drop=True), cont_data_pca.reset_index(drop=True)], axis=1)
#%% Transform test data
cont_test_data = X_test.drop(columns=nc_cols)
cont_test_data_norm = pca_scaler.transform(cont_test_data)
#cont_test_data_norm = (cont_test_data-cont_test_data.mean())/(cont_test_data.std())
test_data_pca_array = pca.transform(cont_test_data_norm)
cont_test_data_pca = | pd.DataFrame(data=test_data_pca_array) | pandas.DataFrame |
import pytest
jinja2 = pytest.importorskip("jinja2")
from pandas import DataFrame
from pandas.io.formats.style import Styler
@pytest.fixture
def df():
return DataFrame(
data=[[0, -0.609], [1, -1.228]],
columns=["A", "B"],
index=["x", "y"],
)
@pytest.fixture
def styler(df):
return Styler(df, uuid_len=0)
def test_concat_bad_columns(styler):
    msg = "`other.data` must have same columns as `Styler.data`"
with pytest.raises(ValueError, match=msg):
styler.concat( | DataFrame([[1, 2]]) | pandas.DataFrame |
from flask import Flask, render_template, request
import plotly
import plotly.graph_objs as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
import json
from flaskext.mysql import MySQL
import statistics
import yaml
app = Flask(__name__)
db = yaml.load(open('/home/abxka/canisonet_development/db.yaml'), Loader=yaml.FullLoader)
app.config['MYSQL_DATABASE_HOST'] = db['mysql_host']
app.config['MYSQL_DATABASE_USER'] = db['mysql_user']
app.config['MYSQL_DATABASE_PASSWORD'] = db['mysql_password']
app.config['MYSQL_DATABASE_DB'] = db['mysql_db']
mysql = MySQL()
mysql.init_app(app)
@app.route("/")
def home():
cur = mysql.get_db().cursor()
cur.execute('''SELECT * FROM cancertypesinproject''')
cancer_types = cur.fetchall()
cancertypes = pd.DataFrame(cancer_types,columns=['PCAWG-Code', 'Cancer Type Name', 'PCAWG_GTEx'])
cancertypes_dict = cancertypes.to_dict(orient='records')
cur2 = mysql.get_db().cursor()
cur2.execute(''' SELECT * FROM ENST_Genename_ENSG_TranscriptName ''')
genename_cmdt = cur2.fetchall()
genenamecmdt = pd.DataFrame(genename_cmdt, columns=['Feature', 'DomCancerTrans','GeneName1_x', 'ENSG', 'Transcript_Name'])
genenamecmdt_gene_list = list(genenamecmdt.iloc[:,2].unique())
genenamecmdt_gene_cmdt = list(genenamecmdt.iloc[:,1].unique())
genenamecmdt_gene_tn = list(genenamecmdt.iloc[:,4].unique())
temp_dict = genenamecmdt.to_dict(orient='records')
return render_template('home.html', data=cancertypes_dict, data2=genenamecmdt_gene_list, data3=genenamecmdt_gene_cmdt, temp_dict=temp_dict, genenamecmdt_gene_tn=genenamecmdt_gene_tn)
@app.route("/cspec_cMDTs")
def cspec_cMDTs():
cur = mysql.get_db().cursor()
cur.execute(''' SELECT CancerType, cMDT, GeneName, Count, Total, Frequency, mart_export.`Associated Transcript Name` FROM tables9 LEFT JOIN mart_export ON tables9.cMDT = mart_export.`Ensembl Transcript ID` ORDER BY Frequency DESC''')
TableS9_tuple = cur.fetchall()
result = pd.DataFrame(TableS9_tuple, columns=['CancerType', 'cMDT', 'GeneName','Count', 'Total', 'Frequency', 'Transcript_Name'])
result[['Splitted','CancerType2']] = result.CancerType.str.split('.', expand=True)
result2 = result[['CancerType2', 'CancerType', 'cMDT', 'GeneName','Count', 'Total', 'Frequency', 'Transcript_Name']]
result2 = result2.astype({'Count': int, 'Total': int})
    temp_dict = result2.to_dict(orient='records')
return render_template('cspec_cMDTs.html', temp_dict=temp_dict)
@app.route("/download", methods=['GET', 'POST'])
def download():
return render_template("download.html")
@app.route("/help", methods=['GET', 'POST'])
def help():
cur = mysql.get_db().cursor()
cur.execute(''' SELECT CancerType, Total FROM tables4 ''')
TableS4_tuple = cur.fetchall()
TableS4 = pd.DataFrame(TableS4_tuple, columns=['CancerType','Total'])
TableS4 = TableS4.rename(columns={'CancerType': 'CancerType','Total':'Total'})
TableS4_uniq = TableS4.drop_duplicates()
graphJSON = sample_size(TableS4_uniq)
return render_template("help.html", TableS4=TableS4, graphJSON=graphJSON)
def sample_size(TableS4):
x = TableS4.sort_values(by=['Total']).iloc[:,0].str.split(".", n=1, expand=True).iloc[:,1]
y = TableS4.sort_values(by=['Total']).iloc[:,1]
data = [go.Bar(x=x, y=y, marker_color = 'indianred')]
graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON
@app.route("/Cancer", methods=['GET', 'POST'])
def Cancer():
SampleCancerType = request.args.get('cancer')
cur = mysql.get_db().cursor()
sql = ''' SELECT Tissue, GeneName1, CancerSampleId, DomCancerTrans, mart_export.`Associated Transcript Name` FROM interactiondisruptionindominanttranscripts
LEFT JOIN mart_export ON interactiondisruptionindominanttranscripts.DomCancerTrans = mart_export.`Ensembl Transcript ID`
WHERE interactiondisruptionindominanttranscripts.Tissue = %s '''
adr = (SampleCancerType, )
cur.execute(sql, adr)
isonet_tuple = cur.fetchall()
df = pd.DataFrame(isonet_tuple, columns=['Tissue', 'GeneName1', 'CancerSampleId', 'DomCancerTrans', 'Associated Transcript Name'])
df[['Splitted','CancerType2']] = df.Tissue.str.split('.', expand=True)
df_iso = df[['CancerType2', 'Tissue', 'CancerSampleId', 'GeneName1', 'DomCancerTrans', 'Associated Transcript Name']]
df_iso2 = df_iso.drop_duplicates()
result = df_iso2.rename(columns={'Associated Transcript Name':'Transcript_Name'})
temp_dict = result.to_dict(orient='records')
## second table in the page representing second graph
cur2 = mysql.get_db().cursor()
sql2 = '''SELECT CancerType, SampleID, NumberOfMDTs FROM mdts_vs_muts WHERE CancerType = %s '''
adr2 = (SampleCancerType, )
cur2.execute(sql2, adr2)
dataset_muts = cur2.fetchall()
muts_df = pd.DataFrame(dataset_muts, columns= ['CancerType', 'SampleID', 'NumberOfMDTs' ])
muts_df[['Splitted','CancerType2']] = muts_df.CancerType.str.split('.', expand=True)
dataset = muts_df.to_dict(orient='records')
return render_template("Cancer_Based.html", SampleCancerType = SampleCancerType, data = temp_dict, data2 = dataset)
def CancerSpecific(SampleCancerType):
SampleCancerType = request.args.get('cancer')
cur = mysql.get_db().cursor()
sql = ''' SELECT cMDT, Frequency, CancerType, mart_export.`Associated Transcript Name` FROM tables4 LEFT JOIN mart_export ON tables4.cMDT = mart_export.`Ensembl Transcript ID`
WHERE tables4.CancerType = %s ORDER BY Frequency DESC LIMIT 10 '''
adr = (SampleCancerType, )
cur.execute(sql,adr)
TableS4_tuple = cur.fetchall()
result = pd.DataFrame(TableS4_tuple, columns=['cMDT','Frequency','CancerType', 'Transcript_Name'])
cur2 = mysql.get_db().cursor()
sql2 = ''' SELECT CancerType, SampleID, NumberOfMDTs FROM mdts_vs_muts WHERE CancerType = %s '''
adr2 = (SampleCancerType, )
cur2.execute(sql2, adr2)
cMDT_mdts_muts = cur2.fetchall()
cMDT_dist = pd.DataFrame(cMDT_mdts_muts, columns=['CancerType', 'SampleID','NumberOfMDTs'])
plot = make_subplots(rows=1, cols=2, column_widths=[0.7, 0.3], subplot_titles=("Top 10 Transcripts", "Distribiton of cMDTs Across Samples"))
trace1 = go.Bar(
name = '% of ENSTs across ' + SampleCancerType,
marker_color = 'indianred',
x=result.iloc[:,3],
y=result.iloc[:,1]*100
)
trace2 = go.Box(y=cMDT_dist.NumberOfMDTs,boxpoints='outliers',jitter=0.3, name = 'Distribution of cMDT Counts in Samples',
marker_color = '#00CC96')
plot.append_trace(trace1, row=1, col=1)
plot.update_yaxes(title_text="Occurence of Transcripts in Samples (%)", row=1, col=1)
plot.append_trace(trace2, row=1, col=2)
plot.update_yaxes(title_text="cMDT Counts across samples", row=1, col=2)
plot.update_layout(showlegend=False)
graphJSON = json.dumps(plot, cls=plotly.utils.PlotlyJSONEncoder)
return graphJSON
@app.route('/CancerSpecific', methods=['GET', 'POST'])
def change_features5():
SampleCancerType = request.args.get('cancer')
graphJSON = CancerSpecific(SampleCancerType)
return graphJSON
@app.route("/Transcript", methods=['GET', 'POST'])
def Transcript():
genename = request.args.get('gene')
enstid = request.args.get('enst')
# Query interactiondisruptionindominanttranscripts from mysql table
cur2 = mysql.get_db().cursor()
sql = ''' SELECT Tissue, ENSG, NumberOfGtexMDIs, GeneName1, GeneName2, TotalNumberOfStringInt, NumberOfUniqMissedInteractionsOfDomCancerTrans, Pfam1, Domain1, Pfam2, Domain2, CancerSampleId, DomCancerTrans, StringDensityRank1, Region1, mart_export.`Associated Transcript Name` FROM interactiondisruptionindominanttranscripts
LEFT JOIN mart_export ON interactiondisruptionindominanttranscripts.DomCancerTrans = mart_export.`Ensembl Transcript ID`
WHERE interactiondisruptionindominanttranscripts.DomCancerTrans = %s '''
adr = (enstid,)
cur2.execute(sql, adr)
isonet_tuple = cur2.fetchall()
df = pd.DataFrame(isonet_tuple, columns=['Tissue', 'ENSG', 'NumberOfGtexMDIs', 'GeneName1', 'GeneName2',
'TotalNumberOfStringInt', 'NumberOfUniqMissedInteractionsOfDomCancerTrans',
'Pfam1', 'Domain1', 'Pfam2', 'Domain2', 'CancerSampleId', 'DomCancerTrans',
'StringDensityRank1','Region1', 'Transcript_Name'])
df = df.rename(columns={'ENSG': 'ENSGid', 'NumberOfUniqMissedInteractionsOfDomCancerTrans': 'MissedInteractions','TotalNumberOfStringInt': 'NumberOfStringInt'})
df['Domain1'].replace({"-":"None"},inplace=True)
df['Domain2'].replace({"-":"None"},inplace=True)
df['MissedInteractions'].replace({-1:0},inplace=True)
#result = df.to_dict(orient='records')
transcript_name = df[df.DomCancerTrans == enstid].iloc[0,15]
if genename == "":
genename = df[df.DomCancerTrans == enstid].iloc[0,3]
    elif genename is None:
genename = df[df.DomCancerTrans == enstid].iloc[0,3]
enst_list = list(df[df.GeneName1 == genename]['DomCancerTrans'].unique())
if enstid in enst_list:
# Query cancer gene census genes from mysql table
cur4 = mysql.get_db().cursor()
cur4.execute( '''SELECT `Gene Symbol` FROM cancer_gene_census ''' )
cancer_gene_census_tuple = cur4.fetchall()
df_cgc = pd.DataFrame(cancer_gene_census_tuple, columns=['Gene Symbol'])
df_cgc = df_cgc.rename(columns={'Gene Symbol': 'GeneName'})
df_cgc_list = df_cgc['GeneName'].tolist()
df[['Splitted','CancerType2']] = df.Tissue.str.split('.', expand=True)
df = df.drop_duplicates()
data_dict = df.to_dict(orient='records')
#make a table for some statistics
statistic_table = df[['GeneName1', 'GeneName2', 'NumberOfStringInt', 'MissedInteractions', 'Domain1', 'Domain2', 'StringDensityRank1', 'Region1', 'DomCancerTrans']].drop_duplicates()
statistics_table_dict = statistic_table.to_dict(orient='records')
string_score = statistic_table.iloc[0,6]
#string_score = float("{:.2f}".format(string_score))*100
string_score = int(statistic_table.iloc[0,6]*100)
### DRAW PIE CHARTS
data = make_subplots(rows=1, cols=2, specs=[[{'type':'domain'}, {'type':'domain'}]],
subplot_titles=("% Interaction Lost", "Cancer Types"))
data.add_trace(go.Pie(labels=df.drop_duplicates(subset=['CancerSampleId', 'Tissue']).Tissue), 1, 2)
data.update_traces(selector=dict(type='pie'), textinfo='label+text',hoverinfo='label+percent',
marker=dict(line=dict(color='#000000', width=4)))
if statistic_table.iloc[0,2] == 0:
data.add_trace(go.Pie(values=[100, 0], labels=['% of Remaining Interaction', '% of Interaction Lost']),1,1)
data.update_traces(selector=dict(type='pie'),textinfo='label+text',hoverinfo='label+percent',
marker=dict(colors=['mediumturquoise', 'gold'], line=dict(color='#000000', width=4)))
else:
data.add_trace(go.Pie(values=[(statistic_table.iloc[0,2]-statistic_table.iloc[0,3])*100/(statistic_table.iloc[0,2]),statistic_table.iloc[0,3]*100/(statistic_table.iloc[0,2])], labels=['% of Remaining Interaction', '% of Interaction Lost']),1,1)
data.update_traces(selector=dict(type='pie'),textinfo='label+text',hoverinfo='label+percent',
marker=dict(colors=['mediumturquoise', 'gold'], line=dict(color='#000000', width=4)))
data.update_layout(showlegend=False, title_font_size=18)
graphJSON2 = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
## end of PIE CHART
## Take missed interactions for the interested ENST id from the missed_interactions table from mysqldb
cur3 = mysql.get_db().cursor()
#sql3 = ''' SELECT ENSG, ENSP, ENST, MissInts FROM missed_interactions WHERE ENST = %s '''
sql3 = ''' SELECT ENSG, ENSP, ENST, MissInts FROM interactionsinisoforms_900 WHERE ENST = %s '''
adr3 = (enstid,)
cur3.execute(sql3, adr3)
missed_interactions_tuple = cur3.fetchall()
missed_interactions = pd.DataFrame(missed_interactions_tuple, columns=['ENSG', 'ENSP', 'ENST', 'MissInts'])
Isoform_Int_Network_splitted = pd.DataFrame(missed_interactions.MissInts.str.split(':').tolist())
Isoform_Int_Network = pd.concat([missed_interactions, Isoform_Int_Network_splitted], axis=1)
ENSP_list = list()
ensp_frame = list()
## take existing interactions for the interested ENST id from the missed_interactions table from mysqldb
cur_ext_int = mysql.get_db().cursor()
#sql3 = ''' SELECT ENSG, ENSP, ENST, ExistInts FROM missed_interactions WHERE ENST = %s '''
sql_ext_int = ''' SELECT ENSG, ENSP, ENST, ExistInts FROM interactionsinisoforms_900 WHERE ENST = %s '''
adr_ext_int = (enstid,)
cur_ext_int.execute(sql_ext_int, adr_ext_int)
exist_interactions_tuple = cur_ext_int.fetchall()
exist_interactions = pd.DataFrame(exist_interactions_tuple, columns=['ENSG', 'ENSP', 'ENST', 'ExistInts'])
Isoform_Int_Network_splitted_exists = pd.DataFrame(exist_interactions.ExistInts.str.split(':').tolist())
#Isoform_Int_Network_exists = pd.concat([exist_interactions, Isoform_Int_Network_splitted_exists], axis=1)
for eachcolumn in range(3, len(Isoform_Int_Network.iloc[0,:])):
Isoform_Int_Network.iloc[0,eachcolumn] = str(Isoform_Int_Network.iloc[0,eachcolumn])
if "ENSP" in Isoform_Int_Network.iloc[0,eachcolumn]:
ENSP_list.append(Isoform_Int_Network.iloc[0,eachcolumn])
else:
continue
#ensp_frame.append(ENSP_list)
#ensp_frame = functools.reduce(operator.iconcat, ensp_frame, [])
ensp_frame = ENSP_list
## for the existing ints
ensp_frame_exists = list()
ENSP_list_exists = list()
#for eachcolumn in range(3, len(Isoform_Int_Network_exists.iloc[0,:])):
# Isoform_Int_Network_exists.iloc[0,eachcolumn] = str(Isoform_Int_Network_exists.iloc[0,eachcolumn])
# if "ENSP" in Isoform_Int_Network_exists.iloc[0,eachcolumn]:
# ENSP_list_exists.append(Isoform_Int_Network_exists.iloc[0,eachcolumn])
# else:
# continue
for eachcolumn in range(3, len(Isoform_Int_Network_splitted_exists.iloc[0,:])):
Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn] = str(Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn])
if "ENSP" in Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn]:
ENSP_list_exists.append(Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn])
else:
continue
ensp_frame_exists = ENSP_list_exists
partner_genenames = []
try:
placeholders = ','.join(['%s'] * len(ensp_frame))
cur_ensp = mysql.get_db().cursor()
cur_ensp.execute('''SELECT ENSPid, GeneName FROM ensg_enst_ensp_des WHERE ENSPid IN (%s)'''%placeholders, tuple(ensp_frame))
ensp_tuple = cur_ensp.fetchall()
ensp_genename = pd.DataFrame(ensp_tuple, columns=['ENSPid', 'GeneName'])
partner_genenames = list(ensp_genename['GeneName'])
except:
pass
partner_genenames_exists = []
try:
placeholders = ','.join(['%s'] * len(ensp_frame_exists))
cur_ensp_exists = mysql.get_db().cursor()
cur_ensp_exists.execute('''SELECT ENSPid, GeneName FROM ensg_enst_ensp_des WHERE ENSPid IN (%s)'''%placeholders, tuple(ensp_frame_exists))
ensp_tuple_exists = cur_ensp_exists.fetchall()
ensp_genename_exists = pd.DataFrame(ensp_tuple_exists, columns=['ENSPid', 'GeneName'])
partner_genenames_exists = list(ensp_genename_exists['GeneName'])
except:
pass
cgc_partners = [eachpartner for eachpartner in partner_genenames if eachpartner in df_cgc_list]
cgc_partners_df = pd.DataFrame({'GeneName':cgc_partners})
df_cgc_dict = cgc_partners_df.drop_duplicates().to_dict(orient='records')
return render_template('network.html', ensp_frame_exists=ensp_frame_exists, string_score=string_score, partner_genenames_exists=partner_genenames_exists, transcript_name=transcript_name, genename=genename, enstid=enstid, partner_genenames=partner_genenames, data=data_dict, data_statistics = statistics_table_dict, df_cgc_list=df_cgc_list, cgc=df_cgc_dict, graphJSON2=graphJSON2)
@app.route("/Gene", methods=['GET', 'POST'])
def Gene():
# Take genename and enstid from url
genename = request.args.get('gene')
# Query cancer gene census genes from mysql table
cur4 = mysql.get_db().cursor()
cur4.execute( '''SELECT `Gene Symbol` FROM cancer_gene_census ''' )
cancer_gene_census_tuple = cur4.fetchall()
df_cgc = pd.DataFrame(cancer_gene_census_tuple, columns=['Gene Symbol'])
df_cgc = df_cgc.rename(columns={'Gene Symbol': 'GeneName'})
df_cgc_list = df_cgc['GeneName'].tolist()
cur5 = mysql.get_db().cursor()
sql5 = ''' SELECT Tissue, ENSG, NumberOfGtexMDIs, GeneName1, GeneName2, TotalNumberOfStringInt, NumberOfUniqMissedInteractionsOfDomCancerTrans, Pfam1, Domain1, Pfam2, Domain2, CancerSampleId, DomCancerTrans, StringDensityRank1, Region1, mart_export.`Associated Transcript Name` FROM interactiondisruptionindominanttranscripts
LEFT JOIN mart_export ON interactiondisruptionindominanttranscripts.DomCancerTrans = mart_export.`Ensembl Transcript ID`
WHERE interactiondisruptionindominanttranscripts.GeneName1 = %s '''
adr5 = (genename,)
cur5.execute(sql5, adr5)
isonet_tuple = cur5.fetchall()
df = pd.DataFrame(isonet_tuple, columns=['Tissue', 'ENSG', 'NumberOfGtexMDIs', 'GeneName1', 'GeneName2', 'TotalNumberOfStringInt', 'NumberOfUniqMissedInteractionsOfDomCancerTrans', 'Pfam1', 'Domain1', 'Pfam2', 'Domain2', 'CancerSampleId', 'DomCancerTrans', 'StringDensityRank1', 'Region1', 'Transcript_Name'])
df = df.rename(columns={'ENSG': 'ENSGid', 'TotalNumberOfStringInt': 'NumberOfStringInt','NumberOfUniqMissedInteractionsOfDomCancerTrans': 'MissedInteractions'})
df[['Splitted','CancerType2']] = df.Tissue.str.split('.', expand=True)
df = df.drop_duplicates()
df['Domain1'].replace({"-":"None"},inplace=True)
df['Domain2'].replace({"-":"None"},inplace=True)
df['MissedInteractions'].replace({-1:0},inplace=True)
result = df.to_dict(orient='records')
statistic_table = df[['GeneName1', 'GeneName2', 'NumberOfStringInt', 'MissedInteractions', 'Domain1', 'Domain2', 'StringDensityRank1', 'Region1', 'DomCancerTrans', 'Transcript_Name']].drop_duplicates()
statistics_table_dict = statistic_table.to_dict(orient='records')
data_dict = df.to_dict(orient='records')
string_score = statistic_table.iloc[0,6]
string_score = float("{:.2f}".format(string_score))*100
### DRAW PIE CHART
data = make_subplots(rows=1, cols=1, specs=[[{'type':'domain'}]])
data.add_trace(go.Pie(labels=df.Tissue, title="Cancer Types"), 1, 1)
data.update_traces(selector=dict(type='pie'), textinfo='label+text',hoverinfo='label+percent',
marker=dict(line=dict(color='#000000', width=4)))
data.update_layout(title=" Gene Name: {} ".format(genename), showlegend=False, title_font_size=24)
graphJSON2 = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)
# take missed interactions from the db
ensg = df.iloc[0,1]
cur3 = mysql.get_db().cursor()
sql3 = ''' SELECT ENSG, ENSP, ENST, ExistInts, MissInts FROM interactionsinisoforms_900 WHERE ENSG = %s LIMIT 1'''
adr3 = (ensg,)
cur3.execute(sql3, adr3)
missed_interactions_tuple = cur3.fetchall()
missed_interactions = pd.DataFrame(missed_interactions_tuple, columns=['ENSG', 'ENSP', 'ENST', 'ExistsInts', 'MissInts'])
Isoform_Int_Network_splitted = pd.DataFrame(missed_interactions.MissInts.str.split(':').tolist())
#Isoform_Int_Network = pd.concat([missed_interactions, Isoform_Int_Network_splitted], axis=1)
Isoform_Int_Network_splitted_exists = pd.DataFrame(missed_interactions.ExistsInts.str.split(':').tolist())
#Isoform_Int_Network_exists = pd.concat([missed_interactions, Isoform_Int_Network_splitted_exists], axis=1)
ENSP_list = list()
ensp_frame = list()
for eachcolumn in range(4, len(Isoform_Int_Network_splitted.iloc[0,:])):
Isoform_Int_Network_splitted.iloc[0,eachcolumn] = str(Isoform_Int_Network_splitted.iloc[0,eachcolumn])
if "ENSP" in Isoform_Int_Network_splitted.iloc[0,eachcolumn]:
ENSP_list.append(Isoform_Int_Network_splitted.iloc[0,eachcolumn])
else:
continue
ensp_frame = ENSP_list
ensp_frame_exists = list()
ENSP_list_exists = list()
for eachcolumn in range(3, len(Isoform_Int_Network_splitted_exists.iloc[0,:])):
Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn] = str(Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn])
if "ENSP" in Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn]:
ENSP_list_exists.append(Isoform_Int_Network_splitted_exists.iloc[0,eachcolumn])
else:
continue
ensp_frame_exists = ENSP_list_exists
partner_genenames = []
try:
placeholders = ','.join(['%s'] * len(ensp_frame))
cur_ensp = mysql.get_db().cursor()
cur_ensp.execute('''SELECT ENSPid, GeneName FROM ensg_enst_ensp_des WHERE ENSPid IN (%s)'''%placeholders, tuple(ensp_frame))
ensp_tuple = cur_ensp.fetchall()
ensp_genename = pd.DataFrame(ensp_tuple, columns=['ENSPid', 'GeneName'])
partner_genenames = list(ensp_genename['GeneName'])
except:
pass
partner_genenames_exists = []
try:
placeholders = ','.join(['%s'] * len(ensp_frame_exists))
cur_ensp_exists = mysql.get_db().cursor()
cur_ensp_exists.execute('''SELECT ENSPid, GeneName FROM ensg_enst_ensp_des WHERE ENSPid IN (%s)'''%placeholders, tuple(ensp_frame_exists))
ensp_tuple_exists = cur_ensp_exists.fetchall()
ensp_genename_exists = pd.DataFrame(ensp_tuple_exists, columns=['ENSPid', 'GeneName'])
partner_genenames_exists = list(ensp_genename_exists['GeneName'])
except:
pass
return render_template('GeneBased.html', genename=genename, partner_genenames_exists=partner_genenames_exists, partner_genenames=partner_genenames, string_score=string_score, df_cgc_list=df_cgc_list, data=data_dict, data_statistics = statistics_table_dict, result=result, graphJSON2=graphJSON2)
@app.route("/Sample", methods=['GET', 'POST'])
def Sample():
CancerSampleId = request.args.get('sampleid')
genename = request.args.get('gene')
tissue = request.args.get('tissue')
cur = mysql.get_db().cursor()
sql = ''' SELECT Tissue, ENSG, GeneName1, CancerSampleId, DomCancerTrans, GTExMDIs FROM interactiondisruptionindominanttranscripts WHERE CancerSampleId = %s '''
adr = (CancerSampleId, )
cur.execute(sql, adr)
isonet_tuple = cur.fetchall()
df = pd.DataFrame(isonet_tuple, columns=['Tissue', 'ENSG', 'GeneName1', 'CancerSampleId', 'DomCancerTrans', 'GTExMDIs'])
df = df.rename(columns={'ENSG': 'ENSGid'})
df = df.drop_duplicates()
dff = df[df.GeneName1 == genename]
cancer_trans_id = list(dff['DomCancerTrans'])[0] ## DomCancerTrans id
normal_trans_ids = dff['GTExMDIs'].str.split(";", expand = True) # n=1
normal_trans_id_list = []
#list of normal trans id
for i in range(len(normal_trans_ids.iloc[0,:])):
for j in range(len(normal_trans_ids.iloc[:,0])):
if "ENST" in str(normal_trans_ids.iloc[j,i]):
normal_trans_id_list.append(normal_trans_ids.iloc[j,i].split(":", 1)[0])
else:
continue
normal_trans_id_dict = dict()
for index,value in enumerate(normal_trans_id_list):
normal_trans_id_dict[index] = value
return render_template("Sample_Based.html", CancerSampleId=CancerSampleId, tissue=tissue, genename=genename, normal_trans_id_dict=normal_trans_id_dict, normal_trans_id_list=normal_trans_id_list, cancer_trans_id=cancer_trans_id)
def update_fig(CancerSampleId, genename, tissue):
tissue = request.args.get('tissuetype')
tissuetype = tissue.split('.')[1].replace('-','_')
CancerSampleId = request.args.get('CanSampleId')
cur = mysql.get_db().cursor()
sql = ''' SELECT Tissue, ENSG, GeneName1, CancerSampleId, DomCancerTrans, GTExMDIs FROM interactiondisruptionindominanttranscripts WHERE CancerSampleId = %s '''
adr = (CancerSampleId, )
cur.execute(sql, adr)
isonet_tuple = cur.fetchall()
df = pd.DataFrame(isonet_tuple, columns=['Tissue', 'ENSG', 'GeneName1', 'CancerSampleId', 'DomCancerTrans', 'GTExMDIs'])
df = df.rename(columns={'ENSG': 'ENSGid'})
dff = df[df.GeneName1 == genename]
#col_name = dff.iloc[0,3] ## sample id
cancer_trans_id = dff.iloc[0,4] ## DomCancerTrans id
normal_trans_ids = dff['GTExMDIs'].str.split(";", expand = True) # n=1
normal_trans_id_list = []
#list of normal trans id
for i in range(len(normal_trans_ids.iloc[0,:])):
for j in range(len(normal_trans_ids.iloc[:,0])):
if "ENST" in str(normal_trans_ids.iloc[j,i]):
normal_trans_id_list.append(normal_trans_ids.iloc[j,i].split(":", 1)[0])
else:
continue
# extract normal transcripts expressions from gtex data - it can be more than 1 transcript
gtex = '_gtex'
my_tissue = tissuetype+gtex
#placeholders = '|'.join(['%s'] * len(normal_trans_id_list))
gtex_cur_normal = mysql.get_db().cursor()
df_gtex_normal = pd.DataFrame()
for eachtranscript in normal_trans_id_list:
gtex_sql_normal = '''SELECT * FROM ''' + my_tissue + ''' WHERE Feature REGEXP %s'''
gtex_adr_normal = (eachtranscript,)
gtex_cur_normal.execute(gtex_sql_normal, gtex_adr_normal)
df_gtex_tuple = gtex_cur_normal.fetchall()
df_gtex_normal_df = pd.DataFrame(df_gtex_tuple)
        df_gtex_normal = pd.concat([df_gtex_normal, df_gtex_normal_df], ignore_index=True)  # DataFrame.append is deprecated in newer pandas
#df_gtex_normal_exp_list = list(df_gtex_normal['Feature'])
# extract cancer transcript expression from gtex data - only 1 transcript
gtex_cur_cancer = mysql.get_db().cursor()
gtex_sql_cancer = ''' SELECT * FROM ''' + my_tissue + ''' WHERE Feature REGEXP %s '''
gtex_adr_cancer = (cancer_trans_id,)
gtex_cur_cancer.execute(gtex_sql_cancer, gtex_adr_cancer)
df_gtex_tuple_2 = gtex_cur_cancer.fetchall()
df_gtex_cancer = pd.DataFrame(df_gtex_tuple_2)
pcawg = '_pcawg'
my_tissue2 = tissuetype + pcawg
#extract normal transcripts expressions from pcawg data - it can be more than 1 transcript
pcawg_cur_normal = mysql.get_db().cursor()
df_pcawg_normal = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from rotation_analysis.analysis.probe.event_plotting_functions import plot_raster, plot_histogram
from rotation_analysis.analysis.probe.probe_block_plotter import ProbeBlockPlotter
from rotation_analysis.analysis.probe.probe_events_collection import ProbeEventsCollection
from rotation_analysis.analysis.probe.probe_trial import ProbeTrial
from rotation_analysis.analysis.block import Block
class ProbeBlock(Block):
"""
A block of trials. All analysis at this level considers groupings of trials, set by the context manager and
a dictionary of conditions see temp_setattr for further information.
"""
def __init__(self, cluster, use_bsl_2, spike_struct, bonsai_io, igor_io, trigger_trace_io):
self.bonsai_io = bonsai_io
self.shuffles_results = pd.DataFrame()
self.cell = cluster
self.metrics_functions_dict = {
'frequency': self.get_trials_freqs,
}
self.trials = []
# assert bonsai_io.n_triggers == igor_io.n_triggers == trigger_trace_io.n_triggers
self.trials = [ProbeTrial(self, i, use_bsl_2, spike_struct,
bonsai_io, igor_io, trigger_trace_io) for i in range(trigger_trace_io.n_triggers)]
self.plotter = ProbeBlockPlotter(self)
self.spike_struct = spike_struct
self.stats_df = pd.DataFrame()
def get_metadata(self, recordings):
return None
def plot(self):
self.plotter.plot_raster()
def get_averages_df(self):
df_dict = {}
metrics_funcs = {
'frequency': self.get_average_freq,
}
mapping = {
'frequency': 'freq',
}
for c1, c2 in self.condition_pairs:
for metric in self.analysed_metrics:
df_dict['cell id'] = self.cell.id
df_dict['condition'] = self.condition_str()
df_dict['best channel'] = self.cell.depth
metric_func = metrics_funcs[metric]
metric = mapping[metric]
if c1 == 'bsl_short':
col1 = '{}_{}_{}'.format(c1, c2, metric)
else:
col1 = '{}_{}'.format(c1, metric)
col2 = '{}_{}'.format(c2, metric)
df_dict[col1] = metric_func(c1, c2)
df_dict[col2] = metric_func(c2)
col3 = 'delta_{}_{}'.format(c2, c1)
df_dict[col3] = metric_func(c2) - metric_func(c1, c2)
return pd.DataFrame(df_dict, index=[0])
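    # Returns one ProbeEventsCollection per kept trial, restricted to the given
    # stimulus period; with relative=True the event times are re-referenced to
    # the start of that period.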
def get_events_in_period(self, period, constraining_period=None, relative=True):
all_events = []
for t in self.kept_trials:
start, end = t.stimulus.get_ranges_by_type(period, constraining_period)[0]
events = t.get_events_in_period(period, constraining_period)
if relative:
events = ProbeEventsCollection(events - start)
all_events.append(events)
return all_events
def plot_all_sub_stimuli(self, n_bins=30, time_match_group='c_wise', fig=None):
"""
:param n_bins: the binning size of the histogram
:param time_match_group: the condition_pair to use to determine the duration of the group being compared
        (e.g. when comparing a baseline that can be of arbitrary length)
:return:
"""
if fig is None:
fig = plt.figure(facecolor='w', figsize=(8, 3))
PLOT_SPACE = 5000
labels = self.kept_trials[0].stimulus.histogram_plot_labels
time_match_group = labels[-1]
assert 'bsl' not in time_match_group
for i, label in enumerate(labels):
events_in_period = self.get_events_in_period(label, time_match_group)
start, end = self.kept_trials[0].stimulus.get_ranges_by_type(label, time_match_group)[0]
duration_in_samples = end - start
ax = fig.add_subplot(2, len(labels), i + 1)
plot_raster(events_in_period, label=label)
plt.xlim([0 - PLOT_SPACE, duration_in_samples + PLOT_SPACE])
ax = fig.add_subplot(2, len(labels), i + 1 + len(labels))
plot_histogram(events_in_period, ax=ax, duration_in_samples=duration_in_samples, label=label, n_bins=n_bins)
plt.xlim([0 - PLOT_SPACE, duration_in_samples + PLOT_SPACE])
plt.tight_layout()
return fig
def plot_all_stimuli(self):
for t in self.kept_trials:
t.stimulus.plot()
plt.show()
def to_df(self):
df_dict = {}
for c1, c2 in self.condition_pairs:
for metric in self.analysed_metrics:
df_dict['cell_id'] = self.cell.id
df_dict['condition'] = self.condition_str()
df_dict['best channel'] = self.cell.depth
colname = '{}_{}'.format(c1, metric)
df_dict[colname] = self.metrics_functions_dict[metric](c1, c2)
colname = '{}_{}'.format(c2, metric)
df_dict[colname] = self.metrics_functions_dict[metric](c2)
colname = 'delta_{}_{}'.format(c2, c1)
delta_c2_c1 = self.metrics_functions_dict[metric](c2) - self.metrics_functions_dict[metric](c1, c2) # TODO: add to metrics dictionary
df_dict[colname] = delta_c2_c1
df = pd.DataFrame(df_dict) # TODO: order columns
return df
def generate_db(self):
db = pd.DataFrame()
df_dict = {}
for metric in self.analysed_metrics:
for t in self.trials:
values_dict = {}
events_list_dict = {}
for c1, c2 in t.stimulus.condition_pairs:
val_c1 = t.get_frequency(c1, c2)
val_c2 = t.get_frequency(c2)
values_dict.setdefault(c1, val_c1)
values_dict.setdefault(c2, val_c2)
event_list_c1 = t.get_events_in_period(c1, c2, relative=True).events
event_list_c2 = t.get_events_in_period(c2, None, relative=True).events
events_list_dict.setdefault('{}_event_times'.format(c1), event_list_c1)
events_list_dict.setdefault('{}_event_times'.format(c2), event_list_c2)
n_datapoints = len(values_dict.keys())
df_dict['trial_id'] = [t.idx] * n_datapoints
df_dict['cell_id'] = [self.cell.id] * n_datapoints
df_dict['within_stimulus_condition'] = list(values_dict.keys())
df_dict['metric'] = [metric] * n_datapoints
df_dict['values'] = list(values_dict.values())
df_dict['event_locs'] = list(events_list_dict.values())
df_dict['experiment'] = [t.bonsai_data()['Condition']] * n_datapoints
for k, v in t.bonsai_data().items(): # TODO: convert bonsai naming to snake case
ignore_keys = ['Experiment', 'Timestamp', 'Condition']
if k in ignore_keys:
continue
if not isinstance(v, str) and np.isnan(v):
continue
df_dict['between_stimuli_condition_metric'] = [k] * n_datapoints
df_dict['between_stimuli_condition'] = [v] * n_datapoints
                    df = pd.DataFrame(df_dict)
"""
A collection of Algos used to create Strategy logic.
"""
from __future__ import division
import abc
import random
import re
import numpy as np
import pandas as pd
import sklearn.covariance
from future.utils import iteritems
import bt
from bt.core import Algo, AlgoStack, SecurityBase, is_zero
def run_always(f):
"""
Run always decorator to be used with Algo
to ensure stack runs the decorated Algo
on each pass, regardless of failures in the stack.
"""
f.run_always = True
return f
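# Illustrative sketch (added for clarity, not part of the original module): the
# decorator above is intended for function-style algos that must fire on every
# pass (e.g. logging or bookkeeping), even after an earlier algo in the stack has
# returned False. The algo below is hypothetical.
@run_always
def _example_log_date(target):
    # pure side-effect algo; always reports success so it never halts the stack
    print(target.now)
    return True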
class PrintDate(Algo):
"""
This Algo simply print's the current date.
Can be useful for debugging purposes.
"""
def __call__(self, target):
print(target.now)
return True
class PrintTempData(Algo):
"""
This Algo prints the temp data.
Useful for debugging.
Args:
* fmt_string (str): A string that will later be formatted with the
target's temp dict. Therefore, you should provide
what you want to examine within curly braces ( { } )
"""
def __init__(self, fmt_string=None):
super(PrintTempData, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
if self.fmt_string:
print(self.fmt_string.format(**target.temp))
else:
print(target.temp)
return True
class PrintInfo(Algo):
"""
Prints out info associated with the target strategy. Useful for debugging
purposes.
Args:
* fmt_string (str): A string that will later be formatted with the
target object's __dict__ attribute. Therefore, you should provide
what you want to examine within curly braces ( { } )
Ex:
PrintInfo('Strategy {name} : {now}')
This will print out the name and the date (now) on each call.
Basically, you provide a string that will be formatted with target.__dict__
"""
def __init__(self, fmt_string="{name} {now}"):
super(PrintInfo, self).__init__()
self.fmt_string = fmt_string
def __call__(self, target):
print(self.fmt_string.format(**target.__dict__))
return True
class Debug(Algo):
"""
Utility Algo that calls pdb.set_trace when triggered.
In the debug session, 'target' is available and can be examined through the
StrategyBase interface.
"""
def __call__(self, target):
import pdb
pdb.set_trace()
return True
class RunOnce(Algo):
"""
Returns True on first run then returns False.
Args:
* run_on_first_call: bool which determines if it runs the first time the algo is called
As the name says, the algo only runs once. Useful in situations
where we want to run the logic once (buy and hold for example).
"""
def __init__(self):
super(RunOnce, self).__init__()
self.has_run = False
def __call__(self, target):
# if it hasn't run then we will
# run it and set flag
if not self.has_run:
self.has_run = True
return True
# return false to stop future execution
return False
class RunPeriod(Algo):
def __init__(
self, run_on_first_date=True, run_on_end_of_period=False, run_on_last_date=False
):
super(RunPeriod, self).__init__()
self._run_on_first_date = run_on_first_date
self._run_on_end_of_period = run_on_end_of_period
self._run_on_last_date = run_on_last_date
def __call__(self, target):
# get last date
now = target.now
# if none nothing to do - return false
if now is None:
return False
# not a known date in our universe
if now not in target.data.index:
return False
# get index of the current date
index = target.data.index.get_loc(target.now)
result = False
# index 0 is a date added by the Backtest Constructor
if index == 0:
return False
# first date
if index == 1:
if self._run_on_first_date:
result = True
# last date
elif index == (len(target.data.index) - 1):
if self._run_on_last_date:
result = True
else:
# create pandas.Timestamp for useful .week,.quarter properties
now = pd.Timestamp(now)
index_offset = -1
if self._run_on_end_of_period:
index_offset = 1
date_to_compare = target.data.index[index + index_offset]
date_to_compare = pd.Timestamp(date_to_compare)
result = self.compare_dates(now, date_to_compare)
return result
@abc.abstractmethod
def compare_dates(self, now, date_to_compare):
raise (NotImplementedError("RunPeriod Algo is an abstract class!"))
class RunDaily(RunPeriod):
"""
Returns True on day change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's day has changed
compared to the last(or next if run_on_end_of_period) date, if not returns False.
Useful for daily rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.date() != date_to_compare.date():
return True
return False
class RunWeekly(RunPeriod):
"""
Returns True on week change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's week has changed
since relative to the last(or next) date, if not returns False. Useful for
weekly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.week != date_to_compare.week:
return True
return False
class RunMonthly(RunPeriod):
"""
Returns True on month change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's month has changed
since relative to the last(or next) date, if not returns False. Useful for
monthly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.month != date_to_compare.month:
return True
return False
class RunQuarterly(RunPeriod):
"""
Returns True on quarter change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's quarter has changed
since relative to the last(or next) date, if not returns False. Useful for
quarterly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year or now.quarter != date_to_compare.quarter:
return True
return False
class RunYearly(RunPeriod):
"""
Returns True on year change.
Args:
* run_on_first_date (bool): determines if it runs the first time the algo is called
* run_on_end_of_period (bool): determines if it should run at the end of the period
or the beginning
* run_on_last_date (bool): determines if it runs on the last time the algo is called
Returns True if the target.now's year has changed
since relative to the last(or next) date, if not returns False. Useful for
yearly rebalancing strategies.
"""
def compare_dates(self, now, date_to_compare):
if now.year != date_to_compare.year:
return True
return False
class RunOnDate(Algo):
"""
Returns True on a specific set of dates.
Args:
* dates (list): List of dates to run Algo on.
"""
def __init__(self, *dates):
"""
Args:
* dates (*args): A list of dates. Dates will be parsed
by pandas.to_datetime so pass anything that it can
parse. Typically, you will pass a string 'yyyy-mm-dd'.
"""
super(RunOnDate, self).__init__()
# parse dates and save
self.dates = [pd.to_datetime(d) for d in dates]
def __call__(self, target):
return target.now in self.dates
class RunAfterDate(Algo):
"""
Returns True after a date has passed
Args:
* date: Date after which to start trading
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, date):
"""
Args:
* date: Date after which to start trading
"""
super(RunAfterDate, self).__init__()
# parse dates and save
self.date = pd.to_datetime(date)
def __call__(self, target):
return target.now > self.date
class RunAfterDays(Algo):
"""
Returns True after a specific number of 'warmup' trading days have passed
Args:
* days (int): Number of trading days to wait before starting
Note:
This is useful for algos that rely on trailing averages where you
don't want to start trading until some amount of data has been built up
"""
def __init__(self, days):
"""
Args:
* days (int): Number of trading days to wait before starting
"""
super(RunAfterDays, self).__init__()
self.days = days
def __call__(self, target):
if self.days > 0:
self.days -= 1
return False
return True
class RunIfOutOfBounds(Algo):
"""
This algo returns true if any of the target weights deviate by an amount greater
than tolerance. For example, it will be run if the tolerance is set to 0.5 and
a security grows from a target weight of 0.2 to greater than 0.3.
A strategy where rebalancing is performed quarterly or whenever any
security's weight deviates by more than 20% could be implemented by:
Or([runQuarterlyAlgo,runIfOutOfBoundsAlgo(0.2)])
Args:
* tolerance (float): Allowed deviation of each security weight.
Requires:
* Weights
"""
def __init__(self, tolerance):
self.tolerance = float(tolerance)
super(RunIfOutOfBounds, self).__init__()
def __call__(self, target):
if "weights" not in target.temp:
return True
targets = target.temp["weights"]
for cname in target.children:
if cname in targets:
c = target.children[cname]
deviation = abs((c.weight - targets[cname]) / targets[cname])
if deviation > self.tolerance:
return True
if "cash" in target.temp:
cash_deviation = abs(
                (target.capital - target.value) / target.value - target.temp["cash"]
)
if cash_deviation > self.tolerance:
return True
return False
class RunEveryNPeriods(Algo):
"""
This algo runs every n periods.
Args:
* n (int): Run each n periods
* offset (int): Applies to the first run. If 0, this algo will run the
first time it is called.
This Algo can be useful for the following type of strategy:
Each month, select the top 5 performers. Hold them for 3 months.
You could then create 3 strategies with different offsets and create a
master strategy that would allocate equal amounts of capital to each.
"""
def __init__(self, n, offset=0):
super(RunEveryNPeriods, self).__init__()
self.n = n
self.offset = offset
self.idx = n - offset - 1
self.lcall = 0
def __call__(self, target):
# ignore multiple calls on same period
if self.lcall == target.now:
return False
else:
self.lcall = target.now
# run when idx == (n-1)
if self.idx == (self.n - 1):
self.idx = 0
return True
else:
self.idx += 1
return False
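# Illustrative sketch (added, not part of the original module): the staggered
# "pick monthly, hold for three months" pattern described in the docstring above.
# `select_top5_algos` is a placeholder list of selection algos (e.g. a total-return
# stat plus SelectN), and SelectAll/WeighEqually/Rebalance are the standard algos
# assumed to be available in this module.
def _example_staggered_tranches(select_top5_algos):
    # three child strategies, each rebalancing every 3 periods but offset by one
    # period so that one tranche turns over each month
    tranches = [
        bt.Strategy(
            "tranche_%d" % offset,
            [RunEveryNPeriods(3, offset=offset)] + list(select_top5_algos)
            + [WeighEqually(), Rebalance()],
        )
        for offset in range(3)
    ]
    # the parent strategy splits capital equally across the three tranches
    return bt.Strategy("staggered", [SelectAll(), WeighEqually(), Rebalance()], children=tranches)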
class SelectAll(Algo):
"""
Sets temp['selected'] with all securities (based on universe).
Selects all the securities and saves them in temp['selected'].
By default, SelectAll does not include securities that have no
data (nan) on current date or those whose price is zero or negative.
Args:
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(self, include_no_data=False, include_negative=False):
super(SelectAll, self).__init__()
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if self.include_no_data:
target.temp["selected"] = target.universe.columns
else:
universe = target.universe.loc[target.now].dropna()
if self.include_negative:
target.temp["selected"] = list(universe.index)
else:
target.temp["selected"] = list(universe[universe > 0].index)
return True
class SelectThese(Algo):
"""
Sets temp['selected'] with a set list of tickers.
Args:
* ticker (list): List of tickers to select.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(self, tickers, include_no_data=False, include_negative=False):
super(SelectThese, self).__init__()
self.tickers = tickers
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if self.include_no_data:
target.temp["selected"] = self.tickers
else:
universe = target.universe.loc[target.now, self.tickers].dropna()
if self.include_negative:
target.temp["selected"] = list(universe.index)
else:
target.temp["selected"] = list(universe[universe > 0].index)
return True
class SelectHasData(Algo):
"""
Sets temp['selected'] based on all items in universe that meet
data requirements.
This is a more advanced version of SelectAll. Useful for selecting
tickers that need a certain amount of data for future algos to run
properly.
For example, if we need the items with 3 months of data or more,
we could use this Algo with a lookback period of 3 months.
When providing a lookback period, it is also wise to provide a min_count.
This is basically the number of data points needed within the lookback
period for a series to be considered valid. For example, in our 3 month
lookback above, we might want to specify the min_count as being
57 -> a typical trading month has give or take 20 trading days. If we
factor in some holidays, we can use 57 or 58. It's really up to you.
If you don't specify min_count, min_count will default to ffn's
get_num_days_required.
Args:
* lookback (DateOffset): A DateOffset that determines the lookback
period.
* min_count (int): Minimum number of days required for a series to be
considered valid. If not provided, ffn's get_num_days_required is
used to estimate the number of points required.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(
self,
lookback=pd.DateOffset(months=3),
min_count=None,
include_no_data=False,
include_negative=False,
):
super(SelectHasData, self).__init__()
self.lookback = lookback
if min_count is None:
min_count = bt.ffn.get_num_days_required(lookback)
self.min_count = min_count
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if "selected" in target.temp:
selected = target.temp["selected"]
else:
selected = target.universe.columns
filt = target.universe.loc[target.now - self.lookback :, selected]
cnt = filt.count()
cnt = cnt[cnt >= self.min_count]
if not self.include_no_data:
cnt = cnt[~target.universe.loc[target.now, selected].isnull()]
if not self.include_negative:
cnt = cnt[target.universe.loc[target.now, selected] > 0]
target.temp["selected"] = list(cnt.index)
return True
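# Illustrative usage (added): require roughly three months of history with at least
# 57 valid observations, as discussed in the docstring above, e.g.
# has_enough_history = SelectHasData(lookback=pd.DateOffset(months=3), min_count=57)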
class SelectN(Algo):
"""
Sets temp['selected'] based on ranking temp['stat'].
    Selects the top or bottom N items based on temp['stat'].
This is usually some kind of metric that will be computed in a
previous Algo and will be used for ranking purposes. Can select
top or bottom N based on sort_descending parameter.
Args:
* n (int): select top n items.
* sort_descending (bool): Should the stat be sorted in descending order
before selecting the first n items?
* all_or_none (bool): If true, only populates temp['selected'] if we
have n items. If we have less than n, then temp['selected'] = [].
* filter_selected (bool): If True, will only select from the existing
'selected' list.
Sets:
* selected
Requires:
* stat
"""
def __init__(
self, n, sort_descending=True, all_or_none=False, filter_selected=False
):
super(SelectN, self).__init__()
if n < 0:
raise ValueError("n cannot be negative")
self.n = n
self.ascending = not sort_descending
self.all_or_none = all_or_none
self.filter_selected = filter_selected
def __call__(self, target):
stat = target.temp["stat"].dropna()
if self.filter_selected and "selected" in target.temp:
stat = stat.loc[stat.index.intersection(target.temp["selected"])]
stat.sort_values(ascending=self.ascending, inplace=True)
# handle percent n
keep_n = self.n
if self.n < 1:
keep_n = int(self.n * len(stat))
sel = list(stat[:keep_n].index)
if self.all_or_none and len(sel) < keep_n:
sel = []
target.temp["selected"] = sel
return True
class SelectMomentum(AlgoStack):
"""
Sets temp['selected'] based on a simple momentum filter.
Selects the top n securities based on the total return over
a given lookback period. This is just a wrapper around an
AlgoStack with two algos: StatTotalReturn and SelectN.
Note, that SelectAll() or similar should be called before
SelectMomentum(), as StatTotalReturn uses values of temp['selected']
Args:
* n (int): select first N elements
* lookback (DateOffset): lookback period for total return
calculation
* lag (DateOffset): Lag interval for total return calculation
* sort_descending (bool): Sort descending (highest return is best)
* all_or_none (bool): If true, only populates temp['selected'] if we
have n items. If we have less than n, then temp['selected'] = [].
Sets:
* selected
Requires:
* selected
"""
def __init__(
self,
n,
lookback=pd.DateOffset(months=3),
lag=pd.DateOffset(days=0),
sort_descending=True,
all_or_none=False,
):
super(SelectMomentum, self).__init__(
StatTotalReturn(lookback=lookback, lag=lag),
SelectN(n=n, sort_descending=sort_descending, all_or_none=all_or_none),
)
class SelectWhere(Algo):
"""
Selects securities based on an indicator DataFrame.
Selects securities where the value is True on the current date
(target.now) only if current date is present in signal DataFrame.
For example, this could be the result of a pandas boolean comparison such
as data > 100.
Args:
* signal (str|DataFrame): Boolean DataFrame containing selection logic.
If a string is passed, frame is accessed using target.get_data
This is the preferred way of using the algo.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
"""
def __init__(self, signal, include_no_data=False, include_negative=False):
super(SelectWhere, self).__init__()
if isinstance(signal, pd.DataFrame):
self.signal_name = None
self.signal = signal
else:
self.signal_name = signal
self.signal = None
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
# get signal Series at target.now
if self.signal_name is None:
signal = self.signal
else:
signal = target.get_data(self.signal_name)
if target.now in signal.index:
sig = signal.loc[target.now]
# get tickers where True
# selected = sig.index[sig]
selected = sig[sig == True].index # noqa: E712
# save as list
if not self.include_no_data:
universe = target.universe.loc[target.now, list(selected)].dropna()
if self.include_negative:
selected = list(universe.index)
else:
selected = list(universe[universe > 0].index)
target.temp["selected"] = list(selected)
return True
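# Illustrative sketch (added, not part of the original module): the boolean-signal
# pattern mentioned in the SelectWhere docstring, keeping names that trade above
# their 50-day moving average. `prices` is a hypothetical price DataFrame and
# WeighEqually/Rebalance are the standard algos assumed to be available in this module.
def _example_above_sma_strategy(prices):
    # boolean frame: True where price is above its 50-day simple moving average
    signal = prices > prices.rolling(50).mean()
    return bt.Strategy(
        "above_sma_50",
        [SelectWhere(signal), WeighEqually(), Rebalance()],
    )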
class SelectRandomly(AlgoStack):
"""
Sets temp['selected'] based on a random subset of
the items currently in temp['selected'].
Selects n random elements from the list stored in temp['selected'].
This is useful for benchmarking against a strategy where we believe
the selection algorithm is adding value.
For example, if we are testing a momentum strategy and we want to see if
selecting securities based on momentum is better than just selecting
securities randomly, we could use this Algo to create a random Strategy
used for random benchmarking.
Note:
        Another selection algorithm should be used prior to this Algo to
populate temp['selected']. This will typically be SelectAll.
Args:
* n (int): Select N elements randomly.
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Sets:
* selected
Requires:
* selected
"""
def __init__(self, n=None, include_no_data=False, include_negative=False):
super(SelectRandomly, self).__init__()
self.n = n
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
if "selected" in target.temp:
sel = target.temp["selected"]
else:
sel = list(target.universe.columns)
if not self.include_no_data:
universe = target.universe.loc[target.now, sel].dropna()
if self.include_negative:
sel = list(universe.index)
else:
sel = list(universe[universe > 0].index)
if self.n is not None:
n = self.n if self.n < len(sel) else len(sel)
sel = random.sample(sel, int(n))
target.temp["selected"] = sel
return True
class SelectRegex(Algo):
"""
Sets temp['selected'] based on a regex on their names.
Useful when working with a large universe of different kinds of securities
Args:
* regex (str): regular expression on the name
Sets:
* selected
Requires:
* selected
"""
def __init__(self, regex):
super(SelectRegex, self).__init__()
self.regex = re.compile(regex)
def __call__(self, target):
selected = target.temp["selected"]
selected = [s for s in selected if self.regex.search(s)]
target.temp["selected"] = selected
return True
class ResolveOnTheRun(Algo):
"""
Looks at securities set in temp['selected'] and searches for names that
match the names of "aliases" for on-the-run securities in the provided
data. Then replaces the alias with the name of the underlying security
appropriate for the given date, and sets it back on temp['selected']
Args:
* on_the_run (str): Name of a Data frame with
- columns set to "on the run" ticker names
- index set to the timeline for the backtest
- values are the actual security name to use for the given date
* include_no_data (bool): Include securities that do not have data?
* include_negative (bool): Include securities that have negative
or zero prices?
Requires:
* selected
Sets:
* selected
"""
def __init__(self, on_the_run, include_no_data=False, include_negative=False):
super(ResolveOnTheRun, self).__init__()
self.on_the_run = on_the_run
self.include_no_data = include_no_data
self.include_negative = include_negative
def __call__(self, target):
# Resolve real tickers based on OTR
on_the_run = target.get_data(self.on_the_run)
selected = target.temp["selected"]
aliases = [s for s in selected if s in on_the_run.columns]
resolved = on_the_run.loc[target.now, aliases].tolist()
if not self.include_no_data:
universe = target.universe.loc[target.now, resolved].dropna()
if self.include_negative:
resolved = list(universe.index)
else:
resolved = list(universe[universe > 0].index)
target.temp["selected"] = resolved + [
s for s in selected if s not in on_the_run.columns
]
return True
class SetStat(Algo):
"""
Sets temp['stat'] for use by downstream algos (such as SelectN).
Args:
* stat (str|DataFrame): A dataframe of the same dimension as target.universe
If a string is passed, frame is accessed using target.get_data
This is the preferred way of using the algo.
Sets:
* stat
"""
def __init__(self, stat):
if isinstance(stat, pd.DataFrame):
self.stat_name = None
self.stat = stat
else:
self.stat_name = stat
self.stat = None
def __call__(self, target):
if self.stat_name is None:
stat = self.stat
else:
stat = target.get_data(self.stat_name)
target.temp["stat"] = stat.loc[target.now]
return True
class StatTotalReturn(Algo):
"""
Sets temp['stat'] with total returns over a given period.
Sets the 'stat' based on the total return of each element in
temp['selected'] over a given lookback period. The total return
is determined by ffn's calc_total_return.
Args:
* lookback (DateOffset): lookback period.
* lag (DateOffset): Lag interval. Total return is calculated in
            the interval [now - lookback - lag, now - lag]
Sets:
* stat
Requires:
* selected
"""
    def __init__(self, lookback=pd.DateOffset(months=3), lag=pd.DateOffset(days=0)):
        super(StatTotalReturn, self).__init__()
        self.lookback = lookback
        self.lag = lag
import streamlit as st
import itertools
import pandas as pd
import numpy as np
from scipy.stats import pearsonr
from scipy import stats
import matplotlib.pyplot as plt
import plotly.express as px
import matplotlib
import seaborn as sns
from sklearn.pipeline import Pipeline
from wordcloud import WordCloud
from sklearn.decomposition import PCA
from sklearn.preprocessing import LabelEncoder
from sklearn import preprocessing
from scipy.stats import chi2_contingency,chi2
import statsmodels.api as sm
from scipy.stats import spearmanr
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.pipeline import Pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from xgboost import XGBClassifier
from scipy.stats import anderson
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from PIL import Image
image = Image.open('cover.jpg')
matplotlib.use("Agg")
class DataFrame_Loader():
def __init__(self):
print("Loadind DataFrame")
def read_csv(self,data):
self.df = pd.read_csv(data)
return self.df
class EDA_Dataframe_Analysis():
def __init__(self):
print("General_EDA object created")
def show_dtypes(self,x):
return x.dtypes
def show_columns(self,x):
return x.columns
def Show_Missing(self,x):
return x.isna().sum()
def Show_Missing1(self,x):
return x.isna().sum()
def Show_Missing2(self,x):
return x.isna().sum()
def show_hist(self,x):
return x.hist()
def Tabulation(self,x):
table = pd.DataFrame(x.dtypes,columns=['dtypes'])
table1 =pd.DataFrame(x.columns,columns=['Names'])
table = table.reset_index()
table= table.rename(columns={'index':'Name'})
table['No of Missing'] = x.isnull().sum().values
table['No of Uniques'] = x.nunique().values
table['Percent of Missing'] = ((x.isnull().sum().values)/ (x.shape[0])) *100
table['First Observation'] = x.loc[0].values
table['Second Observation'] = x.loc[1].values
table['Third Observation'] = x.loc[2].values
for name in table['Name'].value_counts().index:
table.loc[table['Name'] == name, 'Entropy'] = round(stats.entropy(x[name].value_counts(normalize=True), base=2),2)
return table
def Numerical_variables(self,x):
Num_var = [var for var in x.columns if x[var].dtypes!="object"]
Num_var = x[Num_var]
return Num_var
def categorical_variables(self,x):
cat_var = [var for var in x.columns if x[var].dtypes=="object"]
cat_var = x[cat_var]
return cat_var
def impute(self,x):
df=x.dropna()
return df
def imputee(self,x):
df=x.dropna()
return df
def Show_pearsonr(self,x,y):
result = pearsonr(x,y)
return result
def Show_spearmanr(self,x,y):
result = spearmanr(x,y)
return result
def plotly(self,a,x,y):
fig = px.scatter(a, x=x, y=y)
fig.update_traces(marker=dict(size=10,
line=dict(width=2,
color='DarkSlateGrey')),
selector=dict(mode='markers'))
fig.show()
def show_displot(self,x):
plt.figure(1)
plt.subplot(121)
sns.distplot(x)
plt.subplot(122)
x.plot.box(figsize=(16,5))
plt.show()
def Show_DisPlot(self,x):
plt.style.use('fivethirtyeight')
plt.figure(figsize=(12,7))
return sns.distplot(x, bins = 25)
def Show_CountPlot(self,x):
fig_dims = (18, 8)
fig, ax = plt.subplots(figsize=fig_dims)
return sns.countplot(x,ax=ax)
def plotly_histogram(self,a,x,y):
fig = px.histogram(a, x=x, y=y)
fig.update_traces(marker=dict(size=10,
line=dict(width=2,
color='DarkSlateGrey')),
selector=dict(mode='markers'))
fig.show()
def plotly_violin(self,a,x,y):
        fig = px.violin(a, x=x, y=y)
fig.update_traces(marker=dict(size=10,
line=dict(width=2,
color='DarkSlateGrey')),
selector=dict(mode='markers'))
fig.show()
def Show_PairPlot(self,x):
return sns.pairplot(x)
def Show_HeatMap(self,x):
f,ax = plt.subplots(figsize=(15, 15))
return sns.heatmap(x.corr(),annot=True,ax=ax);
def wordcloud(self,x):
wordcloud = WordCloud(width = 1000, height = 500).generate(" ".join(x))
plt.imshow(wordcloud)
plt.axis("off")
return wordcloud
def label(self,x):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x=le.fit_transform(x)
return x
def label1(self,x):
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x=le.fit_transform(x)
return x
def concat(self,x,y,z,axis):
return pd.concat([x,y,z],axis)
def dummy(self,x):
return pd.get_dummies(x)
def qqplot(self,x):
return sm.qqplot(x, line ='45')
def Anderson_test(self,a):
return anderson(a)
def PCA(self,x):
pca =PCA(n_components=8)
principlecomponents = pca.fit_transform(x)
principledf = pd.DataFrame(data = principlecomponents)
return principledf
def outlier(self,x):
high=0
q1 = x.quantile(.25)
q3 = x.quantile(.75)
iqr = q3-q1
low = q1-1.5*iqr
high += q3+1.5*iqr
outlier = (x.loc[(x < low) | (x > high)])
return(outlier)
def check_cat_relation(self,x,y,confidence_interval):
cross_table = pd.crosstab(x,y,margins=True)
stat,p,dof,expected = chi2_contingency(cross_table)
print("Chi_Square Value = {0}".format(stat))
print("P-Value = {0}".format(p))
alpha = 1 - confidence_interval
        if p > alpha:
            print(">> Accepting Null Hypothesis <<")
            print("There Is No Relationship Between Two Variables")
        else:
            print(">> Rejecting Null Hypothesis <<")
            print("There Is A Significance Relationship Between Two Variables")
        return p, alpha
class Attribute_Information():
def __init__(self):
print("Attribute Information object created")
def Column_information(self,data):
data_info = pd.DataFrame(
columns=['No of observation',
'No of Variables',
'No of Numerical Variables',
'No of Factor Variables',
'No of Categorical Variables',
'No of Logical Variables',
'No of Date Variables',
'No of zero variance variables'])
data_info.loc[0,'No of observation'] = data.shape[0]
data_info.loc[0,'No of Variables'] = data.shape[1]
data_info.loc[0,'No of Numerical Variables'] = data._get_numeric_data().shape[1]
data_info.loc[0,'No of Factor Variables'] = data.select_dtypes(include='category').shape[1]
data_info.loc[0,'No of Logical Variables'] = data.select_dtypes(include='bool').shape[1]
data_info.loc[0,'No of Categorical Variables'] = data.select_dtypes(include='object').shape[1]
data_info.loc[0,'No of Date Variables'] = data.select_dtypes(include='datetime64').shape[1]
data_info.loc[0,'No of zero variance variables'] = data.loc[:,data.apply(pd.Series.nunique)==1].shape[1]
data_info =data_info.transpose()
data_info.columns=['value']
data_info['value'] = data_info['value'].astype(int)
return data_info
def __get_missing_values(self,data):
#Getting sum of missing values for each feature
missing_values = data.isnull().sum()
#Feature missing values are sorted from few to many
missing_values.sort_values(ascending=False, inplace=True)
#Returning missing values
return missing_values
def __iqr(self,x):
return x.quantile(q=0.75) - x.quantile(q=0.25)
def __outlier_count(self,x):
upper_out = x.quantile(q=0.75) + 1.5 * self.__iqr(x)
lower_out = x.quantile(q=0.25) - 1.5 * self.__iqr(x)
return len(x[x > upper_out]) + len(x[x < lower_out])
def num_count_summary(self,df):
df_num = df._get_numeric_data()
data_info_num = pd.DataFrame()
i=0
for c in df_num.columns:
data_info_num.loc[c,'Negative values count']= df_num[df_num[c]<0].shape[0]
data_info_num.loc[c,'Positive values count']= df_num[df_num[c]>0].shape[0]
data_info_num.loc[c,'Zero count']= df_num[df_num[c]==0].shape[0]
data_info_num.loc[c,'Unique count']= len(df_num[c].unique())
data_info_num.loc[c,'Negative Infinity count']= df_num[df_num[c]== -np.inf].shape[0]
data_info_num.loc[c,'Positive Infinity count']= df_num[df_num[c]== np.inf].shape[0]
data_info_num.loc[c,'Missing Percentage']= df_num[df_num[c].isnull()].shape[0]/ df_num.shape[0]
data_info_num.loc[c,'Count of outliers']= self.__outlier_count(df_num[c])
i = i+1
return data_info_num
def statistical_summary(self,df):
df_num = df._get_numeric_data()
data_stat_num = pd.DataFrame()
try:
data_stat_num = pd.concat([df_num.describe().transpose(),
pd.DataFrame(df_num.quantile(q=0.10)),
pd.DataFrame(df_num.quantile(q=0.90)),
pd.DataFrame(df_num.quantile(q=0.95))],axis=1)
data_stat_num.columns = ['count','mean','std','min','25%','50%','75%','max','10%','90%','95%']
except:
pass
return data_stat_num
class Data_Base_Modelling():
def __init__(self):
print("General_EDA object created")
def Label_Encoding(self,x):
category_col =[var for var in x.columns if x[var].dtypes =="object"]
labelEncoder = preprocessing.LabelEncoder()
mapping_dict={}
for col in category_col:
x[col] = labelEncoder.fit_transform(x[col])
le_name_mapping = dict(zip(labelEncoder.classes_, labelEncoder.transform(labelEncoder.classes_)))
mapping_dict[col]=le_name_mapping
return mapping_dict
def IMpupter(self,x):
imp_mean = IterativeImputer(random_state=0)
x = imp_mean.fit_transform(x)
x = pd.DataFrame(x)
return x
def Logistic_Regression(self,x_train,y_train,x_test,y_test):
pipeline_dt=Pipeline([('dt_classifier',LogisticRegression())])
pipelines = [pipeline_dt]
best_accuracy=0.0
best_classifier=0
best_pipeline=""
pipe_dict = { 0: 'Decision Tree'}
for pipe in pipelines:
pipe.fit(x_train, y_train)
for i,model in enumerate(pipelines):
return (classification_report(y_test,model.predict(x_test)))
def Decision_Tree(self,x_train,y_train,x_test,y_test):
pipeline_dt=Pipeline([('dt_classifier',DecisionTreeClassifier())])
pipelines = [pipeline_dt]
best_accuracy=0.0
best_classifier=0
best_pipeline=""
pipe_dict = { 0: 'Decision Tree'}
for pipe in pipelines:
pipe.fit(x_train, y_train)
for i,model in enumerate(pipelines):
return (classification_report(y_test,model.predict(x_test)))
def RandomForest(self,x_train,y_train,x_test,y_test):
pipeline_dt=Pipeline([('dt_classifier',RandomForestClassifier())])
pipelines = [pipeline_dt]
best_accuracy=0.0
best_classifier=0
best_pipeline=""
pipe_dict = { 0: 'Decision Tree'}
for pipe in pipelines:
pipe.fit(x_train, y_train)
for i,model in enumerate(pipelines):
return (classification_report(y_test,model.predict(x_test)))
def naive_bayes(self,x_train,y_train,x_test,y_test):
pipeline_dt=Pipeline([('dt_classifier',GaussianNB())])
pipelines = [pipeline_dt]
best_accuracy=0.0
best_classifier=0
best_pipeline=""
pipe_dict = { 0: 'Decision Tree'}
for pipe in pipelines:
pipe.fit(x_train, y_train)
for i,model in enumerate(pipelines):
return (classification_report(y_test,model.predict(x_test)))
def XGb_classifier(self,x_train,y_train,x_test,y_test):
pipeline_dt=Pipeline([('dt_classifier',XGBClassifier())])
pipelines = [pipeline_dt]
best_accuracy=0.0
best_classifier=0
best_pipeline=""
pipe_dict = { 0: 'Decision Tree'}
for pipe in pipelines:
pipe.fit(x_train, y_train)
for i,model in enumerate(pipelines):
return (classification_report(y_test,model.predict(x_test)))
st.image(image, use_column_width=True)
def main():
st.title("Machine Learning Application for Automated EDA")
st.info("This Web Application is created and maintained by *_DHEERAJ_ _KUMAR_ _K_*")
"""https://github.com/DheerajKumar97"""
activities = ["General EDA","EDA For Linear Models","Model Building for Classification Problem"]
choice = st.sidebar.selectbox("Select Activities",activities)
if choice == 'General EDA':
st.subheader("Exploratory Data Analysis")
data = st.file_uploader("Upload a Dataset", type=["csv"])
if data is not None:
df = load.read_csv(data)
st.dataframe(df.head())
st.success("Data Frame Loaded successfully")
if st.checkbox("Show dtypes"):
st.write(dataframe.show_dtypes(df))
if st.checkbox("Show Columns"):
st.write(dataframe.show_columns(df))
if st.checkbox("Show Missing"):
st.write(dataframe.Show_Missing1(df))
if st.checkbox("column information"):
st.write(info.Column_information(df))
if st.checkbox("Aggregation Tabulation"):
st.write(dataframe.Tabulation(df))
if st.checkbox("Num Count Summary"):
st.write(info.num_count_summary(df))
if st.checkbox("Statistical Summary"):
st.write(info.statistical_summary(df))
# if st.checkbox("Show Selected Columns"):
# selected_columns = st.multiselect("Select Columns",all_columns)
# new_df = df[selected_columns]
# st.dataframe(new_df)
if st.checkbox("Show Selected Columns"):
selected_columns = st.multiselect("Select Columns",dataframe.show_columns(df))
new_df = df[selected_columns]
st.dataframe(new_df)
if st.checkbox("Numerical Variables"):
num_df = dataframe.Numerical_variables(df)
                numer_df = pd.DataFrame(num_df)
import time
import datetime
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.cbook as cbook
import pandas as pd
howFarBack = 10000
def rsiFunc(prices, n=14):
deltas = np.diff(prices)
seed = deltas[:n+1]
up = seed[seed>=0].sum()/n
down = -seed[seed<0].sum()/n
rs = up/down
rsi = np.zeros_like(prices)
rsi[:n] = 100. - 100./(1.+rs)
#print(rsi[:n])
for i in range(n, len(prices)-1):
delta = deltas[i-1]
#print('delta:',delta)
if delta > 0:
upval = delta
downval = 0.
else:
upval = 0.
downval =-delta
up = (up*(n-1)+upval)/n
down = (down*(n-1)+downval)/n
rs = up/down
rsi[i] = 100. - 100./(1.+rs)
return rsi
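# Illustrative usage (added; the price series here is synthetic, not the Bitfinex
# CSV loaded below):
# closes = np.cumsum(np.random.randn(500)) + 7000.0
# rsi = rsiFunc(closes, n=14)  # Wilder-smoothed 14-period RSI, values between 0 and 100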
def ExpMovingAverage(values, window):
weights = np.exp(np.linspace(-1.,0., window))
weights /= weights.sum()
a = np.convolve(values, weights, mode='full')[:len(values)]
a[:window] = a[window]
return a
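# Illustrative usage (added): ema12 = ExpMovingAverage(closes, 12). Note that the
# first `window` samples are back-filled with a[window] so the average does not
# ramp up from zero at the start of the series.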
def chartData():
Datear = []
Pricear = []
Volumear = []
    df = pd.read_csv('Data\BitfinexBTCUSD.csv', parse_dates = True, names=['Date', 'Open', 'High', 'Low', 'Close', 'Volume'])
import sys
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
import pylab
from matplotlib import colors, colorbar
from scipy import cluster
#import rpy2
#import rpy2.robjects as robjects
#from rpy2.robjects.packages import importr
from tqdm import tqdm
#from rpy2.robjects import r, numpy2ri
import time
import yaml
import networkx as nx
import argparse
sys.setrecursionlimit(10000)
from . import lineageGroup_utils as lg_utils
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# NOTE: NEED PANDAS >= 0.22.0
def create_output_dir(outputdir = None):
"""
A simple function to create an output directory to store important logging information,
as well as important figures for qc
"""
if outputdir is None:
i = 1
outputdir = "output" + str(i)
while os.path.exists(os.path.dirname(outputdir)):
i += 1
outputdir = "output" + str(i)
if not os.path.exists(outputdir):
os.makedirs(outputdir)
with open(outputdir + "/lglog.txt", "w") as f:
f.write("LINEAGE GROUP OUTPUT LOG:\n")
return outputdir
def findTopLG(PIVOT_in, iteration, outputdir, min_intbc_prop = 0.2, kinship_thresh=0.2):
# calculate sum of observed intBCs, identify top intBC
intBC_sums = PIVOT_in.sum(0).sort_values(ascending=False)
ordered_intBCs = intBC_sums.index.tolist()
intBC_top = intBC_sums.index[0]
# take subset of PIVOT table that contain cells that have the top intBC
subPIVOT_in = PIVOT_in[PIVOT_in[intBC_top]>0]
subPIVOT_in_sums = subPIVOT_in.sum(0)
ordered_intBCs2 = subPIVOT_in_sums.sort_values(ascending=False).index.tolist()
subPIVOT_in = subPIVOT_in[ordered_intBCs2]
# binarize
subPIVOT_in[subPIVOT_in>0]=1
# Define intBC set
subPIVOT_in_sums2 = subPIVOT_in.sum(0)
total = subPIVOT_in_sums2[intBC_top]
intBC_sums_filt = subPIVOT_in_sums2[subPIVOT_in_sums2>=min_intbc_prop*total]
# Reduce PIV to only intBCs considered in set
intBC_set = intBC_sums_filt.index.tolist()
PIV_set = PIVOT_in.iloc[:,PIVOT_in.columns.isin(intBC_set)]
# Calculate fraction of UMIs within intBC_set ("kinship") for each cell in PIV_set
f_inset = PIV_set.sum(axis=1)
# define set of cells with good kinship
f_inset_filt = f_inset[f_inset>=kinship_thresh]
LG_cells = f_inset_filt.index.tolist()
# Return updated PIV with LG_cells removed
PIV_noLG = PIVOT_in.iloc[~PIVOT_in.index.isin(LG_cells),:]
# Return PIV with LG_cells assigned
PIV_LG = PIVOT_in.iloc[PIVOT_in.index.isin(LG_cells),:]
PIV_LG["lineageGrp"]= iteration+1
with open(outputdir + "/lglog.txt", "a") as f:
# print statements
f.write("LG"+str(iteration+1)+" Assignment: " + str(PIV_LG.shape[0]) + " cells assigned\n")
# Plot distribution of kinship scores
h4 = plt.figure(figsize=(15,10))
ax4 = plt.hist(f_inset, bins=49, alpha=0.5, histtype='step')
yax4 = plt.yscale('log', basey=10)
plt.savefig(outputdir + "/kinship_scores.png")
return PIV_LG, PIV_noLG, intBC_set
def iterative_lg_assign(pivot_in, min_clust_size, outputdir, min_intbc_thresh=0.2, kinship_thresh=0.2):
## Run LG Assign function
# initiate output variables
PIV_assigned = pd.DataFrame()
master_intBC_list = []
# Loop for iteratively assigning LGs
prev_clust_size = np.inf
i = 0
while prev_clust_size > min_clust_size:
# run function
PIV_outs = findTopLG(pivot_in, i, outputdir, min_intbc_prop=min_intbc_thresh, kinship_thresh=kinship_thresh)
# parse returned objects
PIV_LG = PIV_outs[0]
PIV_noLG = PIV_outs[1]
intBC_set_i = PIV_outs[2]
# append returned objects to output variables
PIV_assigned = PIV_assigned.append(PIV_LG)
master_intBC_list.append(intBC_set_i)
# update PIVOT-in
pivot_in = PIV_noLG
prev_clust_size = PIV_LG.shape[0]
i += 1
return PIV_assigned, master_intBC_list
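# Illustrative usage (added; the pivot construction mirrors collectAlleles below and
# min_clust_size=20 is an arbitrary placeholder):
# piv = pd.pivot_table(at, index="cellBC", columns="intBC", values="UMI", aggfunc="count").fillna(0)
# piv_assigned, master_intbc_sets = iterative_lg_assign(piv, 20, outputdir)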
def get_lg_group(df, piv, curr_LG):
lg_group = df[df["lineageGrp"] == curr_LG]
cells = np.unique(lg_group["cellBC"])
lg_pivot = piv.loc[cells]
props = lg_pivot.apply(lambda x: pylab.sum(x) / len(x)).to_frame().reset_index()
props.columns = ["iBC", "prop"]
props = props.sort_values(by="prop", ascending=False)
props.index = props["iBC"]
return lg_group, props
def rand_cmap(nlabels, type='bright', first_color_black=True, last_color_black=False, verbose=True):
"""
Creates a random colormap to be used together with matplotlib. Useful for segmentation tasks
:param nlabels: Number of labels (size of colormap)
:param type: 'bright' for strong colors, 'soft' for pastel colors
:param first_color_black: Option to use first color as black, True or False
:param last_color_black: Option to use last color as black, True or False
:param verbose: Prints the number of labels and shows the colormap. True or False
:return: colormap for matplotlib
"""
from matplotlib.colors import LinearSegmentedColormap
import colorsys
if type not in ('bright', 'soft'):
print ('Please choose "bright" or "soft" for type')
return
if verbose:
print('Number of labels: ' + str(nlabels))
# Generate color map for bright colors, based on hsv
if type == 'bright':
randHSVcolors = [(np.random.uniform(low=0.0, high=1),
np.random.uniform(low=0.2, high=1),
np.random.uniform(low=0.9, high=1)) for i in range(nlabels)]
# Convert HSV list to RGB
randRGBcolors = []
for HSVcolor in randHSVcolors:
randRGBcolors.append(colorsys.hsv_to_rgb(HSVcolor[0], HSVcolor[1], HSVcolor[2]))
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
# Generate soft pastel colors, by limiting the RGB spectrum
if type == 'soft':
low = 0.6
high = 0.95
randRGBcolors = [(np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high),
np.random.uniform(low=low, high=high)) for i in range(nlabels)]
if first_color_black:
randRGBcolors[0] = [0, 0, 0]
if last_color_black:
randRGBcolors[-1] = [0, 0, 0]
random_colormap = LinearSegmentedColormap.from_list('new_map', randRGBcolors, N=nlabels)
# Display colorbar
if verbose:
from matplotlib import colors, colorbar
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1, figsize=(15, 0.5))
bounds = np.linspace(0, nlabels, nlabels + 1)
norm = colors.BoundaryNorm(bounds, nlabels)
cb = colorbar.ColorbarBase(ax, cmap=random_colormap, norm=norm, spacing='proportional', ticks=None,
boundaries=bounds, format='%1i', orientation=u'horizontal')
return random_colormap
def assign_lineage_groups(dfMT, max_kinship_LG, master_intBCs):
"""
Assign cells in the allele table to a lineage group
    :param dfMT: allele (molecule) table
    :param max_kinship_LG: per-cell table holding the maximal-kinship lineage group for each cellBC
    :param master_intBCs: dictionary mapping each lineage group to its set of intBCs
:return: allele table with lineage group assignments
"""
dfMT["lineageGrp"]=0
cellBC2LG = {}
for n in max_kinship_LG.index:
cellBC2LG[n] = max_kinship_LG.loc[n, "lineageGrp"]
dfMT["lineageGrp"] = dfMT["cellBC"].map(cellBC2LG)
dfMT["lineageGrp"] = dfMT["lineageGrp"].fillna(value=0)
lg_sizes = {}
rename_lg = {}
for n, g in dfMT.groupby(["lineageGrp"]):
if n != 0:
lg_sizes[n] = len(g["cellBC"].unique())
sorted_by_value = sorted(lg_sizes.items(), key = lambda kv: kv[1])[::-1]
for i, tup in zip(range(1, len(sorted_by_value)+1), sorted_by_value):
print(i, tup[0], float(i))
rename_lg[tup[0]] = float(i)
rename_lg[0] = 0.0
dfMT["lineageGrp"] = dfMT.apply(lambda x: rename_lg[x.lineageGrp], axis=1)
return dfMT
def plot_overlap_heatmap(at_pivot_I, at, outputdir):
# remove old plots
plt.close()
flat_master = []
for n, lg in at.groupby("lineageGrp"):
for item in lg["intBC"].unique():
flat_master.append(item)
at_pivot_I = at_pivot_I[flat_master]
h2 = plt.figure(figsize=(20,20))
axmat2 = h2.add_axes([0.3,0.1,0.6,0.8])
im2 = axmat2.matshow(at_pivot_I, aspect='auto', origin='upper')
plt.savefig(outputdir + "/clustered_intbc.png")
plt.close()
def add_cutsite_encoding(lg_group):
lg_group["s1"] = 0
lg_group["s2"] = 0
lg_group["s3"] = 0
for i in lg_group.index:
if lg_group.loc[i, "r1"] == "['None']":
lg_group.loc[i, "s1"] = .9
elif "D" in lg_group.loc[i, "r1"]:
lg_group.loc[i, "s1"] = 1.9
elif 'I' in lg_group.loc[i, "r1"]:
lg_group.loc[i, 's1'] = 2.9
if lg_group.loc[i, "r2"] == "['None']":
lg_group.loc[i, "s2"] = .9
elif "D" in lg_group.loc[i, "r2"]:
lg_group.loc[i, "s2"] = 1.9
elif 'I' in lg_group.loc[i, "r2"]:
lg_group.loc[i, 's2'] = 2.9
if lg_group.loc[i, "r3"] == "['None']":
lg_group.loc[i, "s3"] = .9
elif "D" in lg_group.loc[i, "r3"]:
lg_group.loc[i, "s3"] = 1.9
elif 'I' in lg_group.loc[i, "r3"]:
lg_group.loc[i, 's3'] = 2.9
return lg_group
def plot_overlap_heatmap_lg(at, at_pivot_I, outputdir):
if not os.path.exists(outputdir + "/lineageGrp_piv_heatmaps"):
os.makedirs(outputdir + "/lineageGrp_piv_heatmaps")
for n, lg_group in tqdm(at.groupby("lineageGrp")):
plt.close()
lg_group = add_cutsite_encoding(lg_group)
s_cmap = colors.ListedColormap(['grey', 'red', 'blue'], N=3)
lg_group_pivot = pd.pivot_table(lg_group, index=["cellBC"], columns=["intBC"], values=['s1', 's2', 's3'], aggfunc=pylab.mean).T
lg_group_pivot2 = pd.pivot_table(lg_group,index=['cellBC'],columns=['intBC'],values='UMI',aggfunc=pylab.size)
cell_umi_count = lg_group.groupby(["cellBC"]).agg({"UMI": "count"}).sort_values(by="UMI")
n_unique_alleles = lg_group.groupby(["intBC"]).agg({"r1": "nunique", "r2": "nunique", "r3": "nunique"})
cellBCList = lg_group["cellBC"].unique()
col_order = lg_group_pivot2.dropna(axis=1, how="all").sum().sort_values(ascending=False,inplace=False).index
if len(col_order) < 2:
continue
s3 = lg_group_pivot.unstack(level=0).T
s3 = s3[col_order]
s3 = s3.T.stack(level=1).T
s3 = s3.loc[cell_umi_count.index]
s3_2 = lg_group_pivot2.dropna(axis=1, how="all").sum().sort_values(ascending=False, inplace=False)[col_order]
n_unique_alleles = n_unique_alleles.loc[col_order]
s3_intBCs = col_order
s3_cellBCs = s3.index.tolist()
# Plot heatmap
h = plt.figure(figsize=(14,10))
ax = h.add_axes([0.3, 0.1, 0.6, 0.8],frame_on=True)
im = ax.matshow(s3, aspect='auto', origin ="lower", cmap=s_cmap)
axx1 = plt.xticks(range(1, len(col_order)*3, 3), col_order, rotation='vertical', family="monospace")
ax3 = h.add_axes([0.2, 0.1, 0.1, 0.8], frame_on=True)
plt.barh(range(s3.shape[0]), cell_umi_count["UMI"])
plt.ylim([0, s3.shape[0]])
ax3.autoscale(tight=True)
axy0 = ax3.set_yticks(range(len(s3_cellBCs)))
axy1 = ax3.set_yticklabels(s3_cellBCs, family='monospace')
w = (1/3)
x = np.arange(len(s3_intBCs))
ax2 = h.add_axes([0.3, 0, 0.6, 0.1], frame_on = False)
b1 = ax2.bar(x - w, n_unique_alleles["r1"], width = w, label="r1")
b2 = ax2.bar(x, n_unique_alleles["r2"], width = w, label="r2")
b3 = ax2.bar(x + w, n_unique_alleles["r3"], width = w, label='r3')
ax2.set_xlim([0, len(s3_intBCs)])
ax2.set_ylim(ymin=0, ymax=(max(n_unique_alleles["r1"].max(), n_unique_alleles["r2"].max(), n_unique_alleles["r3"].max()) + 10))
ax2.set_xticks([])
ax2.yaxis.tick_right()
ax2.invert_yaxis()
ax2.autoscale(tight=True)
plt.legend()
#plt.gcf().subplots_adjust(bottom=0.15)
plt.tight_layout()
plt.savefig(outputdir + "/lineageGrp_piv_heatmaps/lg_" + str(int(n)) + "_piv_heatmap.png")
plt.close()
def collectAlleles(at, thresh = 0.05):
lineageGrps = at["lineageGrp"].unique()
at_piv = pd.pivot_table(at, index="cellBC", columns="intBC", values="UMI", aggfunc="count")
at_piv.fillna(value = 0, inplace=True)
at_piv[at_piv > 0] = 1
lgs = []
for i in tqdm(lineageGrps):
lg = at[at["lineageGrp"] == i]
cells = lg["cellBC"].unique()
lg_pivot = at_piv.loc[cells]
props = lg_pivot.apply(lambda x: pylab.sum(x) / len(x)).to_frame().reset_index()
props.columns = ["iBC", "prop"]
props = props.sort_values(by="prop", ascending=False)
props.index = props["iBC"]
p_bc = props[(props["prop"] > thresh) & (props["iBC"] != "NC")]
lg_group = lg.loc[np.in1d(lg["intBC"], p_bc["iBC"])]
lgs.append(lg_group)
return lgs
def filteredLG2AT(filtered_lgs):
final_df = pd.concat(filtered_lgs)
final_df = final_df.groupby(["cellBC", "intBC", "allele", "r1", "r2", "r3", "r1_no_context", "r2_no_context", "r3_no_context", "lineageGrp"], as_index=False).agg({"UMI": "count", "readCount": "sum"})
final_df["Sample"] = final_df.apply(lambda x: x.cellBC.split(".")[0], axis=1)
return final_df
def filter_low_prop_intBCs(PIV_assigned, thresh = 0.2):
master_intBCs = {}
master_LGs = []
for i, PIV_i in PIV_assigned.groupby(["lineageGrp"]):
PIVi_bin = PIV_i.copy()
PIVi_bin = PIVi_bin.drop(['lineageGrp'], axis=1) # drop the lineageGroup column
PIVi_bin[PIVi_bin>0]=1
intBC_sums = PIVi_bin.sum(0)
ordered_intBCs = intBC_sums.sort_values(ascending=False).index.tolist()
intBC_normsums = intBC_sums/max(intBC_sums)
intBC_normsums_filt_i = intBC_normsums[intBC_normsums >= thresh]
intBC_set_i = intBC_normsums_filt_i.index.tolist()
# update masters
master_intBCs[i] = intBC_set_i
master_LGs.append(i)
return master_LGs, master_intBCs
def filterCellBCs(moleculetable, outputdir, umiCountThresh = 10, verbose=True):
"""
    Filter out cell barcodes that have too few UMIs
    :param moleculetable: allele table
    :param outputdir: file path to output directory
:return: filtered allele table, cellBC to number umis mapping
"""
if verbose:
with open(outputdir + "/lglog.txt", "a") as f:
f.write("FILTER CELL BARCODES:\n")
f.write("Initial:\n")
f.write("# UMIs: " + str(moleculetable.shape[0]) + "\n")
f.write("# Cell BCs: " + str(len(np.unique(moleculetable["cellBC"]))) + "\n")
tooFewUMI_UMI = []
cellBC2nM = {}
    # Create a cell-filter dictionary for hash lookup later on when filling
# in the table
cell_filter = {}
for n, group in tqdm(moleculetable.groupby(["cellBC"])):
if np.sum(group["UMI"].values) <= umiCountThresh:
cell_filter[n] = "bad"
tooFewUMI_UMI.append(np.sum(group["UMI"].values))
else:
cell_filter[n] = "good"
cellBC2nM[n] = np.sum(group["UMI"].values)
# apply the filter using the hash table created above
moleculetable["status"] = moleculetable["cellBC"].map(cell_filter)
# count how many cells/umi's passed the filter for logging purposes
status = cell_filter.values()
tooFewUMI_cellBC = len(status) - len(np.where(status == "good")[0])
tooFewUMI_UMI = np.sum(tooFewUMI_UMI)
goodumis = moleculetable[(moleculetable["status"] == "good")].shape[0]
# filter based on status & reindex
n_moleculetable = moleculetable[(moleculetable["status"] == "good")]
n_moleculetable.index = [i for i in range(n_moleculetable.shape[0])]
# log results
if verbose:
with open(outputdir + "/lglog.txt", "a") as f:
f.write("Post:\n")
f.write("# UMIs: " + str(n_moleculetable.shape[0]) + "\n")
f.write("# Cell BCs: " + str(len(np.unique(n_moleculetable["cellBC"]))) + "\n\n")
return n_moleculetable, cellBC2nM
def merge_lineage_groups(at, outputdir, thresh=0.3):
lg_intbc_piv = pd.pivot_table(at, index="lineageGrp", columns=["intBC"], values="UMI", aggfunc="count")
lg_intbc_piv[lg_intbc_piv > 0] = 1
lg_intbc_piv.fillna(value=0)
lg_oMat = np.asarray(lg_utils.maxOverlap(lg_intbc_piv.T))
lg_oMat = sp.spatial.distance.squareform(lg_oMat)
for i in range(lg_oMat.shape[0]):
lg_oMat[i, i] = 1.0
to_collapse = []
for i in range(lg_oMat.shape[0]):
for j in range(i+1, lg_oMat.shape[0]):
if lg_oMat[i, j] > thresh:
coll = (i, j)
to_collapse.append(coll)
collapse_net = nx.Graph()
for pair in to_collapse:
collapse_net.add_edge(pair[0], pair[1])
num_lg = len(at["lineageGrp"].unique())
cc = list(nx.connected_components(collapse_net))
for i, c in zip(range(1, len(cc)+1), cc):
for n in c:
at.loc[at["lineageGrp"] == n, "lineageGrp" ]= i + num_lg
lg_sizes = {}
rename_lg = {}
for n, g in at.groupby(["lineageGrp"]):
lg_sizes[n] = len(g["cellBC"].unique())
sorted_by_value = sorted(lg_sizes.items(), key = lambda kv: kv[1])[::-1]
for i, tup in zip(range(len(sorted_by_value)), sorted_by_value):
rename_lg[tup[0]] = float(i)
at["lineageGrp"] = at.apply(lambda x: rename_lg[x.lineageGrp], axis=1)
with open(outputdir + "/lglog.txt", "a") as f:
f.write("Collapsing the following lineage groups:\n")
for coll in to_collapse:
f.write(str(coll) + "\n")
return at
def filter_cells_by_kinship_scores(PIV, master_LGs, master_intBCs, outputdir):
dfLG2intBC = pd.DataFrame()
for i in range(len(master_LGs)):
LGi = master_LGs[i]
intBCsi = master_intBCs[LGi]
        dfi = pd.DataFrame(index=[LGi], columns=intBCsi, data=1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Merge the position-holdings data
"""
import os
import pandas as pd
from kquant_data.config import __CONFIG_H5_FUT_DATA_DIR__
def process1(input_path, output_path, member_name, folder_name):
    # There are too many files and reloading everything from scratch is slow, so process incrementally
for dirpath, dirnames, filenames in os.walk(input_path):
for dirname in dirnames:
            # read_csv does not support the Chinese names, so English is used instead
path2 = os.path.join(output_path, folder_name, '%s.csv' % dirname)
try:
df_old = pd.read_csv(path2, encoding='utf-8-sig', parse_dates=True, index_col=['date'])
last_date_csv = df_old.index[-1].strftime('%Y-%m-%d.csv')
dfs = df_old
except:
last_date_csv = '1900-01-01.csv'
dfs = None
sub_dirpath = os.path.join(dirpath, dirname)
            print('Start processing', sub_dirpath)
for _dirpath, _dirnames, _filenames in os.walk(sub_dirpath):
for _filename in _filenames:
if _filename <= last_date_csv:
continue
path = os.path.join(_dirpath, _filename)
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=True, index_col=['date'])
row = df[df['member_name'] == member_name]
dfs = pd.concat([dfs, row])
# dfs.set_index('date')
if dfs is None:
continue
dfs.to_csv(path2, encoding='utf-8-sig', date_format='%Y-%m-%d')
print("处理完成", path2)
def process2(input_path, output_path, folder_name):
for dirpath, dirnames, filenames in os.walk(input_path):
dfs_long = None
dfs_short = None
for filename in filenames:
path = os.path.join(dirpath, filename)
df = pd.read_csv(path, encoding='utf-8-sig', parse_dates=['date'])
df.index = df['date']
col_name = filename[:-4]
col_long = df['long_position_increase']
col_long.name = col_name
dfs_long = pd.concat([dfs_long, col_long], axis=1)
col_short = df['short_position_increase']
col_short.name = col_name
            dfs_short = pd.concat([dfs_short, col_short], axis=1)
import pandas as pd
raw_csv_data = pd.read_csv('data/Absenteeism-data.csv')
df = raw_csv_data.copy()
# dropping the ID columns
df = df.drop(['ID'], axis=1)
# print(df.head())
# converting the categorical column reason for absence into dummy columns
reason_columns = pd.get_dummies(df['Reason for Absence'], drop_first=True)
age_dummies = pd.get_dummies(df['Age'], drop_first=True)
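# Illustrative continuation (added, not part of the original script): the dummy
# frames would typically replace the raw categorical columns, e.g.
# df = pd.concat([df.drop(['Reason for Absence', 'Age'], axis=1), reason_columns, age_dummies], axis=1)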
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import gamma
class ReproductionNumber:
def __init__(self, incidence, prior_shape=1, prior_scale=5,
si_pmf=None, si_pars=None, t_start=None, window_width=None):
"""
Initialize ReproductionNumber class
:param incidence: pandas DataFrame with columns 'dates' and 'incidence' (number of new cases per day).
:param prior_shape: value of shape parameter of Gamma prior for reproduction number estimation.
:param prior_scale: value of scale parameter of Gamma prior for reproduction number estimation.
:param si_pmf: pandas DataFrame with columns 'interval_length' and 'probability'.
Represents probability mass function for given values of serial interval.
:param si_pars: dictionary with keys 'mean' and 'sd'.
Represents parameters to generate PMF for serial interval.
"""
self.incidence = incidence.reset_index().set_index('dates')
self.prior_shape = prior_shape
self.prior_scale = prior_scale
self.si_pmf = si_pmf
self.si_pars = si_pars
self.t_start = t_start
self.window_width = window_width
self.t_end = None
self.posterior_parameters = {}
self.posterior_summary = None
self.check_time_periods()
self.check_serial_number_pmf()
np.random.seed(42)
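    # Illustrative usage (added; the dates, counts and serial-interval parameters
    # below are hypothetical):
    #   incidence = pd.DataFrame({"dates": pd.date_range("2020-03-01", periods=30),
    #                             "incidence": np.random.poisson(20, 30)})
    #   rn = ReproductionNumber(incidence, si_pars={"mean": 4.7, "sd": 2.9})
    #   rn.compute_posterior_parameters()
    #   samples = rn.sample_from_posterior(sample_size=1000)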
def check_time_periods(self):
if self.window_width is None:
self.window_width = 6
if self.t_start is None:
self.t_start = np.arange(1, self.incidence.shape[0] - self.window_width)
elif isinstance(self.t_start, list):
self.t_start = np.array(self.t_start)
self.t_end = self.t_start + self.window_width
def check_serial_number_pmf(self):
if self.si_pmf is not None and self.si_pars is not None:
txt = "You must pass either 'si_pmf' or 'si_pars', not both."
raise AttributeError(txt)
if self.si_pmf is None:
if self.si_pars is None:
txt = "You must pass either 'si_pmf' or 'si_pars'. You've passed neither."
raise AttributeError(txt)
if not all([i in self.si_pars.keys() for i in ['mean', 'sd']]):
txt = "'si_pars' must be a dictionary with 'mean' and 'sd' keys."
raise AttributeError(txt)
self.compute_serial_interval_pmf()
else:
self.si_pmf = self.si_pmf.reset_index().set_index('interval_length')['probability']
def compute_serial_interval_pmf(self, k=None, mu=None, sigma=None):
if k is None:
k = np.arange(self.incidence.shape[0])
elif not isinstance(k, np.ndarray):
raise TypeError("k must be of type numpy.ndarray, probably shape = (n_time_windows, ).")
if mu is None:
mu = self.si_pars['mean']
if sigma is None:
sigma = self.si_pars['sd']
if sigma < 0:
raise AttributeError("sigma must be >=0.")
if mu <= 1:
raise AttributeError("mu must be >1")
if not (k >= 0.).sum() == len(k):
raise AttributeError("all values in k must be >=0.")
shape = ((mu - 1) / sigma) ** 2
scale = (sigma ** 2) / (mu - 1)
def cdf_gamma(x, shape_, scale_):
return gamma.cdf(x=x, a=shape_, scale=scale_)
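        # Editor's note: the expression below discretises the (1-day-shifted) Gamma serial-interval
        # distribution over integer lags k by combining Gamma CDFs at neighbouring lags; this mirrors
        # the discretisation used in Cori et al.'s EpiEstim formulation.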
si_pmf = k * cdf_gamma(k,
shape,
scale) + (k - 2) * cdf_gamma(k - 2,
shape,
scale) - 2 * (k - 1) * cdf_gamma(k - 1,
shape,
scale)
si_pmf = si_pmf + shape * scale * (2 * cdf_gamma(k - 1,
shape + 1,
scale) - cdf_gamma(k - 2,
shape + 1,
scale) - cdf_gamma(k,
shape + 1,
scale))
si_pmf = np.array([np.max([0, i]) for i in si_pmf])
self.si_pmf = si_pmf
def compute_overall_infectivity(self):
def fill_up_with_zeros(x, ref):
x_nrows, ref_nrows = x.shape[0], ref.shape[0]
updated_x = x
if x_nrows < ref_nrows:
updated_x = np.concatenate([x, np.zeros(1 + ref_nrows - x_nrows)])
return updated_x
incid, si_pmf = self.incidence, self.si_pmf
si_pmf = fill_up_with_zeros(x=si_pmf, ref=incid)
number_of_time_points = incid.shape[0]
overall_infectivity = np.zeros((number_of_time_points,))
for t in range(1, number_of_time_points + 1):
overall_infectivity[t - 1] = (si_pmf[:t] * incid.iloc[:t][::-1]['incidence']).sum()
overall_infectivity[0] = np.nan
return overall_infectivity
def compute_posterior_parameters(self, prior_shape=None, prior_scale=None):
incid, si_pmf = self.incidence, self.si_pmf
t_start, t_end = self.t_start, self.t_end
if prior_shape is None:
prior_shape = self.prior_shape
if prior_scale is None:
prior_scale = self.prior_scale
number_of_time_windows = len(t_start)
overall_infectivity = self.compute_overall_infectivity()
final_mean_si = (si_pmf * range(len(si_pmf))).sum()
posterior_shape = np.zeros(number_of_time_windows)
posterior_scale = np.zeros(number_of_time_windows)
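        # Editor's note: conjugate Gamma update per window -- posterior shape = prior shape + total
        # cases in the window; 1/posterior scale = 1/prior scale + summed overall infectivity.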
for t in range(number_of_time_windows):
if t_end[t] > final_mean_si:
posterior_shape[t] = prior_shape + (incid.iloc[range(t_start[t], t_end[t] + 1)]["incidence"]).sum()
else:
posterior_shape[t] = np.nan
for t in range(number_of_time_windows):
if t_end[t] > final_mean_si:
period_overall_infectivity = (overall_infectivity[range(t_start[t], t_end[t] + 1)]).sum()
posterior_scale[t] = 1 / ((1 / prior_scale) + period_overall_infectivity)
else:
posterior_scale[t] = np.nan
self.posterior_parameters['shape'] = posterior_shape
self.posterior_parameters['scale'] = posterior_scale
def sample_from_posterior(self, sample_size=1000):
if not all([i in self.posterior_parameters.keys() for i in ['scale', 'shape']]):
txt = "Can't sample from posterior before computing posterior parameters."
raise IndexError(txt)
posterior_shape = self.posterior_parameters['shape']
posterior_scale = self.posterior_parameters['scale']
number_of_time_windows = len(self.t_start)
sample_r_posterior = np.zeros((number_of_time_windows, sample_size))
for t in range(number_of_time_windows):
if not t > len(posterior_shape) - 1:
sample_r_posterior[t, ] = np.random.gamma(shape=posterior_shape[t],
scale=posterior_scale[t],
size=sample_size)
else:
sample_r_posterior[t,] = np.nan
return sample_r_posterior.transpose()
def compute_posterior_summaries(self, posterior_sample, t_max=None):
start_dates = self.incidence.index[self.t_start]
end_dates = self.incidence.index[self.t_end]
post_mean_r = posterior_sample.mean(axis=0)
post_sd = posterior_sample.std(axis=0)
post_shape = self.posterior_parameters['shape']
post_scale = self.posterior_parameters['scale']
post_upper_quantile_r = np.quantile(posterior_sample, q=0.975, axis=0)
post_lower_quantile_r = np.quantile(posterior_sample, q=0.025, axis=0)
summary_dict = {
'start_dates': start_dates, 'end_dates': end_dates,
'Rt_mean': post_mean_r, 'Rt_sd': post_sd,
'Rt_q0.975': post_upper_quantile_r, 'Rt_q0.025': post_lower_quantile_r,
'Rt_shape': post_shape, 'Rt_scale': post_scale
}
posterior_summary = pd.DataFrame(summary_dict)
posterior_summary['start_dates'] = posterior_summary['start_dates'].astype('datetime64[ns]')
posterior_summary['end_dates'] = posterior_summary['end_dates'].astype('datetime64[ns]')
if t_max is not None:
last_day = max(posterior_summary['end_dates'])
final_date = max(posterior_summary['end_dates']) + pd.Timedelta(days=t_max)
last_day_data = posterior_summary[posterior_summary['end_dates'] == last_day].to_dict(orient='list')
dates_ahead = pd.date_range(start=last_day, end=final_date)[1:]
forecast_d = pd.DataFrame({
'start_dates': pd.NaT, 'end_dates': dates_ahead
})
forecast_d['Rt_mean'] = last_day_data['Rt_mean'][0]
forecast_d['Rt_sd'] = last_day_data['Rt_sd'][0]
forecast_d['Rt_q0.975'] = last_day_data['Rt_q0.975'][0]
forecast_d['Rt_q0.025'] = last_day_data['Rt_q0.025'][0]
forecast_d['Rt_shape'] = last_day_data['Rt_shape'][0]
forecast_d['Rt_scale'] = last_day_data['Rt_scale'][0]
posterior_summary = | pd.concat([posterior_summary, forecast_d], ignore_index=True) | pandas.concat |
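# --- Editor's addition: illustrative usage sketch for the ReproductionNumber class above. ---
# The serial-interval mean/sd and the synthetic incidence below are placeholder assumptions,
# not values taken from the original source.
import numpy as np
import pandas as pd

_dates = pd.date_range("2020-03-01", periods=60, freq="D")
_incidence = pd.DataFrame({"dates": _dates,
                           "incidence": np.random.poisson(lam=20, size=60)})
_rn = ReproductionNumber(incidence=_incidence,
                         prior_shape=1, prior_scale=5,
                         si_pars={"mean": 4.7, "sd": 2.9},
                         window_width=6)
_rn.compute_posterior_parameters()
_sample = _rn.sample_from_posterior(sample_size=1000)
_rn.compute_posterior_summaries(_sample, t_max=7)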
import matplotlib.pyplot as plt
import numpy
import pandas as pd
import math
import numpy.fft as fourier
import scipy.interpolate as inter
# READ DATA FROM SIMULATION
iT = 0
nT = 3
nend = 30000 # truncate the results here: beyond this point they are no longer meaningful
nend = 180000
df1 = pd.read_csv('Bl1outin.txt', header=None)
bl1mom = df1.values[iT:nend:nT,:]
df2 = pd.read_csv('Bl2outin.txt', header=None)
bl2mom = df2.values[iT:nend:nT,:]
df3 = pd.read_csv('Bl3outin.txt', header=None)
bl3mom = df3.values[iT:nend:nT,:]
df4 = pd.read_csv('Azimuth.txt', header=None)
turbinfo = df4.values[iT:nend:nT,:]
df5 = pd.read_csv('/home/antonio/SOWFA/exampleCases/UniWind_3Turb_SC_OBS+YAWERROR_DEMOD/5MW_Baseline/Wind/WindSim.uniform', sep='\t', header=None)
windinfo = df5.values
df6 = pd.read_csv('ECROSS.txt', header=None)
data6 = df6.values[iT:nend:nT,:]
df7 = pd.read_csv('EMOM.txt', header=None)
data7 = df7.values[iT:nend:nT,:]
#GIVEN PARAMETERS
R = 63 #TURBINE RADIUS
print(windinfo)
V0 = windinfo[0,1] # it's a constant vector, so take only 1 value
yawerr = -windinfo[:,2]*numpy.pi/180
vert_shear = windinfo[:,5]
u0_p = V0*numpy.sin(yawerr) #CROSS_WIND
k1_p = vert_shear #VERTICAL WIND SHEAR POWER EXPONENT
dtFAST = 0.005
time = turbinfo[:,3]
timewind = windinfo[:,0]
u0_int = inter.interp1d(timewind, u0_p)
k1_int = inter.interp1d(timewind, k1_p)
wr = turbinfo[:,1]
azimuth1 = turbinfo[:,0]
azimuth2 = turbinfo[:,0] + 2*numpy.pi/3
azimuth3 = turbinfo[:,0] + 4*numpy.pi/3
u0bar = numpy.multiply(u0_int(time), 1/(wr*R))
V0bar = V0/(wr*R)
k1bar = numpy.multiply(k1_int(time), V0bar)
Tper = (2*numpy.pi) / wr
tau = Tper/1.5 # TO BE LOWERED LATER to better filter the 3P response --> 1.5P # careful: 3 or 1/3
print(V0)
m_out_notfil = numpy.zeros([len(bl1mom[:,0])*3])
m_in_notfil = numpy.zeros([len(bl1mom[:,0])*3])
for i in range(len(bl1mom[:,0])): # REARRANGING THE MOMENT BLADE VECTOR FOR CALCULATIONS
m_out_notfil[3*i:3*i+3] = numpy.array([bl1mom[i,0], bl2mom[i,0], bl3mom[i,0]])
m_in_notfil[3*i:3*i+3] = numpy.array([bl1mom[i,1], bl2mom[i,1], bl3mom[i,1]])
def ColTransf(ang1, ang2, ang3): #COLEMAN MBC TRANSFORMATION
out = numpy.array([[1, 1, 1], [2*math.cos(ang1), 2*math.cos(ang2), 2*math.cos(ang3)], [2*math.sin(ang1), 2*math.sin(ang2), 2*math.sin(ang3)]])/3
return out
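# Editor's note: the Coleman / multi-blade-coordinate transform above maps the three rotating
# blade moments into fixed-frame collective, cosine (tilt) and sine (yaw) components for the
# given azimuth angles.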
m_out_tr = numpy.zeros([len(bl1mom[:,0])*3])
m_in_tr = numpy.zeros([len(bl1mom[:,0])*3])
for i in range(len(bl1mom[:,0])): #APPLYING MBC TRANSF. TO MOMENT VECTOR
ColT = ColTransf(azimuth1[i], azimuth2[i], azimuth3[i])
m_out_tr[3*i:3*i+3] = numpy.dot(ColT, m_out_notfil[3*i:3*i+3].transpose())
m_in_tr[3*i:3*i+3] = numpy.dot(ColT, m_in_notfil[3*i:3*i+3].transpose())
#NOW I GO IN FREQUENCY DOMAIN
m_out_tr_time1 = m_out_tr[0::3]
m_out_tr_time2 = m_out_tr[1::3]
m_out_tr_time3 = m_out_tr[2::3]
m_in_tr_time1 = m_in_tr[0::3]
m_in_tr_time2 = m_in_tr[1::3]
m_in_tr_time3 = m_in_tr[2::3]
print(m_out_tr_time1)
plt.plot(time, bl1mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl2mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl3mom[:,0])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl1mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl2mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
print(m_out_tr_time1)
plt.plot(time, bl3mom[:,1])
plt.title("M_OUT_TR_1")
plt.show()
plt.plot(time, wr)
plt.title("WR")
plt.show()
plt.plot(time, m_out_tr_time1)
plt.title("M_OUT_1")
plt.show()
freq = fourier.fftfreq(len(m_out_tr_time1), d=dtFAST)
m_out_tr_freq1 = fourier.fft(m_out_tr_time1)
m_out_tr_freq2 = fourier.fft(m_out_tr_time2)
m_out_tr_freq3 = fourier.fft(m_out_tr_time3)
m_in_tr_freq1 = fourier.fft(m_in_tr_time1)
m_in_tr_freq2 = fourier.fft(m_in_tr_time2)
m_in_tr_freq3 = fourier.fft(m_in_tr_time3)
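# Editor's note: first-order low-pass filter applied in the frequency domain,
# H(s) = 1 / (tau*s + 1) with s = j*2*pi*f.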
def FILTER_LP(input, freq, tau):
s = 2*numpy.pi*freq*1j
output = (1/(tau*s + 1))*input
return output
m_out_freq1 = numpy.zeros([len(m_out_tr_freq1)], dtype=complex)
m_out_freq2 = numpy.zeros([len(m_out_tr_freq2)], dtype=complex)
m_out_freq3 = numpy.zeros([len(m_out_tr_freq3)], dtype=complex)
m_in_freq1 = numpy.zeros([len(m_in_tr_freq1)], dtype=complex)
m_in_freq2 = numpy.zeros([len(m_in_tr_freq2)], dtype=complex)
m_in_freq3 = numpy.zeros([len(m_in_tr_freq3)], dtype=complex)
for i in range(len(m_out_tr_freq1)):
m_out_freq1[i] = FILTER_LP(m_out_tr_freq1[i], freq[i], tau[i])
m_out_freq2[i] = FILTER_LP(m_out_tr_freq2[i], freq[i], tau[i])
m_out_freq3[i] = FILTER_LP(m_out_tr_freq3[i], freq[i], tau[i])
m_in_freq1[i] = FILTER_LP(m_in_tr_freq1[i], freq[i], tau[i])
m_in_freq2[i] = FILTER_LP(m_in_tr_freq2[i], freq[i], tau[i])
m_in_freq3[i] = FILTER_LP(m_in_tr_freq3[i], freq[i], tau[i])
m_out_time1 = fourier.ifft(m_out_freq1).real # I CAN DO IT---> NEGATIVE PART IS NEGLIGIBLE (about 0) + the signal is real
m_out_time2 = fourier.ifft(m_out_freq2).real
m_out_time3 = fourier.ifft(m_out_freq3).real
m_in_time1 = fourier.ifft(m_in_freq1).real
m_in_time2 = fourier.ifft(m_in_freq2).real
m_in_time3 = fourier.ifft(m_in_freq3).real
print(m_out_time1)
print(data7)
plt.plot(time, m_out_time1,'b',data7[:,6], data7[:,0],'r')
plt.title("M_OUT_1")
plt.show()
plt.plot(time, m_out_time2,'b',data7[:,6], data7[:,1],'r')
plt.title("M_OUT_2")
plt.show()
plt.plot(time, m_out_time3,'b',data7[:,6], data7[:,2],'r')
plt.title("M_OUT_3")
plt.show()
plt.plot(time, m_in_time1,'b',data7[:,6], data7[:,3],'r')
plt.title("M_IN_1")
plt.show()
plt.plot(time, m_in_time2,'b',data7[:,6], data7[:,4],'r')
plt.title("M_IN_2")
plt.show()
plt.plot(time, m_in_time3,'b',data7[:,6], data7[:,5],'r')
plt.title("M_IN_3")
plt.show()
ind = numpy.random.randint(low = 0, high=len(m_out_time1), size=10000)
m_u0 = numpy.zeros((4,10000))
m_k1V0 = numpy.zeros((5,10000))
m_u0 = numpy.array([[numpy.multiply(m_out_time2[ind], 1/m_out_time1[ind])], [numpy.multiply(m_out_time3[ind], 1/m_out_time1[ind])], [numpy.multiply(m_in_time2[ind], 1/m_in_time1[ind])], [numpy.multiply(m_in_time3[ind], 1/m_in_time1[ind])]])
m_k1V0 = numpy.array([[numpy.ones((10000,))], [m_out_time2[ind]], [m_out_time3[ind]], [m_in_time2[ind]], [m_in_time3[ind]]])
w_vec = numpy.array([u0bar[ind], k1bar[ind]])
print(m_u0)
print(m_k1V0)
print(w_vec)
m_u0 = numpy.reshape(m_u0, (4,10000))
m_k1V0 = numpy.reshape(m_k1V0, (5,10000))
print(numpy.shape(m_k1V0))
Tu0 = numpy.dot(u0bar[ind], numpy.linalg.pinv(m_u0))
print(Tu0)
Tk1V0 = numpy.dot(k1bar[ind], numpy.linalg.pinv(m_k1V0))
print(Tk1V0)
m_prova = m_u0
m_prova = numpy.vstack((m_prova, m_k1V0))
m_prova = numpy.reshape(m_prova, (9,10000))
print(m_prova)
T = numpy.zeros([2,9])
T[0,0:4] = Tu0
T[1,4:9] = Tk1V0
print(T)
w_prova = numpy.dot(T, m_prova)
print(numpy.shape(w_prova))
print(w_vec)
print(w_prova[0,:]-u0bar[ind])
CWIND = numpy.multiply(w_prova[0,:], wr[ind])*R
CREAL_ind = u0_int(time)
CREAL = CREAL_ind[ind]
print(numpy.mean(numpy.abs(CWIND-CREAL)))
timep = time[ind]
i1 = numpy.argsort(timep)
plt.plot(timep[i1], CWIND[i1],'b', timep[i1], CREAL[i1], 'r')
plt.title("RESULTS")
plt.show()
print(numpy.shape(CREAL[i1]))
T_tocsv = numpy.hstack((numpy.array([[V0], [V0]]), T))
dataset = | pd.DataFrame(data=T_tocsv) | pandas.DataFrame |
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
import os
from ..utils.argo_utils import create_reset_metric
from .LoggingMeanTensorsHook import evaluate_means_over_dataset
import itertools
class LoggerHelperMultiDS:
def __init__(self, path, loggername, tensors_names, tensor_nodes,
ds_handle, datasets_initializers, datasets_handles, datasets_eval_names,
erase_old=True):
self._filename = os.path.join(path, loggername+".txt")
self._plotfilename = os.path.join(path, loggername)
reference_names = ["epoch", "dataset"]
self._monitor_tensors_values, self._monitor_tensors_updates, self._monitor_tensors_reset = self._get_mean_ops(tensor_nodes)
self._pd_csv_kwargs = {
"sep" : "\t"
}
if (not erase_old) and os.path.exists(self._filename) and os.path.getsize(self._filename) > 0:
self._df = | pd.read_csv(self._filename, **self._pd_csv_kwargs) | pandas.read_csv |
from __future__ import absolute_import
from __future__ import print_function
import sys
import glob
import time
import numpy as np
import pandas as pd
import os.path
import time
import datetime
import re
from keras.preprocessing import sequence
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential, Graph, Model
from keras.models import model_from_json
from keras.layers import Input, merge, Flatten, Dense, Activation, Convolution1D, ZeroPadding1D
#from keras.layers.core import Dense, Dropout, Activation, TimeDistributedDense, Flatten, Reshape, Permute, Merge, Lambda
#from keras.layers.convolutional import Convolution1D, MaxPooling1D, Convolution2D, MaxPooling2D, UpSampling1D, UpSampling2D, ZeroPadding1D
from keras.layers.advanced_activations import ParametricSoftplus, SReLU
from keras.callbacks import ModelCheckpoint, Callback
import matplotlib.pyplot as plt
path = "./training_data_large/" # to make sure signal files are written in same directory as data files
def draw_model(model):
from IPython.display import SVG
from keras.utils.visualize_util import model_to_dot
from keras.utils.visualize_util import plot
#graph = to_graph(model, show_shape=True)
#graph.write_png("UFCNN_1.png")
SVG(model_to_dot(model).create(prog='dot', format='svg'))
plot(model, to_file='UFCNN_1.png')
def print_nodes_shapes(model):
for k, v in model.inputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.nodes.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
for k, v in model.outputs.items():
print("{} : {} : {} : {}".format(k, type(v), v.input_shape, v.output_shape))
def print_layers_shapes(model):
for l in model.layers:
print("{} : {} : {}".format(type(l), l.input_shape, l.output_shape))
def save_neuralnet (model, model_name):
json_string = model.to_json()
open(path + model_name + '_architecture.json', 'w').write(json_string)
model.save_weights(path + model_name + '_weights.h5', overwrite=True)
yaml_string = model.to_yaml()
with open(path + model_name + '_data.yml', 'w') as outfile:
outfile.write( yaml_string)
def load_neuralnet(model_name):
"""
reading the model from disk - including all the trained weights and the complete model design (hyperparams, planes,..)
"""
arch_name = path + model_name + '_architecture.json'
weight_name = path + model_name + '_weights.h5'
if not os.path.isfile(arch_name) or not os.path.isfile(weight_name):
print("model_name given and file %s and/or %s not existing. Aborting." % (arch_name, weight_name))
sys.exit()
print("Loaded model: ",model_name)
model = model_from_json(open(arch_name).read())
model.load_weights(weight_name)
return model
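# Editor's note: ufcnn_model_concat below builds a U-shaped fully convolutional network:
# conv1..conv5 form the downstream path, while conv6..conv8 concatenate earlier activations
# (skip connections) before the final output convolution.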
def ufcnn_model_concat(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_deconv(sequence_length=5000,
features=4,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = False,
class_mode=None,
activation="softplus",
init="lecun_uniform"):
#model = Graph()
#model.add_input(name='input', input_shape=(None, features))
main_input = Input(name='input', shape=(None, features))
#########################################################
#model.add_node(ZeroPadding1D(2), name='input_padding', input='input') # to avoid lookahead bias
input_padding = (ZeroPadding1D(2))(main_input) # to avoid lookahead bias
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features)), name='conv1', input='input_padding')
#model.add_node(Activation(activation), name='relu1', input='conv1')
conv1 = Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init, input_shape=(sequence_length, features))(input_padding)
relu1 = (Activation(activation))(conv1)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv2', input='relu1')
#model.add_node(Activation(activation), name='relu2', input='conv2')
conv2 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu1)
relu2 = (Activation(activation))(conv2)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv3', input='relu2')
#model.add_node(Activation(activation), name='relu3', input='conv3')
conv3 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu2)
relu3 = (Activation(activation))(conv3)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv4', input='relu3')
#model.add_node(Activation(activation), name='relu4', input='conv4')
conv4 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu3)
relu4 = (Activation(activation))(conv4)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init), name='conv5', input='relu4')
#model.add_node(Activation(activation), name='relu5', input='conv5')
conv5 = (Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='same', init=init))(relu4)
relu5 = (Activation(activation))(conv5)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv6',
# inputs=['relu3', 'relu5'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu6', input='conv6')
conv6 = merge([relu3, relu5], mode='concat', concat_axis=1)
relu6 = (Activation(activation))(conv6)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv7',
# inputs=['relu2', 'relu6'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu7', input='conv7')
conv7 = merge([relu2, relu6], mode='concat', concat_axis=1)
relu7 = (Activation(activation))(conv7)
#########################################################
#model.add_node(Convolution1D(nb_filter=nb_filter,filter_length=filter_length, border_mode='same', init=init),
# name='conv8',
# inputs=['relu1', 'relu7'],
# merge_mode='concat', concat_axis=-1)
#model.add_node(Activation(activation), name='relu8', input='conv8')
conv8 = merge([relu1, relu7], mode='concat', concat_axis=1)
relu8 = (Activation(activation))(conv8)
#########################################################
if regression:
#########################################################
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_output(name='output', input='conv9')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
output = conv9
#main_output = conv9.output
else:
#model.add_node(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init), name='conv9', input='relu8')
#model.add_node(Activation('softmax'), name='activation', input='conv9')
#model.add_output(name='output', input='activation')
conv9 = Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init)(relu8)
activation = (Activation('softmax'))(conv9)
#main_output = activation.output
output = activation
#model.compile(optimizer=optimizer, loss={'output': loss})
model = Model(input=main_input, output=output)
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model_seq(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform"):
model = Sequential()
model.add(ZeroPadding1D(2, input_shape=(None, features)))
#########################################################
model.add(Convolution1D(nb_filter=nb_filter, filter_length=filter_length, border_mode='valid', init=init))
model.add(Activation('relu'))
model.add(Convolution1D(nb_filter=output_dim, filter_length=sequence_length, border_mode='same', init=init))
model.add(Activation('sigmoid'))
model.compile(optimizer=optimizer, loss=loss)
return model
def ufcnn_model(sequence_length=5000,
features=1,
nb_filter=150,
filter_length=5,
output_dim=1,
optimizer='adagrad',
loss='mse',
regression = True,
class_mode=None,
init="lecun_uniform",
mode='concat'):
if mode == 'concat':
return ufcnn_model_concat(sequence_length,
features,
nb_filter,
filter_length,
output_dim,
optimizer,
loss,
regression,
class_mode,
init)
else:
        raise NotImplementedError
def gen_cosine_amp(amp=100, period=25, x0=0, xn=50000, step=1, k=0.0001):
"""Generates an absolute cosine time series with the amplitude
exponentially decreasing
Arguments:
amp: amplitude of the cosine function
period: period of the cosine function
x0: initial x of the time series
xn: final x of the time series
step: step of the time series discretization
k: exponential rate
Ernst 20160301 from https://github.com/fchollet/keras/blob/master/examples/stateful_lstm.py
as a first test for the ufcnn
"""
cos = np.zeros(((xn - x0) * step, 1, 1))
print("Cos. Shape",cos.shape)
for i in range(len(cos)):
idx = x0 + i * step
cos[i, 0, 0] = amp * np.cos(idx / (2 * np.pi * period))
cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
return cos
def train_and_predict_regression(model, sequence_length=5000, batch_size=128, epochs=5):
lahead = 1
cos = gen_cosine_amp(xn = sequence_length * 100)
expected_output = np.zeros((len(cos), 1, 1))
for i in range(len(cos) - lahead):
expected_output[i, 0] = np.mean(cos[i + 1:i + lahead + 1])
print('Training')
for i in range(epochs):
print('Epoch', i, '/', epochs)
model.fit({'input': cos, 'output': expected_output},
verbose=1,
nb_epoch=1,
shuffle=False,
batch_size=batch_size)
print('Predicting')
predicted_output = model.predict({'input': cos,}, batch_size=batch_size)
return {'model': model, 'predicted_output': predicted_output, 'expected_output': expected_output}
def treat_X_tradcom(mean):
""" treat some columns of the dataframe together when normalizing the dataframe:
col. 1, 2, 4 ... Mkt Price, Bid price, Ask Price
col 3 and 5 ... Ask & Bid price
"""
result = mean.copy()
#print("Result before max",result)
mkt = mean[1]
bid_px = mean[2]
ask_px = mean[4]
px_max=max(mkt,bid_px,ask_px)
result[1] = px_max
result[2] = px_max
result[4] = px_max
bid = mean[3]
ask = mean[5]
ba_max=max(bid,ask)
result[3] = ba_max
result[5] = ba_max
print("Result after max",result)
return result
def standardize_inputs(source, colgroups=None, mean=None, std=None):
"""
Standardize input features.
Groups of features could be listed in order to be standardized together.
source: Pandas.DataFrame or filename of csv file with features
colgroups: list of lists of groups of features to be standardized together (e.g. bid/ask price, bid/ask size)
returns Xdf ...Pandas.DataFrame, mean ...Pandas.DataFrame, std ...Pandas.DataFrame
"""
import itertools
import types
#if isinstance(source, types.StringTypes):
if isinstance(source, str):
Xdf = pd.read_csv(source, sep=" ", index_col = 0, header = None)
elif isinstance(source, pd.DataFrame):
Xdf = source
else:
raise TypeError
df = pd.DataFrame()
me = pd.DataFrame()
st = pd.DataFrame()
for colgroup in colgroups:
_df,_me,_st = standardize_columns(Xdf[colgroup])
# if mean & std are given, do not multiply with colgroup mean
if mean is not None and std is not None:
_df = Xdf[colgroup]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
print("In Group me")
print(me)
# _temp_list = list(itertools.chain.from_iterable(colgroups))
separate_features = [col for col in Xdf.columns if col not in list(itertools.chain.from_iterable(colgroups))]
if mean is None and std is None:
_me = Xdf[separate_features].mean()
_df = Xdf[separate_features].sub(_me)
_st = Xdf[separate_features].std()
_df = _df[separate_features].div(_st)
else:
_df = Xdf[separate_features]
df = pd.concat([df, _df], axis=1)
me = pd.concat([me, _me])
st = pd.concat([st, _st])
me = | pd.Series(me[0]) | pandas.Series |
# @Author: <NAME><Nareshvrao>
# @Date: 2020-12-22, 12:44:08
# @Last modified by: Naresh
# @Last modified time: 2019-12-22, 1:13:26
import warnings
warnings.filterwarnings("ignore")
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
from util.utils import *
def compute_detail_score(df, dice):
res = []
res.append(df[dice].mean())
#c1 -> c4 dice
for label in ['Fish', 'Flower', 'Gravel', 'Sugar']:
df_tmp = df[df['cls'] == label]
res.append(df_tmp[dice].mean())
# neg & pos dice
res.append(df[df['truth'] == ''][dice].mean())
res.append(df[df['truth'] != ''][dice].mean())
# c1 -> c4 pos
for label in ['Fish', 'Flower', 'Gravel', 'Sugar']:
df_tmp = df[df['cls'] == label]
res.append(df_tmp[df_tmp['truth'] != ''][dice].mean())
return res
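# Editor's note: compute_detail_score returns, in order, the overall dice, the dice for each of the
# four classes, the dice on empty-mask rows, the dice on non-empty rows, and the per-class dice on
# non-empty rows only.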
def ensemble_rles(rles1, rles2, mode='intersect'):
res = []
for rle1, rle2 in tqdm.tqdm(zip(rles1, rles2)):
m1 = rle2mask(rle1, height=350, width=525, fill_value=1)
m2 = rle2mask(rle2, height=350, width=525, fill_value=1)
if mode == 'intersect':
mask = ((m1+m2) == 2).astype(int)
elif mode == 'union':
mask = ((m1+m2) > 0).astype(int)
else:
            raise RuntimeError('%s not implemented.' % mode)
rle = mask2rle(mask)
res.append(rle)
return res
def load_stacking(seg_name, tta, ts=0.5):
df_seg_val = pd.read_csv('../output/'+seg_name+'/valid_5fold_tta%d.csv'%tta)
df_seg_test = pd.read_csv('../output/'+seg_name+'/test_5fold_tta%d.csv'%tta)
df_seg_val['s1'], df_seg_test['s1'] = np.nan, np.nan
df_seg_val['s1'].loc[df_seg_val.pred >= ts] = '1 1'
df_seg_test['s1'].loc[df_seg_test.pred >= ts] = '1 1'
return df_seg_val[['Image_Label', 's1']], df_seg_test[['Image_Label', 's1']]
def load_seg_pred(seg_name, name, tta):
#load val
df_val = []
try:
for fold in range(5):
if tta <= 1:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_fold%d.csv'%fold))
else:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_fold%d_tta%d.csv'%(fold, tta)))
df_val = pd.concat(df_val)
except:
df_val = pd.read_csv('../output/'+ seg_name + '/' + 'valid_5fold_tta%d.csv'%(tta))
df_val = df_val[['Image_Label', 'EncodedPixels']]
#df_val.rename(columns={'s3': 'EncodedPixels'}, inplace=True)
df_test = pd.read_csv('../output/'+ seg_name + '/' + 'test_5fold_tta%d.csv'%tta)
df_val.rename(columns={'EncodedPixels': name}, inplace=True)
df_test.rename(columns={'EncodedPixels': name}, inplace=True)
return df_val, df_test
def load_seg_cls_pred(seg_name, name, tta, ts):
#load val
df_val = []
try:
for fold in range(5):
if tta <= 1:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_cls_fold%d.csv'%fold))
else:
df_val.append(pd.read_csv('../output/'+ seg_name + '/' + 'valid_cls_fold%d_tta%d.csv'%(fold, tta)))
df_val = pd.concat(df_val)
except:
df_val = pd.read_csv('../output/'+ seg_name + '/' + 'valid_5fold_tta%d.csv'%(tta))
df_val = df_val[['Image_Label', 'EncodedPixels']]
#df_val.rename(columns={'s3': 'EncodedPixels'}, inplace=True)
df_test = pd.read_csv('../output/'+ seg_name + '/' + 'test_cls_5fold_tta%d.csv'%tta)
df_val['EncodedPixels'] = '1 1'
df_val['EncodedPixels'].loc[df_val['0'] < ts] = np.nan
df_test['EncodedPixels'] = '1 1'
df_test['EncodedPixels'].loc[df_test['0'] < ts] = np.nan
df_val.rename(columns={'EncodedPixels': name}, inplace=True)
df_test.rename(columns={'EncodedPixels': name}, inplace=True)
return df_val, df_test
def load_classifier(classifier, tta):
try:
df_cls_val = []
df_cls_test = []
for fold in range(5):
if tta <= 1:
df_cls_val.append(pd.read_csv('../output/'+ classifier + '/' + 'valid_cls_fold%d.csv'%fold))
df_cls_test.append(pd.read_csv('../output/'+ classifier + '/' + 'test_cls_fold%d.csv'%fold))
else:
df_cls_val.append(pd.read_csv('../output/'+ classifier + '/' + 'valid_cls_fold%d_tta%d.csv'%(fold, tta)))
df_cls_test.append(pd.read_csv('../output/'+ classifier + '/' + 'test_cls_fold%d_tta%d.csv'%(fold, tta)))
df_cls_val = | pd.concat(df_cls_val) | pandas.concat |
# -*- coding: utf-8 -*-
import os
from typing import IO
import pandas as pd
from PySDDP.dessem.script.templates.cadterm import CadTermTemplate
COMENTARIO = '&'
class CadTerm(CadTermTemplate):
"""
    Class containing all the elements common to any version of the Dessem CadTerm file.
    Its purpose is to provide duck typing for the Dessem class and to add a level of
    specification inside the factory. This class also passes on the responsibility for
    implementing the read and write methods.
"""
def __init__(self):
super().__init__()
self.cadusit = dict()
self.cadunidt = dict()
self.cadconf = dict()
self.cadmin = dict()
        self.cadusit_df = pd.DataFrame()
        self.cadunidt_df = pd.DataFrame()
        self.cadconf_df = pd.DataFrame()
        self.cadmin_df = pd.DataFrame()
self.termo = None
self._comentarios_ = None
def ler(self, file_name: str) -> None:
"""
        Method for reading the thermal power plant registry file.
        User Manual III.2: file containing information on the physical data of the thermal plants (TERM.DAT).
        This file consists of two record types: the first contains information on the commissioning start
        and the number of units of each thermal plant, while the second record type provides the physical
        characteristics of each generating unit of the plants.
        :param file_name: string with the full path to the file
:return:
"""
dir_base = os.path.split(file_name)[0]
        # Lists for CADUSIT records
self.cadusit['mneumo'] = list()
self.cadusit['num_usi'] = list()
self.cadusit['nome'] = list()
self.cadusit['num_subsistema'] = list()
self.cadusit['ano'] = list()
self.cadusit['mes'] = list()
self.cadusit['di'] = list()
self.cadusit['hr'] = list()
self.cadusit['m'] = list()
self.cadusit['num_ger'] = list()
        # Lists for CADUNIDT records
self.cadunidt['mneumo'] = list()
self.cadunidt['num_usi'] = list()
self.cadunidt['ind_ger'] = list()
self.cadunidt['ano'] = list()
self.cadunidt['mes'] = list()
self.cadunidt['di'] = list()
self.cadunidt['hr'] = list()
self.cadunidt['m'] = list()
self.cadunidt['pot'] = list()
self.cadunidt['ger_min'] = list()
self.cadunidt['temp_on'] = list()
self.cadunidt['temp_off'] = list()
self.cadunidt['custo_frio'] = list()
self.cadunidt['custo_desl'] = list()
self.cadunidt['ramp_tom'] = list()
self.cadunidt['ramp_alivio'] = list()
self.cadunidt['flag_rest'] = list()
self.cadunidt['num_oscilacao'] = list()
self.cadunidt['flag_equiv'] = list()
self.cadunidt['ramp_trans'] = list()
        # Lists for CADCONF records
self.cadconf['mneumo'] = list()
self.cadconf['num_usi'] = list()
self.cadconf['ind_equi'] = list()
self.cadconf['ind_ger'] = list()
        # Lists for CADMIN records
self.cadmin['mneumo'] = list()
self.cadmin['num_usi'] = list()
self.cadmin['ind_equi'] = list()
self.cadmin['ind_ger'] = list()
self.termo = list()
self._comentarios_ = list()
# noinspection PyBroadException
try:
with open(file_name, 'r', encoding='latin-1') as f: # type: IO[str]
                # Follow the user manual
continua = True
while continua:
self.next_line(f)
linha = self.linha.strip()
                    # If the line is a comment, do nothing and skip to the next line
if linha[0] == COMENTARIO:
self._comentarios_.append(linha)
self.termo.append(linha)
continue
mneumo = linha[:8].strip().lower()
self.termo.append(linha[:8])
                    # Read the data according to the corresponding mnemonic
if mneumo == 'cadusit':
self.cadusit['mneumo'].append(self.linha[:7])
self.cadusit['num_usi'].append(self.linha[8:11])
self.cadusit['nome'].append(self.linha[12:24])
self.cadusit['num_subsistema'].append(self.linha[25:27])
self.cadusit['ano'].append(self.linha[28:32])
self.cadusit['mes'].append(self.linha[33:35])
self.cadusit['di'].append(self.linha[36:38])
self.cadusit['hr'].append(self.linha[39:41])
self.cadusit['m'].append(self.linha[42:43])
self.cadusit['num_ger'].append(self.linha[45:48])
self.dados['cadusit']['valores'] = self.cadusit
self.cadusit_df = | pd.DataFrame(self.cadusit) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
script used for building forecast model
"""
# =============================================================================
# IMPORTS
# =============================================================================
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import time
from sklearn.preprocessing import StandardScaler, MinMaxScaler#, Normalizer
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import InputLayer, LSTM, GRU, Dense, Dropout, LayerNormalization, Bidirectional #BatchNormalization - NO
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.preprocessing.sequence import TimeseriesGenerator
# =============================================================================
# FUNCTIONS
# =============================================================================
# =============================================================================
# EXECUTE
# =============================================================================
#import data
df = pd.read_csv('Data/weather_data.csv')
#get temp and time
df['datetime'] = pd.to_datetime(df['datetime'], format='%d/%m/%Y')
df = df.set_index('datetime')
temp = df['temp'].iloc[:-7]
test_data = df['temp'].iloc[-7:]
# =============================================================================
#
# #split data (one year for validation, one week for test)
# train = temp.iloc[:-450]
# validation = temp.iloc[-450:-7]
# test = temp.iloc[-7:]
#
# #scale data
# scaler = MinMaxScaler()
# scaler.fit(train.values.reshape(-1,1))
# train_scaled = scaler.transform(train.values.reshape(-1,1))
# validation_scaled = scaler.transform(validation.values.reshape(-1,1))
# test_scaled = scaler.transform(test.values.reshape(-1,1))
#
# #make model
# length = 30
# n_features = 1
#
# model = Sequential()
# model.add(LSTM(units=100, activation='tanh', input_shape=(length, n_features), dropout=0, recurrent_dropout=0))#, stateful=True, batch_input_shape=(1, 30, 1)))
# #model.add(Bidirectional(LSTM(units=100, activation='tanh', input_shape=(length, n_features), dropout=0, recurrent_dropout=0)))
# #model.add(LayerNormalization())
# model.add(Dense(1))
#
# #print(model.summary())
#
# #data generator
# generator = TimeseriesGenerator(data=train_scaled, targets=train_scaled, length=length, batch_size=1)
# val_generator = TimeseriesGenerator(data=validation_scaled, targets=validation_scaled, length=length, batch_size=1)
#
# #callbacks
# early_stop = EarlyStopping(monitor='val_loss', patience=5)
#
# #compile and fit data
# model.compile(optimizer='adam', loss='mse', metrics=['mae'])
# model.fit(generator, validation_data=val_generator, epochs=8, callbacks=[early_stop])
#
# #evaluate
# model.evaluate(val_generator)
#
# =============================================================================
#predict
#model.predict(train_scaled[:7].reshape(-1,7,1))
#model.predict(train_scaled[:11].reshape(-1,11,1))
#model class for training
class BuildModel():
"""
Build a model. Arguments allow one to customise the hyper parameters
ATTRIBUTES :-
length - number of steps in time sequence to feed the rnn
layers_num - number of rnn layers in model (capped at 3)
layers_type - select "LSTM" or "GRU"
units - number of units in rnn layers
num_step_preds - number of steps/days in time to predict
dropout - dropout % to be applied to rnn units
batch_size - number of samples to feed model at a time.
patience - how many epochs to wait before stopping model after finding good score.
model_name - file name of model we save. must end in ".h5" eg 'temp_model.h5'
"""
def __init__(self, model_name, length=10, layers_num=1, layers_type='LSTM',\
units=50, num_step_preds=1, dropout=0.0, epochs=8,\
batch_size=1, patience=5):
#assertions for input
assert 0 < layers_num < 4, "1 <= layers_num <= 3"
assert layers_type in ['LSTM', 'GRU'], "layers_type is LSTM or GRU"
assert 0 <= dropout < 1, "dropout must be float < 1"
assert model_name[-3:] == '.h5', "End model_name with '.h5'"
#initialise
self.length = length
self.layers_num = layers_num
self.layers_type = layers_type
self.units = units
self.num_step_preds = num_step_preds
self.dropout = dropout
self.epochs = epochs
self.batch_size = batch_size
self.model_name = model_name
self.n_features = 1
#callbacks
self.callbacks =[EarlyStopping(monitor='val_loss', patience=patience),\
ModelCheckpoint(self.model_name, monitor='val_loss',\
save_best_only=True)]
#BUILD MODEL
##inputs
self.model = Sequential()
self.model.add(InputLayer(input_shape=(self.length, self.n_features)))
##add extra layers as required (or not if layers_num = 1)
for i in range(layers_num - 1):
self.model.add(eval('{}(units={}, dropout={}, return_sequences=True)'\
.format(self.layers_type, self.units, self.dropout)))
##closing rnn layer (do not return squences)
self.model.add(eval('{}(units={}, dropout={})'\
.format(self.layers_type, self.units, self.dropout)))
##Dense output
self.model.add(Dense(units=self.num_step_preds))
#compile model
self.model.compile(optimizer='adam', loss='mse', metrics=['mae'])
def setupData(self, series, val_days=450):
"""
splits data, scales data, creates generators for the model
"""
assert val_days > self.length , "val_days must exceed length"
#split data into train and validation
self.train = series.iloc[:-val_days]
self.validation = series.iloc[-val_days:]
#scale data for neural network suitability
self.scaler = MinMaxScaler()
self.scaler.fit(self.train.values.reshape(-1,1))
self.train_scaled = \
self.scaler.transform(self.train.values.reshape(-1,1))
self.validation_scaled = \
self.scaler.transform(self.validation.values.reshape(-1,1))
#create time series generators
self.generator = \
TimeseriesGenerator(data=self.train_scaled,\
targets=self.train_scaled,\
length=self.length,\
batch_size=self.batch_size)
self.val_generator = \
TimeseriesGenerator(data=self.validation_scaled,\
targets=self.validation_scaled,\
length=self.length,\
batch_size=self.batch_size)
def fitModel(self):
"""
Fits the model on your generators for training and validation sets.
        The EarlyStopping callback ends training if val_loss doesn't improve.
        Records the epoch metrics in a DataFrame.
"""
self.model.fit(self.generator, validation_data=self.val_generator,\
epochs=self.epochs, callbacks=self.callbacks)
self.history = pd.DataFrame(self.model.history.history)
def loadModel(self):
"""
Load a model instead of fitting a new one (uses model_name)
"""
self.model = tf.keras.models.load_model(self.model_name)
def predAhead(self, days, series=None):
"""
Predicts a number of days ahead set by the user. Input your own
        series, or leave it as None to predict from the end of the validation set.
"""
assert self.num_step_preds == 1,\
"sorry, function not yet available for multi step models"
#use end of the validation set to project forward if no series given
if series == None:
series = self.validation
#get end of the series to plug into the model
assert len(series) >= self.length,\
"series must be at least {} days".format(self.length)
series_cut = series.iloc[-self.length:]
#scale inputs to what model is expecting
series_scaled = \
self.scaler.transform(series_cut.values.reshape(-1,1))
#predict ahead by appending predictions and removing first values
pred_series = series_scaled.reshape(1, self.length, self.n_features)
predictions = []
for i in range(days):
pred = self.model.predict(pred_series)
pred_series = np.append(pred_series[:,1:,:], [pred], axis=1)
predictions.append(pred)
#inverse scale back to original units
predictions = np.array(predictions)
predictions = self.scaler.inverse_transform(\
predictions.reshape(days, self.n_features))\
.round(1)
#convert to pandas series
predictions = pd.Series(predictions.reshape(days))
predictions.index = self.validation.index[-days:] +\
dt.timedelta(days=days)
return predictions
def plotPreds(self, predictions, test_series=None, run_up=None,\
ylabel='units'):
"""
plot the predictions of the model. plot them against another series
        (test series). plot with a run-up leading to the pred period
(validation set).
"""
#set up figure
plt.figure(figsize=(10,6))
plt.ylabel(ylabel)
plt.xlabel('datetime')
#plot lines
if run_up is None:
run_up = self.validation[-7:]
if test_series is not None:
plt.plot(pd.concat([run_up, test_series[:1]]))
plt.plot(test_series)
else:
plt.plot(run_up)
#plot points
plt.scatter(predictions.index, predictions, edgecolors='k',\
label='predictions', c='#2ca02c', s=64)
if test_series is not None:
plt.scatter(test_series.index, test_series, marker='X',\
edgecolors='k', label='test_data', c='#ff7f0e', s=200)
plt.legend()
# =============================================================================
# test = BuildModel(model_name='test.h5', length=1, units=10, epochs=2)
# test.setupData(temp)
# test.fitModel()
#
# #print(test.model.history.history)
# predictions = test.predAhead(7)
# test.plotPreds(predictions, test_data, ylabel='tempC')
#
# =============================================================================
# =============================================================================
# #plotting
# plt.figure(figsize=(10,6))
# plt.ylabel('temp')
# plt.xlabel('datetime')
# plt.plot(pd.concat([temp[-7:], test_data[:1]]))
# plt.plot(test_data)
# plt.scatter(predictions.index, predictions, edgecolors='k', label='predictions', c='#2ca02c', s=64)
# plt.scatter(test_data.index, test_data, marker='X', edgecolors='k', label='test_data',
# c='#ff7f0e', s=200)
# plt.legend()
# =============================================================================
def gridTableGen(length: list, layers_num: list, layers_type: list,\
units: list, dropout: list):
"""returns table of every combo for the hyperparameters"""
#get cross joins to acquire every combination
grid_table = pd.DataFrame(length).merge(\
| pd.DataFrame(layers_num) | pandas.DataFrame |
"""
Author: <NAME>.
Date: 02/11/2020.
File to create a single time split (training/validation/testing).
The dataset is expected to be in a folder following the structure:
data/
cross_validation/ (The folder you're currently in)
dataset/
0/
1/
preprocessing/
You must change the logic to read your dataset in case it follows another structure.
The bottom section of this code expects a list with the absolute paths to the images
and a list with their labels.
"""
import glob
import pandas as pd
from sklearn.model_selection import train_test_split
#! /////////// Change code to read your dataset //////
SPLIT_CHAR = '/' # Change for \\ if you're using Windows
DATASET_FOLDER = '..' + SPLIT_CHAR + 'dataset' + SPLIT_CHAR # Change '..' for an absolute path
IMAGE_EXTENSION = '*.png' # Change for the extension of your images
print('Reading Dataset...')
# Get absolute paths to all images in dataset
images = glob.glob(DATASET_FOLDER + '*' + SPLIT_CHAR + IMAGE_EXTENSION)
# Get labels per image
labels = [int(img.split(SPLIT_CHAR)[-2]) for img in images]
print("Splitting dataset...")
# Split dataset
train_ratio = 0.75
val_ratio = 0.1
test_ratio = 0.15
train_x, test_x, train_y, test_y = train_test_split(\
images, labels,\
train_size=train_ratio,\
stratify=labels)
val_x, test_x, val_y, test_y = train_test_split(\
test_x, test_y,\
test_size=test_ratio/(test_ratio+val_ratio),\
stratify=test_y)
print("Saving datasets...")
# Save the splits on csv files
dataset_df = pd.DataFrame({'ID_IMG':images, 'LABEL': labels})
dataset_df.to_csv('../full_dataset_labels.csv')
train_df = | pd.DataFrame({'ID_IMG':train_x, 'LABEL': train_y}) | pandas.DataFrame |
import numpy as np
import pytest
import pandas as pd
from pandas import (
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
IntervalIndex,
MultiIndex,
Series,
Timestamp,
)
import pandas._testing as tm
class TestDataFrameSortIndex:
def test_sort_index_and_reconstruction_doc_example(self):
# doc example
df = DataFrame(
{"value": [1, 2, 3, 4]},
index=MultiIndex(
levels=[["a", "b"], ["bb", "aa"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
assert df.index.is_lexsorted()
assert not df.index.is_monotonic
# sort it
expected = DataFrame(
{"value": [2, 1, 4, 3]},
index=MultiIndex(
levels=[["a", "b"], ["aa", "bb"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
result = df.sort_index()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
# reconstruct
result = df.sort_index().copy()
result.index = result.index._sort_levels_monotonic()
assert result.index.is_lexsorted()
assert result.index.is_monotonic
tm.assert_frame_equal(result, expected)
def test_sort_index_non_existent_label_multiindex(self):
# GH#12261
df = DataFrame(0, columns=[], index=MultiIndex.from_product([[], []]))
df.loc["b", "2"] = 1
df.loc["a", "3"] = 1
result = df.sort_index().index.is_monotonic
assert result is True
def test_sort_index_reorder_on_ops(self):
# GH#15687
df = DataFrame(
np.random.randn(8, 2),
index=MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["red", "blu"]],
names=["letter", "size", "color"],
),
columns=["near", "far"],
)
df = df.sort_index()
def my_func(group):
group.index = ["newz", "newa"]
return group
result = df.groupby(level=["letter", "size"]).apply(my_func).sort_index()
expected = MultiIndex.from_product(
[["a", "b"], ["big", "small"], ["newa", "newz"]],
names=["letter", "size", None],
)
tm.assert_index_equal(result.index, expected)
def test_sort_index_nan_multiindex(self):
# GH#14784
# incorrect sorting w.r.t. nans
tuples = [[12, 13], [np.nan, np.nan], [np.nan, 3], [1, 2]]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(np.arange(16).reshape(4, 4), index=mi, columns=list("ABCD"))
s = Series(np.arange(4), index=mi)
df2 = DataFrame(
{
"date": pd.DatetimeIndex(
[
"20121002",
"20121007",
"20130130",
"20130202",
"20130305",
"20121002",
"20121207",
"20130130",
"20130202",
"20130305",
"20130202",
"20130305",
]
),
"user_id": [1, 1, 1, 1, 1, 3, 3, 3, 5, 5, 5, 5],
"whole_cost": [
1790,
np.nan,
280,
259,
np.nan,
623,
90,
312,
np.nan,
301,
359,
801,
],
"cost": [12, 15, 10, 24, 39, 1, 0, np.nan, 45, 34, 1, 12],
}
).set_index(["date", "user_id"])
# sorting frame, default nan position is last
result = df.sort_index()
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position last
result = df.sort_index(na_position="last")
expected = df.iloc[[3, 0, 2, 1], :]
tm.assert_frame_equal(result, expected)
# sorting frame, nan position first
result = df.sort_index(na_position="first")
expected = df.iloc[[1, 2, 3, 0], :]
tm.assert_frame_equal(result, expected)
# sorting frame with removed rows
result = df2.dropna().sort_index()
expected = df2.sort_index().dropna()
tm.assert_frame_equal(result, expected)
# sorting series, default nan position is last
result = s.sort_index()
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position last
result = s.sort_index(na_position="last")
expected = s.iloc[[3, 0, 2, 1]]
tm.assert_series_equal(result, expected)
# sorting series, nan position first
result = s.sort_index(na_position="first")
expected = s.iloc[[1, 2, 3, 0]]
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
"""
A data pipeline class that handles all ETL from the Open-Meteo API to our Postgres database
"""
from datetime import datetime
import logging
import pandas as pd
import requests
from sqlalchemy import text
from database.engine import PgEngine
from utils import build_api_url
class Pipeline:
def __init__(self):
self.engine = PgEngine().get_engine()
def get_locations(self) -> pd.DataFrame:
"""
Get the current set of latitude/longitude coordinates
:return: A DataFrame of latitude/longitude coordinates
"""
with self.engine.connect() as conn:
result = conn.execute(text(
"select id, latitude, longitude from location"
))
return pd.DataFrame(result, columns=["id", "latitude", "longitude"])
def check_latest_insert(self, location_id: str):
"""
Check the Postgres database for the time of the most recently added data
:param location_id: The id of the location to check
        :return: the timestamp of the most recent row for this location, or None if there is no data yet
"""
with self.engine.connect() as conn:
result = conn.execute(text(
"select max(time) as time from hourly_weather where location_id = :location"
), location=location_id)
timestamp = result.fetchall()[0][0]
return timestamp
@staticmethod
def fetch_data(lat: str, lng: str) -> pd.DataFrame:
"""
Fetch data from the API
:param lat: Latitude coordinate
:param lng: Longitude coordinate
:return: A DataFrame of weather data
"""
url = build_api_url(lat, lng)
# Fetch data and convert to dataframe
r = requests.get(url)
return pd.DataFrame(r.json()["hourly"])
@staticmethod
def preprocess_data(data: pd.DataFrame, timestamp: datetime) -> pd.DataFrame:
"""
Preprocess the fetched data
:param data: A DataFrame of hourly weather data
        :param timestamp: timestamp of the latest record already stored for this location
        :return: the preprocessed DataFrame
"""
# Convert time column to datetime
data["time"] = | pd.to_datetime(data["time"]) | pandas.to_datetime |
import pytest
import numpy as np
import pandas
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
import matplotlib
import modin.pandas as pd
from modin.pandas.utils import to_pandas
from numpy.testing import assert_array_equal
from .utils import (
random_state,
RAND_LOW,
RAND_HIGH,
df_equals,
df_is_empty,
arg_keys,
name_contains,
test_data_values,
test_data_keys,
test_data_with_duplicates_values,
test_data_with_duplicates_keys,
numeric_dfs,
no_numeric_dfs,
test_func_keys,
test_func_values,
query_func_keys,
query_func_values,
agg_func_keys,
agg_func_values,
numeric_agg_funcs,
quantiles_keys,
quantiles_values,
indices_keys,
indices_values,
axis_keys,
axis_values,
bool_arg_keys,
bool_arg_values,
int_arg_keys,
int_arg_values,
)
# TODO remove once modin-project/modin#469 is resolved
agg_func_keys.remove("str")
agg_func_values.remove(str)
pd.DEFAULT_NPARTITIONS = 4
# Force matplotlib to not use any Xwindows backend.
matplotlib.use("Agg")
class TestDFPartOne:
# Test inter df math functions
def inter_df_math_helper(self, modin_df, pandas_df, op):
# Test dataframe to dataframe
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
# Test dataframe to int
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
# Test dataframe to float
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
# Test transposed dataframes to float
try:
pandas_result = getattr(pandas_df.T, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df.T, op)(4.0)
else:
modin_result = getattr(modin_df.T, op)(4.0)
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
# Test dataframe to different dataframe shape
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
# Test dataframe to list
list_test = random_state.randint(RAND_LOW, RAND_HIGH, size=(modin_df.shape[1]))
try:
pandas_result = getattr(pandas_df, op)(list_test, axis=1)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(list_test, axis=1)
else:
modin_result = getattr(modin_df, op)(list_test, axis=1)
df_equals(modin_result, pandas_result)
# Test dataframe to series
series_test_modin = modin_df[modin_df.columns[0]]
series_test_pandas = pandas_df[pandas_df.columns[0]]
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Test dataframe to series with different index
series_test_modin = modin_df[modin_df.columns[0]].reset_index(drop=True)
series_test_pandas = pandas_df[pandas_df.columns[0]].reset_index(drop=True)
try:
pandas_result = getattr(pandas_df, op)(series_test_pandas, axis=0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(series_test_modin, axis=0)
else:
modin_result = getattr(modin_df, op)(series_test_modin, axis=0)
df_equals(modin_result, pandas_result)
# Level test
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "add")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_div(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "div")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_divide(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "divide")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_floordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "floordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_mul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "mul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_multiply(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "multiply")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_pow(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive
# values
try:
pandas_df = pandas_df.abs()
except Exception:
pass
else:
modin_df = modin_df.abs()
self.inter_df_math_helper(modin_df, pandas_df, "pow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_sub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "sub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_subtract(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "subtract")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_truediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "truediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___div__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__div__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___add__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__add__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___radd__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__radd__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmul__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmul__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___pow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__pow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rpow__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rpow__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___sub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__sub__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___floordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__floordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rfloordiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rfloordiv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___truediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__truediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rtruediv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rtruediv__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___mod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__mod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rmod__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rmod__")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rdiv__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_helper(modin_df, pandas_df, "__rdiv__")
# END test inter df math functions
# Test comparison of inter operation functions
def comparison_inter_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(pandas_df)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df)
else:
modin_result = getattr(modin_df, op)(modin_df)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except TypeError:
with pytest.raises(TypeError):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)("a")
except TypeError:
with pytest.raises(TypeError):
repr(getattr(modin_df, op)("a"))
else:
modin_result = getattr(modin_df, op)("a")
df_equals(modin_result, pandas_result)
frame_data = {
"{}_other".format(modin_df.columns[0]): [0, 2],
modin_df.columns[0]: [0, 19],
modin_df.columns[1]: [1, 1],
}
modin_df2 = pd.DataFrame(frame_data)
pandas_df2 = pandas.DataFrame(frame_data)
try:
pandas_result = getattr(pandas_df, op)(pandas_df2)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(modin_df2)
else:
modin_result = getattr(modin_df, op)(modin_df2)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_eq(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "eq")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ge(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ge")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_gt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "gt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_le(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "le")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_lt(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "lt")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ne(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.comparison_inter_ops_helper(modin_df, pandas_df, "ne")
# END test comparison of inter operation functions
# Test dataframe right operations
def inter_df_math_right_ops_helper(self, modin_df, pandas_df, op):
try:
pandas_result = getattr(pandas_df, op)(4)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4)
else:
modin_result = getattr(modin_df, op)(4)
df_equals(modin_result, pandas_result)
try:
pandas_result = getattr(pandas_df, op)(4.0)
except Exception as e:
with pytest.raises(type(e)):
getattr(modin_df, op)(4.0)
else:
modin_result = getattr(modin_df, op)(4.0)
df_equals(modin_result, pandas_result)
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in modin_df.index]
)
modin_df_multi_level = modin_df.copy()
modin_df_multi_level.index = new_idx
# Defaults to pandas
with pytest.warns(UserWarning):
# Operation against self for sanity check
getattr(modin_df_multi_level, op)(modin_df_multi_level, axis=0, level=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_radd(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "radd")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rdiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rdiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rfloordiv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rfloordiv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmod(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmod")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rmul(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rmul")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rpow(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# TODO: Revert to others once we have an efficient way of preprocessing for positive values
# We need to check that negative integers are not used efficiently
if "100x100" not in request.node.name:
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rpow")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rsub(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rsub")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_rtruediv(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "rtruediv")
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test___rsub__(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
self.inter_df_math_right_ops_helper(modin_df, pandas_df, "__rsub__")
# END test dataframe right operations
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_abs(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.abs()
except Exception as e:
with pytest.raises(type(e)):
modin_df.abs()
else:
modin_result = modin_df.abs()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_prefix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_prefix = "TEST"
new_modin_df = modin_df.add_prefix(test_prefix)
new_pandas_df = pandas_df.add_prefix(test_prefix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(ValueError):
x = 2
modin_df.applymap(x)
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("testfunc", test_func_values, ids=test_func_keys)
def test_applymap_numeric(self, request, data, testfunc):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.applymap(testfunc)
except Exception as e:
with pytest.raises(type(e)):
modin_df.applymap(testfunc)
else:
modin_result = modin_df.applymap(testfunc)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_add_suffix(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
test_suffix = "TEST"
new_modin_df = modin_df.add_suffix(test_suffix)
new_pandas_df = pandas_df.add_suffix(test_suffix)
df_equals(new_modin_df.columns, new_pandas_df.columns)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_at(self, request, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# We skip nan datasets because nan != nan
if "nan" not in request.node.name:
key1 = modin_df.columns[0]
# Scalar
assert modin_df.at[0, key1] == pandas_df.at[0, key1]
# Series
df_equals(modin_df.loc[0].at[key1], pandas_df.loc[0].at[key1])
# Write Item
modin_df_copy = modin_df.copy()
pandas_df_copy = pandas_df.copy()
modin_df_copy.at[1, key1] = modin_df.at[0, key1]
pandas_df_copy.at[1, key1] = pandas_df.at[0, key1]
df_equals(modin_df_copy, pandas_df_copy)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_axes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
for modin_axis, pd_axis in zip(modin_df.axes, pandas_df.axes):
assert np.array_equal(modin_axis, pd_axis)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_copy(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
# pandas_df is unused, but it is kept so we don't need confusing list comprehension
# logic in the pytest.mark.parametrize
new_modin_df = modin_df.copy()
assert new_modin_df is not modin_df
assert np.array_equal(
new_modin_df._query_compiler._modin_frame._partitions,
modin_df._query_compiler._modin_frame._partitions,
)
assert new_modin_df is not modin_df
df_equals(new_modin_df, modin_df)
# Shallow copy tests
modin_df = pd.DataFrame(data)
modin_df_cp = modin_df.copy(False)
modin_df[modin_df.columns[0]] = 0
df_equals(modin_df, modin_df_cp)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_dtypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.dtypes, pandas_df.dtypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_ftypes(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.ftypes, pandas_df.ftypes)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("key", indices_values, ids=indices_keys)
def test_get(self, data, key):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get(key), pandas_df.get(key))
df_equals(
modin_df.get(key, default="default"), pandas_df.get(key, default="default")
)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_dtype_counts(self, data):
modin_result = pd.DataFrame(data).get_dtype_counts().sort_index()
pandas_result = pandas.DataFrame(data).get_dtype_counts().sort_index()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize(
"dummy_na", bool_arg_values, ids=arg_keys("dummy_na", bool_arg_keys)
)
@pytest.mark.parametrize(
"drop_first", bool_arg_values, ids=arg_keys("drop_first", bool_arg_keys)
)
def test_get_dummies(self, request, data, dummy_na, drop_first):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas.get_dummies(
pandas_df, dummy_na=dummy_na, drop_first=drop_first
)
except Exception as e:
with pytest.raises(type(e)):
pd.get_dummies(modin_df, dummy_na=dummy_na, drop_first=drop_first)
else:
modin_result = pd.get_dummies(
modin_df, dummy_na=dummy_na, drop_first=drop_first
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_get_ftype_counts(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.get_ftype_counts(), pandas_df.get_ftype_counts())
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg(self, data, axis, func):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_agg_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.aggregate(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.aggregate(func, axis)
else:
modin_result = modin_df.aggregate(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_aggregate_numeric(self, request, data, axis, func):
if name_contains(request.node.name, numeric_agg_funcs) and name_contains(
request.node.name, numeric_dfs
):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.agg(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.agg(func, axis)
else:
modin_result = modin_df.agg(func, axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_aggregate_error_checking(self, data):
modin_df = pd.DataFrame(data)
assert modin_df.aggregate("ndim") == 2
with pytest.warns(UserWarning):
modin_df.aggregate(
{modin_df.columns[0]: "sum", modin_df.columns[1]: "mean"}
)
with pytest.warns(UserWarning):
modin_df.aggregate("cumproduct")
with pytest.raises(ValueError):
modin_df.aggregate("NOT_EXISTS")
def test_align(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).align(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_all(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.all(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.all(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# Test when axis is None. This will get repeated, but it is easier than using a list in the parametrize decorator
try:
pandas_result = pandas_df.T.all(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.all(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.all(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
@pytest.mark.parametrize(
"bool_only", bool_arg_values, ids=arg_keys("bool_only", bool_arg_keys)
)
def test_any(self, data, axis, skipna, bool_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.any(axis=None, skipna=skipna, bool_only=bool_only)
except Exception as e:
with pytest.raises(type(e)):
modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=axis, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=axis, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.any(
axis=None, skipna=skipna, bool_only=bool_only
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
else:
modin_result = modin_df.T.any(axis=None, skipna=skipna, bool_only=bool_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
for level in list(range(levels)) + (axis_names if axis_names else []):
try:
pandas_multi_level_result = pandas_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
else:
modin_multi_level_result = modin_df_multi_level.any(
axis=axis, bool_only=bool_only, level=level, skipna=skipna
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_append(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
data_to_append = {"append_a": 2, "append_b": 1000}
ignore_idx_values = [True, False]
for ignore in ignore_idx_values:
try:
pandas_result = pandas_df.append(data_to_append, ignore_index=ignore)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(data_to_append, ignore_index=ignore)
else:
modin_result = modin_df.append(data_to_append, ignore_index=ignore)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(pandas_df.iloc[-1])
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df.iloc[-1])
else:
modin_result = modin_df.append(modin_df.iloc[-1])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(list(pandas_df.iloc[-1]))
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(list(modin_df.iloc[-1]))
else:
modin_result = modin_df.append(list(modin_df.iloc[-1]))
df_equals(modin_result, pandas_result)
verify_integrity_values = [True, False]
for verify_integrity in verify_integrity_values:
try:
pandas_result = pandas_df.append(
[pandas_df, pandas_df], verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
else:
modin_result = modin_df.append(
[modin_df, modin_df], verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.append(
pandas_df, verify_integrity=verify_integrity
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.append(modin_df, verify_integrity=verify_integrity)
else:
modin_result = modin_df.append(
modin_df, verify_integrity=verify_integrity
)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
def test_apply(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.raises(TypeError):
modin_df.apply({"row": func}, axis=1)
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
def test_apply_metadata(self):
def add(a, b, c):
return a + b + c
data = {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}
modin_df = pd.DataFrame(data)
modin_df["add"] = modin_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
pandas_df = pandas.DataFrame(data)
pandas_df["add"] = pandas_df.apply(
lambda row: add(row["A"], row["B"], row["C"]), axis=1
)
df_equals(modin_df, pandas_df)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("func", agg_func_values, ids=agg_func_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_apply_numeric(self, request, data, func, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
try:
pandas_result = pandas_df.apply(func, axis)
except Exception as e:
with pytest.raises(type(e)):
modin_df.apply(func, axis)
else:
modin_result = modin_df.apply(func, axis)
df_equals(modin_result, pandas_result)
if "empty_data" not in request.node.name:
key = modin_df.columns[0]
modin_result = modin_df.apply(lambda df: df.drop(key), axis=1)
pandas_result = pandas_df.apply(lambda df: df.drop(key), axis=1)
df_equals(modin_result, pandas_result)
def test_as_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).as_blocks()
def test_as_matrix(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
mat = frame.as_matrix()
frame_columns = frame.columns
for i, row in enumerate(mat):
for j, value in enumerate(row):
col = frame_columns[j]
if np.isnan(value):
assert np.isnan(frame[col][i])
else:
assert value == frame[col][i]
# mixed type
mat = pd.DataFrame(test_data.mixed_frame).as_matrix(["foo", "A"])
assert mat[0, 0] == "bar"
df = pd.DataFrame({"real": [1, 2, 3], "complex": [1j, 2j, 3j]})
mat = df.as_matrix()
assert mat[0, 1] == 1j
# single block corner case
mat = pd.DataFrame(test_data.frame).as_matrix(["A", "B"])
expected = test_data.frame.reindex(columns=["A", "B"]).values
tm.assert_almost_equal(mat, expected)
def test_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
assert_array_equal(frame.values, test_data.frame.values)
def test_partition_to_numpy(self):
test_data = TestData()
frame = pd.DataFrame(test_data.frame)
for (
partition
) in frame._query_compiler._modin_frame._partitions.flatten().tolist():
assert_array_equal(partition.to_pandas().values, partition.to_numpy())
def test_asfreq(self):
index = pd.date_range("1/1/2000", periods=4, freq="T")
series = pd.Series([0.0, None, 2.0, 3.0], index=index)
df = pd.DataFrame({"s": series})
with pytest.warns(UserWarning):
# We are only testing that this defaults to pandas, so we will just check for
# the warning
df.asfreq(freq="30S")
def test_asof(self):
df = pd.DataFrame(
{"a": [10, 20, 30, 40, 50], "b": [None, None, None, None, 500]},
index=pd.DatetimeIndex(
[
"2018-02-27 09:01:00",
"2018-02-27 09:02:00",
"2018-02-27 09:03:00",
"2018-02-27 09:04:00",
"2018-02-27 09:05:00",
]
),
)
with pytest.warns(UserWarning):
df.asof(pd.DatetimeIndex(["2018-02-27 09:03:30", "2018-02-27 09:04:30"]))
def test_assign(self):
data = test_data_values[0]
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
with pytest.warns(UserWarning):
modin_result = modin_df.assign(new_column=pd.Series(modin_df.iloc[:, 0]))
pandas_result = pandas_df.assign(new_column=pd.Series(pandas_df.iloc[:, 0]))
df_equals(modin_result, pandas_result)
def test_astype(self):
td = TestData()
modin_df = pd.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
expected_df = pandas.DataFrame(
td.frame.values, index=td.frame.index, columns=td.frame.columns
)
modin_df_casted = modin_df.astype(np.int32)
expected_df_casted = expected_df.astype(np.int32)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(np.float64)
expected_df_casted = expected_df.astype(np.float64)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype(str)
expected_df_casted = expected_df.astype(str)
df_equals(modin_df_casted, expected_df_casted)
modin_df_casted = modin_df.astype("category")
expected_df_casted = expected_df.astype("category")
df_equals(modin_df_casted, expected_df_casted)
dtype_dict = {"A": np.int32, "B": np.int64, "C": str}
modin_df_casted = modin_df.astype(dtype_dict)
expected_df_casted = expected_df.astype(dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
# Ignore lint because this is testing bad input
bad_dtype_dict = {"B": np.int32, "B": np.int64, "B": str} # noqa F601
modin_df_casted = modin_df.astype(bad_dtype_dict)
expected_df_casted = expected_df.astype(bad_dtype_dict)
df_equals(modin_df_casted, expected_df_casted)
with pytest.raises(KeyError):
modin_df.astype({"not_exists": np.uint8})
def test_astype_category(self):
modin_df = pd.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
pandas_df = pandas.DataFrame(
{"col1": ["A", "A", "B", "B", "A"], "col2": [1, 2, 3, 4, 5]}
)
modin_result = modin_df.astype({"col1": "category"})
pandas_result = pandas_df.astype({"col1": "category"})
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
modin_result = modin_df.astype("category")
pandas_result = pandas_df.astype("category")
df_equals(modin_result, pandas_result)
assert modin_result.dtypes.equals(pandas_result.dtypes)
def test_at_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.at_time("12:00")
def test_between_time(self):
i = pd.date_range("2018-04-09", periods=4, freq="12H")
ts = pd.DataFrame({"A": [1, 2, 3, 4]}, index=i)
with pytest.warns(UserWarning):
ts.between_time("0:15", "0:45")
def test_bfill(self):
test_data = TestData()
test_data.tsframe["A"][:5] = np.nan
test_data.tsframe["A"][-5:] = np.nan
modin_df = pd.DataFrame(test_data.tsframe)
df_equals(modin_df.bfill(), test_data.tsframe.bfill())
def test_blocks(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).blocks
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_bool(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
with pytest.raises(ValueError):
modin_df.bool()
modin_df.__bool__()
single_bool_pandas_df = pandas.DataFrame([True])
single_bool_modin_df = pd.DataFrame([True])
assert single_bool_pandas_df.bool() == single_bool_modin_df.bool()
with pytest.raises(ValueError):
# __bool__ always raises this error for DataFrames
single_bool_modin_df.__bool__()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_boxplot(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data) # noqa F841
assert modin_df.boxplot() == to_pandas(modin_df).boxplot()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower, upper = np.sort(random_state.random_integers(RAND_LOW, RAND_HIGH, 2))
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test only upper scalar bound
modin_result = modin_df.clip(None, upper, axis=axis)
pandas_result = pandas_df.clip(None, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper scalar bound
modin_result = modin_df.clip(lower, upper, axis=axis)
pandas_result = pandas_df.clip(lower, upper, axis=axis)
df_equals(modin_result, pandas_result)
# test lower and upper list bound on each column
modin_result = modin_df.clip(lower_list, upper_list, axis=axis)
pandas_result = pandas_df.clip(lower_list, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
# test only upper list bound on each column
modin_result = modin_df.clip(np.nan, upper_list, axis=axis)
pandas_result = pandas_df.clip(np.nan, upper_list, axis=axis)
df_equals(modin_result, pandas_result)
with pytest.raises(ValueError):
modin_df.clip(lower=[1, 2, 3], axis=None)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_lower(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
lower = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
lower_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test lower scalar bound
pandas_result = pandas_df.clip_lower(lower, axis=axis)
modin_result = modin_df.clip_lower(lower, axis=axis)
df_equals(modin_result, pandas_result)
# test lower list bound on each column
pandas_result = pandas_df.clip_lower(lower_list, axis=axis)
modin_result = modin_df.clip_lower(lower_list, axis=axis)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
def test_clip_upper(self, request, data, axis):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
if name_contains(request.node.name, numeric_dfs):
ind_len = (
len(modin_df.index)
if not pandas.DataFrame()._get_axis_number(axis)
else len(modin_df.columns)
)
# set bounds
upper = random_state.random_integers(RAND_LOW, RAND_HIGH, 1)[0]
upper_list = random_state.random_integers(RAND_LOW, RAND_HIGH, ind_len)
# test upper scalar bound
modin_result = modin_df.clip_upper(upper, axis=axis)
pandas_result = pandas_df.clip_upper(upper, axis=axis)
df_equals(modin_result, pandas_result)
# test upper list bound on each column
modin_result = modin_df.clip_upper(upper_list, axis=axis)
pandas_result = pandas_df.clip_upper(upper_list, axis=axis)
df_equals(modin_result, pandas_result)
def test_combine(self):
df1 = pd.DataFrame({"A": [0, 0], "B": [4, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine(df2, lambda s1, s2: s1 if s1.sum() < s2.sum() else s2)
def test_combine_first(self):
df1 = pd.DataFrame({"A": [None, 0], "B": [None, 4]})
df2 = pd.DataFrame({"A": [1, 1], "B": [3, 3]})
with pytest.warns(UserWarning):
df1.combine_first(df2)
def test_compound(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).compound()
def test_corr(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corr()
def test_corrwith(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).corrwith(pd.DataFrame(data))
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
)
def test_count(self, request, data, axis, numeric_only):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.count(axis=axis, numeric_only=numeric_only)
pandas_result = pandas_df.T.count(axis=axis, numeric_only=numeric_only)
df_equals(modin_result, pandas_result)
# test level
modin_df_multi_level = modin_df.copy()
pandas_df_multi_level = pandas_df.copy()
axis = modin_df._get_axis_number(axis) if axis is not None else 0
levels = 3
axis_names_list = [["a", "b", "c"], None]
for axis_names in axis_names_list:
if axis == 0:
new_idx = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.index))],
names=axis_names,
)
modin_df_multi_level.index = new_idx
pandas_df_multi_level.index = new_idx
try: # test error
pandas_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=1, numeric_only=numeric_only, level=0
)
else:
new_col = pandas.MultiIndex.from_tuples(
[(i // 4, i // 2, i) for i in range(len(modin_df.columns))],
names=axis_names,
)
modin_df_multi_level.columns = new_col
pandas_df_multi_level.columns = new_col
try: # test error
pandas_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
except Exception as e:
with pytest.raises(type(e)):
modin_df_multi_level.count(
axis=0, numeric_only=numeric_only, level=0
)
for level in list(range(levels)) + (axis_names if axis_names else []):
modin_multi_level_result = modin_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
pandas_multi_level_result = pandas_df_multi_level.count(
axis=axis, numeric_only=numeric_only, level=level
)
df_equals(modin_multi_level_result, pandas_multi_level_result)
def test_cov(self):
data = test_data_values[0]
with pytest.warns(UserWarning):
pd.DataFrame(data).cov()
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummax(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummax(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummax(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummax(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cummin(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cummin(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cummin(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cummin(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumprod(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.cumprod(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumprod(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumprod(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"skipna", bool_arg_values, ids=arg_keys("skipna", bool_arg_keys)
)
def test_cumsum(self, request, data, axis, skipna):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
# pandas exhibits weird behavior for this case
# Remove this case when we can pull the error messages from backend
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
if name_contains(request.node.name, ["datetime_timedelta_data"]) and (
axis == 0 or axis == "rows"
):
with pytest.raises(TypeError):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
try:
pandas_result = pandas_df.T.cumsum(axis=axis, skipna=skipna)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.cumsum(axis=axis, skipna=skipna)
else:
modin_result = modin_df.T.cumsum(axis=axis, skipna=skipna)
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_describe(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
df_equals(modin_df.describe(), pandas_df.describe())
percentiles = [0.10, 0.11, 0.44, 0.78, 0.99]
df_equals(
modin_df.describe(percentiles=percentiles),
pandas_df.describe(percentiles=percentiles),
)
try:
pandas_result = pandas_df.describe(exclude=[np.float64])
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=[np.float64])
else:
modin_result = modin_df.describe(exclude=[np.float64])
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(exclude=np.float64)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(exclude=np.float64)
else:
modin_result = modin_df.describe(exclude=np.float64)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
except Exception as e:
with pytest.raises(type(e)):
modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
else:
modin_result = modin_df.describe(
include=[np.timedelta64, np.datetime64, np.object, np.bool]
)
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=str(modin_df.dtypes.values[0]))
pandas_result = pandas_df.describe(include=str(pandas_df.dtypes.values[0]))
df_equals(modin_result, pandas_result)
modin_result = modin_df.describe(include=[np.number])
pandas_result = pandas_df.describe(include=[np.number])
df_equals(modin_result, pandas_result)
df_equals(modin_df.describe(include="all"), pandas_df.describe(include="all"))
modin_df = pd.DataFrame(data).applymap(str)
pandas_df = pandas.DataFrame(data).applymap(str)
try:
df_equals(modin_df.describe(), pandas_df.describe())
except AssertionError:
# We have to do this because we choose the highest count slightly differently
# than pandas, and there is no true guarantee which one will come first.
# If they don't match, make sure that the `freq` is the same at least.
df_equals(
modin_df.describe().loc[["count", "unique", "freq"]],
pandas_df.describe().loc[["count", "unique", "freq"]],
)
def test_describe_dtypes(self):
modin_df = pd.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
pandas_df = pandas.DataFrame(
{
"col1": list("abc"),
"col2": list("abc"),
"col3": list("abc"),
"col4": [1, 2, 3],
}
)
modin_result = modin_df.describe()
pandas_result = pandas_df.describe()
df_equals(modin_result, pandas_result)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
@pytest.mark.parametrize("axis", axis_values, ids=axis_keys)
@pytest.mark.parametrize(
"periods", int_arg_values, ids=arg_keys("periods", int_arg_keys)
)
def test_diff(self, request, data, axis, periods):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
try:
pandas_result = pandas_df.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
try:
pandas_result = pandas_df.T.diff(axis=axis, periods=periods)
except Exception as e:
with pytest.raises(type(e)):
modin_df.T.diff(axis=axis, periods=periods)
else:
modin_result = modin_df.T.diff(axis=axis, periods=periods)
df_equals(modin_result, pandas_result)
def test_drop(self):
frame_data = {"A": [1, 2, 3, 4], "B": [0, 1, 2, 3]}
simple = pandas.DataFrame(frame_data)
modin_simple = pd.DataFrame(frame_data)
df_equals(modin_simple.drop("A", axis=1), simple[["B"]])
df_equals(modin_simple.drop(["A", "B"], axis="columns"), simple[[]])
df_equals(modin_simple.drop([0, 1, 3], axis=0), simple.loc[[2], :])
df_equals(modin_simple.drop([0, 3], axis="index"), simple.loc[[1, 2], :])
pytest.raises(ValueError, modin_simple.drop, 5)
pytest.raises(ValueError, modin_simple.drop, "C", 1)
pytest.raises(ValueError, modin_simple.drop, [1, 5])
pytest.raises(ValueError, modin_simple.drop, ["A", "C"], 1)
# errors = 'ignore'
df_equals(modin_simple.drop(5, errors="ignore"), simple)
df_equals(modin_simple.drop([0, 5], errors="ignore"), simple.loc[[1, 2, 3], :])
df_equals(modin_simple.drop("C", axis=1, errors="ignore"), simple)
df_equals(modin_simple.drop(["A", "C"], axis=1, errors="ignore"), simple[["B"]])
# non-unique
nu_df = pandas.DataFrame(
zip(range(3), range(-3, 1), list("abc")), columns=["a", "a", "b"]
)
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("a", axis=1), nu_df[["b"]])
df_equals(modin_nu_df.drop("b", axis="columns"), nu_df["a"])
df_equals(modin_nu_df.drop([]), nu_df)
nu_df = nu_df.set_index(pandas.Index(["X", "Y", "X"]))
nu_df.columns = list("abc")
modin_nu_df = pd.DataFrame(nu_df)
df_equals(modin_nu_df.drop("X", axis="rows"), nu_df.loc[["Y"], :])
df_equals(modin_nu_df.drop(["X", "Y"], axis=0), nu_df.loc[[], :])
# inplace cache issue
frame_data = random_state.randn(10, 3)
df = pandas.DataFrame(frame_data, columns=list("abc"))
modin_df = pd.DataFrame(frame_data, columns=list("abc"))
expected = df[~(df.b > 0)]
modin_df.drop(labels=df[df.b > 0].index, inplace=True)
df_equals(modin_df, expected)
midx = pd.MultiIndex(
levels=[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
df = pd.DataFrame(
index=midx,
columns=["big", "small"],
data=[
[45, 30],
[200, 100],
[1.5, 1],
[30, 20],
[250, 150],
[1.5, 0.8],
[320, 250],
[1, 0.8],
[0.3, 0.2],
],
)
with pytest.warns(UserWarning):
df.drop(index="length", level=1)
def test_drop_api_equivalence(self):
# equivalence of the labels/axis and index/columns API's
frame_data = [[1, 2, 3], [3, 4, 5], [5, 6, 7]]
modin_df = pd.DataFrame(
frame_data, index=["a", "b", "c"], columns=["d", "e", "f"]
)
modin_df1 = modin_df.drop("a")
modin_df2 = modin_df.drop(index="a")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop("d", 1)
modin_df2 = modin_df.drop(columns="d")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(labels="e", axis=1)
modin_df2 = modin_df.drop(columns="e")
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0)
modin_df2 = modin_df.drop(index=["a"])
df_equals(modin_df1, modin_df2)
modin_df1 = modin_df.drop(["a"], axis=0).drop(["d"], axis=1)
modin_df2 = modin_df.drop(index=["a"], columns=["d"])
df_equals(modin_df1, modin_df2)
with pytest.raises(ValueError):
modin_df.drop(labels="a", index="b")
with pytest.raises(ValueError):
modin_df.drop(labels="a", columns="b")
with pytest.raises(ValueError):
modin_df.drop(axis=1)
@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
def test_drop_transpose(self, data):
modin_df = pd.DataFrame(data)
pandas_df = pandas.DataFrame(data)
modin_result = modin_df.T.drop(columns=[0, 1, 2])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(index=["col3", "col1"])
pandas_result = pandas_df.T.drop(index=["col3", "col1"])
df_equals(modin_result, pandas_result)
modin_result = modin_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
pandas_result = pandas_df.T.drop(columns=[0, 1, 2], index=["col3", "col1"])
df_equals(modin_result, pandas_result)
def test_droplevel(self):
df = (
pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
.set_index([0, 1])
.rename_axis(["a", "b"])
)
df.columns = pd.MultiIndex.from_tuples(
[("c", "e"), ("d", "f")], names=["level_1", "level_2"]
)
with pytest.warns(UserWarning):
df.droplevel("a")
with pytest.warns(UserWarning):
df.droplevel("level_2", axis=1)
@pytest.mark.parametrize(
"data", test_data_with_duplicates_values, ids=test_data_with_duplicates_keys
)
@pytest.mark.parametrize(
"keep", ["last", "first", False], ids=["last", "first", "False"]
)
@pytest.mark.parametrize(
"subset", [None, ["col1", "col3", "col7"]], ids=["None", "subset"]
)
def test_drop_duplicates(self, data, keep, subset):
modin_df = pd.DataFrame(data)
pandas_df = | pandas.DataFrame(data) | pandas.DataFrame |
from __future__ import print_function
import os, sys
import time
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from umap import UMAP
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
from gfa_parser import gfa_to_G, get_one_type_gfa, one_type_gfa_to_df
from spaligner_parser import spaligner_to_df_not_ss
# Coloring using the db (ground truth)
# Transcript names define the cluster (i.e. color) of a node and all of its personas
# Here we don't know how transcripts correspond to personas, so we can't identify persona colors,
# because the input graph is the regular (non-persona) graph
def db_coloring(spaligner_ground_truth_tsv, G):
tsv_df = spaligner_to_df_not_ss(spaligner_ground_truth_tsv, G)
# Split path column into multiple rows
new_df = pd.DataFrame(tsv_df['path of the alignment'].str.replace(';', ',').str.split(',').tolist(),
index=tsv_df['sequence name']).stack()
new_df = new_df.reset_index([0, 'sequence name'])
new_df.columns = ['ground_truth', 'initial_node']
# Generate set of sequence names for each node with orientation
db_colors = new_df.groupby('initial_node')['ground_truth'].apply(set).apply(' '.join)
return db_colors
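# Illustrative sketch (hypothetical node and transcript names) of what db_coloring returns:
# a Series indexed by the node id as it appears in the alignment path, whose values are the
# space-joined names of the transcripts aligned to that node, e.g.
#   '123+'  ->  'transcript_1 transcript_7'
#   '123-'  ->  'transcript_2'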
def persona_coloring(persona_clustering_tsv):
    # Coloring using persona graph clustering
    # Now we know colors for personas separately (not for initial graph nodes),
    # but it isn't ground truth since it depends on clustering quality
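    # Assumed input format (inferred from the parsing below): one cluster per line,
    # with comma-separated persona ids, e.g. "0,3,17"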
persona_colors = pd.Series()
with open(persona_clustering_tsv, 'r') as fin:
num_cluster = 0
for line in fin:
personas = line.strip().split(',')
curr = pd.Series([num_cluster] * len(personas), index=personas)
persona_colors = persona_colors.append(curr, verify_integrity=True)
num_cluster += 1
return persona_colors
# Coloring using the SPAdes GFA
# Transcript (path) names define the cluster (i.e. color) of a node and all of its personas
def spades_coloring(gfa, outdir):
p_gfa = get_one_type_gfa(gfa, 'P', outdir)
p_gfa_df = one_type_gfa_to_df(p_gfa)
os.remove(p_gfa)
colors = pd.DataFrame(p_gfa_df.SegmentNames.str.split(',').tolist(), index=p_gfa_df.PathName).stack()
colors = colors.reset_index()[[0, 'PathName']]
colors.columns = ['SegmentNames', 'PathName']
# To distinguish forward and reverse complement transcript colors
colors['PathName'] = colors['PathName'].apply(lambda p: "{}+".format(p))
    # Colors for reverse complement nodes,
    # since paths (and links) in the GFA include only one of them (forward or rc)
rc_colors = colors.applymap(lambda s: s.translate(str.maketrans({'+': '-', '-': '+'})))
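    # e.g. the character map above swaps orientations: 'edge1+' -> 'edge1-' and 'path_3-' -> 'path_3+'
    # (segment/path names here are hypothetical)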
spades_colors = pd.concat([colors, rc_colors], axis=0).set_index('SegmentNames')
spades_colors = spades_colors.groupby('SegmentNames')['PathName'].apply(set).apply(' '.join)
return spades_colors
def do_PCA(X):
pca = PCA(n_components=3)
pca_result = pca.fit_transform(X.values)
pca_df = pd.DataFrame({'pca_1': pca_result[:, 0],
'pca_2': pca_result[:, 1],
'pca_3': pca_result[:, 2]},
index=X.index)
print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
return pca_df
# PCA
def plot_pca_2d(df, color_col, outdir):
plt.figure(figsize=(16, 10))
pca_plt = sns.scatterplot(
x="pca_1", y="pca_2",
hue=color_col,
palette=sns.color_palette("hls", df[color_col].nunique()),
data=df,
legend=None,
# alpha=0.3
)
pca_plt.figure.savefig(os.path.join(outdir, "pca_2d.{}.png".format(color_col)))
def plot_pca_3d(df, color_col, outdir):
ax = plt.figure(figsize=(16, 10)).gca(projection='3d')
ax.scatter(
xs=df["pca_1"],
ys=df["pca_2"],
zs=df["pca_3"],
c=df[color_col],
cmap='tab10'
)
ax.set_xlabel('pca-one')
ax.set_ylabel('pca-two')
ax.set_zlabel('pca-three')
plt.savefig(os.path.join(outdir, "pca_3d.{}.png".format(color_col)))
def get_subset(X, df, N=10000):
X_subset = X.sample(min(df.shape[0], N))
df_subset = df.loc[X_subset.index, :]
return X_subset, df_subset
# Since t-SNE scales quadratically in the number of objects N,
# its applicability is limited to data sets with only a few thousand input objects.
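# Hence the embedding is subsampled first (get_subset above caps it at ~10,000 rows) before t-SNE is run.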
def do_t_SNE(X):
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=40, n_iter=300)
tsne_result = tsne.fit_transform(X.values)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time() - time_start))
tsne_df = pd.DataFrame({'tsne_1': tsne_result[:, 0],
'tsne_2': tsne_result[:, 1]},
index=X.index)
return tsne_df
def plot_t_SNE(df, color_col, outdir):
plt.figure(figsize=(16, 10))
t_SNE_plt = sns.scatterplot(
x="tsne_1", y="tsne_2",
hue=color_col,
palette=sns.color_palette("hls", df[color_col].nunique()),
data=df,
legend=None,
# alpha=0.3
)
t_SNE_plt.figure.savefig(os.path.join(outdir, "t-SNE.{}.png".format(color_col)))
def do_umap(X, n_neighbors):
time_start = time.time()
umap = UMAP(n_neighbors=n_neighbors, verbose=True)
umap_result = umap.fit_transform(X.values)
print('UMAP done! Time elapsed: {} seconds'.format(time.time() - time_start))
umap_df = pd.DataFrame({'umap_{}_1'.format(n_neighbors): umap_result[:, 0],
'umap_{}_2'.format(n_neighbors): umap_result[:, 1]},
index=X.index)
return umap_df
def plot_umap(df, color_col, n_neighbors, outdir):
plt.figure(figsize=(16, 10))
umap_plt = sns.scatterplot(
x="umap_{}_1".format(n_neighbors),
y="umap_{}_2".format(n_neighbors),
hue=color_col,
palette=sns.color_palette("hls", df[color_col].nunique()),
data=df,
legend=None,
# alpha=0.3
)
plt.title('n_neighbors = {}'.format(n_neighbors))
umap_plt.figure.savefig(os.path.join(outdir, "umap.{}.{}.png".format(color_col, n_neighbors)))
# persona_embedding.tsv persona_graph_mapping.tsv node_to_db.tsv persona_clustering.tsv outdir
def visualize_embedding(embedding_df, persona_to_node_tsv, spaligner_ground_truth_tsv, p_clustering_tsv, gfa, G, outdir):
persona_to_node = pd.read_csv(persona_to_node_tsv, sep=' ',
header=None, index_col=0,
names=['initial_node'])
df = pd.concat([embedding_df, persona_to_node], axis=1)
# Coloring using db
node_colors = db_coloring(spaligner_ground_truth_tsv, G)
df = df.join(node_colors, on='initial_node')
    # Colorize nodes without paths in red
df['ground_truth'] = df['ground_truth'].fillna('0')
    # Coloring using SPAdes paths
spades_colors = spades_coloring(gfa, outdir)
df = df.join(spades_colors, on='initial_node').fillna('0')
# Coloring using persona graph clustering
persona_colors = persona_coloring(p_clustering_tsv)
df = pd.concat([df, persona_colors.to_frame(name='persona_color')], axis=1)
sns.pairplot(df, vars=embedding_df.keys()).savefig(os.path.join(outdir, "pairplot.png"))
# PCA
pca_df = do_PCA(embedding_df)
df = pd.concat([df, pca_df], axis=1)
plot_pca_2d(df, 'ground_truth', outdir)
# plot_pca_3d(df, 'ground_truth')
plot_pca_2d(df, 'persona_color', outdir)
# plot_pca_3d(df, 'persona_color')
plot_pca_2d(df, 'PathName', outdir)
# T-SNE
X_subset, df_subset = get_subset(embedding_df, df, 10000)
# pca_df = do_PCA(X_subset)
tsne_df = do_t_SNE(X_subset)
df_subset = pd.concat([df_subset, tsne_df], axis=1)
plot_t_SNE(df_subset, 'ground_truth', outdir)
plot_t_SNE(df_subset, 'persona_color', outdir)
plot_t_SNE(df_subset, 'PathName', outdir)
# UMAP
# plot_umap(df_subset, 'ground_truth', 15, outdir)
# plot_umap(df_subset, 'persona_color', 15, outdir)
for n in (2, 5, 10, 20, 50, 100, 200):
umap_df = do_umap(X_subset, n)
df_subset = | pd.concat([df_subset, umap_df], axis=1) | pandas.concat |
import ast
import json
import os
import sys
import uuid
import lxml
import networkx as nx
import pandas as pd
import geopandas as gpd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from shapely.geometry import LineString, Polygon, Point
from genet.core import Network
from genet.inputs_handler import matsim_reader
from tests.test_outputs_handler_matsim_xml_writer import network_dtd, schedule_dtd
from genet.schedule_elements import Route, Service, Schedule
from genet.utils import plot, spatial
from genet.inputs_handler import read
from tests.fixtures import assert_semantically_equal, route, stop_epsg_27700, network_object_from_test_data, \
full_fat_default_config_path, correct_schedule, vehicle_definitions_config_path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
pt2matsim_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network.xml"))
pt2matsim_schedule_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "schedule.xml"))
puma_network_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "network.xml"))
puma_schedule_test_file = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "puma", "schedule.xml"))
simplified_network = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "network.xml"))
simplified_schedule = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "simplified_network", "schedule.xml"))
network_link_attrib_text_missing = os.path.abspath(
os.path.join(os.path.dirname(__file__), "test_data", "matsim", "network_link_attrib_text_missing.xml"))
@pytest.fixture()
def network1():
n1 = Network('epsg:27700')
n1.add_node('101982',
{'id': '101982',
'x': '528704.1425925883',
'y': '182068.78193707118',
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n1.add_node('101986',
{'id': '101986',
'x': '528835.203274008',
'y': '182006.27331298392',
'lon': -0.14439428709377497,
'lat': 51.52228713323965,
's2_id': 5221390328605860387})
n1.add_link('0', '101982', '101986',
attribs={'id': '0',
'from': '101982',
'to': '101986',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390328605860387,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n1
@pytest.fixture()
def network2():
n2 = Network('epsg:4326')
n2.add_node('101982',
{'id': '101982',
'x': -0.14625948709424305,
'y': 51.52287873323954,
'lon': -0.14625948709424305,
'lat': 51.52287873323954,
's2_id': 5221390329378179879})
n2.add_node('101990',
{'id': '101990',
'x': -0.14770188709624754,
'y': 51.5205729332399,
'lon': -0.14770188709624754,
'lat': 51.5205729332399,
's2_id': 5221390304444511271})
n2.add_link('0', '101982', '101990',
attribs={'id': '0',
'from': '101982',
'to': '101990',
'freespeed': 4.166666666666667,
'capacity': 600.0,
'permlanes': 1.0,
'oneway': '1',
'modes': ['car'],
's2_from': 5221390329378179879,
's2_to': 5221390304444511271,
'length': 52.765151087870265,
'attributes': {'osm:way:access': {'name': 'osm:way:access',
'class': 'java.lang.String',
'text': 'permissive'},
'osm:way:highway': {'name': 'osm:way:highway',
'class': 'java.lang.String',
'text': 'unclassified'},
'osm:way:id': {'name': 'osm:way:id',
'class': 'java.lang.Long',
'text': '26997928'},
'osm:way:name': {'name': 'osm:way:name',
'class': 'java.lang.String',
'text': 'Brunswick Place'}}})
return n2
def test_network_graph_initiates_as_not_simplified():
n = Network('epsg:27700')
assert not n.graph.graph['simplified']
def test__repr__shows_graph_info_and_schedule_info():
n = Network('epsg:4326')
assert 'instance at' in n.__repr__()
assert 'graph' in n.__repr__()
assert 'schedule' in n.__repr__()
def test__str__shows_info():
n = Network('epsg:4326')
assert 'Graph info' in n.__str__()
assert 'Schedule info' in n.__str__()
def test_reproject_changes_x_y_values_for_all_nodes(network1):
network1.reproject('epsg:4326')
nodes = dict(network1.nodes())
correct_nodes = {
'101982': {'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'101986': {'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}}
target_change_log = pd.DataFrame(
{'timestamp': {3: '2020-07-09 19:50:51', 4: '2020-07-09 19:50:51'}, 'change_event': {3: 'modify', 4: 'modify'},
'object_type': {3: 'node', 4: 'node'}, 'old_id': {3: '101982', 4: '101986'},
'new_id': {3: '101982', 4: '101986'}, 'old_attributes': {
3: "{'id': '101982', 'x': '528704.1425925883', 'y': '182068.78193707118', 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': '528835.203274008', 'y': '182006.27331298392', 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'new_attributes': {
3: "{'id': '101982', 'x': -0.14625948709424305, 'y': 51.52287873323954, 'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}",
4: "{'id': '101986', 'x': -0.14439428709377497, 'y': 51.52228713323965, 'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}"},
'diff': {3: [('change', 'x', ('528704.1425925883', -0.14625948709424305)),
('change', 'y', ('182068.78193707118', 51.52287873323954))],
4: [('change', 'x', ('528835.203274008', -0.14439428709377497)),
('change', 'y', ('182006.27331298392', 51.52228713323965))]}}
)
assert_semantically_equal(nodes, correct_nodes)
for i in [3, 4]:
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'old_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'old_attributes']))
assert_semantically_equal(ast.literal_eval(target_change_log.loc[i, 'new_attributes']),
ast.literal_eval(network1.change_log.loc[i, 'new_attributes']))
cols_to_compare = ['change_event', 'object_type', 'old_id', 'new_id', 'diff']
assert_frame_equal(network1.change_log[cols_to_compare].tail(2), target_change_log[cols_to_compare],
check_dtype=False)
def test_reproject_delegates_reprojection_to_schedules_own_method(network1, route, mocker):
mocker.patch.object(Schedule, 'reproject')
network1.schedule = Schedule(epsg='epsg:27700', services=[Service(id='id', routes=[route])])
network1.reproject('epsg:4326')
network1.schedule.reproject.assert_called_once_with('epsg:4326', 1)
def test_reproject_updates_graph_crs(network1):
network1.reproject('epsg:4326')
assert network1.graph.graph['crs'] == {'init': 'epsg:4326'}
def test_reprojecting_links_with_geometries():
n = Network('epsg:27700')
n.add_nodes({'A': {'x': -82514.72274, 'y': 220772.02798},
'B': {'x': -82769.25894, 'y': 220773.0637}})
n.add_links({'1': {'from': 'A', 'to': 'B',
'geometry': LineString([(-82514.72274, 220772.02798),
(-82546.23894, 220772.88254),
(-82571.87107, 220772.53339),
(-82594.92709, 220770.68385),
(-82625.33255, 220770.45579),
(-82631.26842, 220770.40158),
(-82669.7309, 220770.04349),
(-82727.94946, 220770.79793),
(-82757.38528, 220771.75412),
(-82761.82425, 220771.95614),
(-82769.25894, 220773.0637)])}})
n.reproject('epsg:2157')
geometry_coords = list(n.link('1')['geometry'].coords)
assert round(geometry_coords[0][0], 7) == 532006.5605980
assert round(geometry_coords[0][1], 7) == 547653.3751768
assert round(geometry_coords[-1][0], 7) == 531753.4315189
assert round(geometry_coords[-1][1], 7) == 547633.5224837
def test_adding_the_same_networks():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_the_same_networks_but_with_differing_projections():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right.reproject('epsg:4326')
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_node_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('20', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('1', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_link_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_right.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_right.add_link('10', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118, 'lon': -0.14625948709424305,
'lat': 51.52287873323954, 's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392, 'lon': -0.14439428709377497,
'lat': 51.52228713323965, 's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_networks_with_clashing_multiindices():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', 0, attribs={'modes': ['walk', 'bike']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 2
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
assert n_left.graph['1']['2'][0] == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_disjoint_networks_with_unique_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('10', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('20', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('100', '10', '20', attribs={'modes': ['walk']})
n_left.add(n_right)
assert_semantically_equal(dict(n_left.nodes()), {'10': {'id': '1', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 1},
'20': {'id': '2', 'x': 1, 'y': 1, 'lon': 1, 'lat': 1, 's2_id': 2},
'1': {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954,
's2_id': 5221390329378179879},
'2': {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965,
's2_id': 5221390328605860387}})
assert_semantically_equal(dict(n_left.links()), {'100': {'modes': ['walk'], 'from': '10', 'to': '20', 'id': '100'},
'1': {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}})
def test_adding_disjoint_networks_with_clashing_ids():
n_left = Network('epsg:27700')
n_left.add_node('1', {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879})
n_left.add_node('2', {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387})
n_left.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_right = Network('epsg:27700')
n_right.add_node('1', {'id': '1', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 1})
n_right.add_node('2', {'id': '2', 'x': 1, 'y': 1,
'lon': 1, 'lat': 1, 's2_id': 2})
n_right.add_link('1', '1', '2', attribs={'modes': ['walk']})
n_left.add(n_right)
assert len(list(n_left.nodes())) == 4
assert n_left.node('1') == {'id': '1', 'x': 528704.1425925883, 'y': 182068.78193707118,
'lon': -0.14625948709424305, 'lat': 51.52287873323954, 's2_id': 5221390329378179879}
assert n_left.node('2') == {'id': '2', 'x': 528835.203274008, 'y': 182006.27331298392,
'lon': -0.14439428709377497, 'lat': 51.52228713323965, 's2_id': 5221390328605860387}
assert len(n_left.link_id_mapping) == 2
assert n_left.link('1') == {'modes': ['walk'], 'from': '1', 'to': '2', 'id': '1'}
def test_adding_simplified_network_and_not_simplified_throws_error():
n = Network('epsg:2770')
m = Network('epsg:2770')
m.graph.graph['simplified'] = True
with pytest.raises(RuntimeError) as error_info:
n.add(m)
assert "cannot add" in str(error_info.value)
def test_print_shows_info(mocker):
mocker.patch.object(Network, 'info')
n = Network('epsg:27700')
n.print()
n.info.assert_called_once()
def test_plot_delegates_to_util_plot_plot_graph_routes(mocker):
mocker.patch.object(plot, 'plot_graph_routes')
n = Network('epsg:27700')
n.plot()
plot.plot_graph_routes.assert_called_once()
def test_plot_graph_delegates_to_util_plot_plot_graph(mocker):
mocker.patch.object(plot, 'plot_graph')
n = Network('epsg:27700')
n.plot_graph()
plot.plot_graph.assert_called_once()
def test_plot_schedule_delegates_to_util_plot_plot_non_routed_schedule_graph(mocker, network_object_from_test_data):
mocker.patch.object(plot, 'plot_non_routed_schedule_graph')
n = network_object_from_test_data
n.plot_schedule()
plot.plot_non_routed_schedule_graph.assert_called_once()
def test_attempt_to_simplify_already_simplified_network_throws_error():
n = Network('epsg:27700')
n.graph.graph["simplified"] = True
with pytest.raises(RuntimeError) as error_info:
n.simplify()
assert "cannot simplify" in str(error_info.value)
def test_simplifing_puma_network_results_in_correct_record_of_removed_links_and_expected_graph_data():
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
link_ids_pre_simplify = set(dict(n.links()).keys())
n.simplify()
assert n.is_simplified()
link_ids_post_simplify = set(dict(n.links()).keys())
assert link_ids_post_simplify & link_ids_pre_simplify
new_links = link_ids_post_simplify - link_ids_pre_simplify
deleted_links = link_ids_pre_simplify - link_ids_post_simplify
assert set(n.link_simplification_map.keys()) == deleted_links
assert set(n.link_simplification_map.values()) == new_links
assert (set(n.link_id_mapping.keys()) & new_links) == new_links
report = n.generate_validation_report()
assert report['routing']['services_have_routes_in_the_graph']
assert report['schedule']['schedule_level']['is_valid_schedule']
def test_simplified_network_saves_to_correct_dtds(tmpdir, network_dtd, schedule_dtd):
n = read.read_matsim(path_to_network=puma_network_test_file, epsg='epsg:27700',
path_to_schedule=puma_schedule_test_file)
n.simplify()
n.write_to_matsim(tmpdir)
generated_network_file_path = os.path.join(tmpdir, 'network.xml')
xml_obj = lxml.etree.parse(generated_network_file_path)
assert network_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
network_dtd.error_log.filter_from_errors())
generated_schedule_file_path = os.path.join(tmpdir, 'schedule.xml')
xml_obj = lxml.etree.parse(generated_schedule_file_path)
assert schedule_dtd.validate(xml_obj), \
'Doc generated at {} is not valid against DTD due to {}'.format(generated_network_file_path,
schedule_dtd.error_log.filter_from_errors())
def test_simplifying_network_with_multi_edges_resulting_in_multi_paths():
n = Network('epsg:27700')
n.add_nodes({
'n_-1': {'x': -1, 'y': -1, 's2_id': -1},
'n_0': {'x': 0, 'y': 0, 's2_id': 0},
'n_1': {'x': 1, 'y': 1, 's2_id': 1},
'n_2': {'x': 2, 'y': 2, 's2_id': 2},
'n_3': {'x': 3, 'y': 3, 's2_id': 3},
'n_4': {'x': 4, 'y': 4, 's2_id': 4},
'n_5': {'x': 5, 'y': 5, 's2_id': 5},
'n_6': {'x': 6, 'y': 5, 's2_id': 6},
})
n.add_links({
'l_-1': {'from': 'n_-1', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_0': {'from': 'n_0', 'to': 'n_1', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_1': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_2': {'from': 'n_1', 'to': 'n_2', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_3': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_4': {'from': 'n_2', 'to': 'n_3', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_5': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_6': {'from': 'n_3', 'to': 'n_4', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_7': {'from': 'n_4', 'to': 'n_5', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}},
'l_8': {'from': 'n_4', 'to': 'n_6', 'freespeed': 1, 'capacity': 1, 'permlanes': 1, 'length': 1,
'modes': {'car'}}
})
n.simplify()
assert set(n.link_simplification_map) == {'l_4', 'l_1', 'l_5', 'l_3', 'l_6', 'l_2'}
def test_reading_back_simplified_network():
# simplified networks have additional geometry attribute and some of their attributes are composite, e.g. links
# now refer to a number of osm ways each with a unique id
n = read.read_matsim(path_to_network=simplified_network, epsg='epsg:27700',
path_to_schedule=simplified_schedule)
number_of_simplified_links = 659
links_with_geometry = n.extract_links_on_edge_attributes(conditions={'geometry': lambda x: True})
assert len(links_with_geometry) == number_of_simplified_links
for link in links_with_geometry:
attribs = n.link(link)
if 'attributes' in attribs:
assert not 'geometry' in attribs['attributes']
for k, v in attribs['attributes'].items():
if isinstance(v['text'], str):
assert not ',' in v['text']
def test_network_with_missing_link_attribute_elem_text_is_read_and_able_to_save_again(tmpdir):
n = read.read_matsim(path_to_network=network_link_attrib_text_missing, epsg='epsg:27700')
n.write_to_matsim(tmpdir)
def test_node_attribute_data_under_key_returns_correct_pd_series_with_nested_keys():
n = Network('epsg:27700')
n.add_node(1, {'a': {'b': 1}})
n.add_node(2, {'a': {'b': 4}})
output_series = n.node_attribute_data_under_key(key={'a': 'b'})
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_key_returns_correct_pd_series_with_flat_keys():
n = Network('epsg:27700')
n.add_node(1, {'b': 1})
n.add_node(2, {'b': 4})
output_series = n.node_attribute_data_under_key(key='b')
assert_series_equal(output_series, pd.Series({1: 1, 2: 4}))
def test_node_attribute_data_under_keys(network1):
df = network1.node_attribute_data_under_keys(['x', 'y'])
df_to_compare = pd.DataFrame({'x': {'101982': '528704.1425925883', '101986': '528835.203274008'},
'y': {'101982': '182068.78193707118', '101986': '182006.27331298392'}})
| assert_frame_equal(df, df_to_compare) | pandas.testing.assert_frame_equal |
#!/usr/bin/env python3
import os
import time
import functools
import subprocess
import numpy as np
import pandas as pd
import bottleneck as bn
import tensorflow as tf
import multiprocessing as mp
from scipy.special import gamma
from multiprocessing import Pool
from tensorflow.keras.models import load_model
import deepmp.utils as ut
from deepmp.model import *
epsilon = 0.05
gamma_val = 0.8
#<NAME>
beta_a = 1
beta_b = 22
beta_c = 14.5
# Human
# beta_a = 1
# beta_b = 6.5
# beta_c = 10.43
EPSILON = np.finfo(np.float64).resolution
log_EPSILON = np.log(EPSILON)
read_names = ['chrom', 'pos', 'strand', 'pos_in_strand', 'readname', 'pred_prob',
'inferred_label']
queen_size_border = 2000
time_wait = 5
# ------------------------------------------------------------------------------
# READ PREDICTION MULTIPROCESSING
# ------------------------------------------------------------------------------
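# Rough shape of the pipeline implemented by the helpers below:
#   _fill_files_queue           -- producer: pushes batches of h5 feature files onto h5s_q
#   do_multiprocessing_main     -- workers: pop a batch, run the model, push predictions onto predictions_q
#   _write_predictions_to_file  -- single writer: drains predictions_q and appends rows to a TSV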
def _write_predictions_to_file(write_fp, predictions_q):
while True:
if predictions_q.empty():
time.sleep(time_wait)
continue
predictions_to_file = predictions_q.get()
try:
predictions_to_file.to_csv(
write_fp, sep='\t', mode='a', index=None, header=None
)
except:
break
def _fill_files_queue(h5s_q, h5s_files, batch_size):
for i in np.arange(0, len(h5s_files), batch_size):
h5s_q.put(h5s_files[i:(i+batch_size)])
return
def do_multiprocessing_main(h5s_q, predictions_q, errornum_q, model_type,
trained_model, kmer, err_feat):
#Obtain predictions from every h5
while not h5s_q.empty():
try:
h5s = h5s_q.get()
except Exception:
break
model = load_model_or_weights(trained_model, model_type, kmer)
predictions, error_num = do_read_calling_multiprocessing(
h5s, model_type, model, kmer, err_feat
)
errornum_q.put(error_num)
predictions_q.put(predictions)
while predictions_q.qsize() > queen_size_border:
time.sleep(time_wait)
def do_read_calling_multiprocessing(h5s, model_type, trained_model, kmer, err_feat):
predictions = | pd.DataFrame() | pandas.DataFrame |
"""
@author : <NAME> (<EMAIL>)
Date: 15 Dec 2019
Script to assess Apprentice law (Belief 5)
License: See LICENSE file
"""
from PSPConstants import *
from results_generator import *
import pandas as pd
def percentage(numer, denom):
if denom > 0:
return float(numer*100/denom)
else:
return 0
def filterNumeric(samples):
count = 0
ss = []
for s in samples:
if str(s) == 'inf':
ss.append(1000)
print("appending ", 1000)
count += 1
elif str(s) == 'nan':
ss.append(-1)
print("appending ", -1)
count += 1
else:
ss.append(s)
    per = percentage(count, len(samples))
    if per > 5:
        assert False, 'more than 5% of samples were non-numeric and had to be replaced'
# print("\% replaced ", per, count )
return ss
def calculateProductivity(pdf, filter=True):
tempDF = pd.DataFrame()
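    # production rate = actual LOC per actual hour (ACTMIN is assumed to be measured in minutes)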
tempDF['production-rate'] = pdf[ACTLOC_COLNAME] / (pdf[ACTMIN_COLNAME]/60)
samples = tempDF['production-rate'].values.tolist()
# if filter:
# samples = filterNumeric(samples)
return samples
def generateProductivityExpertNovice():
df = getPSPDF()
pspDF = df[df[PROGRAMASSIGNMENT_COLNAME].isin(PROGRAM_ASSIGNMENT_LIST_ALL)]
globalMin = min(pspDF[ACTLOC_COLNAME] / (pspDF[ACTMIN_COLNAME] / 60))
globalMax = max(pspDF[ACTLOC_COLNAME] / (pspDF[ACTMIN_COLNAME] / 60))
expertDF = pspDF[(pspDF[YEARSPROGRAMMINGEXPERIENCE_COLNAME] >= 3)]
noviceDF = pspDF[(pspDF[YEARSPROGRAMMINGEXPERIENCE_COLNAME] < 3)]
output_file_name = 'belief_5_ProductivityExpertNovice.txt'
remove(output_file_name)
appendTreatment(output_file_name, 'expert', normalize(calculateProductivity(expertDF), globalMin, globalMax))
appendTreatment(output_file_name, 'novice', normalize(calculateProductivity(noviceDF), globalMin, globalMax))
def calculateDD(pdf, filter=True):
tempDF = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Purpose: Perform automated testing on pdvalidate.
:Platform: Linux/Windows | Python 3.5
:Developer: <NAME>
:Email: <EMAIL>
"""
# pylint: disable=protected-access
# pylint: disable=wrong-import-position
import os
import sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import datetime
import numpy as np
import pytest
import pandas as pd
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pdvalidate.validation import ei, \
validate as pdv, \
ValidationWarning
class TestReturnTypes():
strings = pd.Series(['1', '1', 'ab\n', 'a b', 'Ab', 'AB', np.nan])
masks = [pd.Series([False, False, False, True, True, False, False]),
pd.Series([True, True, False, True, True, False, True])]
def test_return_mask_series(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'mask_series'),
pd.Series([True, True, False, True, True, False, True]))
def test_return_mask_frame(self):
assert_frame_equal(pdv._get_return_object(self.masks, self.strings, 'mask_frame'),
pd.concat(self.masks, axis='columns'))
def test_return_values(self):
assert_series_equal(pdv._get_return_object(self.masks, self.strings, 'values'),
pd.Series([np.nan, np.nan, 'ab\n', np.nan, np.nan, 'AB', np.nan]))
def test_wrong_return_type(self):
with pytest.raises(ValueError):
pdv._get_return_object(self.masks, self.strings, 'wrong return type')
class TestMaskNonconvertible():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
inconvertible_numeric = pd.Series([False, False, False, True, True, False])
inconvertible_exact_dates = pd.Series([True, True, False, True, True, False])
inconvertible_inexact_dates = pd.Series([True, True, False, True, False, False])
def test_numeric(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed, 'numeric'),
self.inconvertible_numeric)
def test_datetime_exact_date(self):
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y',
exact_date=True),
self.inconvertible_exact_dates)
assert_series_equal(pdv.mask_nonconvertible(self.mixed,
'datetime',
datetime_format='%Y', exact_date=False),
self.inconvertible_inexact_dates)
class TestToDatetime():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_exact(self):
expected_result1 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y',
exact=True).tolist() == expected_result1)
expected_result2 = [pd.NaT, pd.NaT, pd.NaT, pd.NaT,
pd.Timestamp('2014-01-07 00:00:00'),
pd.Timestamp('2014-01-01 00:00:00')]
assert (pdv.to_datetime(self.mixed,
datetime_format='%Y/%m/%d',
exact=False).tolist() == expected_result2)
class TestToNumeric():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7), '2014'])
def test_conversion(self):
assert (pdv.to_numeric(self.mixed).sum() == 2017.3)
pytest.warns(ValidationWarning, pdv.to_numeric, self.mixed)
class TestToString():
mixed = pd.Series([1, 2.3, np.nan, 'abc', pd.datetime(2014, 1, 7)])
numeric_as_strings = pd.Series(['1', '2.3', np.nan, 'abc', pd.datetime(2014, 1, 7)])
datetimes_as_strings = pd.Series([1, 2.3, np.nan, 'abc', '2014-01-07'])
all_values_as_strings = pd.Series(['1', '2.3', np.nan, 'abc', '2014-01-07'])
def test_numeric_to_string(self):
assert_series_equal(pdv._numeric_to_string(self.mixed),
self.numeric_as_strings)
def test_datetime_to_string(self):
assert_series_equal(pdv._datetime_to_string(self.mixed,
datetime_format='%Y-%m-%d'),
self.datetimes_as_strings)
def test_to_string(self):
assert_series_equal(pdv.to_string(self.mixed,
float_format='%g',
datetime_format='%Y-%m-%d'),
self.all_values_as_strings)
class TestValidateDate():
dates = pd.Series([datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 1, 7),
datetime.datetime(2014, 2, 28),
pd.NaT])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_date(self.dates, return_type='values')
assert_series_equal(self.dates, results)
_, msg = pdv.validate_date(self.dates,
nullable=False,
return_type=self.rtype)
assert ei.natv in msg
_, msg = pdv.validate_date(self.dates,
unique=True,
return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_date(self.dates,
min_date=datetime.date(2014, 1, 8),
return_type=self.rtype)
assert ei.elyd in msg
_, msg = pdv.validate_date(self.dates,
max_date=datetime.date(2014, 1, 8),
return_type=self.rtype)
assert ei.lted in msg
class TestValidateTimestamp():
timestamps = pd.Series([pd.Timestamp(2014, 1, 7, 12, 0, 5),
pd.Timestamp(2014, 1, 7, 12, 0, 5),
pd.Timestamp(2014, 2, 28, 0, 0, 0),
pd.NaT])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_timestamp(self.timestamps, return_type='values')
assert_series_equal(self.timestamps, results)
_, msg = pdv.validate_timestamp(self.timestamps, nullable=False, return_type=self.rtype)
assert ei.natv in msg
_, msg = pdv.validate_timestamp(self.timestamps, unique=True, return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_timestamp(self.timestamps,
min_timestamp=pd.Timestamp(2014, 1, 8),
return_type=self.rtype)
assert ei.elyt in msg
_, msg = pdv.validate_timestamp(self.timestamps,
max_timestamp=pd.Timestamp(2014, 1, 8),
return_type=self.rtype)
assert ei.ltet in msg
class TestValidateNumber():
numeric_with_string = pd.Series([-1, -1, 2.3, '1'])
numeric = pd.Series([-1, -1, 2.3, np.nan])
rtype = 'mask_series'
def test_validation(self):
results, msg = pdv.validate_numeric(self.numeric_with_string,
return_type='values')
assert_series_equal(results, self.numeric)
_, msg = pdv.validate_numeric(self.numeric, nullable=False, return_type=self.rtype)
assert ei.nanv in msg
_, msg = pdv.validate_numeric(self.numeric, unique=True, return_type=self.rtype)
assert ei.nonu in msg
_, msg = pdv.validate_numeric(self.numeric, integer=True, return_type=self.rtype)
assert ei.nint in msg
_, msg = pdv.validate_numeric(self.numeric, min_value=0, return_type=self.rtype)
assert ei.lowv in msg
_, msg = pdv.validate_numeric(self.numeric, max_value=0, return_type=self.rtype)
assert ei.hghv in msg
class TestValidateString():
mixed = pd.Series(['ab\n', 'ab\r\n', 'a b', 'Ab', 'Ab', 'AB', ' aBc', 'aBc ', 1, np.nan])
strings = pd.Series(['ab\n', 'ab\r\n', 'a b', 'Ab', 'Ab', 'AB', ' aBc', 'aBc ', np.nan, np.nan])
rtype = 'mask_series'
# pylint: disable=line-too-long
def test_validation(self):
results, msg = pdv.validate_string(self.mixed, return_type='values')
| assert_series_equal(results, self.strings) | pandas.util.testing.assert_series_equal |
# -*- coding: utf-8 -*-
# Copyright (C) 2017-2018 <NAME> <<EMAIL>>
# License: BSD (3-clause)
"""MNE-based functionality not further categorized."""
from __future__ import division, print_function
from collections import namedtuple # noqa: I100
import matplotlib.pyplot as plt
import mne
import numpy as np
import pandas as pd
from scipy import stats
from scipy.ndimage.measurements import center_of_mass
from scipy.signal import argrelmin, savgol_filter
IafEst = namedtuple('IAFEstimate',
['PeakAlphaFrequency', 'CenterOfGravity', 'AlphaBand'])
def savgol_iaf(raw, picks=None, # noqa: C901
fmin=None, fmax=None,
resolution=0.25,
average=True,
ax=None,
window_length=11, polyorder=5,
pink_max_r2=0.9):
"""Estimate individual alpha frequency (IAF).
Parameters
----------
raw : instance of Raw
The raw data to do these estimations on.
picks : array-like of int | None
List of channels to use.
fmin : int | None
Lower bound of alpha frequency band. If None, it will be
empirically estimated using a polynomial fitting method to
determine the edges of the central parabolic peak density,
with assumed center of 10 Hz.
fmax : int | None
Upper bound of alpha frequency band. If None, it will be
empirically estimated using a polynomial fitting method to
determine the edges of the central parabolic peak density,
with assumed center of 10 Hz.
resolution : float
The resolution in the frequency domain for calculating the PSD.
average : bool
Whether to average the PSD estimates across channels or provide
a separate estimate for each channel. Currently, only True is
supported.
ax : instance of matplotlib Axes | None | False
Axes to plot PSD analysis into. If None, axes will be created
(and plot not shown by default). If False, no plotting will be done.
window_length : int
Window length in samples to use for Savitzky-Golay smoothing of
PSD when estimating IAF.
polyorder : int
Polynomial order to use for Savitzky-Golay smoothing of
PSD when estimating IAF.
pink_max_r2 : float
Maximum R^2 allowed when comparing the PSD distribution to the
pink noise 1/f distribution on the range 1 to 30 Hz.
If this threshold is exceeded, then IAF is assumed unclear and
None is returned for both PAF and CoG.
Returns
-------
IafEst : instance of ``collections.namedtuple`` called IAFEstimate
Named tuple with fields for the peak alpha frequency (PAF),
alpha center of gravity (CoG), and the bounds of the alpha band
(as a tuple).
Notes
-----
Based on method developed by
`<NAME> <https://zenodo.org/badge/latestdoi/80904585>`_.
In addition to appropriate software citation (Zenodo DOI or
git commit), please cite:
<NAME>., <NAME>., <NAME>., &
<NAME>. (2018). Toward a reliable, automated method
of individual alpha frequency (IAF) quantification. Psychophysiology,
e13064. doi:10.1111/psyp.13064
"""
n_fft = int(raw.info['sfreq'] / resolution)
psd, freqs = mne.time_frequency.psd_welch(raw, picks=picks,
n_fft=n_fft, fmin=1.,
fmax=30.)
if ax is None:
fig = plt.figure() # noqa: F841
ax = plt.gca()
if average:
psd = np.mean(psd, axis=0)
if fmin is None or fmax is None:
if fmin is None:
fmin_bound = 5
else:
fmin_bound = fmin
if fmax is None:
fmax_bound = 15
else:
fmax_bound = fmax
alpha_search = np.logical_and(freqs >= fmin_bound,
freqs <= fmax_bound)
freqs_search = freqs[alpha_search]
psd_search = savgol_filter(psd[alpha_search],
window_length=psd[alpha_search].shape[0],
polyorder=10)
        # argrelmin returns a tuple, so we flatten that with [0]
# then we get the last element of the resulting array with [-1]
# which is the minimum closest to the 'median' alpha of 10 Hz
if fmin is None:
try:
left_min = argrelmin(psd_search[freqs_search < 10])[0][-1]
fmin = freqs_search[freqs_search < 10][left_min]
except IndexError:
raise ValueError("Unable to automatically determine lower end of alpha band.") # noqa: 501
if fmax is None:
# here we want the first element of the array which is closest to
# the 'median' alpha of 10 Hz
try:
right_min = argrelmin(psd_search[freqs_search > 10])[0][0]
fmax = freqs_search[freqs_search > 10][right_min]
except IndexError:
raise ValueError("Unable to automatically determine upper end of alpha band.") # noqa: 501
psd_smooth = savgol_filter(psd,
window_length=window_length,
polyorder=polyorder)
alpha_band = np.logical_and(freqs >= fmin, freqs <= fmax)
slope, intercept, r, p, se = stats.linregress(np.log(freqs),
np.log(psd_smooth))
if r**2 > pink_max_r2:
paf = None
cog = None
else:
paf_idx = np.argmax(psd_smooth[alpha_band])
paf = freqs[alpha_band][paf_idx]
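        # centre of gravity: the PSD-weighted mean index over the alpha band, mapped back to Hz below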
cog_idx = center_of_mass(psd_smooth[alpha_band])
try:
cog_idx = int(np.round(cog_idx[0]))
cog = freqs[alpha_band][cog_idx]
except ValueError:
cog = None
# set PAF to None as well, because this is a pathological case
paf = None
if ax:
plt_psd, = ax.plot(freqs, psd, label="Raw PSD")
plt_smooth, = ax.plot(freqs, psd_smooth, label="Smoothed PSD")
plt_pink, = ax.plot(freqs,
np.exp(slope * np.log(freqs) + intercept),
label='$1/f$ fit ($R^2={:0.2}$)'.format(r**2))
try:
plt_search, = ax.plot(freqs_search, psd_search,
label='Alpha-band Search Parabola')
ax.legend(handles=[plt_psd, plt_smooth, plt_search, plt_pink])
except UnboundLocalError:
# this happens when the user fully specified an alpha band
ax.legend(handles=[plt_psd, plt_smooth, plt_pink])
ax.set_ylabel("PSD")
ax.set_xlabel("Hz")
return IafEst(paf, cog, (fmin, fmax))
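# Minimal usage sketch for savgol_iaf (file name is hypothetical; assumes an eyes-closed resting-state recording):
#   raw = mne.io.read_raw_fif('sub-01_task-rest_raw.fif', preload=True)
#   est = savgol_iaf(raw, fmin=7, fmax=13)
#   print(est.PeakAlphaFrequency, est.CenterOfGravity, est.AlphaBand)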
def attenuation_iaf(raws, picks=None, # noqa: C901
fmin=None, fmax=None,
resolution=0.25,
average=True,
ax=None,
savgol=False,
window_length=11, polyorder=5,
flat_max_r=0.98):
"""Estimate individual alpha frequency (IAF).
Parameters
----------
raws : list-like of Raw
Two Raws to calculate IAF from difference (attenuation) in PSD from.
picks : array-like of int | None
List of channels to use.
fmin : int | None
Lower bound of alpha frequency band. If None, it will be
empirically estimated using a polynomial fitting method to
determine the edges of the central parabolic peak density,
with assumed center of 10 Hz.
fmax : int | None
Upper bound of alpha frequency band. If None, it will be
empirically estimated using a polynomial fitting method to
determine the edges of the central parabolic peak density,
with assumed center of 10 Hz.
resolution : float
The resolution in the frequency domain for calculating the PSD.
average : bool
Whether to average the PSD estimates across channels or provide
a separate estimate for each channel. Currently, only True is
supported.
ax : instance of matplotlib Axes | None | False
Axes to plot PSD analysis into. If None, axes will be created
(and plot not shown by default). If False, no plotting will be done.
savgol : False | 'each' | 'diff'
        Use Savitzky-Golay filtering to smooth PSD estimates -- applied either
        to each PSD estimate or to the difference (i.e. the attenuation
        estimate).
window_length : int
Window length in samples to use for Savitzky-Golay smoothing of
PSD when estimating IAF.
polyorder : int
Polynomial order to use for Savitzky-Golay smoothing of
PSD when estimating IAF.
flat_max_r: float
Maximum (Pearson) correlation allowed when comparing the raw PSD
distributions to each other in the range 1 to 30 Hz.
If this threshold is exceeded, then IAF is assumed unclear and
None is returned for both PAF and CoG. Note that the sign of the
coefficient is ignored.
Returns
-------
IafEst : instance of ``collections.namedtuple`` called IAFEstimate
Named tuple with fields for the peak alpha frequency (PAF),
alpha center of gravity (CoG), and the bounds of the alpha band
(as a tuple).
Notes
-----
Based on method developed by
`<NAME> <https://zenodo.org/badge/latestdoi/80904585>`_.
In addition to appropriate software citation (Zenodo DOI or
git commit), please cite:
<NAME>., <NAME>., <NAME>., &
<NAME>. (2018). Toward a reliable, automated method
of individual alpha frequency (IAF) quantification. Psychophysiology,
e13064. doi:10.1111/psyp.13064
"""
# TODO: check value of savgol parameter
def psd_est(r):
n_fft = int(r.info['sfreq'] / resolution)
return mne.time_frequency.psd_welch(r, picks=picks, n_fft=n_fft,
fmin=1., fmax=30.)
psd, freqs = zip(*[psd_est(r) for r in raws])
assert np.allclose(*freqs)
if savgol == 'each':
psd = [savgol_filter(p,
window_length=window_length,
polyorder=polyorder) for p in psd]
att_psd = psd[1] - psd[0]
if average:
att_psd = np.mean(att_psd, axis=0)
psd = [np.mean(p, axis=0) for p in psd]
att_psd = np.abs(att_psd)
att_freqs = freqs[0]
if ax is None:
fig = plt.figure() # noqa: F841
ax = plt.gca()
if fmin is None or fmax is None:
if fmin is None:
fmin_bound = 5
else:
fmin_bound = fmin
if fmax is None:
fmax_bound = 15
else:
fmax_bound = fmax
alpha_search = np.logical_and(att_freqs >= fmin_bound,
att_freqs <= fmax_bound)
freqs_search = att_freqs[alpha_search]
        # set the window to the entire interval
        # don't use the name window_length because that's used as a
        # parameter for the function as a whole
wlen = att_psd[alpha_search].shape[0]
psd_search = savgol_filter(att_psd[alpha_search],
window_length=wlen,
polyorder=10)
        # argrelmin returns a tuple, so we flatten that with [0]
# then we get the last element of the resulting array with [-1]
# which is the minimum closest to the 'median' alpha of 10 Hz
if fmin is None:
try:
left_min = argrelmin(psd_search[freqs_search < 10])[0][-1]
fmin = freqs_search[freqs_search < 10][left_min]
except IndexError:
raise ValueError("Unable to automatically determine lower end of alpha band.") # noqa: 501
if fmax is None:
# here we want the first element of the array which is closest to
# the 'median' alpha of 10 Hz
try:
right_min = argrelmin(psd_search[freqs_search > 10])[0][0]
fmax = freqs_search[freqs_search > 10][right_min]
except IndexError:
raise ValueError("Unable to automatically determine upper end of alpha band.") # noqa: 501
if savgol == 'diff':
att_psd = savgol_filter(att_psd,
window_length=window_length,
polyorder=polyorder)
alpha_band = np.logical_and(att_freqs >= fmin, att_freqs <= fmax)
r, p = stats.pearsonr(psd[0], psd[1])
if np.abs(r) > np.abs(flat_max_r):
paf = None
cog = None
else:
paf_idx = np.argmax(att_psd[alpha_band])
paf = att_freqs[alpha_band][paf_idx]
cog_idx = center_of_mass(att_psd[alpha_band])
cog_idx = int(np.round(cog_idx[0]))
cog = att_freqs[alpha_band][cog_idx]
if ax:
sgnote = '(with SG-Smoothing)' if savgol == 'each' else ''
plt_psd1, = ax.plot(freqs[0], psd[0],
label="Raw PSD #1 {}".format(sgnote))
plt_psd2, = ax.plot(freqs[1], psd[1],
label="Raw PSD #2 {}".format(sgnote))
sgnote = '(with SG-Smoothing)' if savgol == 'diff' else ''
plt_att_psd, = ax.plot(att_freqs, att_psd,
label="Attenuated PSD {}".format(sgnote))
# plt_pink, = ax.plot(att_freqs,
# np.exp(slope * np.log(att_freqs) + intercept),
# label='$1/f$ fit ($R^2={:0.2}$)'.format(r**2))
ax.text(np.max(att_freqs) * 0.5, np.max(att_psd) * 0.67,
'Raw PSD Pearson $r={:0.2}$'.format(r))
try:
plt_search, = ax.plot(freqs_search, psd_search,
label='Alpha-band Search Parabola')
ax.legend(handles=[plt_psd1, plt_psd2, plt_att_psd, plt_search])
except UnboundLocalError:
# this happens when the user fully specified an alpha band
ax.legend(handles=[plt_psd1, plt_psd2, plt_att_psd])
ax.set_ylabel("PSD")
ax.set_xlabel("Hz")
return IafEst(paf, cog, (fmin, fmax))
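# Minimal usage sketch for attenuation_iaf (file names and recording order are assumptions):
#   raw_open = mne.io.read_raw_fif('rest_eyes_open_raw.fif', preload=True)
#   raw_closed = mne.io.read_raw_fif('rest_eyes_closed_raw.fif', preload=True)
#   est = attenuation_iaf([raw_open, raw_closed], savgol='diff')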
def abs_threshold(epochs, threshold,
eeg=True, eog=False, misc=False, stim=False):
"""Compute mask for dropping epochs based on absolute voltage threshold.
Parameters
----------
epochs : instance of Epochs
The epoched data to do threshold rejection on.
threshold : float
The absolute threshold (in *volts*) to reject at.
eeg : bool
If True include EEG channels in thresholding procedure.
eog : bool
If True include EOG channels in thresholding procedure.
misc : bool
If True include miscellaneous channels in thresholding procedure.
stim : bool
If True include stimulus channels in thresholding procedure.
Returns
-------
rej : instance of ndarray
Boolean mask for whether or not the epochs exceeded the rejection
threshold at any time point for any channel.
Notes
-----
More precise selection of channels can be performed by passing a
'reduced' Epochs instance from the various ``picks`` methods.
"""
data = epochs.pick_types(eeg=eeg, misc=misc, stim=stim).get_data()
    # channels and times are the last two dimensions in MNE ndarrays,
# and we collapse across them to get a (n_epochs,) shaped array
rej = np.any( np.abs(data) > threshold, axis=(-1, -2) ) # noqa: E201, E202
return rej
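# Minimal usage sketch for abs_threshold (the 100 microvolt cutoff is only illustrative):
#   rej = abs_threshold(epochs, 100e-6)
#   epochs.drop(np.nonzero(rej)[0], reason='absolute threshold')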
def retrieve(epochs, windows, items=None,
summary_fnc=dict(mean=np.mean), **kwargs):
"""Retrieve summarized epoch data for further statistical analysis.
Parameters
----------
epochs : instance of Epochs
The epoched data to extract windowed summary statistics from.
windows : dict of tuples
Named tuples defining time windows for extraction (relative to
epoch-locking event). Units are dependent on the keyword argument
scale_time. Default is milliseconds.
summary_fnc : dict of functions
Functions to apply to generate summary statistics in each time
window. The keys serve as column names.
items : ndarray | None
Items corresponding to the individual epoch / trials (for
e.g. repeated measure designs). Shape should be (n_epochs,). If
None (default), then item numbers will not be included in the
generated data frame.
kwargs :
Keyword arguments to pass to Epochs.to_data_frame. Particularly
relevant are ``scalings`` and ``scale_time``.
Returns
-------
dat : instance of pandas.DataFrame
Long-format data frame of summarized data
"""
df = epochs.to_data_frame(index=['epoch', 'time'], **kwargs)
    chs = [c for c in df.columns if c not in ('condition',)]
# the order is important here!
# otherwise the shortcut with items later won't work
factors = ['epoch', 'condition']
sel = factors + chs
df = df.reset_index()
id_vars = ['epoch', 'condition', 'win', 'wname']
if items is not None:
id_vars += ['item']
dat = | pd.DataFrame(columns=id_vars) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
@author: hkaneko
"""
import random
import sys
import matplotlib.figure as figure
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import structure_generator
from deap import base
from deap import creator
from deap import tools
from rdkit import Chem
from rdkit.Chem import AllChem, Descriptors
from rdkit.ML.Descriptors import MoleculeDescriptors
from scipy.spatial.distance import cdist
from scipy.stats import norm
from sklearn import metrics
from sklearn import svm
from sklearn.cross_decomposition import PLSRegression
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import Matern, DotProduct, WhiteKernel, RBF, ConstantKernel
from sklearn.model_selection import cross_val_predict, GridSearchCV
dataset = pd.read_csv('molecules_with_logS.csv', index_col=0)  # load the dataset with SMILES strings
file_name_of_main_fragments = 'sample_main_fragments.smi'  # file with the main-scaffold ('r_group') fragments; 'sample_main_fragments.smi' is provided as a sample
file_name_of_sub_fragments = 'sample_sub_fragments.smi'  # file with the side-chain ('r_group') fragments; 'sample_sub_fragments.smi' is provided as a sample
# deleting_descriptor_names = ['Ipc', 'Kappa3']
deleting_descriptor_names = []
number_of_iteration_of_ga = 10  # how many times to repeat GA-based structure generation; (number_of_iteration_of_ga × number_of_population) structures are obtained
method_name = 'gp'  # 'pls' or 'svr' or 'gp'
target_range = [1, 3]  # target range of y. This is active only for 'gp'
fold_number = 5  # the N in N-fold cross-validation
max_number_of_principal_components = 30  # maximum number of principal components to use
svr_cs = 2 ** np.arange(-5, 11, dtype=float)  # candidates for C
svr_epsilons = 2 ** np.arange(-10, 1, dtype=float)  # candidates for epsilon
svr_gammas = 2 ** np.arange(-20, 11, dtype=float)  # candidates for gamma
number_of_population = 30  # GA population size
number_of_generation = 50  # number of GA generations
probability_of_crossover = 0.5
probability_of_mutation = 0.2
threshold_of_variable_selection = 0.5
minimum_number = -10 ** 10
if method_name != 'pls' and method_name != 'svr' and method_name != 'gp':
    sys.exit('There is no regression method called \'{0}\'. Please check method_name.'.format(method_name))
smiles = dataset.iloc[:, 0]  # SMILES of the molecules
y = dataset.iloc[:, 1]  # target variable such as a property or activity
# get the names of the descriptors to compute
descriptor_names = []
for descriptor_information in Descriptors.descList:
    descriptor_names.append(descriptor_information[0])
print('number of descriptors to compute :', len(descriptor_names))
# compute the descriptors
descriptor_calculator = MoleculeDescriptors.MolecularDescriptorCalculator(descriptor_names)
descriptors = []  # computed descriptor values are appended here
print('number of molecules :', len(smiles))
for index, smiles_i in enumerate(smiles):
print(index + 1, '/', len(smiles))
molecule = Chem.MolFromSmiles(smiles_i)
descriptors.append(descriptor_calculator.CalcDescriptors(molecule))
original_x = pd.DataFrame(descriptors, index=dataset.index, columns=descriptor_names)
if deleting_descriptor_names is not None:
original_x = original_x.drop(deleting_descriptor_names, axis=1)
original_x = original_x.replace(np.inf, np.nan).fillna(np.nan)  # replace inf with NaN
nan_variable_flags = original_x.isnull().any()  # variables containing NaN
original_x = original_x.drop(original_x.columns[nan_variable_flags], axis=1)  # drop variables containing NaN
# drop explanatory variables whose standard deviation is 0
std_0_variable_flags = original_x.std() == 0
x = original_x.drop(original_x.columns[std_0_variable_flags], axis=1)
# autoscaling (standardization)
autoscaled_x = (x - x.mean()) / x.std()
autoscaled_y = (y - y.mean()) / y.std()
if method_name == 'pls':
    # optimize the number of components by cross-validation
    components = []  # empty list to collect the number of components
    r2_in_cv_all = []  # empty list to collect the cross-validated r2 for each number of components
for component in range(1, min(np.linalg.matrix_rank(autoscaled_x), max_number_of_principal_components) + 1):
# PLS
        model = PLSRegression(n_components=component)  # declare the PLS model
        estimated_y_in_cv = pd.DataFrame(cross_val_predict(model, autoscaled_x, autoscaled_y,
                                                           cv=fold_number))  # compute cross-validated predictions and convert to a DataFrame
        estimated_y_in_cv = estimated_y_in_cv * y.std() + y.mean()  # rescale back to the original scale
        r2_in_cv = metrics.r2_score(y, estimated_y_in_cv)  # compute r2
        print(component, r2_in_cv)  # print the number of components and r2
        r2_in_cv_all.append(r2_in_cv)  # append r2
        components.append(component)  # append the number of components
    optimal_component_number = components[r2_in_cv_all.index(max(r2_in_cv_all))]
    print('\nOptimal number of components selected by CV :', optimal_component_number)
    # PLS
    model = PLSRegression(n_components=optimal_component_number)  # declare the model
elif method_name == 'svr':
    # optimize gamma by maximizing the variance of the Gram matrix
variance_of_gram_matrix = list()
for index, ocsvm_gamma in enumerate(svr_gammas):
print(index + 1, '/', len(svr_gammas))
gram_matrix = np.exp(-ocsvm_gamma * cdist(autoscaled_x, autoscaled_x, metric='seuclidean'))
variance_of_gram_matrix.append(gram_matrix.var(ddof=1))
optimal_svr_gamma = svr_gammas[variance_of_gram_matrix.index(max(variance_of_gram_matrix))]
    # optimize epsilon by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', C=3, gamma=optimal_svr_gamma), {'epsilon': svr_epsilons},
cv=fold_number, iid=False, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_epsilon = model_in_cv.best_params_['epsilon']
    # optimize C by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_svr_epsilon, gamma=optimal_svr_gamma),
{'C': svr_cs}, cv=fold_number, iid=False, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_c = model_in_cv.best_params_['C']
    # optimize gamma by cross-validation
model_in_cv = GridSearchCV(svm.SVR(kernel='rbf', epsilon=optimal_svr_epsilon, C=optimal_svr_c),
{'gamma': svr_gammas}, cv=fold_number, iid=False, verbose=2)
model_in_cv.fit(autoscaled_x, autoscaled_y)
optimal_svr_gamma = model_in_cv.best_params_['gamma']
    # optimized C, epsilon and gamma
    print('C : {0}\nε : {1}\nGamma : {2}'.format(optimal_svr_c, optimal_svr_epsilon, optimal_svr_gamma))
    # SVR
    model = svm.SVR(kernel='rbf', C=optimal_svr_c, epsilon=optimal_svr_epsilon, gamma=optimal_svr_gamma)  # declare the model
elif method_name == 'gp': # Gaussian process
model = GaussianProcessRegressor(ConstantKernel() * RBF() + WhiteKernel(), alpha=0)
model.fit(autoscaled_x, autoscaled_y)  # fit the model
if method_name == 'pls':
    # standardized regression coefficients
    standard_regression_coefficients = pd.DataFrame(model.coef_, index=x.columns,
                                                    columns=['standard_regression_coefficients'])
    standard_regression_coefficients.to_csv(
        'pls_standard_regression_coefficients.csv')  # save to a csv file; note that an existing file with the same name will be overwritten
# structure generation
main_molecules = [molecule for molecule in Chem.SmilesMolSupplier(file_name_of_main_fragments,
delimiter='\t', titleLine=False)
if molecule is not None]
fragment_molecules = [molecule for molecule in Chem.SmilesMolSupplier(file_name_of_sub_fragments,
delimiter='\t', titleLine=False)
if molecule is not None]
creator.create('FitnessMax', base.Fitness, weights=(1.0,)) # for minimization, set weights as (-1.0,)
creator.create('Individual', list, fitness=creator.FitnessMax)
toolbox = base.Toolbox()
min_boundary = np.zeros(len(fragment_molecules) + 1)
max_boundary = np.ones(len(fragment_molecules) + 1) * 1.0
def create_ind_uniform(min_boundary, max_boundary):
index = []
for min, max in zip(min_boundary, max_boundary):
index.append(random.uniform(min, max))
return index
toolbox.register('create_ind', create_ind_uniform, min_boundary, max_boundary)
toolbox.register('individual', tools.initIterate, creator.Individual, toolbox.create_ind)
toolbox.register('population', tools.initRepeat, list, toolbox.individual)
def evalOneMax(individual):
individual_array = np.array(individual)
generated_smiles = structure_generator.structure_generator_based_on_r_group(main_molecules, fragment_molecules,
individual_array)
generated_molecule = Chem.MolFromSmiles(generated_smiles)
if generated_molecule is not None:
AllChem.Compute2DCoords(generated_molecule)
descriptors_of_generated_molecule = descriptor_calculator.CalcDescriptors(generated_molecule)
descriptors_of_generated_molecule = | pd.DataFrame(descriptors_of_generated_molecule, index=descriptor_names) | pandas.DataFrame |
"""
This module is used to analyse the trending categories along with
their variation over time. The time step can be days or months.
"""
import pandas as pd
df_category = pd.read_csv('./data/US_category_id.csv')
df = | pd.read_csv('./data/US_youtube_trending_data.csv') | pandas.read_csv |
# Combine results of P(L), P(E) and P(C) into P(LCE)
import psycopg2 # For connecting to PostgreSQL database
import pandas as pd # Data analysis toolkit with flexible data structures
import numpy as np # Fundamental toolkit for scientific computation with N-dimensional array support
from sqlalchemy import create_engine
import datetime
import matplotlib.pyplot as plt
conn = psycopg2.connect("dbname='yelp' host='' user='' password=''")
cur = conn.cursor()
# Fetch P(C) where C = Food category
query = "select user_id, food_authority_prob from food_prob_rf2;"
cur.execute(query)
data = cur.fetchall()
df_food = pd.DataFrame(data)
df_food.rename(columns={df_food.columns[0]: 'user_id', df_food.columns[1]: 'food_prob'}, inplace=True)
df_food.head()
# Fetch P(C) where C = Nightlife category
query = "select user_id, nightlife_authority_prob from nightlife_prob_rf2;"
cur.execute(query)
data = cur.fetchall()
df_nl = pd.DataFrame(data)
df_nl.rename(columns={df_nl.columns[0]: 'user_id', df_nl.columns[1]: 'nightlife_prob'}, inplace=True)
df_nl.head()
# Fetch P(C) where C = Shopping category
query = "select user_id, shopping_authority_prob from shopping_prob_rf2;"
cur.execute(query)
data = cur.fetchall()
df_sp = pd.DataFrame(data)
df_sp.rename(columns={df_sp.columns[0]: 'user_id', df_sp.columns[1]: 'shopping_prob'}, inplace=True)
df_sp.head()
# Fetching P(E) data
query = "select user_id, elite_prob_rf from elite_prob_rf;"
cur.execute(query)
data = cur.fetchall()
df_e = pd.DataFrame(data)
df_e.rename(columns={df_e.columns[0]: 'user_id', df_e.columns[1]: 'elite_prob'}, inplace=True)
df_e.head()
# Fetch P(L) where L = Las Vegas
query = "select distinct user_id, probability from user_location_parallel where city like 'Las Vegas';"
cur.execute(query)
data = cur.fetchall()
df_lv = pd.DataFrame(data)
df_lv.rename(columns={df_lv.columns[0]: 'user_id', df_lv.columns[1]: 'lv_prob'}, inplace=True)
df_lv.head()
# Joining all the dataframes
df2 = pd.merge(df_e, df_lv, on='user_id', how='outer')
df3 = pd.merge(df2, df_food, on='user_id', how='outer')
df4 = pd.merge(df3, df_nl, on='user_id', how='outer')
df5 = | pd.merge(df4, df_sp, on='user_id', how='outer') | pandas.merge |
# this file was created to handle processed ISC data
import os, glob
import pandas as pd
import numpy as np
from pandas import read_excel
from pathlib import Path
from matplotlib import pyplot as plt
def sum_up (df, list_size_spectra, min_size, max_size):
'''
    This function handles the 'Binned_All_Images' sheet to define the number of images per depth bin (default 10 images)
    and returns the number of images per bin.
'''
target_size_spectra = [x for x in list_size_spectra if x > min_size and x < max_size] # select target size spectra
target_df = df.loc[:, target_size_spectra] # create target size spectra image
target_df = target_df.loc[: , ~(target_df == 0).all()] # drop size spectra having all Zero value
loc_number = 10 # start loc image number with 10 (loc means number of images)
more_than_three = False
while more_than_three == False:
for i in range(0, len(target_df.index), loc_number):
loc_target_df = target_df.loc[i: i+loc_number].sum(skipna=True, axis=0)
if any(x < 2 for x in loc_target_df):
loc_number += 1
break
else:
if len(target_df.index) <= loc_number + i:
more_than_three = True
else:
pass
continue
return loc_number
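# Hypothetical usage sketch (the sheet name, header row and size limits are
# assumptions): pool consecutive images until every size-spectrum column between
# 100 and 1000 (same units as the sheet) contains at least 2 counts.
def _example_sum_up(file_name):
    binned = pd.read_excel(file_name, sheet_name='Binned_All_Images', header=2)
    size_columns = [c for c in binned.columns if isinstance(c, (int, float))]
    return sum_up(binned, size_columns, min_size=100, max_size=1000)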
def isc_summary (ctd_df, vol_spec_df, aggr_con_df, particle_range):
# plot for ISC and CTD
# data preparation
# CTD
depth = tuple(ctd_df['Depths (m)'])
temp = tuple(ctd_df['Temperature (dC)'])
sal = tuple(ctd_df['Salinity (PSU)'])
turb = tuple(ctd_df['Turbidity (NTU)'])
fluo = tuple(ctd_df['Fluorescence (mg/m3)'])
ctd_label = {'depth': 'Depths (m)', 'temp': 'Temperature (dC)', 'sal': 'Salinity (PSU)', 'turb': 'Turbidity (NTU)',
'fluo': 'Fluorescence (mg/m3)'}
# particle volume data
vol_sml = tuple(vol_spec_df[str(particle_range[0])+'-'+str(particle_range[1])])
vol_med = tuple(vol_spec_df[str(particle_range[1])+'-'+str(particle_range[2])])
vol_lrg = tuple(vol_spec_df[str(particle_range[2])+'-'+str(particle_range[3])])
vol_tol = tuple(np.array(vol_sml) + np.array(vol_med) + np.array(vol_lrg))
# particle abundance data
abd_sml = tuple(aggr_con_df[str(particle_range[0])+'-'+str(particle_range[1])])
abd_med = tuple(aggr_con_df[str(particle_range[1])+'-'+str(particle_range[2])])
abd_lrg = tuple(aggr_con_df[str(particle_range[2])+'-'+str(particle_range[3])])
abd_tol = tuple(np.array(abd_sml) + np.array(abd_med) + np.array(abd_lrg))
vol_sml, vol_med, vol_lrg, vol_tol, abd_sml, abd_lrg, abd_tol
vol = {'vol_sml':vol_sml, 'vol_med':vol_med, 'vol_lrg':vol_lrg, 'vol_tol':vol_tol}
abd = {'abd_sml':abd_sml, 'abd_med':abd_med, 'abd_lrg':abd_lrg, 'abd_tol':abd_tol}
ctd = {'depth':depth, 'temp':temp, 'sal':sal, 'turb':turb, 'fluo':fluo}
return vol, abd, ctd, ctd_label
def depth_bin_interval (df, depth_bin_size, max_depth):
# reforming dataframe with certain depth interval
# 1. drop unnecessary columns and modify error values (e.g. - value to 0)
df.dropna(axis=1, how='all', inplace=True) # drop columns having all nan
for c in df.columns:
if df[c].dtype == 'object':
df.drop([c], axis=1, inplace=True)
# 2. reforming data frame with certain depth interval
depth_range = range(0, int(max_depth+depth_bin_size), depth_bin_size) # set depth bin range
bin_df = pd.DataFrame() # create empty dataframe
for b in range(0, len(depth_range)-1):
each_df = df.loc[(depth_range[b] <= df['Depths (m)']) & (df['Depths (m)'] < depth_range[b+1])]
index_each_df = tuple(each_df.index)
index_up, index_down = index_each_df[0], index_each_df[-1]
bin_df[depth_range[b+1]] = df.loc[(depth_range[b] <= df['Depths (m)']) & (df['Depths (m)'] < depth_range[b+1])].sum(axis=0)/(index_down-index_up+1)
bin_df = bin_df.T
bin_df['Depths (m)'] = bin_df.index
bin_df.reset_index(drop=True, inplace=True)
return bin_df
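# Hypothetical usage sketch (10 m bins down to 200 m are illustrative values):
# average a raw CTD profile into fixed-width depth bins before plotting.
def _example_depth_binning(ctd_df):
    return depth_bin_interval(ctd_df, depth_bin_size=10, max_depth=200)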
def particle_bin_interval (df, particle_range):
    # reformatting particle size range
    # df is the dataframe from the ISC excel file; particle_range is the list of particle size boundaries
# 1. drop unnecessary columns
df.dropna(axis=1, how='all', inplace=True) # drop columns having all nan
for c in df.columns:
if df[c].dtype == 'object':
df.drop([c], axis=1, inplace=True)
    # 2. collapse columns (raw particle size) to certain particle size range
bin_df = pd.DataFrame() # create empty frame
bin_df['Depths (m)'] = df['Depths (m)'] # add depth data
for b in range(0, len(particle_range)-1) :
cols = list(df.columns) # list the columns
cols.remove('Depths (m)') # remove the 'Depths (m)'
col_list = [x for x in cols if (float(x) < particle_range[b+1]) & (float(x) >= particle_range[b])]
        each_df = df.loc[:, col_list]  # this dataframe contains particles within size range b to b+1
bin_df[str(particle_range[b])+'-'+str(particle_range[b+1]) ] = each_df.sum(axis=1)
bin_df['total']= bin_df.iloc[:, 1:].sum(axis=1)
return bin_df
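# Hypothetical usage sketch (the particle size boundaries are assumptions): collapse
# the raw size-spectrum columns into small/medium/large classes and summarise them
# together with the CTD profile; the boundaries must match those used by isc_summary.
def _example_particle_binning(ctd_df, vol_spec_df, aggr_con_df):
    particle_range = [0.1, 0.5, 1.0, 10.0]  # illustrative size boundaries
    vol_binned = particle_bin_interval(vol_spec_df, particle_range)
    abd_binned = particle_bin_interval(aggr_con_df, particle_range)
    return isc_summary(ctd_df, vol_binned, abd_binned, particle_range)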
def read_isc (file_name, processed_or_raw):
'''
reads in processed ISC data
'''
# 1. import xlsx file and seperate each sheets to seperate dataframe
xl_file = pd.ExcelFile(file_name)
if processed_or_raw == 'raw':
ctd_df = pd.read_excel(xl_file, sheet_name='CTD-Data', header=2)
vol_spec_df = pd.read_excel(xl_file, sheet_name='VolumeSpectraData', header=2)
aggr_con_df = | pd.read_excel(xl_file, sheet_name='AggregateConcentration', header=2) | pandas.read_excel |
from statsmodels.compat.python import lmap
import calendar
from io import BytesIO
import locale
import numpy as np
from numpy.testing import assert_, assert_equal
import pandas as pd
import pytest
from statsmodels.datasets import elnino, macrodata
from statsmodels.graphics.tsaplots import (
month_plot,
plot_acf,
plot_pacf,
plot_predict,
quarter_plot,
seasonal_plot,
)
from statsmodels.tsa import arima_process as tsp
from statsmodels.tsa.ar_model import AutoReg
from statsmodels.tsa.arima.model import ARIMA
try:
from matplotlib import pyplot as plt
except ImportError:
pass
@pytest.mark.matplotlib
def test_plot_acf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=10)
plot_acf(acf, ax=ax)
plot_acf(acf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_acf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
plot_acf(acf, ax=ax, lags=np.arange(1, 11))
plot_acf(acf, ax=ax, lags=10, zero=False)
plot_acf(acf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_pacf(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
with pytest.warns(FutureWarning):
plot_pacf(pacf, ax=ax)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, alpha=None)
@pytest.mark.matplotlib
def test_plot_pacf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax)
fig.savefig(buff, format="rgba")
buff_linestyle = BytesIO()
fig_linestyle = plt.figure()
ax = fig_linestyle.add_subplot(111)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, ls="-")
fig_linestyle.savefig(buff_linestyle, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_linestyle.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
linestyle = buff_linestyle.read()
with_vlines = buff_with_vlines.read()
assert_(plain != linestyle)
assert_(with_vlines != plain)
assert_(linestyle != with_vlines)
@pytest.mark.matplotlib
def test_plot_acf_kwargs(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
buff = BytesIO()
plot_acf(acf, ax=ax)
fig.savefig(buff, format="rgba")
buff_with_vlines = BytesIO()
fig_with_vlines = plt.figure()
ax = fig_with_vlines.add_subplot(111)
vlines_kwargs = {"linestyles": "dashdot"}
plot_acf(acf, ax=ax, vlines_kwargs=vlines_kwargs)
fig_with_vlines.savefig(buff_with_vlines, format="rgba")
buff.seek(0)
buff_with_vlines.seek(0)
plain = buff.read()
with_vlines = buff_with_vlines.read()
assert_(with_vlines != plain)
@pytest.mark.matplotlib
def test_plot_acf_missing(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
acf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
acf[::13] = np.nan
buff = BytesIO()
plot_acf(acf, ax=ax, missing="drop")
fig.savefig(buff, format="rgba")
buff.seek(0)
fig = plt.figure()
ax = fig.add_subplot(111)
buff_conservative = BytesIO()
plot_acf(acf, ax=ax, missing="conservative")
fig.savefig(buff_conservative, format="rgba")
buff_conservative.seek(0)
assert_(buff.read() != buff_conservative.read())
@pytest.mark.matplotlib
def test_plot_pacf_irregular(close_figures):
# Just test that it runs.
fig = plt.figure()
ax = fig.add_subplot(111)
ar = np.r_[1.0, -0.9]
ma = np.r_[1.0, 0.9]
armaprocess = tsp.ArmaProcess(ar, ma)
rs = np.random.RandomState(1234)
pacf = armaprocess.generate_sample(100, distrvs=rs.standard_normal)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, lags=np.arange(1, 11))
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, lags=10, zero=False)
with pytest.warns(FutureWarning, match="The default"):
plot_pacf(pacf, ax=ax, alpha=None, zero=False)
@pytest.mark.matplotlib
def test_plot_month(close_figures):
dta = elnino.load_pandas().data
dta["YEAR"] = dta.YEAR.astype(int).apply(str)
dta = dta.set_index("YEAR").T.unstack()
dates = pd.to_datetime(["-".join([x[1], x[0]]) for x in dta.index.values])
# test dates argument
fig = month_plot(dta.values, dates=dates, ylabel="el nino")
# test with a TimeSeries DatetimeIndex with no freq
dta.index = pd.DatetimeIndex(dates)
fig = month_plot(dta)
# w freq
dta.index = pd.DatetimeIndex(dates, freq="MS")
fig = month_plot(dta)
# test with a TimeSeries PeriodIndex
dta.index = | pd.PeriodIndex(dates, freq="M") | pandas.PeriodIndex |
import argparse
from itertools import combinations
from typing import Dict
import json
import os
import pandas as pd
from scirex.metrics.clustering_metrics import match_predicted_clusters_to_gold
from scirex.metrics.f1 import compute_f1
from scirex.predictors.utils import map_predicted_spans_to_gold, merge_method_subrelations
from scirex_utilities.entity_utils import used_entities
from scirex_utilities.json_utilities import load_jsonl
parser = argparse.ArgumentParser()
parser.add_argument("--gold-file")
parser.add_argument("--ner-file")
parser.add_argument("--clusters-file")
parser.add_argument("--salient-mentions-file")
def has_all_mentions(doc, relation):
has_mentions = all(len(doc["clusters"][x[1]]) > 0 for x in relation)
return has_mentions
def convert_to_dict(data):
return {x["doc_id"]: x for x in data}
def ner_metrics(gold_data, predicted_data):
mapping = {}
for doc in gold_data:
predicted_doc = predicted_data[doc["doc_id"]]
predicted_spans = predicted_doc["ner"]
gold_spans = doc["ner"]
mapping[doc["doc_id"]] = map_predicted_spans_to_gold(predicted_spans, gold_spans)
return mapping
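# Hypothetical usage sketch (file paths come from the command-line flags defined
# above): load the gold and predicted files and build the per-document span mapping.
def _example_ner_mapping(gold_file, ner_file):
    gold_data = load_jsonl(gold_file)
    predicted_ner = convert_to_dict(load_jsonl(ner_file))
    return ner_metrics(gold_data, predicted_ner)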
def salent_mentions_metrics(gold_data,
predicted_salient_mentions,
produce_error_file = True,
generate_errors_file="/tmp/salient_mentions_error_files"):
all_metrics = []
predicted = 0
gold = 0
matched = 0
marked_up_words = []
for i, doc in enumerate(gold_data):
gold_salient_spans = [span for coref_cluster in doc['coref'].values() for span in coref_cluster]
predicted_doc = predicted_salient_mentions[doc["doc_id"]]
saliency_spans = []
doc_words = doc["words"]
if produce_error_file:
writer = open(os.path.join(generate_errors_file, str(i)), 'w')
writer.write(json.dumps(doc["n_ary_relations"]) + "\n")
existing_spans = set()
for [start_span, end_span, saliency, _] in predicted_doc["saliency"]:
if saliency:
saliency_spans.append((start_span, end_span))
if produce_error_file:
if (start_span, end_span) not in existing_spans:
# Add span metadata gloss to text.
existing_spans.add((start_span, end_span))
gold_saliency = (start_span, end_span) in gold_salient_spans
if gold_saliency and saliency:
doc_words[start_span] = '{+{' + doc_words[start_span]
doc_words[end_span] = doc_words[end_span] + '}+}'
elif saliency:
doc_words[start_span] = '<-<' + doc_words[start_span]
doc_words[end_span-1] = doc_words[end_span-1] + '>->'
elif gold_saliency:
doc_words[start_span] = '<+<' + doc_words[start_span]
doc_words[end_span] = doc_words[end_span] + '>+>'
else:
doc_words[start_span] = '{-{' + doc_words[start_span]
doc_words[end_span-1] = doc_words[end_span-1] + '}-}'
for _, end_sentence_idx in doc["sentences"]:
doc_words[end_sentence_idx-1] = doc_words[end_sentence_idx-1] + " "
for start_section, end_section in doc["sections"]:
doc_words[start_section] = '\t' + doc_words[start_section]
doc_words[end_section-1] = doc_words[end_section-1] + '\n'
matching_spans = set(gold_salient_spans).intersection(saliency_spans)
matched += len(matching_spans)
predicted += len(saliency_spans)
gold += len(gold_salient_spans)
if produce_error_file:
writer.write(f"# of gold salient spans: {len(gold_salient_spans)}\n")
writer.write(f"# of predicted salient spans: {len(saliency_spans)}\n")
writer.write(f"# of matching spans: {len(matching_spans)}\n")
i = 0
while i < len(doc_words):
delimiters = ['{+{', '}+}', '<-<', '>->', '<+<', '>+>', '{-{', '}-}']
character = doc_words[i].strip()
for delimiter in delimiters:
character = character.strip(delimiter)
if len(character) == 1:
if character in [".", ",", "?", "!", ":", ";", ")", "]"]:
doc_words[i-1] = doc_words[i-1] + doc_words[i]
del doc_words[i]
i -= 1
elif character in ["(", "["]:
doc_words[i+1] = doc_words[i] + doc_words[i+1]
del doc_words[i]
i -= 1
i += 1
writer.write(" ".join(doc_words))
precision, recall, f1 = compute_f1(predicted, gold, matched, m=1)
all_metrics = | pd.DataFrame({"f1": [f1], "p": [precision], "r": [recall]}) | pandas.DataFrame |
from cowin_api import CoWinAPI
import pandas as pd
from copy import deepcopy
import datetime
from decouple import config
import smtplib
from email.message import EmailMessage
import time
import os
import logging
# Either insert your emails and password here or use python-decouple or follow this article https://saralgyaan.com/posts/set-passwords-and-secret-keys-in-environment-variables-maclinuxwindows-python-quicktip/
FROM_EMAIL = config('FROM_EMAIL')
TO_EMAIL = config('TO_EMAIL')
PASSWORD = config('PASSWORD')
# Just Change these values
no_of_days = 28 # Change this to 7,14,21 or 28
pincodes = ['141001', '141002'] # Add as many pincodes as you want separated by commas
min_age_limit = 18 # Change this to 18 if you want 18+
BASE_DATE = datetime.datetime.now()
DATE_LIST = date_list = [BASE_DATE + datetime.timedelta(days=x * 7) for x in range(int(no_of_days / 7))]
dates = [date.strftime("%d-%m-%Y") for date in date_list]
# Start the API
cowin = CoWinAPI()
# Logging stuff
MY_PATH = os.getcwd()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
file_handler = logging.FileHandler(f'{os.path.join(MY_PATH, "cowin_email_alerts.log")}')
fmt = logging.Formatter('%(levelname)s : %(name)s : %(asctime)s : %(message)s')
file_handler.setFormatter(fmt)
logger.addHandler(file_handler)
def send_email(text_file: str):
""" This function sends the email if the Vaccination slot is available
Parameters
----------
text_file: str
This is a text file containing the details of all the slots available, it is generated by main function if there is an availability.
Requires
--------
TO_EMAIL : str
The email address to which you need to send the email
FROM_EMAIL: str
The email address from which you want to send the email
PASSWORD: str
Password of the FROM_EMAIL
You can either hard code it at line 11-13 above or use python-decouple or environmental variables
For more details about sending emails, check this article
https://saralgyaan.com/posts/use-python-to-send-email/
Sends
-----
The email
"""
message = EmailMessage()
message['Subject'] = 'Covid Vaccination Slot is available'
message['From'] = FROM_EMAIL
message['To'] = TO_EMAIL
with open(text_file, 'r') as f:
contents = f.readlines()
text = '\n'.join(contents)
final_text = f'Dear Udit,\n\n Covid Vaccination slots are available at the following locations\n {text} \n\nRegards,\n Udit'
message.set_content(final_text)
with smtplib.SMTP_SSL('smtp.gmail.com', 465) as smtp:
smtp.login(FROM_EMAIL, PASSWORD)
smtp.send_message(message)
def get_availability(pincode: str, date: str, min_age_limit: int):
"""
    This function checks the availability of Covid vaccination slots and creates a pandas dataframe with the details of the available slots.
Parameters
----------
pincode : str
It is provided by the user in the list on line 17
date : str
It is auto-generated on the basis of the no. of days for which inquiry is made. Days could be 7,14,21 or 28 (preferably).
min_age_limit : int
It is provided by the user at line 18
Returns
-------
df : Pandas dataframe
Containing the details of the hospital where slot is available.
"""
results = cowin.get_availability_by_pincode(pincode, date, min_age_limit)
master_data = results['centers']
if master_data != []:
df = pd.DataFrame(master_data)
if len(df):
df = df.explode("sessions")
df['min_age_limit'] = df.sessions.apply(lambda x: x['min_age_limit'])
df['vaccine'] = df.sessions.apply(lambda x: x['vaccine'])
df['available_capacity'] = df.sessions.apply(lambda x: x['available_capacity'])
df['date'] = df.sessions.apply(lambda x: x['date'])
df = df[["date", "available_capacity", "vaccine", "min_age_limit", "pincode", "name", "state_name", "district_name", "block_name", "fee_type"]]
df = df[df['available_capacity'] != 0]
df.drop_duplicates(inplace=True)
return df
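# Hypothetical usage sketch (the pincode and date are illustrative): check a single
# pincode for one week, dump any open slots to a text file and mail the alert.
def _example_single_check(pincode='141001', date='01-06-2021'):
    slots = get_availability(pincode, date, min_age_limit)
    if slots is not None and len(slots):
        slots.to_csv('available_slots.txt', index=False)
        send_email('available_slots.txt')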
def main():
"""
This is the main function which uses get_availability() to check for the availability and thereafter send_email() to send the emails if the slots are available.
Parameters
----------
None
"""
final_df = None
for pincode in pincodes:
for date in dates:
temp_df = get_availability(pincode, date, min_age_limit)
if final_df is not None:
final_df = | pd.concat([final_df, temp_df]) | pandas.concat |
# pylint: disable=W0102
import unittest
import nose
import numpy as np
from pandas import Index, MultiIndex, DataFrame, Series
from pandas.sparse.array import SparseArray
from pandas.core.internals import *
import pandas.core.internals as internals
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, randn)
from pandas.compat import zip, u
def assert_block_equal(left, right):
assert_almost_equal(left.values, right.values)
assert(left.dtype == right.dtype)
assert(left.items.equals(right.items))
assert(left.ref_items.equals(right.ref_items))
def get_float_mat(n, k, dtype):
return np.repeat(np.atleast_2d(np.arange(k, dtype=dtype)), n, axis=0)
TEST_COLS = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 's1', 's2']
N = 10
def get_float_ex(cols=['a', 'c', 'e'], dtype = np.float_):
floats = get_float_mat(N, len(cols), dtype = dtype).T
return make_block(floats, cols, TEST_COLS)
def get_complex_ex(cols=['h']):
complexes = (get_float_mat(N, 1, dtype = np.float_).T * 1j).astype(np.complex128)
return make_block(complexes, cols, TEST_COLS)
def get_obj_ex(cols=['b', 'd']):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
return make_block(mat.T, cols, TEST_COLS)
def get_bool_ex(cols=['f']):
mat = np.ones((N, 1), dtype=bool)
return make_block(mat.T, cols, TEST_COLS)
def get_int_ex(cols=['g'], dtype = np.int_):
mat = randn(N, 1).astype(dtype)
return make_block(mat.T, cols, TEST_COLS)
def get_dt_ex(cols=['h']):
mat = randn(N, 1).astype(int).astype('M8[ns]')
return make_block(mat.T, cols, TEST_COLS)
def get_sparse_ex1():
sa1 = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0)
return make_block(sa1, ['s1'], TEST_COLS)
def get_sparse_ex2():
sa2 = SparseArray([0, 0, 2, 3, 4, 0, 6, 7, 0, 8], fill_value=0)
return make_block(sa2, ['s2'], TEST_COLS)
def create_blockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
index_sz = blocks[0].shape[1]
return BlockManager(blocks, [items, np.arange(index_sz)])
def create_singleblockmanager(blocks):
l = []
for b in blocks:
l.extend(b.items)
items = Index(l)
for b in blocks:
b.ref_items = items
return SingleBlockManager(blocks, [items])
class TestBlock(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.fblock = get_float_ex()
self.cblock = get_complex_ex()
self.oblock = get_obj_ex()
self.bool_block = get_bool_ex()
self.int_block = get_int_ex()
def test_constructor(self):
int32block = get_int_ex(['a'],dtype = np.int32)
self.assert_(int32block.dtype == np.int32)
def test_pickle(self):
import pickle
def _check(blk):
pickled = pickle.dumps(blk)
unpickled = pickle.loads(pickled)
assert_block_equal(blk, unpickled)
_check(self.fblock)
_check(self.cblock)
_check(self.oblock)
_check(self.bool_block)
def test_ref_locs(self):
assert_almost_equal(self.fblock.ref_locs, [0, 2, 4])
def test_attrs(self):
self.assert_(self.fblock.shape == self.fblock.values.shape)
self.assert_(self.fblock.dtype == self.fblock.values.dtype)
self.assert_(len(self.fblock) == len(self.fblock.values))
def test_merge(self):
avals = randn(2, 10)
bvals = randn(2, 10)
ref_cols = ['e', 'a', 'b', 'd', 'f']
ablock = make_block(avals, ['e', 'b'], ref_cols)
bblock = make_block(bvals, ['a', 'd'], ref_cols)
merged = ablock.merge(bblock)
exvals = np.vstack((avals, bvals))
excols = ['e', 'b', 'a', 'd']
eblock = make_block(exvals, excols, ref_cols)
eblock = eblock.reindex_items_from(ref_cols)
assert_block_equal(merged, eblock)
# TODO: merge with mixed type?
def test_copy(self):
cop = self.fblock.copy()
self.assert_(cop is not self.fblock)
assert_block_equal(self.fblock, cop)
def test_items(self):
cols = self.fblock.items
self.assert_(np.array_equal(cols, ['a', 'c', 'e']))
cols2 = self.fblock.items
self.assert_(cols is cols2)
def test_assign_ref_items(self):
new_cols = Index(['foo', 'bar', 'baz', 'quux', 'hi'])
self.fblock.set_ref_items(new_cols)
self.assert_(np.array_equal(self.fblock.items,
['foo', 'baz', 'hi']))
def test_reindex_index(self):
pass
def test_reindex_items_from(self):
new_cols = Index(['e', 'b', 'c', 'f'])
reindexed = self.fblock.reindex_items_from(new_cols)
assert_almost_equal(reindexed.ref_locs, [0, 2])
self.assertEquals(reindexed.values.shape[0], 2)
self.assert_((reindexed.values[0] == 2).all())
self.assert_((reindexed.values[1] == 1).all())
def test_reindex_cast(self):
pass
def test_insert(self):
pass
def test_delete(self):
newb = self.fblock.delete('a')
assert_almost_equal(newb.ref_locs, [2, 4])
self.assert_((newb.values[0] == 1).all())
newb = self.fblock.delete('c')
assert_almost_equal(newb.ref_locs, [0, 4])
self.assert_((newb.values[1] == 2).all())
newb = self.fblock.delete('e')
assert_almost_equal(newb.ref_locs, [0, 2])
self.assert_((newb.values[1] == 1).all())
self.assertRaises(Exception, self.fblock.delete, 'b')
def test_split_block_at(self):
# with dup column support this method was taken out
# GH3679
raise nose.SkipTest
bs = list(self.fblock.split_block_at('a'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['c', 'e']))
bs = list(self.fblock.split_block_at('c'))
self.assertEqual(len(bs), 2)
self.assertTrue(np.array_equal(bs[0].items, ['a']))
self.assertTrue(np.array_equal(bs[1].items, ['e']))
bs = list(self.fblock.split_block_at('e'))
self.assertEqual(len(bs), 1)
self.assertTrue(np.array_equal(bs[0].items, ['a', 'c']))
bblock = get_bool_ex(['f'])
bs = list(bblock.split_block_at('f'))
self.assertEqual(len(bs), 0)
def test_unicode_repr(self):
mat = np.empty((N, 2), dtype=object)
mat[:, 0] = 'foo'
mat[:, 1] = 'bar'
cols = ['b', u("\u05d0")]
str_repr = repr(make_block(mat.T, cols, TEST_COLS))
def test_get(self):
pass
def test_set(self):
pass
def test_fillna(self):
pass
def test_repr(self):
pass
class TestBlockManager(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.blocks = [get_float_ex(),
get_obj_ex(),
get_bool_ex(),
get_int_ex(),
get_complex_ex()]
all_items = [b.items for b in self.blocks]
items = sorted(all_items[0].append(all_items[1:]))
items = Index(items)
for b in self.blocks:
b.ref_items = items
self.mgr = BlockManager(self.blocks, [items, np.arange(N)])
def test_constructor_corner(self):
pass
def test_attrs(self):
self.assertEquals(self.mgr.nblocks, len(self.mgr.blocks))
self.assertEquals(len(self.mgr), len(self.mgr.items))
def test_is_mixed_dtype(self):
self.assert_(self.mgr.is_mixed_type)
mgr = create_blockmanager([get_bool_ex(['a']), get_bool_ex(['b'])])
self.assert_(not mgr.is_mixed_type)
def test_is_indexed_like(self):
self.assert_(self.mgr._is_indexed_like(self.mgr))
mgr2 = self.mgr.reindex_axis(np.arange(N - 1), axis=1)
self.assert_(not self.mgr._is_indexed_like(mgr2))
def test_block_id_vector_item_dtypes(self):
expected = [0, 1, 0, 1, 0, 2, 3, 4]
result = self.mgr.block_id_vector
assert_almost_equal(expected, result)
result = self.mgr.item_dtypes
# as the platform may not exactly match this, pseudo match
expected = ['float64', 'object', 'float64', 'object', 'float64',
'bool', 'int64', 'complex128']
for e, r in zip(expected, result):
np.dtype(e).kind == np.dtype(r).kind
def test_duplicate_item_failure(self):
items = Index(['a', 'a'])
blocks = [get_bool_ex(['a']), get_float_ex(['a'])]
for b in blocks:
b.ref_items = items
# test trying to create _ref_locs with/o ref_locs set on the blocks
self.assertRaises(AssertionError, BlockManager, blocks, [items, np.arange(N)])
blocks[0].set_ref_locs([0])
blocks[1].set_ref_locs([1])
mgr = BlockManager(blocks, [items, np.arange(N)])
mgr.iget(1)
# invalidate the _ref_locs
for b in blocks:
b._ref_locs = None
mgr._ref_locs = None
mgr._items_map = None
self.assertRaises(AssertionError, mgr._set_ref_locs, do_refs=True)
def test_contains(self):
self.assert_('a' in self.mgr)
self.assert_('baz' not in self.mgr)
def test_pickle(self):
import pickle
pickled = pickle.dumps(self.mgr)
mgr2 = pickle.loads(pickled)
# same result
assert_frame_equal(DataFrame(self.mgr), DataFrame(mgr2))
# share ref_items
self.assert_(mgr2.blocks[0].ref_items is mgr2.blocks[1].ref_items)
# GH2431
self.assertTrue(hasattr(mgr2, "_is_consolidated"))
self.assertTrue(hasattr(mgr2, "_known_consolidated"))
# reset to False on load
self.assertFalse(mgr2._is_consolidated)
self.assertFalse(mgr2._known_consolidated)
def test_get(self):
pass
def test_get_scalar(self):
for item in self.mgr.items:
for i, index in enumerate(self.mgr.axes[1]):
res = self.mgr.get_scalar((item, index))
exp = self.mgr.get(item)[i]
assert_almost_equal(res, exp)
def test_set(self):
pass
def test_set_change_dtype(self):
self.mgr.set('baz', np.zeros(N, dtype=bool))
self.mgr.set('baz', np.repeat('foo', N))
self.assert_(self.mgr.get('baz').dtype == np.object_)
mgr2 = self.mgr.consolidate()
mgr2.set('baz', np.repeat('foo', N))
self.assert_(mgr2.get('baz').dtype == np.object_)
mgr2.set('quux', | randn(N) | pandas.util.testing.randn |
# lc_utils.py
# Code functions that are needed to run this lab
import sys
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
import time
from datetime import datetime
import math
import pandas as pd
#from pandas import scatter_matrix
from pandas.plotting import scatter_matrix
#pd.set_option('display.height', 1000)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
import glob
# custom library for some helper functions
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from collections import defaultdict
import seaborn as sns
from itertools import compress
import itertools
import operator
#CLASS_ENVIRONMENT = ["wsl-1231" : "" ,
#print("CLASS_ENVIRONMENT = {}".format(CLASS_ENVIRONMENT))
import df_utils as dfu
def set_env(CLASS_ENVIRONMENT) :
if(CLASS_ENVIRONMENT == 'dv-mac' or CLASS_ENVIRONMENT == 'wsl-1231') :
from keras.layers import Input, Dense
from keras.models import Model
from keras import regularizers
from keras.models import load_model
elif(CLASS_ENVIRONMENT == 'nimbix') :
import tensorflow as tf
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras import regularizers
from tensorflow.keras.models import load_model
elif(CLASS_ENVIRONMENT == 'acc') :
import tensorflow as tf
from tensorflow.python.keras.layers import Input, Dense
from tensorflow.python.keras.models import Model
from tensorflow.python.keras import regularizers
from tensorflow.python.keras.models import load_model
else :
print("ERROR loading CLASS_ENVIRONMENT {}".format(CLASS_ENVIRONMENT))
# utility print function
def nprint(mystring) :
print("**{}** : {}".format(sys._getframe(1).f_code.co_name,mystring))
class LCDF(dfu.MLDF) :
## Abstract Custom Implementations
def __init__(self, mode="acc", num_files_to_load=2) :
nprint("Loading Data. Overriding __init__ from dfutils")
self.df = self.load_sample_data(mode,num_files_to_load)
def load_sample_data(self, CLASS_ENVIRONMENT, num_files_to_load=2) :
'''
Used to load data for simple labs for techu. use acc for all data!
'''
#For lab force LoanStats_securev1_2018Q1.csv
nprint("CLASS_ENVIRONMENT = {}".format(CLASS_ENVIRONMENT))
loanstats_csv_files = None
if(CLASS_ENVIRONMENT == 'nimbix') :
location='/dl-labs/mldl-101/lab5-powerai-lc/'
nprint("Setting data location to {}".format(location))
loanstats_csv_files = glob.glob(location + 'LoanStats_securev1_2016Q1*csv.gz') # 'LoanStats_secure*csv'
elif(CLASS_ENVIRONMENT == 'acc') :
location='/gpfs/home/s4s004/vanstee/2019-06-lendingclub-git/rawdata/'
nprint("Setting data location to {}".format(location))
loanstats_csv_files = glob.glob(location + 'LoanStats_securev1_*csv.gz') # 'LoanStats_secure*csv'
elif(CLASS_ENVIRONMENT == 'acctest') :
location='/gpfs/home/s4s004/vanstee/2019-06-lendingclub-git/testdata/'
nprint("Setting data location to {}".format(location))
loanstats_csv_files = glob.glob(location + 'test*.csv') # 'LoanStats_secure*csv'
elif(CLASS_ENVIRONMENT == 'wsl-1231') :
location='../datasets/'
nprint("Setting data location to {}".format(location))
loanstats_csv_files = glob.glob(location + 'LoanStats_securev1*csv.gz') # 'LoanStats_secure*csv'
else :
nprint("Setting data location to default {}".format(location))
loanstats_csv_files = glob.glob(location + 'LoanStats_securev1_2016Q1*csv') # 'LoanStats_secure*csv'
num_file = len(loanstats_csv_files)
loan_list = []
nprint("Found {} files. CSV files = {}".format(num_file, loanstats_csv_files))
loan_df = None
for i in range(num_file) : #len(loanstats_csv_files)
nprint("Loading {}".format(loanstats_csv_files[i]))
loan_list.append( pd.read_csv(loanstats_csv_files[i], index_col=None, header=1))
loan_df = | pd.concat(loan_list,axis=0) | pandas.concat |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 18 12:06:53 2020
@author: hamishgibbs
"""
import sys
import __main__ as main
import pandas as pd
from progress.bar import Bar
import networkx as nx
from networkx.algorithms.community.kclique import k_clique_communities
import igraph as ig
import leidenalg
import numpy as np
#%%
if not hasattr(main, '__file__'):
argv = ['code', '/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/mobility_days_norm.csv',
'/Users/hamishgibbs/Documents/Covid-19/facebook_mobility_uk/data/processed/infomap/leiden_full_norm.csv']
else:
argv = sys.argv
#%%
def od_df(df):
df = df.loc[:,['start_quadkey', 'end_quadkey', 'n_crisis']].rename(columns = {'start_quadkey':'from', 'end_quadkey':'to', 'n_crisis':'weight'})
return(df)
def od_graph(df):
df = od_df(df)
g = nx.from_pandas_edgelist(df, 'from', 'to', ['weight']).to_directed()
return(g)
def od_igraph(mob_date):
g = ig.Graph.TupleList(od_df(mob_date).itertuples(index=False), directed=True, weights=True, edge_attrs=None)
g.vs['id'] = g.vs['name']
return(g)
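# Hypothetical usage sketch (the partition type and weight handling are assumptions):
# build the weighted directed graph for one day of tile-to-tile movements and detect
# communities with the Leiden algorithm imported above.
def _example_leiden_partition(mob_date):
    g = od_igraph(mob_date)
    partition = leidenalg.find_partition(g, leidenalg.ModularityVertexPartition,
                                         weights='weight')
    return dict(zip(g.vs['id'], partition.membership))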
mob = | pd.read_csv(argv[1]) | pandas.read_csv |
import cv2
import numpy as np
import base64
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime, time, timedelta, date
import wget
from zipfile import ZipFile
import os
import json
import plotly.express as px
import joblib
# pip install streamlit --upgrade
# pip install streamlit==0.78.0
class Inference:
def __init__(self,model_path="model/model.pkl"):
self.nomi_regioni = ['Abruzzo', 'Basilicata', 'Calabria', 'Campania', 'Emilia-Romagna', '<NAME>', 'Lazio', 'Liguria', 'Lombardia', 'Marche', 'Molise', '<NAME>', '<NAME>', 'Piemonte', 'Puglia', 'Sardegna', 'Sicilia', 'Toscana', 'Umbria', "Valle d'Aosta", 'Veneto']
dict_names = {"bianca":0,"gialla": 1, "arancione": 2, "rossa": 3}
self.names = list(dict_names)
self.model = joblib.load(model_path)
def predict(self,inputs, regione):
idx = self.nomi_regioni.index(regione)
v = [ 0 for i in range(0,len(self.nomi_regioni))]
v[idx] = 1
inputs.extend(v)
X = np.array(inputs,dtype=np.float).reshape(1,-1)
Y_hat = self.model.predict(X)
return self.names[int(Y_hat[0])]
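# Hypothetical usage sketch (the feature values below are placeholders and must match
# the number and order of indicators the pickled model was trained on): predict the
# restriction colour ('bianca'/'gialla'/'arancione'/'rossa') for a region.
def _example_inference():
    model = Inference(model_path="model/model.pkl")
    sample_inputs = [120.0, 45.0, 3.0, 0.9]  # placeholder indicator values
    return model.predict(sample_inputs, regione="Lombardia")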
def fig_stats_variation(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Variazione Giornaliera"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
# Script to aggregate data
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.resample.html
dft = df.copy()
dft = dft.set_index("data")
dft["count"] = [1 for i in range(0,len(df))]
agg = {"count" : "size"}
for s in options:
agg[s] = "median"
dft = dft.resample('1D').agg(agg)
# Variation daily
df = {"data": dft.index[1:]}
for s in options:
start = dft[s][:-1].values
end = dft[s][1:].values
df[s] = ( end - start )
#df[s] = np.round( ( end / start -1 )*100,2)
df = pd.DataFrame(df)
df = df.set_index("data")
#dft.dropna()
#print(dft.head())
# Rolling average variation
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
def fig_stats(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Andamento Cumulativo"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df.set_index("data")
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
df = df.set_index("data")
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
def get_stats(regione,data_inizio, data_fine):
select = ["deceduti","totale_casi","dimessi_guariti","variazione_totale_positivi"]
df = None
if regione=="Italia":
df = get_data_nazione()
else:
df = get_data_regioni()
df = df[df["denominazione_regione"]==regione]
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
incremento = ( df.iloc[-1,:][select] - df.iloc[-2,:][select] ) .to_dict()
data = ( df.iloc[-1,:][select]) .to_dict()
df = pd.DataFrame ([data,incremento],columns=select, index=["Situazione","Incremento"])
df = df.rename(columns={"deceduti": "Deceduti", "totale_casi": "Totale Casi", "dimessi_guariti": "Dimessi Guariti","variazione_totale_positivi" : "Var. Totale Positivi" })
return df
def get_nomi_regioni():
df = get_data_regioni()
#df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df["denominazione_regione"].unique().tolist()
def get_options():
select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
return select
def get_date():
df = get_data_nazione()
start = df["data"].tolist()[0]
end= df["data"].tolist()[-1]
d = end
date = []
date.append(d.strftime("%Y-%m-%d"))
while (d>start):
t = d -timedelta(days=0, weeks=1)
date.append(t.strftime("%Y-%m-%d"))
d = t
#date = [ d.strftime("%Y-%m-%d") for d in df["data"].dt.date]
return date
def get_data_nazione():
'''
Keys: ['data', 'stato', 'ricoverati_con_sintomi', 'terapia_intensiva',
'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi',
'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti',
'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening',
'totale_casi', 'tamponi', 'casi_testati', 'note',
'ingressi_terapia_intensiva', 'note_test', 'note_casi',
'totale_positivi_test_molecolare',
'totale_positivi_test_antigenico_rapido', 'tamponi_test_molecolare',
'tamponi_test_antigenico_rapido']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
url = "data/dpc-covid19-ita-andamento-nazionale.csv"
df = | pd.read_csv(url) | pandas.read_csv |
import shutil
import os
from datetime import datetime
import pandas as pd
import json
import numpy as np
import cv2
cv2.setNumThreads(0)
import ast
# from utils_dir.plot_loss import plot_loss
DEFAULT_TIME_FORMAT = "%Y-%m-%d %H:%M:%S"
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
def get_dir_paths(parent_dir):
dir_paths = [os.path.join(parent_dir, dir) for dir in os.listdir(parent_dir)]
dir_paths = [dir for dir in dir_paths if os.path.isdir(dir)]
return dir_paths
def get_dir_names(parent_dir):
dir_names = [dir_name for dir_name in os.listdir(parent_dir)
if os.path.isdir(os.path.join(parent_dir, dir_name))]
return dir_names
def get_dir_name_of_path(path):
return os.path.basename(os.path.dirname(path))
def get_file_names(parent_dir):
file_names = [file_name for file_name in os.listdir(parent_dir)
if os.path.isfile(os.path.join(parent_dir, file_name))]
return file_names
def get_file_paths(parent_dir):
file_paths = [os.path.join(parent_dir, file_name) for file_name in os.listdir(parent_dir)
if os.path.isfile(os.path.join(parent_dir, file_name))]
return file_paths
def get_parent_path(path):
return path[:path.rfind("/")]
def get_all_file_paths(dir, abs_path=False):
file_paths = []
for root, dirs, files in os.walk(dir):
for file in files:
path = os.path.join(root, file)
if abs_path:
path = os.path.abspath(path)
file_paths.append(path)
return file_paths
def get_files_with_extension(paths, extensions):
result = []
for path in paths:
for ext in extensions:
if path.endswith(ext):
result.append(path)
break
return result
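# Hypothetical usage sketch: collect every file under a directory tree and keep only
# the image extensions OpenCV can read.
def _example_collect_images(root_dir):
    all_paths = get_all_file_paths(root_dir, abs_path=True)
    return get_files_with_extension(all_paths, extensions=[".jpg", ".jpeg", ".png"])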
def make_dirs(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def make_parent_dirs(path):
dir = get_parent_path(path)
make_dirs(dir)
def load_str(path):
data = ""
try:
with open(path, 'r') as f:
data = f.read().strip()
except:
print("Error when load str from ", os.path.abspath(path))
return data
def save_str(data, save_path):
make_parent_dirs(save_path)
try:
with open(save_path, 'w') as f:
f.write(data)
print("Save str data to {} done".format(save_path))
except:
print("Error when save str to ", os.path.abspath(save_path))
def get_time_str(time=datetime.now(), fmt=DEFAULT_TIME_FORMAT):
try:
return time.strftime(fmt)
except:
return ""
def save_list(lst, save_path):
make_parent_dirs(save_path)
with open(save_path, "w") as f:
f.write("\n".join(lst))
print("Save data (size = {}) to {} done".format(len(lst), save_path))
def load_list(path):
data = []
with open(path, 'r') as f:
data = f.read().strip().split("\n")
print("Load list data (size = {}) from {} done".format(len(data), path))
return data
def load_csv(path, **kwargs):
data = None
try:
data = | pd.read_csv(path, **kwargs) | pandas.read_csv |