| prompt | completion | api |
|---|---|---|
| string, lengths 19 to 1.03M | string, lengths 4 to 2.12k | string, lengths 8 to 90 |
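The table above is the column schema of this dump: each row pairs a code `prompt` (19 to 1.03M characters) with the ground-truth `completion` (4 to 2.12k characters) and the fully qualified `api` name it exercises (8 to 90 characters). A minimal sketch of inspecting rows in this shape with pandas follows; the file name is hypothetical and only the column names come from the schema.

```python
import pandas as pd

# Hypothetical export of the rows shown on this page; only the column names
# (prompt, completion, api) are taken from the schema table above.
rows = pd.read_json("api_completion_rows.jsonl", lines=True)

print(rows[["prompt", "completion", "api"]].head())
# Completion lengths should fall in the 4 to ~2.12k character range per the schema.
print(rows["completion"].str.len().describe())
```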
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Author :<EMAIL>
@Time :2018/7/5 3:08
@File :test_pandas.py
'''
import pandas as pd
def t1():
    a = [['a', '1.2', '4.2'], ['b', '70', '0.03'], ['x', '5', '0']]
    df = pd.DataFrame(a, columns=list("ABC"))
    print(df.dtypes)
    print(df)
def t2():
    obj = pd.Series(list('cadaabbcc'))
    uniques = obj.unique()
    print(obj.dtypes)
    print(uniques.shape)
def t3():
    df = pd.DataFrame()
    df2 = pd.read_csv()
    df3 = | pd.Series() | pandas.Series |
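Each row ends with the recorded completion and API name, here `pd.Series()` / `pandas.Series`. Splicing that completion back into the truncated `t3` gives roughly the reconstruction below; the bare `pd.read_csv()` call from the prompt is left out because it cannot run without a file path.

```python
import pandas as pd

def t3():
    df = pd.DataFrame()   # empty frame, as in the prompt
    df3 = pd.Series()     # the recorded completion for this row
    print(df)
    print(df3)

t3()
```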
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6)
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fall back to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:Converting timezone-aware:FutureWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# regression test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
class TestDatetime64Arithmetic:
# This class is intended for "finished" tests that are fully parametrized
# over DataFrame/Series/Index/DatetimeArray
# -------------------------------------------------------------
# Addition/Subtraction of timedelta-like
@pytest.mark.arm_slow
def test_dt64arr_add_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
# GH#22005, GH#22163 check DataFrame doesn't raise TypeError
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng + two_hours
tm.assert_equal(result, expected)
rng += two_hours
tm.assert_equal(rng, expected)
def test_dt64arr_sub_timedeltalike_scalar(
self, tz_naive_fixture, two_hours, box_with_array
):
tz = tz_naive_fixture
rng = date_range("2000-01-01", "2000-02-01", tz=tz)
expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz)
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng - two_hours
tm.assert_equal(result, expected)
rng -= two_hours
tm.assert_equal(rng, expected)
# TODO: redundant with test_dt64arr_add_timedeltalike_scalar
def test_dt64arr_add_td64_scalar(self, box_with_array):
# scalar timedeltas/np.timedelta64 objects
# operate with np.timedelta64 correctly
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:01"), Timestamp("20130101 9:02:01")]
)
dtarr = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(1, "s")
tm.assert_equal(result, expected)
result = np.timedelta64(1, "s") + dtarr
tm.assert_equal(result, expected)
expected = Series(
[Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")]
)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + np.timedelta64(5, "ms")
tm.assert_equal(result, expected)
result = np.timedelta64(5, "ms") + dtarr
tm.assert_equal(result, expected)
def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture):
# GH#23320 special handling for timedelta64("NaT")
tz = tz_naive_fixture
dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS")
other = np.timedelta64("NaT")
expected = DatetimeIndex(["NaT"] * 9, tz=tz)
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
msg = "cannot subtract"
with pytest.raises(TypeError, match=msg):
other - obj
def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
tdarr = tdi.values
expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz)
dtarr = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = dtarr + tdarr
tm.assert_equal(result, expected)
result = tdarr + dtarr
tm.assert_equal(result, expected)
expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz)
expected = tm.box_expected(expected, box_with_array)
result = dtarr - tdarr
tm.assert_equal(result, expected)
msg = "cannot subtract|(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
tdarr - dtarr
# -----------------------------------------------------------------
# Subtraction of datetime-like scalars
@pytest.mark.parametrize(
"ts",
[
Timestamp("2013-01-01"),
Timestamp("2013-01-01").to_pydatetime(),
Timestamp("2013-01-01").to_datetime64(),
],
)
def test_dt64arr_sub_dtscalar(self, box_with_array, ts):
# GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype
idx = date_range("2013-01-01", periods=3)._with_freq(None)
idx = tm.box_expected(idx, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = idx - ts
tm.assert_equal(result, expected)
def test_dt64arr_sub_datetime64_not_ns(self, box_with_array):
# GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano
# for DataFrame operation
dt64 = np.datetime64("2013-01-01")
assert dt64.dtype == "datetime64[D]"
dti = date_range("20130101", periods=3)._with_freq(None)
dtarr = tm.box_expected(dti, box_with_array)
expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"])
expected = tm.box_expected(expected, box_with_array)
result = dtarr - dt64
tm.assert_equal(result, expected)
result = dt64 - dtarr
tm.assert_equal(result, -expected)
def test_dt64arr_sub_timestamp(self, box_with_array):
ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern")
ser = ser._with_freq(None)
ts = ser[0]
ser = tm.box_expected(ser, box_with_array)
delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")])
expected = tm.box_expected(delta_series, box_with_array)
tm.assert_equal(ser - ts, expected)
tm.assert_equal(ts - ser, -expected)
def test_dt64arr_sub_NaT(self, box_with_array):
# GH#18808
dti = DatetimeIndex([NaT, Timestamp("19900315")])
ser = tm.box_expected(dti, box_with_array)
result = ser - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
dti_tz = dti.tz_localize("Asia/Tokyo")
ser_tz = tm.box_expected(dti_tz, box_with_array)
result = ser_tz - NaT
expected = Series([NaT, NaT], dtype="timedelta64[ns]")
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(result, expected)
# -------------------------------------------------------------
# Subtraction of datetime-like array-like
def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture):
dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture)
expected = dti - dti
obj = tm.box_expected(dti, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with tm.assert_produces_warning(PerformanceWarning):
result = obj - obj.astype(object)
tm.assert_equal(result, expected)
def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array):
dti = date_range("2016-01-01", periods=3, tz=None)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
expected = dtarr - dtarr
result = dtarr - dt64vals
tm.assert_equal(result, expected)
result = dt64vals - dtarr
tm.assert_equal(result, expected)
def test_dt64arr_aware_sub_dt64ndarray_raises(
self, tz_aware_fixture, box_with_array
):
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
msg = "subtraction must have the same timezones or"
with pytest.raises(TypeError, match=msg):
dtarr - dt64vals
with pytest.raises(TypeError, match=msg):
dt64vals - dtarr
# -------------------------------------------------------------
# Addition of datetime-like others (invalid)
def test_dt64arr_add_dt64ndarray_raises(self, tz_naive_fixture, box_with_array):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=3, tz=tz)
dt64vals = dti.values
dtarr = tm.box_expected(dti, box_with_array)
assert_cannot_add(dtarr, dt64vals)
def test_dt64arr_add_timestamp_raises(self, box_with_array):
# GH#22163 ensure DataFrame doesn't cast Timestamp to i8
idx = DatetimeIndex(["2011-01-01", "2011-01-02"])
ts = idx[0]
idx = tm.box_expected(idx, box_with_array)
assert_cannot_add(idx, ts)
# -------------------------------------------------------------
# Other Invalid Addition/Subtraction
@pytest.mark.parametrize(
"other",
[
3.14,
np.array([2.0, 3.0]),
# GH#13078 datetime +/- Period is invalid
Period("2011-01-01", freq="D"),
# https://github.com/pandas-dev/pandas/issues/10329
time(1, 2, 3),
],
)
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array):
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
dtarr = tm.box_expected(dti, box_with_array)
msg = "|".join(
[
"unsupported operand type",
"cannot (add|subtract)",
"cannot use operands with types",
"ufunc '?(add|subtract)'? cannot use operands with types",
"Concatenation operation is not implemented for NumPy arrays",
]
)
assert_invalid_addsub_type(dtarr, other, msg)
@pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "H"])
@pytest.mark.parametrize("dti_freq", [None, "D"])
def test_dt64arr_add_sub_parr(
self, dti_freq, pi_freq, box_with_array, box_with_array2
):
# GH#20049 subtracting PeriodIndex should raise TypeError
dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq)
pi = dti.to_period(pi_freq)
dtarr = tm.box_expected(dti, box_with_array)
parr = tm.box_expected(pi, box_with_array2)
msg = "|".join(
[
"cannot (add|subtract)",
"unsupported operand",
"descriptor.*requires",
"ufunc.*cannot use operands",
]
)
assert_invalid_addsub_type(dtarr, parr, msg)
def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture):
# https://github.com/pandas-dev/pandas/issues/10329
tz = tz_naive_fixture
obj1 = date_range("2012-01-01", periods=3, tz=tz)
obj2 = [time(i, i, i) for i in range(3)]
obj1 = tm.box_expected(obj1, box_with_array)
obj2 = tm.box_expected(obj2, box_with_array)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
# If `x + y` raises, then `y + x` should raise here as well
msg = (
r"unsupported operand type\(s\) for -: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 - obj2
msg = "|".join(
[
"cannot subtract DatetimeArray from ndarray",
"ufunc (subtract|'subtract') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 - obj1
msg = (
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'"
)
with pytest.raises(TypeError, match=msg):
obj1 + obj2
msg = "|".join(
[
r"unsupported operand type\(s\) for \+: "
"'(Timestamp|DatetimeArray)' and 'datetime.time'",
"ufunc (add|'add') cannot use operands with types "
r"dtype\('O'\) and dtype\('<M8\[ns\]'\)",
]
)
with pytest.raises(TypeError, match=msg):
obj2 + obj1
class TestDatetime64DateOffsetArithmetic:
# -------------------------------------------------------------
# Tick DateOffsets
# TODO: parametrize over timezone?
def test_dt64arr_series_add_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser + pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array):
# GH#4532
# operate with pd.offsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
expected = Series(
[Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")]
)
ser = tm.box_expected(ser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = ser - pd.offsets.Second(5)
tm.assert_equal(result, expected)
result2 = -pd.offsets.Second(5) + ser
tm.assert_equal(result2, expected)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
pd.offsets.Second(5) - ser
@pytest.mark.parametrize(
"cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"]
)
def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array):
# GH#4532
# smoke tests for valid DateOffsets
ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")])
ser = tm.box_expected(ser, box_with_array)
offset_cls = getattr(pd.offsets, cls_name)
ser + offset_cls(5)
offset_cls(5) + ser
ser - offset_cls(5)
def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array):
# GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype
tz = tz_aware_fixture
if tz == "US/Pacific":
dates = date_range("2012-11-01", periods=3, tz=tz)
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="H")
expected = DatetimeIndex(
["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"],
freq="H",
tz=tz,
)
dates = tm.box_expected(dates, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# TODO: sub?
for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]:
offset = dates + scalar
tm.assert_equal(offset, expected)
offset = scalar + dates
tm.assert_equal(offset, expected)
# -------------------------------------------------------------
# RelativeDelta DateOffsets
def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array):
# GH#10699
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
# DateOffset relativedelta fastpath
relative_kwargs = [
("years", 2),
("months", 5),
("days", 3),
("hours", 5),
("minutes", 10),
("seconds", 2),
("microseconds", 5),
]
for i, (unit, value) in enumerate(relative_kwargs):
off = DateOffset(**{unit: value})
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
off = DateOffset(**dict(relative_kwargs[: i + 1]))
expected = DatetimeIndex([x + off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + off)
expected = DatetimeIndex([x - off for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - off)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
off - vec
# -------------------------------------------------------------
# Non-Tick, Non-RelativeDelta DateOffsets
# TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes
# tz-aware cases which this does not
@pytest.mark.parametrize(
"cls_and_kwargs",
[
"YearBegin",
("YearBegin", {"month": 5}),
"YearEnd",
("YearEnd", {"month": 5}),
"MonthBegin",
"MonthEnd",
"SemiMonthEnd",
"SemiMonthBegin",
"Week",
("Week", {"weekday": 3}),
"Week",
("Week", {"weekday": 6}),
"BusinessDay",
"BDay",
"QuarterEnd",
"QuarterBegin",
"CustomBusinessDay",
"CDay",
"CBMonthEnd",
"CBMonthBegin",
"BMonthBegin",
"BMonthEnd",
"BusinessHour",
"BYearBegin",
"BYearEnd",
"BQuarterBegin",
("LastWeekOfMonth", {"weekday": 2}),
(
"FY5253Quarter",
{
"qtr_with_extra_week": 1,
"startingMonth": 1,
"weekday": 2,
"variation": "nearest",
},
),
("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}),
("WeekOfMonth", {"weekday": 2, "week": 2}),
"Easter",
("DateOffset", {"day": 4}),
("DateOffset", {"month": 5}),
],
)
@pytest.mark.parametrize("normalize", [True, False])
@pytest.mark.parametrize("n", [0, 5])
def test_dt64arr_add_sub_DateOffsets(
self, box_with_array, n, normalize, cls_and_kwargs
):
# GH#10699
# assert vectorized operation matches pointwise operations
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
if n == 0 and cls_name in [
"WeekOfMonth",
"LastWeekOfMonth",
"FY5253Quarter",
"FY5253",
]:
# passing n = 0 is invalid for these offset classes
return
vec = DatetimeIndex(
[
Timestamp("2000-01-05 00:15:00"),
Timestamp("2000-01-31 00:23:00"),
Timestamp("2000-01-01"),
Timestamp("2000-03-31"),
Timestamp("2000-02-29"),
Timestamp("2000-12-31"),
Timestamp("2000-05-15"),
Timestamp("2001-06-15"),
]
)
vec = tm.box_expected(vec, box_with_array)
vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
# pandas.errors.PerformanceWarning: Non-vectorized DateOffset being
# applied to Series or DatetimeIndex
# we aren't testing that here, so ignore.
warnings.simplefilter("ignore", PerformanceWarning)
offset = offset_cls(n, normalize=normalize, **kwargs)
expected = DatetimeIndex([x + offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec + offset)
expected = DatetimeIndex([x - offset for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, vec - offset)
expected = DatetimeIndex([offset + x for x in vec_items])
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(expected, offset + vec)
msg = "(bad|unsupported) operand type for unary"
with pytest.raises(TypeError, match=msg):
offset - vec
def test_dt64arr_add_sub_DateOffset(self, box_with_array):
# GH#10699
s = date_range("2000-01-01", "2000-01-31", name="a")
s = tm.box_expected(s, box_with_array)
result = s + DateOffset(years=1)
result2 = DateOffset(years=1) + s
exp = date_range("2001-01-01", "2001-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
result = s - DateOffset(years=1)
exp = date_range("1999-01-01", "1999-01-31", name="a")._with_freq(None)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = DatetimeIndex(
[
Timestamp("2000-01-16 00:15:00", tz="US/Central"),
Timestamp("2000-02-16", tz="US/Central"),
],
name="a",
)
exp = tm.box_expected(exp, box_with_array)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = DatetimeIndex(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
s = tm.box_expected(s, box_with_array)
result = s + | pd.offsets.MonthEnd() | pandas.offsets.MonthEnd |
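The completion recorded for the row above is `pd.offsets.MonthEnd()` (API `pandas.offsets.MonthEnd`). As a small standalone illustration, independent of the test harness, of what that offset does to the tz-aware index built just before the cut:

```python
import pandas as pd

s = pd.DatetimeIndex(
    [
        pd.Timestamp("2000-01-15 00:15:00", tz="US/Central"),
        pd.Timestamp("2000-02-15", tz="US/Central"),
    ],
    name="a",
)
# MonthEnd() rolls each timestamp forward to the end of its month,
# preserving the time-of-day and the timezone.
print(s + pd.offsets.MonthEnd())
# -> 2000-01-31 00:15:00-06:00 and 2000-02-29 00:00:00-06:00
```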
"""Tests various time series functions which are used extensively in tcapy
"""
__author__ = 'saeedamen' # <NAME> / <EMAIL>
#
# Copyright 2017 Cuemacro Ltd. - http//www.cuemacro.com / @cuemacro
#
# See the License for the specific language governing permissions and limitations under the License.
#
import pandas as pd
import numpy as np
from datetime import timedelta
from pandas.testing import assert_frame_equal
from tcapy.util.timeseries import TimeSeriesOps
from tcapy.util.customexceptions import *
from test.config import *
ticker = 'EURUSD'
start_date = '20 Apr 2017'
finish_date = '07 Jun 2017'
def test_vlookup():
"""Runs a test for the VLOOKUP function which is used extensively in a lot of the metric construction
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
rand_data = np.random.random(len(dt))
df_before = pd.DataFrame(index=dt, columns=['rand'], data=rand_data)
millseconds_tests = [100, 500]
# Try perturbing by nothing, then 100 and 500 milliseconds
for millseconds in millseconds_tests:
df_perturb = pd.DataFrame(index=dt - timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
# Do a VLOOKUP (which should give us all the previous ones) - take off the last point (which would be AFTER
# our perturbation)
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt[0:-1], df_perturb, 'rand')
df_after = pd.DataFrame(index=dt_search + timedelta(milliseconds=millseconds), data=search.values,
columns=['rand'])
# check the search dataframes are equal
assert_frame_equal(df_before[0:-1], df_after, check_dtype=False)
# in this case, our lookup series doesn't overlap at all with our range, so we should get back and exception
dt_lookup = pd.date_range(start='30 Dec 2017', end='31 Dec 2018', freq='1min')
df_perturb = pd.DataFrame(index=dt + timedelta(milliseconds=millseconds), columns=['rand'],
data=rand_data)
exception_has_been_triggered = False
try:
search, dt_search = TimeSeriesOps().vlookup_style_data_frame(dt_lookup, df_perturb, 'rand')
except ValidationException:
exception_has_been_triggered = True
assert (exception_has_been_triggered)
def test_filter_between_days_times():
"""Runs a test for the filter by time of day and day of the week, on synthetically constructed data and then checks
that no data is outside those time windows
"""
from tcapy.analysis.tradeorderfilter import TradeOrderFilterTimeOfDayWeekMonth
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min')
df = pd.DataFrame(index=dt, columns=['Rand'], data=np.random.random(len(dt)))
df = df.tz_localize('utc')
trade_order_filter = TradeOrderFilterTimeOfDayWeekMonth(time_of_day={'start_time': '07:00:00',
'finish_time': '17:00:00'},
day_of_week='mon')
df = trade_order_filter.filter_trade_order(trade_order_df=df)
assert (df.index[0].hour >= 7 and df.index[-1].hour <= 17 and df.index[0].dayofweek == 0)
def test_remove_consecutive_duplicates():
"""Tests that consecutive duplicates are removed correctly in time series
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='30s')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.random.random(len(dt))
df['bid'] = np.random.random(len(dt))
df['ask'] = np.random.random(len(dt))
# Filter by 'mid'
df2 = df.copy()
df2.index = df2.index + timedelta(seconds=10)
df_new = df.append(df2)
df_new = df_new.sort_index()
df_new = TimeSeriesOps().drop_consecutive_duplicates(df_new, 'mid')
assert_frame_equal(df_new, df)
# For 'bid' and 'ask'
df2 = df.copy()
df2.index = df2.index + timedelta(seconds=10)
df_new = df.append(df2)
df_new = df_new.sort_index()
df_new = TimeSeriesOps().drop_consecutive_duplicates(df_new, ['bid', 'ask'])
assert_frame_equal(df_new, df)
def test_ohlc():
"""Tests the open/high/low/close resampling works on time series
"""
dt = pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1s')
df = pd.DataFrame(index=dt, columns=['bid', 'mid', 'ask'])
df['mid'] = np.random.random(len(dt))
df_ohlc = TimeSeriesOps().resample_time_series(df, resample_amount=1, how='ohlc', unit='minutes', field='mid')
assert all(df_ohlc['high'] >= df_ohlc['low'])
def test_time_delta():
"""Tests time delta function works for a number of different times"""
td = TimeSeriesOps().get_time_delta("12:30")
assert (td.seconds == 45000)  # 12 h * 3600 + 30 min * 60 = 45000 seconds
td = TimeSeriesOps().get_time_delta("12:30:35")
assert (td.seconds == 45035)  # 45000 seconds + 35 seconds
print(td)
def test_overwrite_time_in_datetimeindex():
"""Tests that overwriting the time with a specific time of day works
"""
# Clocks went forward in London at 01:00 on 29 Mar 2020 (start of BST)
datetimeindex = pd.date_range('28 Mar 2020', '05 Apr 2020', freq='h')
datetimeindex = datetimeindex.tz_localize("utc")
datetimeindex = TimeSeriesOps().overwrite_time_of_day_in_datetimeindex(datetimeindex, "16:00", overwrite_timezone="Europe/London")
# Back in UTC time 16:00 LDN is 15:00 UTC after DST changes (and is 16:00 UTC beforehand)
assert datetimeindex[0].hour == 16 and datetimeindex[-1].hour == 15
def test_chunk():
"""Tests the chunking of dataframes works
"""
dt = | pd.date_range(start='01 Jan 2018', end='05 Jan 2018', freq='1min') | pandas.date_range |
import altair as alt
from matplotlib.colors import to_rgba
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib as mpl
import six
def build_dataframe(fields):
field_names = {}
data = pd.DataFrame()
for name, field in six.iteritems(fields):
if field is not None:
if isinstance(field, pd.Series):
fname = field.name
else:
fname = name
data[fname] = field
field_names[name] = fname
else:
field_names[name] = None
return data, field_names
def dtype_to_vega_type(t):
if t == np.dtype('datetime64[ns]'):
return 'temporal'
if t == np.float64 or t == np.int64:
return 'quantitative'
return 'nominal'
def size_chart(chart, size, aspect):
dpi = mpl.rcParams['figure.dpi']
if size:
if isinstance(chart, alt.FacetChart):
chart = chart.spec
chart.height = size*dpi
chart.width = aspect*size*dpi
def vega_color(color):
if isinstance(color, six.string_types) and (color.startswith('rgb(') or color.startswith('rgba(')):
return color
c = to_rgba(color)
return "rgba(%s,%s,%s,%s)" % (int(c[0]*255), int(c[1]*255), int(c[2]*255), c[3])
def vega_palette(palette, color=None, saturation=1, vega_type="nominal"):
if palette:
if isinstance(palette, mpl.colors.Colormap):
pal = palette.colors
else:
pal = sns.color_palette(palette)
elif color:
pal = [color]
elif vega_type == "nominal":
pal = sns.color_palette()
else:
pal = sns.cubehelix_palette(0, as_cmap=True).colors
if saturation < 1:
pal = sns.color_palette(pal, desat=saturation)
pal = sns.color_palette(pal)
return [vega_color(c) for c in pal]
def vega_semantic_type(data):
try:
float_data = data.astype(np.float)
values = np.unique(float_data.dropna())
if np.array_equal(values, np.array([0., 1.])):
return "nominal"
return "quantitative"
except (ValueError, TypeError):
return "nominal"
# From seaborn.categorical
def infer_orient(x, y, orient=None):
"""Determine how the plot should be oriented based on the data."""
orient = str(orient)
def is_categorical(s):
try:
# Correct way, but does not exist in older Pandas
try:
return pd.api.types.is_categorical_dtype(s)
except AttributeError:
return | pd.core.common.is_categorical_dtype(s) | pandas.core.common.is_categorical_dtype |
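The completion recorded for the row above is `pd.core.common.is_categorical_dtype(s)` (API `pandas.core.common.is_categorical_dtype`). With that completion spliced in and indentation restored, the compatibility helper reads as follows; the fallback path only exists on old pandas versions, which is why it sits behind the `AttributeError` handler.

```python
import pandas as pd

def is_categorical(s):
    try:
        # Preferred location of the check in newer pandas
        return pd.api.types.is_categorical_dtype(s)
    except AttributeError:
        # Older pandas exposed the check under pd.core.common instead
        return pd.core.common.is_categorical_dtype(s)
```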
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pandas as pd
import pytest
from pandas import DataFrame
from pandas.util import testing as tm
class TestToCSV(object):
@pytest.mark.xfail((3, 6, 5) > sys.version_info >= (3, 5),
reason=("Python csv library bug "
"(see https://bugs.python.org/issue32255)"))
def test_to_csv_with_single_column(self):
# see gh-18676, https://bugs.python.org/issue32255
#
# Python's CSV library adds an extraneous '""'
# before the newline when the NaN-value is in
# the first row. Otherwise, only the newline
# character is added. This behavior is inconsistent
# and was patched in https://bugs.python.org/pull_request4672.
df1 = DataFrame([None, 1])
expected1 = """\
""
1.0
"""
with tm.ensure_clean('test.csv') as path:
df1.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected1
df2 = DataFrame([1, None])
expected2 = """\
1.0
""
"""
with tm.ensure_clean('test.csv') as path:
df2.to_csv(path, header=None, index=None)
with open(path, 'r') as f:
assert f.read() == expected2
def test_to_csv_default_encoding(self):
# GH17097
df = DataFrame({'col': [u"AAAAA", u"ÄÄÄÄÄ", u"ßßßßß", u"聞聞聞聞聞"]})
with tm.ensure_clean('test.csv') as path:
# the default to_csv encoding in Python 2 is ascii, and that in
# Python 3 is utf-8.
if pd.compat.PY2:
# the encoding argument should be utf-8
with tm.assert_raises_regex(UnicodeEncodeError, 'ascii'):
df.to_csv(path)
else:
df.to_csv(path)
tm.assert_frame_equal(pd.read_csv(path, index_col=0), df)
def test_to_csv_quotechar(self):
df = DataFrame({'col': [1, 2]})
expected = """\
"","col"
"0","1"
"1","2"
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1) # 1=QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
expected = """\
$$,$col$
$0$,$1$
$1$,$2$
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, quotechar="$")
with open(path, 'r') as f:
assert f.read() == expected
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(TypeError, 'quotechar'):
df.to_csv(path, quoting=1, quotechar=None)
def test_to_csv_doublequote(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a""a"
"1","""bb"""
'''
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=1, doublequote=True) # QUOTE_ALL
with open(path, 'r') as f:
assert f.read() == expected
from _csv import Error
with tm.ensure_clean('test.csv') as path:
with tm.assert_raises_regex(Error, 'escapechar'):
df.to_csv(path, doublequote=False) # no escapechar set
def test_to_csv_escapechar(self):
df = DataFrame({'col': ['a"a', '"bb"']})
expected = '''\
"","col"
"0","a\\"a"
"1","\\"bb\\""
'''
with tm.ensure_clean('test.csv') as path: # QUOTE_ALL
df.to_csv(path, quoting=1, doublequote=False, escapechar='\\')
with open(path, 'r') as f:
assert f.read() == expected
df = DataFrame({'col': ['a,a', ',bb,']})
expected = """\
,col
0,a\\,a
1,\\,bb\\,
"""
with tm.ensure_clean('test.csv') as path:
df.to_csv(path, quoting=3, escapechar='\\') # QUOTE_NONE
with open(path, 'r') as f:
assert f.read() == expected
def test_csv_to_string(self):
df = DataFrame({'col': [1, 2]})
expected = ',col\n0,1\n1,2\n'
assert df.to_csv() == expected
def test_to_csv_decimal(self):
# GH 781
df = DataFrame({'col1': [1], 'col2': ['a'], 'col3': [10.1]})
expected_default = ',col1,col2,col3\n0,1,a,10.1\n'
assert df.to_csv() == expected_default
expected_european_excel = ';col1;col2;col3\n0;1;a;10,1\n'
assert df.to_csv(decimal=',', sep=';') == expected_european_excel
expected_float_format_default = ',col1,col2,col3\n0,1,a,10.10\n'
assert df.to_csv(float_format='%.2f') == expected_float_format_default
expected_float_format = ';col1;col2;col3\n0;1;a;10,10\n'
assert df.to_csv(decimal=',', sep=';',
float_format='%.2f') == expected_float_format
# GH 11553: testing if decimal is taken into account for '0.0'
df = pd.DataFrame({'a': [0, 1.1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0^0,2^2,1\n1^1,3^3,1\n'
assert df.to_csv(index=False, decimal='^') == expected
# same but for an index
assert df.set_index('a').to_csv(decimal='^') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(decimal="^") == expected
def test_to_csv_float_format(self):
# testing if float_format is taken into account for the index
# GH 11553
df = pd.DataFrame({'a': [0, 1], 'b': [2.2, 3.3], 'c': 1})
expected = 'a,b,c\n0,2.20,1\n1,3.30,1\n'
assert df.set_index('a').to_csv(float_format='%.2f') == expected
# same for a multi-index
assert df.set_index(['a', 'b']).to_csv(
float_format='%.2f') == expected
def test_to_csv_na_rep(self):
# testing if NaN values are correctly represented in the index
# GH 11553
df = DataFrame({'a': [0, np.NaN], 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0.0,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# now with an index containing only NaNs
df = DataFrame({'a': np.NaN, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n_,0,2\n_,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
# check if na_rep parameter does not break anything when no NaN
df = DataFrame({'a': 0, 'b': [0, 1], 'c': [2, 3]})
expected = "a,b,c\n0,0,2\n0,1,3\n"
assert df.set_index('a').to_csv(na_rep='_') == expected
assert df.set_index(['a', 'b']).to_csv(na_rep='_') == expected
def test_to_csv_date_format(self):
# GH 10209
df_sec = DataFrame({'A': pd.date_range('20130101', periods=5, freq='s')
})
df_day = DataFrame({'A': pd.date_range('20130101', periods=5, freq='d')
})
expected_default_sec = (',A\n0,2013-01-01 00:00:00\n1,'
'2013-01-01 00:00:01\n2,2013-01-01 00:00:02'
'\n3,2013-01-01 00:00:03\n4,'
'2013-01-01 00:00:04\n')
assert df_sec.to_csv() == expected_default_sec
expected_ymdhms_day = (',A\n0,2013-01-01 00:00:00\n1,'
'2013-01-02 00:00:00\n2,2013-01-03 00:00:00'
'\n3,2013-01-04 00:00:00\n4,'
'2013-01-05 00:00:00\n')
assert (df_day.to_csv(date_format='%Y-%m-%d %H:%M:%S') ==
expected_ymdhms_day)
expected_ymd_sec = (',A\n0,2013-01-01\n1,2013-01-01\n2,'
'2013-01-01\n3,2013-01-01\n4,2013-01-01\n')
assert df_sec.to_csv(date_format='%Y-%m-%d') == expected_ymd_sec
expected_default_day = (',A\n0,2013-01-01\n1,2013-01-02\n2,'
'2013-01-03\n3,2013-01-04\n4,2013-01-05\n')
assert df_day.to_csv() == expected_default_day
assert df_day.to_csv(date_format='%Y-%m-%d') == expected_default_day
# testing if date_format parameter is taken into account for
# multi-indexed dataframes (GH 7791)
df_sec['B'] = 0
df_sec['C'] = 1
expected_ymd_sec = 'A,B,C\n2013-01-01,0,1\n'
df_sec_grouped = df_sec.groupby([pd.Grouper(key='A', freq='1h'), 'B'])
assert (df_sec_grouped.mean().to_csv(date_format='%Y-%m-%d') ==
expected_ymd_sec)
def test_to_csv_multi_index(self):
# GH 6618
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]))
exp = ",1\n,2\n0,1\n"
assert df.to_csv() == exp
exp = "1\n2\n1\n"
assert df.to_csv(index=False) == exp
df = DataFrame([1], columns=pd.MultiIndex.from_arrays([[1], [2]]),
index= | pd.MultiIndex.from_arrays([[1], [2]]) | pandas.MultiIndex.from_arrays |
import pandas as pd
import datetime
import matplotlib
# read in the csv files for monthly contributions and monthly fund returns
pay = | pd.read_csv("monthly_max_contributions.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
import unittest
from empyrical.perf_attrib import perf_attrib
class PerfAttribTestCase(unittest.TestCase):
    def test_perf_attrib_simple(self):
        start_date = "2017-01-01"
        periods = 2
        dts = | pd.date_range(start_date, periods=periods, name="dt") | pandas.date_range |
from __future__ import annotations
import math
import warnings
from functools import partial
from itertools import count
from typing import Callable, Sequence, Union
import numpy as np
import pandas as pd
from scipy.optimize import minimize
from scipy.stats import chi2
from pykelihood.cached_property import cached_property
from pykelihood.distributions import GPD, Distribution, Exponential
from pykelihood.metrics import (
AIC,
BIC,
Brier_score,
bootstrap,
crps,
opposite_log_likelihood,
qq_l1_distance,
quantile_score,
)
from pykelihood.parameters import ParametrizedFunction
warnings.filterwarnings("ignore")
class Profiler(object):
def __init__(
self,
distribution: Distribution,
data: pd.Series,
score_function: Callable = opposite_log_likelihood,
name: str = "Standard",
inference_confidence: float = 0.99,
single_profiling_param=None,
):
"""
:param distribution: distribution on which the inference is based
:param data: variable of interest
:param score_function: function used for optimisation
:param name: name (optional) of the likelihood if it needs to be compared to other likelihood functions
:param inference_confidence: wanted confidence for intervals
:note: the likelihood-ratio threshold is computed with a chi2 distribution with one degree of freedom
:param single_profiling_param: parameter that we want to fix to create the profiles based on likelihood
"""
self.name = name
self.distribution = distribution
self.data = data
self.score_function = score_function
self.inference_confidence = inference_confidence
self.single_profiling_param = single_profiling_param
@cached_property
def standard_mle(self):
estimate = self.distribution.fit(self.data)
ll = -opposite_log_likelihood(estimate, self.data)
ll = ll if isinstance(ll, float) else ll[0]
return (estimate, ll)
@cached_property
def optimum(self):
x0 = self.distribution.optimisation_params
estimate = self.distribution.fit_instance(
self.data, score=self.score_function, x0=x0
)
func = -self.score_function(estimate, self.data)
func = func if isinstance(func, float) else func[0]
return (estimate, func)
@cached_property
def profiles(self):
profiles = {}
opt, func = self.optimum
if self.single_profiling_param is not None:
params = [self.single_profiling_param]
else:
params = opt.optimisation_param_dict.keys()
for name, k in opt.optimisation_param_dict.items():
if name in params:
r = float(k)
lb = r - 5 * (10 ** math.floor(math.log10(np.abs(r))))
ub = r + 5 * (10 ** math.floor(math.log10(np.abs(r))))
range = list(np.linspace(lb, ub, 50))
profiles[name] = self.test_profile_likelihood(range, name)
return profiles
def test_profile_likelihood(self, range_for_param, param):
opt, func = self.optimum
profile_ll = []
params = []
for x in range_for_param:
try:
pl = opt.fit_instance(
self.data,
score=self.score_function,
**{param: x},
)
pl_value = -self.score_function(pl, self.data)
pl_value = pl_value if isinstance(pl_value, float) else pl_value[0]
if np.isfinite(pl_value):
profile_ll.append(pl_value)
params.append([p.value for p in pl.flattened_params])
except:
pass
chi2_par = {"df": 1}
lower_bound = func - chi2.ppf(self.inference_confidence, **chi2_par) / 2
filtered_params = pd.DataFrame(
[x + [eval] for x, eval in zip(params, profile_ll) if eval >= lower_bound]
)
cols = list(opt.flattened_param_dict.keys()) + ["score"]
filtered_params = filtered_params.rename(columns=dict(zip(count(), cols)))
return filtered_params
def confidence_interval(self, metric: Callable[[Distribution], float]):
"""
:param metric: function depending on the distribution: it can be one of the parameter (ex: lambda x: x.shape() for a parameter called "shape"),
or a metric relevant to the field of study (ex: the 100-years return level for extreme value analysis by setting lambda x: x.isf(1/100))...
:return: bounds based on parameter profiles for this metric
"""
estimates = []
profiles = self.profiles
if self.single_profiling_param is not None:
params = [self.single_profiling_param]
else:
params = profiles.keys()
for param in params:
columns = list(self.optimum[0].optimisation_param_dict.keys())
result = profiles[param].apply(
lambda row: metric(
self.distribution.with_params({k: row[k] for k in columns}.values())
),
axis=1,
)
estimates.extend(list(result.values))
if len(estimates):
return [np.min(estimates), np.max(estimates)]
else:
return [-np.inf, np.inf]
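# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): a minimal illustration of
# driving Profiler. The synthetic exceedance series and the choice to profile
# only the GPD "shape" parameter are assumptions made for this example.
# ---------------------------------------------------------------------------
def _profiler_usage_sketch():
    rng = np.random.default_rng(0)
    excesses = pd.Series(rng.exponential(scale=2.0, size=500))
    prof = Profiler(GPD.fit(excesses), excesses, single_profiling_param="shape")
    # Profile-likelihood bounds for the 1-in-100 observation return level.
    return prof.confidence_interval(lambda d: d.isf(1 / 100))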
class DetrendedFluctuationAnalysis(object):
def __init__(
self,
data: pd.DataFrame,
scale_lim: Sequence[int] = None,
scale_step: float = None,
):
"""
:param data: pandas Dataframe, if it contains a column for the day and month, the profiles are normalized
according to the mean for each calendar day averaged over years.
:param scale_lim: limits for window sizes
:param scale_step: steps for window sizes
"""
if not ("month" in data.columns and "day" in data.columns):
print("Will use the total average to normalize the data...")
mean = data["data"].mean()
std = data["data"].std()
data = data.assign(mean=mean).assign(std=std)
else:
mean = (
data.groupby(["month", "day"])
.agg({"data": "mean"})["data"]
.rename("mean")
.reset_index()
)
std = (
data.groupby(["month", "day"])
.agg({"data": "std"})["data"]
.rename("std")
.reset_index()
)
data = data.merge(mean, on=["month", "day"], how="left").merge(
std, on=["month", "day"], how="left"
)
phi = (data["data"] - data["mean"]) / data["std"]
phi = (
phi.dropna()
) # cases where there is only one value for a given day / irrelevant for DFA
self.y = np.cumsum(np.array(phi))
if scale_lim is None:
lim_inf = 10 ** (math.floor(np.log10(len(data))) - 1)
lim_sup = min(
10 ** (math.ceil(np.log10(len(data)))), len(phi)
) # assuming all observations are equally split
scale_lim = [lim_inf, lim_sup]
if scale_step is None:
scale_step = 10 ** (math.floor(np.log10(len(data)))) / 2
self.scale_lim = scale_lim
self.scale_step = scale_step
@staticmethod
def calc_rms(x: np.array, scale: int, polynomial_order: int):
"""
windowed Root Mean Square (RMS) with polynomial detrending.
Args:
-----
*x* : numpy.array
one dimensional data vector
*scale* : int
length of the window in which RMS will be calculated
Returns:
--------
*rms* : numpy.array
RMS data in each window with length len(x)//scale
"""
# making an array with data divided in windows
shape = (x.shape[0] // scale, scale)
X = np.lib.stride_tricks.as_strided(x, shape=shape)
# vector of x-axis points to regression
scale_ax = np.arange(scale)
rms = np.zeros(X.shape[0])
for e, xcut in enumerate(X):
coeff = np.polyfit(scale_ax, xcut, deg=polynomial_order)
xfit = np.polyval(coeff, scale_ax)
# detrending and computing RMS of each window
rms[e] = np.mean((xcut - xfit) ** 2)
return rms
@staticmethod
def trend_type(alpha: float):
if round(alpha, 1) < 1:
if round(alpha, 1) < 0.5:
return "Anti-correlated"
elif round(alpha, 1) == 0.5:
return "Uncorrelated, white noise"
elif round(alpha, 1) > 0.5:
return "Correlated"
elif round(alpha, 1) == 1:
return "Noise, pink noise"
elif round(alpha, 1) > 1:
if round(alpha, 1) < 1.5:
return "Non-stationary, unbounded"
else:
return "Brownian Noise"
def __call__(
self, polynomial_order: int, show=False, ax=None, supplement_title="", color="r"
):
"""
Detrended Fluctuation Analysis - measures power law scaling coefficient
of the given signal *x*.
More details about the algorithm you can find e.g. here:
Kropp, Jürgen, & <NAME>. 2010. Case Studies. Chap. 8-11, pages 167–244 of : In extremis :
disruptive events and trends in climate and hydrology. Springer Science & Business Media.
"""
y = self.y
scales = (
np.arange(self.scale_lim[0], self.scale_lim[1], self.scale_step)
).astype(int)
fluct = np.zeros(len(scales))
# computing RMS for each window
for e, sc in enumerate(scales):
fluct[e] = np.sqrt(
np.mean(self.calc_rms(y, sc, polynomial_order=polynomial_order))
)
# as this stage, F^2(s) should be something of the form s^h(2); taking the log should give a linear form of coefficient h(2)
coeff = np.polyfit(np.log(scales), np.log(fluct), 1)
# numpy polyfit returns the highest power first
if show:
import matplotlib
matplotlib.rcParams["text.usetex"] = True
ax = ax or matplotlib.pyplot.gca()
default_title = "Detrended Fluctuation Analysis"
title = (
default_title
if supplement_title == ""
else f"{default_title} {supplement_title}"
)
fluctfit = np.exp(np.polyval(coeff, np.log(scales)))
ax.loglog(scales, fluct, "o", color=color, alpha=0.6)
ax.loglog(
scales,
fluctfit,
color=color,
alpha=0.6,
label=r"DFA-{}, {}: $\alpha$={}".format(
polynomial_order, self.trend_type(coeff[0]), round(coeff[0], 2)
),
)
ax.set_title(title)
ax.set_xlabel(r"$\log_{10}$(time window)")
ax.set_ylabel(r"$\log_{10}$F(t)")
ax.legend(loc="lower right", fontsize="small")
return scales, fluct, coeff[0]
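# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): rough usage of the DFA
# helper on a synthetic white-noise series. The frame only carries a "data"
# column, so the global mean/std normalisation branch is exercised.
# ---------------------------------------------------------------------------
def _dfa_usage_sketch():
    rng = np.random.default_rng(0)
    frame = pd.DataFrame({"data": rng.normal(size=2000)})
    dfa = DetrendedFluctuationAnalysis(frame)
    scales, fluct, alpha = dfa(polynomial_order=1)
    # For white noise the scaling exponent should come out close to 0.5.
    return alpha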
def pettitt_test(data: Union[np.array, pd.DataFrame, pd.Series]):
"""
Pettitt's non-parametric test for change-point detection.
Given an input signal, it reports the likely position of a single switch point along with
the significance probability for location K, approximated for p <= 0.05.
"""
T = len(data)
if isinstance(data, pd.DataFrame) or isinstance(data, pd.Series):
X = np.array(data).reshape((len(data), 1))
else:
X = data.reshape((len(data), 1))
vector_of_ones = np.ones([1, len(X)])
matrix_col_X = np.matmul(X, vector_of_ones)
matrix_lines_X = matrix_col_X.T
diff = matrix_lines_X - matrix_col_X
diff_sign = np.sign(diff)
U_initial = diff_sign[0, 1:].sum()
sum_of_each_line = diff_sign[1:].sum(axis=1)
cs = sum_of_each_line.cumsum()
U = U_initial + cs
U = list(U)
U.insert(0, U_initial)
loc = np.argmax(np.abs(U))
K = np.max(np.abs(U))
p = np.exp(-3 * K ** 2 / (T ** 3 + T ** 2))
return (loc, p)
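# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): pettitt_test applied to a
# synthetic series with a known mean shift half-way through.
# ---------------------------------------------------------------------------
def _pettitt_usage_sketch():
    rng = np.random.default_rng(0)
    series = np.concatenate([rng.normal(0.0, 1.0, 100), rng.normal(2.0, 1.0, 100)])
    loc, p = pettitt_test(series)
    # loc should land near index 100 and p should be very small.
    return loc, p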
def threshold_selection_gpd_NorthorpColeman(
data: Union[pd.Series, np.ndarray],
thresholds: Union[Sequence, np.ndarray],
plot=False,
):
"""
Method based on a multiple threshold penultimate model,
introduced by Northorp and Coleman in 2013 for threshold selection in extreme value analysis.
Returns: table with likelihood computed using hypothesis of constant parameters h0, ll with h1, p_value of test, # obs
and figure to plot as an option
"""
if isinstance(data, pd.Series):
data = data.rename("realized")
elif isinstance(data, np.ndarray):
data = pd.Series(data, name="realized")
else:
raise TypeError("Observations should be in array or pandas series format.")
if isinstance(thresholds, Sequence):
thresholds = np.array(thresholds)
fits = {}
nll_ref = {}
for u in thresholds:
d = data[data > u]
fits[u] = GPD.fit(d, loc=u)
nll_ref[u] = opposite_log_likelihood(fits[u], d)
def negated_ll(x: np.array, ref_threshold: float):
tol = 1e-10
sigma_init = x[0]
if sigma_init <= tol:
return 10 ** 10
xi_init = x[1:]
# It could be interesting to consider this parameter stability condition
# if len(xi_init[np.abs(xi_init) >= 1.]):
# return 10**10
thresh = [u for u in thresholds if u >= ref_threshold]
thresh_diff = pd.Series(
np.concatenate([np.diff(thresh), [np.nan]]), index=thresh, name="w"
)
xi = pd.Series(xi_init, index=thresh, name="xi")
sigma = pd.Series(
sigma_init
+ np.cumsum(np.concatenate([[0], xi.iloc[:-1] * thresh_diff.iloc[:-1]])),
index=thresh,
name="sigma",
)
params_and_conditions = (
| pd.concat([sigma, xi, thresh_diff], axis=1) | pandas.concat |
#!/usr/bin/env python
'''
This script calculates a mean GloVe vector for each review
and trains a regression model to predict the usefulness
of each review.
This script is directly adapted from the jupyter notebook "revisit_nlp.ipynb".
'''
from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from sklearn.ensemble import RandomForestRegressor
from sklearn.kernel_ridge import KernelRidge
from sklearn.linear_model import LinearRegression
import argparse
import numpy as np
import os
import pandas as pd
import pickle as pkl
import smogn
import sys
import xgboost
# define a transformer class to engineer features
from sklearn.base import BaseEstimator, TransformerMixin
class Word2VecVectorizer(BaseEstimator, TransformerMixin):
def __init__(self, model):
print('Loading in word vectors...')
self.word_vectors = model
print('Finished loading in word vectors')
def fit(self, data):
return self
def transform(self, data):
# determine the dimensionality of vectors
v = self.word_vectors.get_vector('king')
self.D = v.shape[0]
X = np.zeros((len(data), self.D))
n = 0
emptycount = 0
for sentence in data:
tokens = sentence.split()
vecs = []
m = 0
for word in tokens:
try:
# throws KeyError if word not found
vec = self.word_vectors.get_vector(word)
vecs.append(vec)
m += 1
except KeyError:
pass
if len(vecs) > 0:
vecs = np.array(vecs)
X[n] = vecs.mean(axis=0)
else:
emptycount += 1
n += 1
print('Number of samples with no words found: %s / %s' % (emptycount, len(data)))
return X
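# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the vectorizer accepts any
# object exposing get_vector(word); gensim KeyedVectors loaded from a GloVe
# file converted to word2vec format is one option. The import and file path
# below are assumptions for illustration only.
# ---------------------------------------------------------------------------
def _vectorizer_usage_sketch():
    from gensim.models import KeyedVectors
    kv = KeyedVectors.load_word2vec_format('glove.6B.50d.w2v.txt')  # hypothetical path
    texts = ['the king is here', 'an empty review']
    # One mean GloVe vector per review, shape (len(texts), 50) for 50-d vectors.
    return Word2VecVectorizer(kv).fit(texts).transform(texts)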
if __name__ == '__main__':
# command line options
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--glove_dim', type=int, default=50)
parser.add_argument('-m', '--model', type=int, default=0,
help=' \
0: untuned RandomForestRegressor\n \
1: untuned XGBoost\n \
2: untuned KernelRidge\n \
3: untuned LinearRegression \
'
)
parser.add_argument('--imbalanced', action='store_true')
parser.add_argument('--load_weight', action='store_true')
args = parser.parse_args()
regr_opts = {
0: ('pred_rfr_untuned', RandomForestRegressor),
1: ('pred_xgbr_untuned', xgboost.XGBRegressor),
2: ('pred_krr_untuned', KernelRidge),
3: ('pred_linr_untuned', LinearRegression)
}
if not args.glove_dim in [50, 100, 200, 300]:
print('GloVe dimension must be one in {50, 100, 200, 300}.')
sys.exit(-1)
# load train and test data
df_train = pd.read_csv('../processed_data/train_meta_inc.csv')
df_test = | pd.read_csv('../processed_data/test_meta_inc.csv') | pandas.read_csv |
import click
from app.models import types
from app.settings import app_cfg
from app.utils import click_utils
@click.command()
@click.option('-i', '--input', 'opt_dir_in', required=True,
help='Path to project folder (metadata.csv, mask, real)')
@click.option('-f', '--force', 'opt_force', is_flag=True,
help='Force overwrite annotations file')
@click.option('--width', 'opt_width', default=320,
help='Image width to process mask at (over 320 is slow)')
@click.pass_context
def cli(ctx, opt_dir_in, opt_force, opt_width):
"""Converts image, masks, and metadata to CSV annotations"""
from os.path import join
from glob import glob
from pathlib import Path
from dataclasses import asdict
import logging
import pandas as pd
import cv2 as cv
import numpy as np
from tqdm import tqdm
from app.utils import file_utils, color_utils, anno_utils
# init log
log = app_cfg.LOG
log.info('Converting masks to annotations')
# init
records = []
# output file
fp_annotations = join(opt_dir_in, 'annotations.csv')
if Path(fp_annotations).exists() and not opt_force:
log.error(f'File exists: {fp_annotations}. Use "-f/--force" to overwrite')
return
# load the color coded CSV
fp_metadata = join(opt_dir_in, app_cfg.FN_METADATA)
df_objects = pd.read_csv(fp_metadata)
log.info(f'Metadata file contains {len(df_objects):,} objects')
# glob mask
fp_dir_im_reals = join(opt_dir_in, app_cfg.DN_REAL)
fp_dir_im_masks = join(opt_dir_in, app_cfg.DN_MASK)
fps_reals = glob(join(fp_dir_im_reals, '*.png'))
fps_masks = glob(join(fp_dir_im_masks, '*.png'))
if len(fps_masks) != len(fps_reals):
log.warn(f'Directories not balanced: {len(fps_masks)} masks != {len(fps_reals)} reals')
log.info(f'Converting {len(fps_masks)} mask images to annotations...')
# iterate through all images
for fp_mask in tqdm(fps_masks):
fn_mask = Path(fp_mask).name
im_mask = cv.imread(fp_mask)
w, h = im_mask.shape[:2][::-1]
scale = opt_width / w
im_mask_sm = cv.resize(im_mask, None, fx=scale, fy=scale, interpolation=cv.INTER_NEAREST)
# flatten image and find unique colors
im_mask_sm_rgb = cv.cvtColor(im_mask_sm, cv.COLOR_BGR2RGB)
w_sm, h_sm = im_mask_sm.shape[:2][::-1]
im_flat_rgb = im_mask_sm_rgb.reshape((w_sm * h_sm, 3))
rgb_unique = np.unique(im_flat_rgb, axis=0)
# iterate through all colors for all objects
for df in df_objects.itertuples():
# if the color is found in the image with a large enough area, append bbox
rgb_int = (df.color_r, df.color_g, df.color_b) # RGB uint8
found = any([(rgb_int == tuple(c)) for c in rgb_unique])
if found:
color_hex = f'0x{color_utils.rgb_int_to_hex(rgb_int)}'
bbox_norm = anno_utils.color_mask_to_rect(im_mask_sm_rgb, rgb_int)
if bbox_norm:
bbox_nlc = bbox_norm.to_labeled(df.label, df.label_index, fn_mask).to_colored(color_hex)
records.append(asdict(bbox_nlc))
# Convert to dataframe
df_annos = | pd.DataFrame.from_dict(records) | pandas.DataFrame.from_dict |
import matplotlib.pyplot as plt
from matplotlib import colors
import pickle
import pandas as pd
import pathlib
import json
from jsmin import jsmin
import numpy as np
from scipy import stats
import seaborn as sns
from tikzplotlib import save as tikz_save
def post_process_data(input_file_name):
# Colors
lineColor = [['C0'], ['C1'], ['C2'], [
'C3'], ['C4'], ['C5'], ['C6'], ['C7']]
# -------------------------
# Open and read input file
# -------------------------
# First, remove comments from the file with jsmin because json doesn't allow it
with open(f"{input_file_name}") as js_file:
minified = jsmin(js_file.read())
user_inputs = json.loads(minified)
if (user_inputs.get("PostProcess") is not None):
inputFields = user_inputs["PostProcess"]
else:
raise ValueError('Post-processing was requested but no inputs were provided')
out_folder = pathlib.Path("output")
out_data_file = pathlib.Path(out_folder, "output.txt")
model_eval_folder = pathlib.Path(out_folder, "model_eval")
with open(out_data_file, 'r') as file_param:
for ind, line in enumerate(file_param):
if ind == 1:
# Get random variable names
c_chain = line.strip()
unpar_name=c_chain.split()
elif ind == 3:
# Get number of iterations
c_chain = line.strip()
n_iterations = int(c_chain)
n_unpar = len(unpar_name)
n_samples = n_iterations + 1
unpar_name_list = {}
for i, name_param in enumerate(unpar_name):
unpar_name_list[name_param] = i
num_fig = 0
# -------------------------------------------
# --------- Plot experimental data ----------
# -------------------------------------------
# if (inputFields.get("Data") is not None):
# # Load experimental data
# with open('output/data', 'rb') as file_data_exp:
# pickler_data_exp = pickle.Unpickler(file_data_exp)
# data_exp = pickler_data_exp.load()
# if inputFields["Data"]["display"] == "yes":
# for i in range(data_exp.n_data_set):
# ind_1 = data_exp.index_data_set[i, 0]
# ind_2 = data_exp.index_data_set[i, 1]
# plt.figure(inputFields["Data"]["num_plot"])
# plt.plot(data_exp.x[ind_1:ind_2+1], data_exp.y[ind_1:ind_2+1],
# 'o', color=lineColor[i][0], mfc='none')
# error_bar (data_exp.x[ind_1:ind_2+1], data_exp.y[ind_1:ind_2+1],
# data_exp.std_y[ind_1:ind_2+1], lineColor[i][0])
# #, edgecolors='r'
if (inputFields.get("Data") is not None):
# Load experimental data
data_file = pathlib.Path(out_folder, "data.bin")
with open(data_file, 'rb') as file_data_exp:
pickler_data_exp = pickle.Unpickler(file_data_exp)
data_exp = pickler_data_exp.load()
if inputFields["Data"]["display"] == "yes":
num_plot = inputFields["Data"]["num_plot"]
for num_data_set, data_id in enumerate(data_exp.keys()):
n_x = len(data_exp[data_id].x)
n_data_set = int(len(data_exp[data_id].y[0])/n_x)
for i in range(n_data_set):
plt.figure(num_plot[num_data_set])
plt.figure(i)
if data_exp[data_id].n_runs > 1:
plt.plot(data_exp[data_id].x, data_exp[data_id].mean_y[i*n_x:(i+1)*n_x],
'o', color=lineColor[num_data_set][0], mfc='none')
for j in range(data_exp[data_id].n_runs):
plt.plot(data_exp[data_id].x, data_exp[data_id].y[j][i*n_x:i*n_x+n_x],'o', color=lineColor[num_data_set][0], mfc='none', label="Exp. data")
plt.xlabel(user_inputs["Sampling"]["BayesianPosterior"]["Data"][num_data_set]["xField"][0])
plt.ylabel(user_inputs["Sampling"]["BayesianPosterior"]["Data"][num_data_set]["yField"][0])
# error_bar (data_exp[data_id].x, data_exp[data_id].y[j][i*n_x:i*n_x+n_x]
# data_exp[data_id].std_y[i*n_x:i*n_x+n_x], lineColor[num_data_set][0])
error_bar (data_exp[data_id].x, data_exp[data_id].y[j][i*n_x:i*n_x+n_x],
data_exp[data_id].std_y[i*n_x:i*n_x+n_x], lineColor[num_data_set][0])
#, edgecolors='r'
plt.legend()
# -------------------------------------------
# --------- Plot initial guess --------------
# -------------------------------------------
if (inputFields.get("InitialGuess") is not None):
if inputFields["InitialGuess"]["display"] == "yes":
for num_data_set, data_id in enumerate(data_exp.keys()):
init_model_eval_file = pathlib.Path(model_eval_folder, f"{data_id}_fun_eval-{0}.npy")
data_init = np.load(init_model_eval_file)
n_x = len(data_exp[data_id].x)
n_data_set = int(len(data_exp[data_id].y[0])/n_x)
for i in range(n_data_set):
plt.figure(num_plot[num_data_set])
plt.figure(i)
plt.plot(data_exp[data_id].x,
data_init[i*n_x:(i+1)*n_x], '--', color=lineColor[num_data_set][0], label="Init. guess")
plt.legend()
if (inputFields.get("MarkovChain") is not None) or (inputFields.get("Posterior") is not None) or (inputFields.get("Propagation") is not None):
reader = pd.read_csv('output/mcmc_chain.csv', header=None)
param_value_raw = reader.values
n_samples = len(param_value_raw[:, 0]) # + 1
# # Load the samples of the distribution
# param_value_raw = np.zeros((n_samples, n_unpar))
# with open('output/mcmc_chain.dat', 'r') as file_param:
# i = 0
# for line in file_param:
# c_chain = line.strip()
# param_value_raw[i, :] = np.fromstring(
# c_chain[1:len(c_chain)-1], sep=' ')
# i += 1
# -------------------------------------------
# --------- Plot markov chains --------------
# -------------------------------------------
if inputFields.get("MarkovChain") is not None and inputFields["MarkovChain"]["display"] == "yes":
num_fig = 100
vec_std = np.zeros(n_unpar)
for i in range(n_unpar):
plt.figure(num_fig+i)
plt.plot(range(n_samples), param_value_raw[:, i])
plt.xlabel("Number of iterations")
plt.ylabel(unpar_name[i])
#saveToTikz('markov_chain_'+unpar_name[i]+'.tex')
c_mean_val = np.mean(param_value_raw[:, i])
c_std_val = np.std(param_value_raw[:, i])
vec_std[i] = c_std_val
cv = c_std_val / c_mean_val
Q1 = np.percentile(param_value_raw[:, i], 25, axis=0)
Q3 = np.percentile(param_value_raw[:, i], 75, axis=0)
cqv = (Q3 - Q1)/(Q3 + Q1)
print("{}: mean value = {}; standard dev. = {}; cv = {}; cqv = {}".format(unpar_name[i], c_mean_val, c_std_val, cv, cqv))
if inputFields["MarkovChain"].get("check_convergence") is not None and inputFields["MarkovChain"]["check_convergence"] == "yes":
# Computing convergence criteria and graphs for each chains
# From <NAME> al., Bayesian Data Analysis, 2014.
mean_it = np.zeros(n_samples-1)
plt.figure(1000+i)
for it in range(n_samples-1):
mean_it[it] = np.mean(param_value_raw[0:it+1, i])
plt.plot(range(n_samples-1), mean_it)
"""
cov_c = np.cov(param_value_raw, rowvar=False)
print("Final chain covariance matrix:")
print(cov_c)
corr_c = cov_c
for i in range(n_unpar):
corr_c[i][i] = cov_c[i][i] / (vec_std[i] * vec_std[i])
for j in range(i+1, n_unpar):
corr_c[i][j] = cov_c[i][j] / (vec_std[i] * vec_std[j])
corr_c[j][i] = corr_c[i][j]
print("Final chain correlation matrix:")
print(corr_c)
fig = plt.figure(400, figsize=(16, 12))
ax = sns.heatmap(
corr_c,
vmin=-1, vmax=1, center=0,
cmap=sns.diverging_palette(20, 220, n=200),
square=True,
)
ax.set_xticklabels(
ax.get_xticklabels(),
rotation=45,
horizontalalignment='right'
)
# From https://towardsdatascience.com/better-heatmaps-and-correlation-matrix-plots-in-python-41445d0f2bec
def heatmap(x, y, size):
plot_grid = plt.GridSpec(1, 15, hspace=0.2, wspace=0.1) # Setup a 1x15 grid
ax = plt.subplot(plot_grid[:,:-1]) # Use the leftmost 14 columns of the grid for the main plot
# Mapping from column names to integer coordinates
x_labels = x
y_labels = y
x_to_num = []
y_to_num=[]
for i, name in enumerate(x_labels):
for j, name in enumerate(x_labels):
x_to_num.append(j)
y_to_num.append(i)
size_scale = 500
m = np.abs(size.flatten())
n_colors = 256 # Use 256 colors for the diverging color palette
palette = sns.diverging_palette(20, 220, n=n_colors) # Create the palette
color_min, color_max = [-1, 1] # Range of values that will be mapped to the palette, i.e. min and max possible correlation
def value_to_color(val):
val_position = float((val - color_min)) / (color_max - color_min) # position of value in the input range, relative to the length of the input range
ind = int(val_position * (n_colors - 1)) # target index in the color palette
return palette[ind]
color_vec = []
for i, val in enumerate(size.flatten()):
color_vec.append(value_to_color(val))
#print(color_vec)
ax.scatter(
x=x_to_num, # Use mapping for x
y=y_to_num, # Use mapping for y
s=m*size_scale, # Vector of square sizes, proportional to size parameter
c=color_vec,
marker='s' # Use square as scatterplot marker
)
# Show column labels on the axes
ax.set_xticks([v for v in range(len(x_labels))])
ax.set_xticklabels(x_labels, rotation=45, horizontalalignment='right', fontsize=20)
ax.set_yticks([v for v in range(len(x_labels))])
ax.set_yticklabels(y_labels, fontsize=20)
ax.grid(False, 'major')
ax.grid(True, 'minor')
ax.set_xticks([t + 0.5 for t in ax.get_xticks()], minor=True)
ax.set_yticks([t + 0.5 for t in ax.get_yticks()], minor=True)
# ax.set_xlim([-0.5, max([v for v in x_to_num.values()]) + 0.5])
# ax.set_ylim([-0.5, max([v for v in y_to_num.values()]) + 0.5])
# Add color legend on the right side of the plot
ax = plt.subplot(plot_grid[:,-1]) # Use the rightmost column of the plot
col_x = [0]*len(palette) # Fixed x coordinate for the bars
bar_y=np.linspace(color_min, color_max, n_colors) # y coordinates for each of the n_colors bars
bar_height = bar_y[1] - bar_y[0]
ax.barh(
y=bar_y,
width=[5]*len(palette), # Make bars 5 units wide
left=col_x, # Make bars start at 0
height=bar_height,
color=palette,
linewidth=0.0,
)
ax.set_xlim(1, 2) # Bars are going from 0 to 5, so lets crop the plot somewhere in the middle
ax.grid(False) # Hide grid
ax.set_facecolor('white') # Make background white
ax.set_xticks([]) # Remove horizontal ticks
ax.tick_params(axis="y", labelsize=20)
ax.set_yticks(np.linspace(min(bar_y), max(bar_y), 3)) # Show vertical ticks for min, middle and max
ax.yaxis.tick_right() # Show vertical ticks on the right
unpar_name_real = ["$\\log_{10} (\\mathcal{A}_{1,1})$", "$\\log_{10} (\\mathcal{A}_{1,2})$", "$\\log_{10} (\\mathcal{A}_{2,1})$", "$\\log_{10} (\\mathcal{A}_{3,1})$",
"$\\mathcal{E}_{1,1}$", "$\\mathcal{E}_{1,2}$", "$\\mathcal{E}_{2,1}$", "$\\mathcal{E}_{3,1}$",
"$\\gamma_{2,1,5}$", "$\\gamma_{3,1,7}$"]
heatmap(
x=unpar_name_real,
y=unpar_name_real,
size=corr_c
)
fig.savefig('correlation_matrix.pdf')
# Savetotikz not good for this
# saveToTikz('correlation_matrix.tex')
"""
"""
# 2D MCMC iterations
#---------------
num_fig = 500
for i in range(n_unpar):
for j in range(i+1, n_unpar):
plt.figure(num_fig+i)
plt.plot(param_value_raw[:, i], param_value_raw[:, j])
plt.xlabel(unpar_name[i])
plt.ylabel(unpar_name[j])
num_fig += 1
"""
# -------------------------------------------
# -------- Posterior distribution -----------
# -------------------------------------------
if inputFields.get("Posterior") is not None and inputFields["Posterior"]["display"] == "yes":
burnin_it = inputFields["Posterior"]["burnin"]
param_value = param_value_raw[range(burnin_it, n_samples), :]
num_fig = 200
if inputFields["Posterior"]["distribution"] == "marginal":
if "ksdensity" in inputFields["Posterior"]["estimation"]:
for i in range(n_unpar):
# Estimate marginal pdf using gaussian kde
data_i = param_value[:, i]
kde = stats.gaussian_kde(data_i)
x = np.linspace(data_i.min(), data_i.max(), 100)
p = kde(x)
# Plot
plt.figure(num_fig+i)
plt.plot(x, p)
plt.xlabel(unpar_name[i])
plt.ylabel("Probability density")
# Find and plot the mode
plt.plot(x[np.argmax(p)], p.max(), 'r*')
if inputFields["Posterior"]["distribution"] == "yes":
saveToTikz("marginal_pdf_param_"+i+".tex")
if "hist" in inputFields["Posterior"]["estimation"]:
for i in range(n_unpar):
data_i = param_value[:, i]
plt.figure(num_fig+i)
plt.hist(data_i, bins='auto', density=True)
for i in range(n_unpar):
plt.figure(num_fig+i)
#saveToTikz('marginal_pdf_'+inputFields["Posterior"]["estimation"]+'_'+unpar_name[i]+'.tex')
if inputFields["Posterior"]["distribution"] == "bivariate":
# Compute bivariate marginal pdf
if "scatter" in inputFields["Posterior"]["estimation"]:
for i in range(n_unpar):
range_data = range(1,len(param_value[:,i]), 10)
data_i = param_value[range_data, i]
for j in range(i+1, n_unpar):
data_j = param_value[range_data, j]
plt.figure(num_fig)
plt.scatter(data_j, data_i, c=['C2'], s=10)
num_fig += 1
if "contour" in inputFields ["Posterior"]["estimation"]:
for i, var_name in enumerate(unpar_name):
# Get first coordinate param values
x = param_value[:, i]
xmin = np.min(x)
xmax = np.max(x)
# Get second coordinatee param values
for var_name_2 in unpar_name[i+1:len(unpar_name)]:
# Number of the corresponding parameter name
k = unpar_name_list[var_name_2]
y = param_value[:, k]
ymax = np.max(y)
ymin = np.min(y)
# Peform the kernel density estimate
xx, yy = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([xx.ravel(), yy.ravel()])
values = np.vstack([x, y])
kernel = stats.gaussian_kde(values)
f = np.reshape(kernel(positions).T, xx.shape)
fig = plt.figure(num_fig)
ax = fig.gca()
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
# Contourf plot
cfset = ax.contourf(xx, yy, f, cmap='Blues')
## Or kernel density estimate plot instead of the contourf plot
#ax.imshow(np.rot90(f), cmap='Blues', extent=[xmin, xmax, ymin, ymax])
# Contour plot
cset = ax.contour(xx, yy, f, colors='k')
# Label plot
ax.clabel(cset, inline=1, fontsize=10)
plt.xlabel(var_name)
plt.ylabel(var_name_2)
num_fig = num_fig + 1
#saveToTikz('bivariate_contour_'+var_name+'_'+var_name_2+'.tex')
# plt.figure(num_fig)
# plt.scatter(np.log(x), np.log(y), c=['C2'], s=10)
# plt.xlabel(var_name)
# plt.ylabel(var_name_2)
# num_fig = num_fig + 1
#saveToTikz('no_reparam.tex')
# ----- Bivariate probability distribution functions -----
# --------------------------------------------------------
# OLD MATLAB IMPLEMENTATION
# figNum=1;
# %param_values=inputParams.Parametrization(param_values);
# if strcmp(bivariatePDF.plot, 'yes')
# for i = 1 : nParam_uncertain
# param_i = param_values(samplesPlot,i);
# for j = i+1 : nParam_uncertain
# param_j = param_values(samplesPlot,j);
# if strcmp(bivariatePDF.plotHist, 'yes')
# figure
# hist3([param_i param_j], bivariatePDF.binHist);
# set(get(gca,'child'),'FaceColor','interp','CDataMode','auto');
# %ylim([1.2 1.5].*1e5); xlim([0 0.5].*1e6)
# end
# if strcmp(bivariatePDF.plotContourFilled, 'yes')
# figure; hold on
# [n,c] = hist3([param_i param_j], bivariatePDF.binHist);
# [~,h]=contourf(c{1}, c{2}, n.', 80);
# set(h,'LineColor','none')
# end
# if strcmp(bivariatePDF.plotContour, 'yes')
# figure(figNum); hold on
# [n,c] = hist3([param_i param_j], bivariatePDF.binHist);
# [~,h]=contour(c{1}, c{2}, n.', 10);
# set(h,'LineColor',matlab_default_colors(1,:))
# end
# if strcmp(bivariatePDF.scatterPlot, 'yes')
# figure(figNum); hold on
# scatter(param_j, param_i, 15, matlab_default_colors(2,:), 'filled')
# end
# figNum=figNum+1;
# xlabel(paramNames(j));
# ylabel(paramNames(i));
# run('plot_dimensions'); box off
# if strcmp(bivariatePDF.saveImg, 'yes')
# matlab2tikz(['2Dpdf_' num2str(i) '_' num2str(j) '.tex'])
# end
# end
# end
# end
# -------------------------------------------
# ------ Posterior predictive check ---------
# -------------------------------------------
if (inputFields.get("PosteriorPredictiveCheck") is not None):
if inputFields["PosteriorPredictiveCheck"]["display"] == "yes":
# By default, we have saved 100 function evaluations
n_fun_eval = n_samples # 100
delta_it = int(n_samples/n_fun_eval)
delta_it = 10
start_val = int(inputFields["PosteriorPredictiveCheck"]["burnin"]*delta_it)
# By default, the last function evaluation to be plotted is equal to the number of iterations
end_val = int(n_samples)
# Num of fun eval after discarding burn in samples
n_fun_eval_est = int((n_samples - start_val)/delta_it)
#for i in range(data_exp.n_data_set):
for num_data_set, data_id in enumerate(data_exp.keys()):
n_x = len(data_exp[data_id].x)
n_data_set = int(len(data_exp[data_id].y[0])/n_x)
for i in range(n_data_set):
plt.figure(num_plot[num_data_set])
plt.figure(i)
# Initialise bounds
init_model_eval_file = pathlib.Path(model_eval_folder, f"{data_id}_fun_eval-{0}.npy")
data_ij = np.load(init_model_eval_file)
data_ij_max = data_ij
data_ij_min = data_ij
data_ij_mean = np.zeros(n_x)
data_ij_var = np.zeros(n_x)
ind_1 = i*n_x
ind_2 =(i+1)*n_x
# Histogram
data_hist = np.zeros([n_fun_eval_est, n_x])
for c_eval, j in enumerate(range(start_val+delta_it, end_val, delta_it)):
# Load current data
model_eval_j_file = pathlib.Path(model_eval_folder, f"{data_id}_fun_eval-{j}.npy")
data_ij = np.load(model_eval_j_file)
data_set_n = data_ij[ind_1:ind_2]
# Update bounds
for k in range(n_x):
if data_ij_max[k] < data_set_n[k]:
data_ij_max[k] = data_set_n[k]
elif data_ij_min[k] > data_set_n[k]:
data_ij_min[k] = data_set_n[k]
data_hist[c_eval, :] = data_set_n[:]
# Update mean
data_ij_mean[:] = data_ij_mean[:] + data_set_n[:]
# Plot all realisation (modify alpha value to see something)
# plt.plot(data_exp[data_id].x, data_set_n[:], alpha=0.5)
# Compute mean
data_ij_mean = data_ij_mean[:]/n_fun_eval_est
# Identical loop to compute the variance
for j in range(start_val+delta_it, end_val, delta_it):
# Load current data
model_eval_j_file = pathlib.Path(model_eval_folder, f"{data_id}_fun_eval-{j}.npy")
data_ij = np.load(model_eval_j_file)
data_set_n = data_ij[ind_1:ind_2]
# Compute variance
data_ij_var = data_ij_var[:] + (data_set_n[:] - data_ij_mean[:])**2
data_ij_var = data_ij_var[:]/(n_fun_eval_est - 1)
# # Plot median and all results from propagation
# plt.plot(data_exp.x[ind_1:ind_2+1], (data_ij_min +
# data_ij_max)/2, color=lineColor[i][0], alpha=0.5)
# plt.fill_between(data_exp.x[ind_1:ind_2+1], data_ij_min[:],
# data_ij_max[:], facecolor=lineColor[i][0], alpha=0.1)
# Plot mean and 95% confidence interval for the mean
# CI_lowerbound = data_ij_mean - 1.96*np.sqrt(data_ij_var/n_fun_eval)
# CI_upperbound = data_ij_mean + 1.96*np.sqrt(data_ij_var/n_fun_eval)
# plt.plot(data_exp.x[ind_1:ind_2+1], data_ij_mean, color=lineColor[i][0], alpha=0.5)
# plt.fill_between(data_exp.x[ind_1:ind_2+1], CI_lowerbound, CI_upperbound, facecolor=lineColor[i][0], alpha=0.1)
# Plot mean
# ---------
plt.plot(data_exp[data_id].x, data_ij_mean, color=lineColor[num_data_set][0], alpha=0.5, label="Mean prop.")
# Plot 95% credible interval
# ---------------------------
if inputFields["PosteriorPredictiveCheck"]["cred_int"] == "yes":
low_cred_int = np.percentile(data_hist, 2.5, axis=0)
high_cred_int = np.percentile(data_hist, 97.5, axis=0)
plt.fill_between(data_exp[data_id].x, low_cred_int, high_cred_int, facecolor=lineColor[num_data_set][0], alpha=0.3, label="95\% cred. int.")
# Plot 95% prediction interval
# -----------------------------
# For the prediction interval, we add the std to the result
# Thus, the value of the sigma in the likelihood must be present in a csv file
if inputFields["PosteriorPredictiveCheck"]["pred_int"] == "yes":
path_to_est_sigma = pathlib.Path(out_folder, "estimated_sigma.csv")
if path_to_est_sigma.exists(): # Sigma was estimated
print("Prediction interval computed using the estimated the standard deviations.")
reader = pd.read_csv(path_to_est_sigma)
sigma_values = reader['model_id'].values
else: # Values from the data
print("Prediction interval computed using the standard deviations from the data.")
sigma_values = data_exp[data_id].std_y[i*n_x:i*n_x+n_x]
plt.fill_between(data_exp[data_id].x, low_cred_int-sigma_values,
high_cred_int+sigma_values, facecolor=lineColor[num_data_set][0], alpha=0.1, label="95\% pred. int.")
#plt.fill_between(data_exp[data_id].x, low_cred_int-data_exp[data_id].std_y[ind_1:ind_2],
# high_cred_int+data_exp[data_id].std_y[ind_1:ind_2], facecolor=lineColor[num_data_set][0], alpha=0.1)
plt.legend()
# Values are saved in csv format using Panda dataframe
df = pd.DataFrame({"x": data_exp[data_id].x,
"mean" : data_ij_mean,
"lower_bound": data_ij_min,
"upper_bound": data_ij_max})
path_to_predCheckInt_file = pathlib.Path(out_folder, f"{data_id}_posterior_pred_check_interval.csv")
df.to_csv(path_to_predCheckInt_file, index=None)
df_CI = pd.DataFrame({"x": data_exp[data_id].x,
"CI_lb": low_cred_int,
"CI_ub": high_cred_int})
path_to_predCheckCI_file = pathlib.Path(out_folder, f"{data_id}_posterior_pred_check_CI.csv")
df_CI.to_csv(path_to_predCheckCI_file, index=None)
del data_ij_max, data_ij_min, data_set_n
# -------------------------------------------
# ------------ Propagation ------------------
# -------------------------------------------
if (inputFields.get("Propagation") is not None):
if inputFields["Propagation"]["display"] == "yes":
num_plot = inputFields["Propagation"]["num_plot"]
for num_model_id, model_id in enumerate(inputFields["Propagation"]["model_id"]):
results_prop_CI = pd.read_csv('output/'+model_id+'_CI.csv')
results_prop_intervals = | pd.read_csv('output/'+model_id+'_interval.csv') | pandas.read_csv |
import numpy as np
import pytest
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import Categorical, CategoricalIndex, DataFrame, Series, get_dummies
import pandas._testing as tm
from pandas.core.arrays.sparse import SparseArray, SparseDtype
class TestGetDummies:
@pytest.fixture
def df(self):
return DataFrame({"A": ["a", "b", "a"], "B": ["b", "b", "c"], "C": [1, 2, 3]})
@pytest.fixture(params=["uint8", "i8", np.float64, bool, None])
def dtype(self, request):
return np.dtype(request.param)
@pytest.fixture(params=["dense", "sparse"])
def sparse(self, request):
# params are strings to simplify reading test results,
# e.g. TestGetDummies::test_basic[uint8-sparse] instead of [uint8-True]
return request.param == "sparse"
def effective_dtype(self, dtype):
if dtype is None:
return np.uint8
return dtype
def test_get_dummies_raises_on_dtype_object(self, df):
with pytest.raises(ValueError):
get_dummies(df, dtype="object")
def test_get_dummies_basic(self, sparse, dtype):
s_list = list("abc")
s_series = Series(s_list)
s_series_index = Series(s_list, list("ABC"))
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
)
if sparse:
expected = expected.apply(SparseArray, fill_value=0.0)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
expected.index = list("ABC")
result = get_dummies(s_series_index, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
def test_get_dummies_basic_types(self, sparse, dtype):
# GH 10531
s_list = list("abc")
s_series = Series(s_list)
s_df = DataFrame(
{"a": [0, 1, 0, 1, 2], "b": ["A", "A", "B", "C", "C"], "c": [2, 3, 3, 3, 2]}
)
expected = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0], "c": [0, 0, 1]},
dtype=self.effective_dtype(dtype),
columns=list("abc"),
)
if sparse:
if is_integer_dtype(dtype):
fill_value = 0
elif dtype == bool:
fill_value = False
else:
fill_value = 0.0
expected = expected.apply(SparseArray, fill_value=fill_value)
result = get_dummies(s_list, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_series, sparse=sparse, dtype=dtype)
tm.assert_frame_equal(result, expected)
result = get_dummies(s_df, columns=s_df.columns, sparse=sparse, dtype=dtype)
if sparse:
dtype_name = f"Sparse[{self.effective_dtype(dtype).name}, {fill_value}]"
else:
dtype_name = self.effective_dtype(dtype).name
expected = Series({dtype_name: 8})
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
tm.assert_series_equal(result, expected)
result = get_dummies(s_df, columns=["a"], sparse=sparse, dtype=dtype)
expected_counts = {"int64": 1, "object": 1}
expected_counts[dtype_name] = 3 + expected_counts.get(dtype_name, 0)
expected = Series(expected_counts).sort_index()
result = result.dtypes.value_counts()
result.index = [str(i) for i in result.index]
result = result.sort_index()
tm.assert_series_equal(result, expected)
def test_get_dummies_just_na(self, sparse):
just_na_list = [np.nan]
just_na_series = Series(just_na_list)
just_na_series_index = Series(just_na_list, index=["A"])
res_list = get_dummies(just_na_list, sparse=sparse)
res_series = get_dummies(just_na_series, sparse=sparse)
res_series_index = get_dummies(just_na_series_index, sparse=sparse)
assert res_list.empty
assert res_series.empty
assert res_series_index.empty
assert res_list.index.tolist() == [0]
assert res_series.index.tolist() == [0]
assert res_series_index.index.tolist() == ["A"]
def test_get_dummies_include_na(self, sparse, dtype):
s = ["a", "b", np.nan]
res = get_dummies(s, sparse=sparse, dtype=dtype)
exp = DataFrame(
{"a": [1, 0, 0], "b": [0, 1, 0]}, dtype=self.effective_dtype(dtype)
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res, exp)
# Sparse dataframes do not allow nan labelled columns, see #GH8822
res_na = get_dummies(s, dummy_na=True, sparse=sparse, dtype=dtype)
exp_na = DataFrame(
{np.nan: [0, 0, 1], "a": [1, 0, 0], "b": [0, 1, 0]},
dtype=self.effective_dtype(dtype),
)
exp_na = exp_na.reindex(["a", "b", np.nan], axis=1)
# hack (NaN handling in assert_index_equal)
exp_na.columns = res_na.columns
if sparse:
exp_na = exp_na.apply(SparseArray, fill_value=0.0)
tm.assert_frame_equal(res_na, exp_na)
res_just_na = get_dummies([np.nan], dummy_na=True, sparse=sparse, dtype=dtype)
exp_just_na = DataFrame(
Series(1, index=[0]), columns=[np.nan], dtype=self.effective_dtype(dtype)
)
tm.assert_numpy_array_equal(res_just_na.values, exp_just_na.values)
def test_get_dummies_unicode(self, sparse):
# See GH 6885 - get_dummies chokes on unicode values
import unicodedata
e = "e"
eacute = unicodedata.lookup("LATIN SMALL LETTER E WITH ACUTE")
s = [e, eacute, eacute]
res = get_dummies(s, prefix="letter", sparse=sparse)
exp = DataFrame(
{"letter_e": [1, 0, 0], f"letter_{eacute}": [0, 1, 1]}, dtype=np.uint8
)
if sparse:
exp = exp.apply(SparseArray, fill_value=0)
tm.assert_frame_equal(res, exp)
def test_dataframe_dummies_all_obj(self, df, sparse):
df = df[["A", "B"]]
result = get_dummies(df, sparse=sparse)
expected = DataFrame(
{"A_a": [1, 0, 1], "A_b": [0, 1, 0], "B_b": [1, 1, 0], "B_c": [0, 0, 1]},
dtype=np.uint8,
)
if sparse:
expected = DataFrame(
{
"A_a": SparseArray([1, 0, 1], dtype="uint8"),
"A_b": | SparseArray([0, 1, 0], dtype="uint8") | pandas.core.arrays.sparse.SparseArray |
#! /usr/bin/env python3
from tools import splitXY, top_nucs, filter_nucs
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import cross_val_predict
from sklearn.model_selection import learning_curve
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import scale
from sklearn.utils import shuffle
from sklearn import metrics
import numpy as np
import pandas as pd
TRAINSET_SIZES = np.linspace(0.1, 1.0, 19)
def format(m, cv1, tr1, cv2, tr2):
# average cv folds + format
m = m.tolist()
cv1 = np.array([np.mean(row) for row in cv1]).tolist()
tr1 = np.array([np.mean(row) for row in tr1]).tolist()
cv2 = np.array([np.mean(row) for row in cv2]).tolist()
tr2 = np.array([np.mean(row) for row in tr2]).tolist()
score = []
for i in m:
idx = m.index(i)
scr = (cv1[idx], tr1[idx], cv2[idx], tr2[idx])
score.append(scr)
return score
def classification(X, Y, clist, cv_fold):
"""
Training for Classification: predicts models using two regularization params,
returning the training and cross validation accuracies for each.
"""
accuracy = []
for c in clist:
l1 = LogisticRegression(C=c, penalty='l1')
l2 = LogisticRegression(C=c, penalty='l2')
strat_cv = StratifiedKFold(n_splits=cv_fold, shuffle=True)
# for logR bug:
Xs, Ys = shuffle(X, Y)
l1_pred = cross_val_predict(l1, Xs, Ys, cv=strat_cv)
l2_pred = cross_val_predict(l2, Xs, Ys, cv=strat_cv)
# for diagnostic curves
l1.fit(Xs, Ys)
l2.fit(Xs, Ys)
tr1_pred = l1.predict(Xs)
tr2_pred = l2.predict(Xs)
# Accuracies
cvl1 = metrics.accuracy_score(Ys, l1_pred)
cvl2 = metrics.accuracy_score(Ys, l2_pred)
trl1 = metrics.accuracy_score(Ys, tr1_pred)
trl2 = metrics.accuracy_score(Ys, tr2_pred)
# accuracies for csv
acc = (cvl1, trl1, cvl2, trl2)
accuracy.append(acc)
return accuracy
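# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): classification() on a toy
# two-class problem, just to show the (cv_l1, train_l1, cv_l2, train_l2)
# accuracy tuples returned for each C value.
# ---------------------------------------------------------------------------
def _classification_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 5))
    Y = (X[:, 0] + 0.1 * rng.normal(size=200) > 0).astype(int)
    return classification(X, Y, clist=(0.1, 1.0, 10.0), cv_fold=5)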
def lc_classification(X, Y, c, cv_fold):
"""
Learning curve for classification: predicts models using two regularization
parameters, returning the training and cross validation accuracies for each
with respect to a given training set size
"""
l1 = LogisticRegression(C=c, penalty='l1')
l2 = LogisticRegression(C=c, penalty='l2')
strat_cv = StratifiedKFold(n_splits=cv_fold, shuffle=True)
# for logR bug:
Xs, Ys = shuffle(X, Y)
_, trl1, cvl1 = learning_curve(l1, Xs, Ys, train_sizes=TRAINSET_SIZES,
cv=strat_cv
)
m, trl2, cvl2 = learning_curve(l2, Xs, Ys, train_sizes=TRAINSET_SIZES,
cv=strat_cv
)
accuracy = format(m, cvl1, trl1, cvl2, trl2)
return accuracy
def regression(X, Y, alist, cv_fold):
"""
Training for Regression: predicts a model using two regularization parameters
for optimization, returning the training and cross validation errors for each.
"""
mse = []
for a in alist:
l1 = ElasticNet(alpha=a, l1_ratio=1, selection='random')
l2 = ElasticNet(alpha=a, l1_ratio=0, selection='random')
l1_pred = cross_val_predict(l1, X, Y, cv=cv_fold)
l2_pred = cross_val_predict(l2, X, Y, cv=cv_fold)
# for diagnostic curves
l1.fit(X, Y)
l2.fit(X, Y)
tr1_pred = l1.predict(X)
tr2_pred = l2.predict(X)
# Errors, negative for 'higher better' convention
cvl1 = -1 * metrics.mean_squared_error(Y, l1_pred)
cvl2 = -1 * metrics.mean_squared_error(Y, l2_pred)
trl1 = -1 * metrics.mean_squared_error(Y, tr1_pred)
trl2 = -1 * metrics.mean_squared_error(Y, tr2_pred)
# mean squared error for csv
mse_i = (cvl1, trl1, cvl2, trl2)
mse.append(mse_i)
return mse
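# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original script): regression() on a small
# synthetic problem; the column labels below are chosen here only to make the
# returned error tuples readable.
# ---------------------------------------------------------------------------
def _regression_usage_sketch():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 10))
    y = 3.0 * X[:, 0] + rng.normal(scale=0.1, size=200)
    alist = (0.01, 0.1, 1.0)
    scores = regression(X, y, alist, cv_fold=5)
    return pd.DataFrame(scores, index=alist,
                        columns=['CVL1', 'TrainL1', 'CVL2', 'TrainL2'])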
def lc_regression(X, y, a, cv_fold):
"""
Learning curve for regression: predicts models using two regularization
parameters, returning the training and cross validation accuracies for each
with respect to a given training set size
"""
l1 = ElasticNet(alpha=a, l1_ratio=1, selection='random')
l2 = ElasticNet(alpha=a, l1_ratio=0, selection='random')
_, trl1, cvl1 = learning_curve(l1, X, y, train_sizes=TRAINSET_SIZES,
cv=cv_fold, scoring='neg_mean_squared_error'
)
m, trl2, cvl2 = learning_curve(l2, X, y, train_sizes=TRAINSET_SIZES,
cv=cv_fold, scoring='neg_mean_squared_error'
)
mse = format(m, cvl1, trl1, cvl2, trl2)
return mse
def validation_curves(X, rY, cY, eY, bY, nuc_subset):
"""
Given training data, this script runs some ML algorithms
(currently, this is 2 linear models with 2 error metrics)
for each prediction category: reactor type, cooling time, enrichment,
and burnup. Learning curves are also generated; comment out if unnecessary
Parameters
----------
X : dataframe that includes all training data
*Y : series with labels for training data
Returns
-------
reactor*.csv : accuracy for l1 & l2 norms
cooling*.csv : negative error for l1 & l2 norms
enrichment*.csv : negative error for l1 & l2 norms
burnup*.csv : negative error for l1 & l2 norms
"""
#####################
## Learning Curves ##
#####################
alpha = 0.1
rxtr_lc = lc_classification(X, rY, alpha, cv_folds)
cool_lc= lc_regression(X, cY, alpha, cv_folds)
enr_lc = lc_regression(X, eY, alpha, cv_folds)
burn_lc = lc_regression(X, bY, alpha, cv_folds)
r_lc = 'lc_' + 'reactor' + nucs_tracked + train_src + '.csv'
c_lc = 'lc_' + 'cooling' + nucs_tracked + train_src + '.csv'
e_lc = 'lc_' + 'enrichment' + nucs_tracked + train_src + '.csv'
b_lc = 'lc_' + 'burnup' + nucs_tracked + train_src + '.csv'
idx_lc = TRAINSET_SIZES
pd.DataFrame(rxtr_lc, columns=cols, index=idx_lc).to_csv(r_lc)
pd.DataFrame(cool_lc, columns=cols, index=idx_lc).to_csv(c_lc)
pd.DataFrame(enr_lc, columns=cols, index=idx_lc).to_csv(e_lc)
pd.DataFrame(burn_lc, columns=cols, index=idx_lc).to_csv(b_lc)
#######################
## Validation Curves ##
#######################
alist = (0.001, 0.005, 0.01, 0.02, 0.04, 0.06, 0.08, 0.1, 0.2, 0.4, 0.6,
0.8, 1, 1.3, 1.7, 2, 5, 10, 50, 100
)
rxtr_vc = classification(X, rY, alist, cv_folds)
cool_vc = regression(X, cY, alist, cv_folds)
enr_vc = regression(X, eY, alist, cv_folds)
burn_vc = regression(X, bY, alist, cv_folds)
idx_vc = alist
r_vc = 'vc_' + 'reactor' + nucs_tracked + train_src + '.csv'
c_vc = 'vc_' + 'cooling' + nucs_tracked + train_src + '.csv'
e_vc = 'vc_' + 'enrichment' + nucs_tracked + train_src + '.csv'
b_vc = 'vc_' + 'burnup' + nucs_tracked + train_src + '.csv'
| pd.DataFrame(rxtr_vc, columns=cols, index=idx_vc) | pandas.DataFrame |
import pandas as pd
import re
import win32com.client
from graphviz import Digraph
def LoadExcelStructure(fileFolder,fileName):
"""
Return a dataframe containing information about your Excel file VB structure
fileFolder: Your Excel file folder
fileName: Your Excel file name including the extension
"""
fileFolder=fileFolder + "/"
xl = win32com.client.Dispatch('Excel.Application')
wb = xl.Workbooks.Open(fileFolder + fileName)
xl.Visible = 1
df_ModInfo=pd.DataFrame()
listInfo=[]
# Go over every VBA compoents
print("Reading the Excel structure")
for VBComp in wb.VBProject.VBComponents:
for lineCodeMod in range(1,VBComp.CodeModule.countOfLines):
VBComponent=VBComp.name
VBComponentClean=re.sub(r'[\W_]+','',VBComponent)
ProcName=VBComp.CodeModule.ProcOfLine(lineCodeMod)
ProcNameClean=re.sub(r'[\W_]+','',ProcName[0])
ProcLineNumber=lineCodeMod-VBComp.CodeModule.ProcStartLine(ProcName[0],ProcName[1])
ProcLineNumberFromBody=lineCodeMod-VBComp.CodeModule.ProcBodyLine(ProcName[0],ProcName[1])
LineOfCode=VBComp.CodeModule.Lines(lineCodeMod,1)
listInfo.append([VBComponent,VBComponentClean,ProcName[0],ProcNameClean,ProcName[1],ProcLineNumber,ProcLineNumberFromBody,LineOfCode])
VBComp.CodeModule
df_ModInfo
df_ModInfo=pd.DataFrame(listInfo,columns=['VBComponent','VBComponentClean','ProcName','ProcNameClean','ProcKind','ProcLineNumber','ProcLineNumberFromBody','LineOfCode'])
df_ModInfo['FoncOnLine'] = FoncOnLine(df_ModInfo['LineOfCode'],df_ModInfo['ProcName'],df_ModInfo['ProcLineNumberFromBody'])
wb.Close(False)
return df_ModInfo
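# ---------------------------------------------------------------------------
# Editor's sketch (not part of the original module): calling LoadExcelStructure
# requires a local Excel installation (win32com) plus a macro-enabled workbook;
# the folder and file name below are placeholders.
# ---------------------------------------------------------------------------
def _load_structure_sketch():
    df_structure = LoadExcelStructure("C:/path/to/folder", "MyMacros.xlsm")
    return df_structure[['VBComponent', 'ProcName', 'ProcKind', 'LineOfCode']]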
def FoncOnLine(series,ProcName,ProcLineNumberFromBody):
"""
Check if a function or a sub is being called in a line of code
series: List of all lines of code
ProcName : All the procedure names
ProcLineNumberFromBody: The line number of the function of sub
"""
names=set(ProcName)
# FINDS ANY NAME
matches_DefaultLink= | pd.DataFrame(data=None) | pandas.DataFrame |
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
:description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one for a min/max exposure scenario)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
# input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
npt.assert_array_equal(result[i], expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
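# NOTE (inferred from the expected_results above, not from the TED source): daily_app_flag
# returns a 366-element boolean vector marking application days; for example the second
# scenario (num_apps_min=5, app_interval_min=7) is True on days 0, 7, 14, 21 and 28 and
# False everywhere else, which matches the True positions in expected_results[1].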
def test_set_drift_parameters(self):
"""
:description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
:param param_a (result[i][0]); parameter a for spray drift distance calculation
:param param_b (result[i][1]); parameter b for spray drift distance calculation
:param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variables that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
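# NOTE: set_drift_parameters effectively maps the combination (app_method, boom_hgt,
# droplet_spec) to a (param_a, param_b, param_c) triple. A minimal sketch of such a
# lookup, purely illustrative and not the actual TED implementation, would be:
# DRIFT_PARAMS = {('aerial', '', 'very_fine_to_fine'): (0.0292, 0.822, 0.6539), ...}
# param_a, param_b, param_c = DRIFT_PARAMS[(app_method, boom_hgt, droplet_spec)]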
def test_drift_distance_calc(self):
"""
:description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
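# NOTE (inferred from the values above, not from the TED source): the expected distances
# are consistent with a drift curve of the form
#     app_rate_frac = param_c / (1. + param_a * distance) ** param_b
# solved for distance and clipped to [0., max_distance_from_source], i.e.
#     distance = ((param_c / app_rate_frac) ** (1. / param_b) - 1.) / param_a
# e.g. ((0.6539 / 0.1) ** (1. / 0.822) - 1.) / 0.0292 ~= 302.05 (expected_results[0]),
# while the third case (0.4763 / 0.88 < 1) gives a negative distance and is floored at 0.0.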
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
:param half_life; half-life of pesticide representing either the foliar dissipation half-life or the aerobic soil metabolism half-life (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
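# NOTE (inferred from the expected values, not from the TED source): conc_timestep appears
# to apply one day of first-order decay, conc = conc_ini * exp(-(ln 2 / half_life) * 1.),
# e.g. an initial concentration of 0.15 with a 2-day half-life gives
# 0.15 * exp(-0.693147 / 2.) ~= 0.106066, matching expected_results[1].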
try:
# input variable that is internally specified from among options
half_life = | pd.Series([35., 2., .1]) | pandas.Series |
import numpy as np
import pytest
from pandas import (
Categorical,
DataFrame,
Series,
_testing as tm,
concat,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
pytestmark = [
pytest.mark.single,
# pytables https://github.com/PyTables/PyTables/issues/822
pytest.mark.filterwarnings(
"ignore:a closed node found in the registry:UserWarning"
),
]
def test_categorical(setup_path):
with ensure_clean_store(setup_path) as store:
# Basic
_maybe_remove(store, "s")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s", s, format="table")
result = store.select("s")
tm.assert_series_equal(s, result)
_maybe_remove(store, "s_ordered")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
)
store.append("s_ordered", s, format="table")
result = store.select("s_ordered")
tm.assert_series_equal(s, result)
_maybe_remove(store, "df")
df = DataFrame({"s": s, "vals": [1, 2, 3, 4, 5, 6]})
store.append("df", df, format="table")
result = store.select("df")
tm.assert_frame_equal(result, df)
# Dtypes
_maybe_remove(store, "si")
s = Series([1, 1, 2, 2, 3, 4, 5]).astype("category")
store.append("si", s)
result = store.select("si")
tm.assert_series_equal(result, s)
_maybe_remove(store, "si2")
s = Series([1, 1, np.nan, 2, 3, 4, 5]).astype("category")
store.append("si2", s)
result = store.select("si2")
tm.assert_series_equal(result, s)
# Multiple
_maybe_remove(store, "df2")
df2 = df.copy()
df2["s2"] = Series(list("abcdefg")).astype("category")
store.append("df2", df2)
result = store.select("df2")
tm.assert_frame_equal(result, df2)
# Make sure the metadata is OK
info = store.info()
assert "/df2 " in info
# assert '/df2/meta/values_block_0/meta' in info
assert "/df2/meta/values_block_1/meta" in info
# unordered
_maybe_remove(store, "s2")
s = Series(
Categorical(
["a", "b", "b", "a", "a", "c"],
categories=["a", "b", "c", "d"],
ordered=False,
)
)
store.append("s2", s, format="table")
result = store.select("s2")
tm.assert_series_equal(result, s)
# Query
_maybe_remove(store, "df3")
store.append("df3", df, data_columns=["s"])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s = ["b","c"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["d"])]
result = store.select("df3", where=['s in ["d"]'])
tm.assert_frame_equal(result, expected)
expected = df[df.s.isin(["f"])]
result = store.select("df3", where=['s in ["f"]'])
tm.assert_frame_equal(result, expected)
# Appending with same categories is ok
store.append("df3", df)
df = concat([df, df])
expected = df[df.s.isin(["b", "c"])]
result = store.select("df3", where=['s in ["b","c"]'])
tm.assert_frame_equal(result, expected)
# Appending must have the same categories
df3 = df.copy()
df3["s"] = df3["s"].cat.remove_unused_categories()
msg = "cannot append a categorical with different categories to the existing"
with pytest.raises(ValueError, match=msg):
store.append("df3", df3)
# Remove, and make sure meta data is removed (it's a recursive
# removal so should be).
result = store.select("df3/meta/s/meta")
assert result is not None
store.remove("df3")
with pytest.raises(
KeyError, match="'No object named df3/meta/s/meta in the file'"
):
store.select("df3/meta/s/meta")
def test_categorical_conversion(setup_path):
# GH13322
# Check that read_hdf with categorical columns doesn't return rows if
# where criteria isn't met.
obsids = ["ESP_012345_6789", "ESP_987654_3210"]
imgids = ["APF00006np", "APF0001imm"]
data = [4.3, 9.8]
# Test without categories
df = DataFrame({"obsids": obsids, "imgids": imgids, "data": data})
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
# Test with categories
df.obsids = df.obsids.astype("category")
df.imgids = df.imgids.astype("category")
# We are expecting an empty DataFrame matching types of df
expected = df.iloc[[], :]
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df", where="obsids=B")
tm.assert_frame_equal(result, expected)
def test_categorical_nan_only_columns(setup_path):
# GH18413
# Check that read_hdf with categorical columns with NaN-only values can
# be read back.
df = DataFrame(
{
"a": ["a", "b", "c", np.nan],
"b": [np.nan, np.nan, np.nan, np.nan],
"c": [1, 2, 3, 4],
"d": Series([None] * 4, dtype=object),
}
)
df["a"] = df.a.astype("category")
df["b"] = df.b.astype("category")
df["d"] = df.b.astype("category")
expected = df
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", data_columns=True)
result = read_hdf(path, "df")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"where, df, expected",
[
('col=="q"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": []})),
('col=="a"', DataFrame({"col": ["a", "b", "s"]}), DataFrame({"col": ["a"]})),
],
)
def test_convert_value(setup_path, where: str, df: DataFrame, expected: DataFrame):
# GH39420
# Check that read_hdf with categorical columns can filter by where condition.
df.col = df.col.astype("category")
max_widths = {"col": 1}
categorical_values = sorted(df.col.unique())
expected.col = expected.col.astype("category")
expected.col = expected.col.cat.set_categories(categorical_values)
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", format="table", min_itemsize=max_widths)
result = | read_hdf(path, where=where) | pandas.read_hdf |
from datetime import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.base import _registry as ea_registry
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_interval_dtype,
is_object_dtype,
)
from pandas.core.dtypes.dtypes import (
CategoricalDtype,
DatetimeTZDtype,
IntervalDtype,
PeriodDtype,
)
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
Interval,
IntervalIndex,
MultiIndex,
NaT,
Period,
PeriodIndex,
Series,
Timestamp,
cut,
date_range,
notna,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.tseries.offsets import BDay
class TestDataFrameSetItem:
@pytest.mark.parametrize("dtype", ["int32", "int64", "float32", "float64"])
def test_setitem_dtype(self, dtype, float_frame):
arr = np.random.randn(len(float_frame))
float_frame[dtype] = np.array(arr, dtype=dtype)
assert float_frame[dtype].dtype.name == dtype
def test_setitem_list_not_dataframe(self, float_frame):
data = np.random.randn(len(float_frame), 2)
float_frame[["A", "B"]] = data
tm.assert_almost_equal(float_frame[["A", "B"]].values, data)
def test_setitem_error_msmgs(self):
# GH 7432
df = DataFrame(
{"bar": [1, 2, 3], "baz": ["d", "e", "f"]},
index=Index(["a", "b", "c"], name="foo"),
)
ser = Series(
["g", "h", "i", "j"],
index=Index(["a", "b", "c", "a"], name="foo"),
name="fiz",
)
msg = "cannot reindex from a duplicate axis"
with pytest.raises(ValueError, match=msg):
df["newcol"] = ser
# GH 4107, more descriptive error message
df = DataFrame(np.random.randint(0, 2, (4, 4)), columns=["a", "b", "c", "d"])
msg = "incompatible index of inserted column with frame index"
with pytest.raises(TypeError, match=msg):
df["gr"] = df.groupby(["b", "c"]).count()
def test_setitem_benchmark(self):
# from the vb_suite/frame_methods/frame_insert_columns
N = 10
K = 5
df = DataFrame(index=range(N))
new_col = np.random.randn(N)
for i in range(K):
df[i] = new_col
expected = DataFrame(np.repeat(new_col, K).reshape(N, K), index=range(N))
tm.assert_frame_equal(df, expected)
def test_setitem_different_dtype(self):
df = DataFrame(
np.random.randn(5, 3), index=np.arange(5), columns=["c", "b", "a"]
)
df.insert(0, "foo", df["a"])
df.insert(2, "bar", df["c"])
# diff dtype
# new item
df["x"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 5 + [np.dtype("float32")],
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
# replacing current (in different block)
df["a"] = df["a"].astype("float32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2,
index=["foo", "c", "bar", "b", "a", "x"],
)
tm.assert_series_equal(result, expected)
df["y"] = df["a"].astype("int32")
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 4 + [np.dtype("float32")] * 2 + [np.dtype("int32")],
index=["foo", "c", "bar", "b", "a", "x", "y"],
)
tm.assert_series_equal(result, expected)
def test_setitem_empty_columns(self):
# GH 13522
df = DataFrame(index=["A", "B", "C"])
df["X"] = df.index
df["X"] = ["x", "y", "z"]
exp = DataFrame(data={"X": ["x", "y", "z"]}, index=["A", "B", "C"])
tm.assert_frame_equal(df, exp)
def test_setitem_dt64_index_empty_columns(self):
rng = date_range("1/1/2000 00:00:00", "1/1/2000 1:59:50", freq="10s")
df = DataFrame(index=np.arange(len(rng)))
df["A"] = rng
assert df["A"].dtype == np.dtype("M8[ns]")
def test_setitem_timestamp_empty_columns(self):
# GH#19843
df = DataFrame(index=range(3))
df["now"] = Timestamp("20130101", tz="UTC")
expected = DataFrame(
[[Timestamp("20130101", tz="UTC")]] * 3, index=[0, 1, 2], columns=["now"]
)
tm.assert_frame_equal(df, expected)
def test_setitem_wrong_length_categorical_dtype_raises(self):
# GH#29523
cat = Categorical.from_codes([0, 1, 1, 0, 1, 2], ["a", "b", "c"])
df = DataFrame(range(10), columns=["bar"])
msg = (
rf"Length of values \({len(cat)}\) "
rf"does not match length of index \({len(df)}\)"
)
with pytest.raises(ValueError, match=msg):
df["foo"] = cat
def test_setitem_with_sparse_value(self):
# GH#8131
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_array = SparseArray([0, 0, 1])
df["new_column"] = sp_array
expected = Series(sp_array, name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_with_unaligned_sparse_value(self):
df = DataFrame({"c_1": ["a", "b", "c"], "n_1": [1.0, 2.0, 3.0]})
sp_series = Series(SparseArray([0, 0, 1]), index=[2, 1, 0])
df["new_column"] = sp_series
expected = Series(SparseArray([1, 0, 0]), name="new_column")
tm.assert_series_equal(df["new_column"], expected)
def test_setitem_dict_preserves_dtypes(self):
# https://github.com/pandas-dev/pandas/issues/34573
expected = DataFrame(
{
"a": Series([0, 1, 2], dtype="int64"),
"b": Series([1, 2, 3], dtype=float),
"c": Series([1, 2, 3], dtype=float),
}
)
df = DataFrame(
{
"a": Series([], dtype="int64"),
"b": Series([], dtype=float),
"c": Series([], dtype=float),
}
)
for idx, b in enumerate([1, 2, 3]):
df.loc[df.shape[0]] = {"a": int(idx), "b": float(b), "c": float(b)}
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"obj,dtype",
[
(Period("2020-01"), PeriodDtype("M")),
(Interval(left=0, right=5), IntervalDtype("int64", "right")),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
],
)
def test_setitem_extension_types(self, obj, dtype):
# GH: 34832
expected = DataFrame({"idx": [1, 2, 3], "obj": Series([obj] * 3, dtype=dtype)})
df = DataFrame({"idx": [1, 2, 3]})
df["obj"] = obj
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"ea_name",
[
dtype.name
for dtype in ea_registry.dtypes
# property would require instantiation
if not isinstance(dtype.name, property)
]
# mypy doesn't allow adding lists of different types
# https://github.com/python/mypy/issues/5492
+ ["datetime64[ns, UTC]", "period[D]"], # type: ignore[list-item]
)
def test_setitem_with_ea_name(self, ea_name):
# GH 38386
result = DataFrame([0])
result[ea_name] = [1]
expected = DataFrame({0: [0], ea_name: [1]})
tm.assert_frame_equal(result, expected)
def test_setitem_dt64_ndarray_with_NaT_and_diff_time_units(self):
# GH#7492
data_ns = np.array([1, "nat"], dtype="datetime64[ns]")
result = Series(data_ns).to_frame()
result["new"] = data_ns
expected = DataFrame({0: [1, None], "new": [1, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
# OutOfBoundsDatetime error shouldn't occur
data_s = np.array([1, "nat"], dtype="datetime64[s]")
result["new"] = data_s
expected = DataFrame({0: [1, None], "new": [1e9, None]}, dtype="datetime64[ns]")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into a not-yet-existing column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df[unit] = vals
assert df[unit].dtype == np.dtype("M8[ns]")
assert (df[unit].values == ex_vals).all()
@pytest.mark.parametrize("unit", ["h", "m", "s", "ms", "D", "M", "Y"])
def test_frame_setitem_existing_datetime64_col_other_units(self, unit):
# Check that non-nano dt64 values get cast to dt64 on setitem
# into an already-existing dt64 column
n = 100
dtype = np.dtype(f"M8[{unit}]")
vals = np.arange(n, dtype=np.int64).view(dtype)
ex_vals = vals.astype("datetime64[ns]")
df = DataFrame({"ints": np.arange(n)}, index=np.arange(n))
df["dates"] = np.arange(n, dtype=np.int64).view("M8[ns]")
# We overwrite existing dt64 column with new, non-nano dt64 vals
df["dates"] = vals
assert (df["dates"].values == ex_vals).all()
def test_setitem_dt64tz(self, timezone_frame):
df = timezone_frame
idx = df["B"].rename("foo")
# setitem
df["C"] = idx
tm.assert_series_equal(df["C"], Series(idx, name="C"))
df["D"] = "foo"
df["D"] = idx
tm.assert_series_equal(df["D"], Series(idx, name="D"))
del df["D"]
# assert that A & C are not sharing the same base (e.g. they
# are copies)
v1 = df._mgr.arrays[1]
v2 = df._mgr.arrays[2]
tm.assert_extension_array_equal(v1, v2)
v1base = v1._data.base
v2base = v2._data.base
assert v1base is None or (id(v1base) != id(v2base))
# with nan
df2 = df.copy()
df2.iloc[1, 1] = NaT
df2.iloc[1, 2] = NaT
result = df2["B"]
tm.assert_series_equal(notna(result), Series([True, False, True], name="B"))
tm.assert_series_equal(df2.dtypes, df.dtypes)
def test_setitem_periodindex(self):
rng = period_range("1/1/2000", periods=5, name="index")
df = DataFrame(np.random.randn(5, 3), index=rng)
df["Index"] = rng
rs = Index(df["Index"])
tm.assert_index_equal(rs, rng, check_names=False)
assert rs.name == "Index"
assert rng.name == "index"
rs = df.reset_index().set_index("index")
assert isinstance(rs.index, PeriodIndex)
tm.assert_index_equal(rs.index, rng)
def test_setitem_complete_column_with_array(self):
# GH#37954
df = DataFrame({"a": ["one", "two", "three"], "b": [1, 2, 3]})
arr = np.array([[1, 1], [3, 1], [5, 1]])
df[["c", "d"]] = arr
expected = DataFrame(
{
"a": ["one", "two", "three"],
"b": [1, 2, 3],
"c": [1, 3, 5],
"d": [1, 1, 1],
}
)
expected["c"] = expected["c"].astype(arr.dtype)
expected["d"] = expected["d"].astype(arr.dtype)
assert expected["c"].dtype == arr.dtype
assert expected["d"].dtype == arr.dtype
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("dtype", ["f8", "i8", "u8"])
def test_setitem_bool_with_numeric_index(self, dtype):
# GH#36319
cols = Index([1, 2, 3], dtype=dtype)
df = DataFrame(np.random.randn(3, 3), columns=cols)
df[False] = ["a", "b", "c"]
expected_cols = Index([1, 2, 3, False], dtype=object)
if dtype == "f8":
expected_cols = Index([1.0, 2.0, 3.0, False], dtype=object)
tm.assert_index_equal(df.columns, expected_cols)
@pytest.mark.parametrize("indexer", ["B", ["B"]])
def test_setitem_frame_length_0_str_key(self, indexer):
# GH#38831
df = DataFrame(columns=["A", "B"])
other = DataFrame({"B": [1, 2]})
df[indexer] = other
expected = DataFrame({"A": [np.nan] * 2, "B": [1, 2]})
expected["A"] = expected["A"].astype("object")
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns(self, using_array_manager):
# GH#15695
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
df.loc[0, "A"] = (0, 3)
df.loc[:, "B"] = (1, 4)
df["C"] = (2, 5)
expected = DataFrame(
[
[0, 1, 2, 3, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
[np.nan, 1, 2, np.nan, 4, 5],
],
dtype="object",
)
if using_array_manager:
# setitem replaces column so changes dtype
expected.columns = cols
expected["C"] = expected["C"].astype("int64")
# TODO(ArrayManager) .loc still overwrites
expected["B"] = expected["B"].astype("int64")
else:
# set these with unique columns to be extra-unambiguous
expected[2] = expected[2].astype(np.int64)
expected[5] = expected[5].astype(np.int64)
expected.columns = cols
tm.assert_frame_equal(df, expected)
def test_setitem_frame_duplicate_columns_size_mismatch(self):
# GH#39510
cols = ["A", "B", "C"] * 2
df = DataFrame(index=range(3), columns=cols)
with pytest.raises(ValueError, match="Columns must be same length as key"):
df[["A"]] = (0, 3, 5)
df2 = df.iloc[:, :3] # unique columns
with pytest.raises(ValueError, match="Columns must be same length as key"):
df2[["A"]] = (0, 3, 5)
@pytest.mark.parametrize("cols", [["a", "b", "c"], ["a", "a", "a"]])
def test_setitem_df_wrong_column_number(self, cols):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=cols)
rhs = DataFrame([[10, 11]], columns=["d", "e"])
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
df["a"] = rhs
def test_setitem_listlike_indexer_duplicate_columns(self):
# GH#38604
df = DataFrame([[1, 2, 3]], columns=["a", "b", "b"])
rhs = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
df[["a", "b"]] = rhs
expected = DataFrame([[10, 11, 12]], columns=["a", "b", "b"])
tm.assert_frame_equal(df, expected)
df[["c", "b"]] = rhs
expected = | DataFrame([[10, 11, 12, 10]], columns=["a", "b", "b", "c"]) | pandas.DataFrame |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 1.2.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
import matplotlib.pyplot as plt
import numpy as np
import torch
import pandas as pd
import pickle
from jupyterthemes import jtplot
jtplot.style('oceans16')
# %% [markdown]
# # Position Change to the Nearest Car between Frames
# %%
t1 = torch.load('../traffic-data/state-action-cost/data_i80_v0/trajectories-0400-0415/all_data.pth')
t2 = torch.load('../traffic-data/state-action-cost/data_i80_v0/trajectories-0500-0515/all_data.pth')
t3 = torch.load('../traffic-data/state-action-cost/data_i80_v0/trajectories-0515-0530/all_data.pth')
t_states_full = t1['states'] + t2['states'] + t3['states']
# %%
def get_xy_diff_with_closest_car(episode):
dists_t = torch.sqrt((episode[:, 0, :2][:, None, :] - episode[:, 1:, :2]).norm(2, dim=-1))
dists_t[dists_t<=1e-7] = 999999 # if there is no car there
min_dists_t_idx = dists_t.argmin(dim=-1)
dist_diff = (dists_t[1:] - dists_t[:-1])
return dist_diff.gather(dim=1, index=min_dists_t_idx[:-1].view(-1,1)).view(-1)
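# NOTE: for each frame t, dists_t[t, j] is the distance from the ego car (index 0) to car
# j+1 (absent cars are masked with 999999); min_dists_t_idx picks the nearest car at t, and
# the gather selects that same car's column in dist_diff, i.e. how much the gap to the car
# that was nearest at frame t changed between t and t+1.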
# %%
xy_diffs = torch.cat([
get_xy_diff_with_closest_car(k) for k in t_states_full
])
velocity = torch.cat(
[k[:-1] for k in t_states_full]
)[:, 0, 2:].norm(2, dim=-1)
# velocity *= 4*3.7/24 * 3.6
velocity_np = (velocity // 5 * 5).int().numpy()
xy_diffs_np = xy_diffs.numpy()
# %%
df = pd.DataFrame({'velocity':velocity_np, 'position_diff': -xy_diffs_np})
# %%
df_new = df[df['position_diff']>1e-7].copy()
df_new['position_diff'] = np.log10(df_new['position_diff'])
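# Only strictly positive changes are kept (position_diff = -xy_diff > 0, i.e. frames where
# the gap to the nearest car shrank), which keeps the log10 transform well defined.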
# %%
df_new.boxplot(column='position_diff', by='velocity', figsize=(14,8),)
plt.title("$\delta_{xy}(t, t+1)$ to the nearest car")
plt.suptitle("")
plt.ylabel('$\log_{10}(\delta)$')
plt.xlabel('speed')
# %% [markdown]
# # Gradients wrt Actions
# %%
act_grads = torch.load('../actions_grads_orig.pkl')
data = [np.array(k) for k in act_grads]
data = np.concatenate(data, axis=0)
xedges = [np.quantile(data[:,0], p) for p in np.linspace(0,1,21)]
df = pd.DataFrame(data)
df.columns = ['speed', 'grad_proximity', 'grad_lane']
df.speed = (df.speed//5 * 5).astype(int)
# %% [markdown]
# ## Non-Constant Slope
# %%
df_new = df.copy()
# %%
df_new.boxplot(column='grad_proximity', by='speed', figsize=(14,8),)
plt.title("Version non-constant slope, proximity cost: $\partial c_{t+1} / \partial a_t$ ")
plt.suptitle("")
plt.ylabel('$\partial c_{t+1} / \partial a_t$')
plt.xlabel('speed (km/h)')
# %%
df_new.boxplot(column='grad_lane', by='speed', figsize=(14,8))
plt.title("Version non-constant slope, lane cost: $\partial c_{t+1} / \partial a_t$ ")
plt.suptitle("")
plt.ylabel('$\partial c_{t+1} / \partial a_t$')
plt.xlabel('speed (km/h)')
# %%
df_new[['grad_proximity', 'grad_lane']] = np.log10(df_new[['grad_proximity', 'grad_lane']])
# %%
df_new.boxplot(column='grad_proximity', by='speed', figsize=(14,8))
plt.title("Version non-constant slope, proximity cost: $\partial c_{t+1} / \partial a_t$ ")
plt.suptitle("")
plt.ylabel('$\log_{10}(\partial c_{t+1} / \partial a_t)$')
plt.xlabel('speed (km/h)')
plt.ylim(-5, 1.2)
# %%
df_new.boxplot(column='grad_lane', by='speed', figsize=(14,8))
plt.title("Version non-constant slope, lane cost: $\partial c_{t+1} / \partial a_t$ ")
plt.suptitle("")
plt.ylabel('$\log_{10}(\partial c_{t+1} / \partial a_t)$')
plt.xlabel('speed (km/h)')
# %% [markdown]
# ## Constant Slope
# %%
act_grads = torch.load('../actions_grads.pkl')
data = [np.array(k) for k in act_grads]
data = np.concatenate(data, axis=0)
xedges = [np.quantile(data[:,0], p) for p in np.linspace(0,1,21)]
df = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.externals import joblib
import random
if __name__=="__main__":
data = []
prices = np.random.normal(loc=1500.0, scale=40.0, size=3000)
print(prices)
for price in prices:
data.append({'brand': 'Lenovo', 'price': price, 'approved': 1})
prices = np.random.normal(loc=2500.0, scale=40.0, size=3000)
for price in prices:
data.append({'brand': 'Apple', 'price': price, 'approved': 1})
data.append({'brand': 'Lenovo', 'price': price, 'approved': 0})
df = pd.DataFrame(data)
df.brand = pd.Categorical( | pd.factorize(df.brand) | pandas.factorize |
import os
import pandas as pd
import numpy as np
import argparse
import json
from treelib import Node, Tree, tree
from datetime import datetime, timedelta, date
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
def datespan(startDate, endDate, delta=timedelta(days=7)):
currentDate = startDate
while currentDate < endDate:
yield currentDate
currentDate += delta
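# Example (illustrative): with the default weekly delta,
# list(datespan(date(2020, 1, 1), date(2020, 1, 22)))
# yields [date(2020, 1, 1), date(2020, 1, 8), date(2020, 1, 15)].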
from libs.lib_job_thread import *
from joblib import Parallel, delayed
import string
import random
letters = list(string.ascii_lowercase)
def rand(stri):
return random.choice(letters)
def create_dir(x_dir):
if not os.path.exists(x_dir):
os.makedirs(x_dir)
print("Created new dir. %s"%x_dir)
class Node(object):
def __init__(self, node_id,node_author,node_short_delay=0,node_long_delay=0):
self.node_id=node_id
self.node_author=node_author
self.node_short_delay=node_short_delay
self.node_long_delay=node_long_delay
def get_node_id(self):
return self.node_id
def get_node_author(self):
return self.node_author
def get_node_short_delay(self):
return self.node_short_delay
def get_node_long_delay(self):
return self.node_long_delay
class Cascade(object):
def __init__(self,platform,domain,scenario,infoID,cascade_records):
self.platform=platform
self.domain=domain
self.scenario=scenario
self.infoID=infoID
infoID_label=infoID.replace("/","_")
self.output_dir="./metadata/probs/%s/%s/%s/%s"%(self.platform,self.domain,self.scenario,infoID_label)
create_dir(self.output_dir)
self.pool = ThreadPool(128)
self.cascade_props=[]
self.cascade_records=cascade_records
self.cascade_records['actionType']='response'
self.cascade_records.loc[self.cascade_records["nodeID"]==self.cascade_records["parentID"],"actionType"]="seed"
print("# cascades: %d, # nodes: %d"%(self.cascade_records['rootID'].nunique(),self.cascade_records.shape[0]))
def prepare_data(self):
node_users=self.cascade_records[['nodeID','nodeTime']].drop_duplicates().dropna()
node_users.columns=['parentID','parentTime']
self.cascade_records=pd.merge(self.cascade_records,node_users,on='parentID',how='left')
self.cascade_records.loc[self.cascade_records['parentID'].isna()==True,'parentID']=self.cascade_records['nodeID']
self.cascade_records.loc[self.cascade_records['parentUserID'].isna()==True,'parentUserID']=self.cascade_records['nodeUserID']
self.cascade_records.loc[self.cascade_records['parentTime'].isna()==True,'parentTime']=self.cascade_records['nodeTime']
node_users.columns=['rootID','rootTime']
self.cascade_records=pd.merge(self.cascade_records,node_users,on='rootID',how='left')
self.cascade_records.loc[self.cascade_records['rootID'].isna()==True,'rootID']=self.cascade_records['parentID']
self.cascade_records.loc[self.cascade_records['rootUserID'].isna()==True,'rootUserID']=self.cascade_records['parentUserID']
self.cascade_records.loc[self.cascade_records['rootTime'].isna()==True,'rootTime']=self.cascade_records['parentTime']
self.cascade_records["short_propagation_delay"]=self.cascade_records['nodeTime']-self.cascade_records['parentTime']
self.cascade_records["long_propagation_delay"]=self.cascade_records['nodeTime']-self.cascade_records['rootTime']
self.cascade_records.to_pickle("%s/cascade_records.pkl.gz"%(self.output_dir))
def get_user_diffusion(self):
responses=self.cascade_records.query('actionType=="response"')
if responses.shape[0]<1:
responses=self.cascade_records.query('actionType=="seed"')
##responses.loc[responses['isNew']==True,'nodeUserID']="new_"
user_diffusion=responses.groupby(['parentUserID','nodeUserID']).size().reset_index(name='num_responses')
user_diffusion_=responses.groupby(['parentUserID']).size().reset_index(name='total_num_responses')
user_diffusion=pd.merge(user_diffusion,user_diffusion_,on='parentUserID',how='inner')
user_diffusion['prob']=user_diffusion['num_responses']/user_diffusion['total_num_responses']
user_diffusion.sort_values(['parentUserID','prob'],ascending=False,inplace=True)
user_diffusion.to_pickle("%s/user_diffusion.pkl.gz"%(self.output_dir))
# responses=self.cascade_records.query('actionType=="response"')
# responses.loc[responses['isNew']==True,'nodeUserID']="new_"#+cascade_records_chunk['nodeUserID'].str.replace('[a-z]',rand)
# user_diffusion0=responses.groupby(['parentUserID','nodeUserID']).size().reset_index(name='num_responses')
# user_diffusion1=responses.groupby(['parentUserID']).size().reset_index(name='num_children')
# user_diffusion1=pd.merge(user_diffusion0,user_diffusion1,on='parentUserID',how='inner')
# user_diffusion1['prob_parent']=user_diffusion1['num_responses']/user_diffusion1['num_children']
# user_diffusion2=responses.groupby(['nodeUserID']).size().reset_index(name='num_parents')
# user_diffusion2=pd.merge(user_diffusion0,user_diffusion2,on='nodeUserID',how='inner')
# user_diffusion2['prob_child']=user_diffusion2['num_responses']/user_diffusion2['num_parents']
# user_diffusion2.drop(columns=['num_responses'],inplace=True)
# user_diffusion=pd.merge(user_diffusion1,user_diffusion2,on=['parentUserID','nodeUserID'],how='inner')
# user_diffusion['prob']=(user_diffusion['prob_parent']+user_diffusion['prob_child'])/2
# user_diffusion.sort_values(['parentUserID','prob'],ascending=False,inplace=True)
# user_diffusion.to_pickle("%s/user_diffusion.pkl.gz"%(self.output_dir))
return user_diffusion
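# NOTE: user_diffusion holds, for every observed parent->child user pair, prob =
# num_responses / total_num_responses, i.e. the empirical share of a parent's responses
# contributed by each child user, sorted per parent; presumably this is what gets sampled
# downstream when deciding who responds to whom.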
# def get_decay_user_diffusion(self):
# responses=self.cascade_records.query('actionType=="response"')
# responses.loc[responses['isNew']==True,'nodeUserID']="new_"
# pair_lifetime=responses.groupby(['parentUserID','nodeUserID'])['nodeTime'].min().reset_index(name='lifetime_min')
# pair_lifetime=pd.merge(responses,pair_min_lifetime,on=['parentUserID','nodeUserID'],how='inner')
# pair_lifetime['lifetime']=(pair_lifetime['nodeTime']-pair_lifetime['lifetime_min']).dt.days
# pair_lifetime['lifetime_max']=(start_sim_period_date-pair_lifetime['lifetime_min']).dt.days
# pair_lifetime=pair_lifetime[pair_lifetime['lifetime']>0]
# pair_lifetime.groupby(['parentUserID','nodeUserID'])['lifetime'].apply(set)
def get_user_spread_info(self):
self.spread_info1=self.cascade_records.query('actionType=="seed"').groupby(['nodeUserID'])['nodeID'].nunique().reset_index(name="num_seeds")
num_seed_users=self.spread_info1['nodeUserID'].nunique()
print("# seed users: %d"%num_seed_users)
self.spread_info2=self.cascade_records.query('actionType=="response"').groupby(['nodeUserID'])['nodeID'].nunique().reset_index(name="num_responses")
print("# responding users: %d"%self.spread_info2['nodeUserID'].nunique())
dataset_users=self.cascade_records[['nodeUserID','nodeID','actionType']].drop_duplicates()
dataset_users_only_seed=dataset_users.query('actionType=="seed"')
all_responses=self.cascade_records.query('actionType=="response"').groupby(['rootID'])['nodeID'].nunique().reset_index(name='num_responses')
all_responses_with_users=pd.merge(all_responses,dataset_users_only_seed,left_on='rootID',right_on='nodeID',how='inner')
dataset_responded_seeds=all_responses_with_users.groupby(['nodeUserID'])['rootID'].nunique().reset_index(name='num_responded_seeds')
dataset_responded_vol=all_responses_with_users.groupby(['nodeUserID'])['num_responses'].sum().reset_index(name='num_responses_recvd')
self.spread_info=pd.merge(self.spread_info1,self.spread_info2,on=['nodeUserID'],how='outer')
self.spread_info.fillna(0,inplace=True)
print("# Total users: %d"%self.spread_info['nodeUserID'].nunique())
self.spread_info=pd.merge(self.spread_info,dataset_responded_seeds,on=['nodeUserID'],how='left')
self.spread_info.fillna(0,inplace=True)
self.spread_info=pd.merge(self.spread_info,dataset_responded_vol,on=['nodeUserID'],how='left')
self.spread_info.fillna(0,inplace=True)
self.spread_info['spread_score']=(self.spread_info['num_responded_seeds']/self.spread_info['num_seeds'])*self.spread_info['num_responses_recvd']
self.spread_info.sort_values(by='spread_score',ascending=False,inplace=True)
self.spread_info.set_index('nodeUserID',inplace=True)
self.spread_info.to_pickle("%s/user_spread_info.pkl.gz"%(self.output_dir))
return self.spread_info
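# NOTE: spread_score = (num_responded_seeds / num_seeds) * num_responses_recvd, i.e. the
# fraction of a user's seed posts that attracted at least one response, weighted by the
# total volume of responses those seeds received; users are ranked by this score.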
def get_cascade_tree(self,cascade_tuple):
rootID=cascade_tuple[0]
rootUserID=cascade_tuple[1]
childNodes=cascade_tuple[2]
cascadet=Tree()
parent=Node(rootID,rootUserID,0,0)
cascadet.create_node(rootID, rootID, data=parent)
print(rootID,rootUserID,childNodes)
for m in childNodes:
comment_id=m[0]
parent_post_id=m[1]
child_author_id=m[2]
short_delay=m[3]
long_delay=m[4]
child=Node(comment_id,child_author_id,short_delay,long_delay)
try:
parent_node=cascadet.get_node(parent_post_id)
child_parent_identifier=rootID
# if not parent_node:
# print("Let's create %s"%parent_post_id)
# cascadet.create_node(parent_post_id, parent_post_id, parent=rootID,data=parent_node)
# parent_node=cascadet.get_node(parent_post_id)
if parent_node:
child_parent_identifier=parent_node.identifier
cascadet.create_node(comment_id, comment_id, parent=child_parent_identifier,data=child)
except tree.DuplicatedNodeIdError as e:
print("**",e)
continue
print(cascadet)
return cascadet
def run_cascade_trees(self):
self.cascade_trees=self.cascade_records[["rootID","rootUserID","nodeID","parentID","nodeUserID","short_propagation_delay","long_propagation_delay"]]
self.cascade_trees["message"]=self.cascade_trees[["nodeID","parentID","nodeUserID","short_propagation_delay","long_propagation_delay"]].apply(lambda x: tuple(x),axis=1)
self.cascade_trees=self.cascade_trees.groupby(['rootID','rootUserID'])["message"].apply(list).to_frame().reset_index()
self.cascade_trees=self.cascade_trees[['rootID','rootUserID','message']].apply(self.get_cascade_tree,axis=1)
np.save("%s/cascade_trees.npy"%(self.output_dir),self.cascade_trees)
return self.cascade_trees
def get_cascade_props(self,ctree):
nodes=ctree.all_nodes()
depth=ctree.depth()
rid=ctree.root
rnode=ctree.get_node(rid)
#rnode_data=rnode.data
#rauthor=rnode_data.get_node_author()
for node in nodes:
nid=node.identifier
nlevel=ctree.level(nid)
nchildren=ctree.children(nid)
no_children=len(nchildren)
parent=ctree.parent(nid)
if(parent is not None):
pid=parent.identifier
##pchildren=ctree.children(pid)
##p_no_children=len(pchildren)
#pnode=ctree.get_node(pid)
#pnode_data=pnode.data
#pauthor=pnode_data.get_node_author()
else:
pid=-1
#pauthor=-1
##p_no_children=-1
#node_data=node.data
#nauthor=node_data.get_node_author()
#nshort_delay=node_data.get_node_short_delay()
#nlong_delay=node_data.get_node_long_delay()
#llist=[rid,rauthor,depth,nlevel,nid,nauthor,no_children,nshort_delay,nlong_delay,pid,pauthor]
llist=[rid,depth,nlevel,nid,no_children,pid]
## only include non-leaves
##if(no_children!=0):
self.cascade_props.append(llist)
def run_cascade_props(self):
for ctree in self.cascade_trees:
##self.get_cascade_props(ctree)
self.pool.add_task(self.get_cascade_props,ctree)
self.pool.wait_completion()
#columns=["rootID","rootUserID","max_depth","level","nodeID","nodeUserID","degree","short_delay","long_delay","parentID","parentUserID"]
columns=["rootID","max_depth","level","nodeID","degree","parentID"]
self.cascade_props=pd.DataFrame(self.cascade_props,columns=columns)
self.cascade_props.to_pickle("%s/cascade_props.pkl.gz"%(self.output_dir))
return self.cascade_props
def get_cascade_branching(self):
cascade_props_degree=self.cascade_props.groupby(["level"])["degree"].apply(list).reset_index(name="degreeV")
def _get_prob_vector(row):
level=row['level']
degree_list=row['degreeV']
degree_bins = np.bincount(degree_list)
degree_uniques = np.nonzero(degree_bins)[0]
degree_matrix=np.vstack((degree_uniques,degree_bins[degree_uniques])).T
degree_df=pd.DataFrame(degree_matrix,columns=["degree","count"])
degree_df["probability"]=degree_df["count"]/degree_df["count"].sum()
row['level']=level
row['degreeV']=degree_list
row['udegreeV']=degree_df['degree'].values
row['probV']=degree_df["probability"].values
return row
cascade_props_degree=cascade_props_degree.apply(_get_prob_vector,axis=1)
cascade_props_degree.set_index(["level"],inplace=True)
cascade_props_degree.to_pickle("%s/cascade_props_prob_level_degree.pkl.gz"%(self.output_dir))
return cascade_props_degree
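# NOTE (illustrative): for each tree level, _get_prob_vector turns the raw out-degree list
# into an empirical distribution via np.bincount, e.g. degreeV = [0, 0, 2, 1, 0] gives
# udegreeV = [0, 1, 2] and probV = [0.6, 0.2, 0.2].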
# def get_cascade_user_branching(self):
# cascade_props_degree=self.cascade_props.groupby(["nodeUserID","level"])["degree"].apply(list).reset_index(name="degreeV")
# def _get_prob_vector(row):
# level=row['level']
# degree_list=row['degreeV']
# degree_bins = np.bincount(degree_list)
# degree_uniques = np.nonzero(degree_bins)[0]
# degree_matrix=np.vstack((degree_uniques,degree_bins[degree_uniques])).T
# degree_df=pd.DataFrame(degree_matrix,columns=["degree","count"])
# degree_df["probability"]=degree_df["count"]/degree_df["count"].sum()
# row['level']=level
# row['degreeV']=degree_list
# row['udegreeV']=degree_df['degree'].values
# row['probV']=degree_df["probability"].values
# return row
# cascade_props_degree=cascade_props_degree.apply(_get_prob_vector,axis=1)
# cascade_props_degree.set_index(["level"],inplace=True)
# cascade_props_degree.to_pickle("%s/cascade_props_prob_user_level_degree.pkl.gz"%(self.output_dir))
# return cascade_props_degree
# def get_cascade_delays(self):
# cascade_props_size=self.cascade_props.groupby("rootID").size().reset_index(name="size")
# cascade_props_delay=self.cascade_props.groupby("rootID")["long_delay"].apply(list).reset_index(name="delayV")
# cascade_props_delay=pd.merge(cascade_props_delay,cascade_props_size,on="rootID",how="inner")
# cascade_props_delay.to_pickle("%s/cascade_props_delay.pkl.gz"%(self.output_dir))
# return cascade_props_delay
parser = argparse.ArgumentParser(description='Simulation Parameters')
parser.add_argument('--config', dest='config_file_path', type=argparse.FileType('r'))
args = parser.parse_args()
config_json=json.load(args.config_file_path)
platform = config_json['PLATFORM']
domain = config_json['DOMAIN']
scenario = config_json["SCENARIO"]
start_sim_period=config_json["START_SIM_PERIOD"]
end_sim_period=config_json["END_SIM_PERIOD"]
oneD=timedelta(days=1)
start_sim_period_date=datetime.strptime(start_sim_period,"%Y-%m-%d")
end_sim_period_date=datetime.strptime(end_sim_period,"%Y-%m-%d")
num_sim_days=(end_sim_period_date-start_sim_period_date).days+1
training_data_num_days=config_json["TRAINING_DATA_X_MUL_SIM"]
training_data_num_days=num_sim_days*training_data_num_days
train_start_date=start_sim_period_date-(timedelta(days=training_data_num_days))
###train_start_date=start_sim_period_date-(timedelta(days=training_data_num_days*2))
print("Train start date: ",train_start_date)
print("Train end date: ",start_sim_period_date)
num_training_days=(start_sim_period_date-train_start_date).days
print("# training days: %d"%num_training_days)
print("# simulation days: %d"%num_sim_days)
info_ids_path = config_json['INFORMATION_IDS']
info_ids_path = info_ids_path.format(platform)
### Load information IDs
info_ids = pd.read_csv(info_ids_path, header=None)
info_ids.columns = ['informationID']
info_ids = sorted(list(info_ids['informationID']))
#info_ids = ['informationID_'+x if 'informationID' not in x else x for x in info_ids]
print(len(info_ids),info_ids)
input_data_path = config_json["INPUT_CASCADES_FILE_PATH"]
try:
cascade_records= | pd.read_pickle(input_data_path) | pandas.read_pickle |
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import datetime, timedelta
import functools
import itertools
import numpy as np
import numpy.ma as ma
import numpy.ma.mrecords as mrecords
from numpy.random import randn
import pytest
from pandas.compat import (
PY3, PY36, OrderedDict, is_platform_little_endian, lmap, long, lrange,
lzip, range, zip)
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import is_integer_dtype
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, MultiIndex, Series, Timedelta, Timestamp,
compat, date_range, isna)
from pandas.tests.frame.common import TestData
import pandas.util.testing as tm
MIXED_FLOAT_DTYPES = ['float16', 'float32', 'float64']
MIXED_INT_DTYPES = ['uint8', 'uint16', 'uint32', 'uint64', 'int8', 'int16',
'int32', 'int64']
class TestDataFrameConstructors(TestData):
def test_constructor(self):
df = DataFrame()
assert len(df.index) == 0
df = DataFrame(data={})
assert len(df.index) == 0
def test_constructor_mixed(self):
index, data = tm.getMixedTypeDict()
# TODO(wesm), incomplete test?
indexed_frame = DataFrame(data, index=index) # noqa
unindexed_frame = DataFrame(data) # noqa
assert self.mixed_frame['foo'].dtype == np.object_
def test_constructor_cast_failure(self):
foo = DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)
assert foo['a'].dtype == object
# GH 3010, constructing with odd arrays
df = DataFrame(np.ones((4, 2)))
# this is ok
df['foo'] = np.ones((4, 2)).tolist()
# this is not ok
pytest.raises(ValueError, df.__setitem__, tuple(['test']),
np.ones((4, 2)))
# this is ok
df['foo2'] = np.ones((4, 2)).tolist()
def test_constructor_dtype_copy(self):
orig_df = DataFrame({
'col1': [1.],
'col2': [2.],
'col3': [3.]})
new_df = pd.DataFrame(orig_df, dtype=float, copy=True)
new_df['col1'] = 200.
assert orig_df['col1'][0] == 1.
def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
assert df.values[0, 0] == 99
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
assert df.values[0, 0] == 97
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
[None, 'a']], dtype=object)
assert df.loc[1, 0] is None
assert df.loc[0, 1] == '2'
def test_constructor_list_frames(self):
# see gh-3243
result = DataFrame([DataFrame([])])
assert result.shape == (1, 0)
result = DataFrame([DataFrame(dict(A=lrange(5)))])
assert isinstance(result.iloc[0, 0], DataFrame)
def test_constructor_mixed_dtypes(self):
def _make_mixed_dtypes_df(typ, ad=None):
if typ == 'int':
dtypes = MIXED_INT_DTYPES
arrays = [np.array(np.random.rand(10), dtype=d)
for d in dtypes]
elif typ == 'float':
dtypes = MIXED_FLOAT_DTYPES
arrays = [np.array(np.random.randint(
10, size=10), dtype=d) for d in dtypes]
zipper = lzip(dtypes, arrays)
for d, a in zipper:
assert(a.dtype == d)
if ad is None:
ad = dict()
ad.update({d: a for d, a in zipper})
return DataFrame(ad)
def _check_mixed_dtypes(df, dtypes=None):
if dtypes is None:
dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES
for d in dtypes:
if d in df:
assert(df.dtypes[d] == d)
# mixed floating and integer coexist in the same frame
df = _make_mixed_dtypes_df('float')
_check_mixed_dtypes(df)
# add lots of types
df = _make_mixed_dtypes_df('float', dict(A=1, B='foo', C='bar'))
_check_mixed_dtypes(df)
# GH 622
df = _make_mixed_dtypes_df('int')
_check_mixed_dtypes(df)
def test_constructor_complex_dtypes(self):
# GH10952
a = np.random.rand(10).astype(np.complex64)
b = np.random.rand(10).astype(np.complex128)
df = DataFrame({'a': a, 'b': b})
assert a.dtype == df.a.dtype
assert b.dtype == df.b.dtype
def test_constructor_dtype_str_na_values(self, string_dtype):
# https://github.com/pandas-dev/pandas/issues/21083
df = DataFrame({'A': ['x', None]}, dtype=string_dtype)
result = df.isna()
expected = DataFrame({"A": [False, True]})
tm.assert_frame_equal(result, expected)
assert df.iloc[1, 0] is None
df = DataFrame({'A': ['x', np.nan]}, dtype=string_dtype)
assert np.isnan(df.iloc[1, 0])
def test_constructor_rec(self):
rec = self.frame.to_records(index=False)
if PY3:
# unicode error under PY2
rec.dtype.names = list(rec.dtype.names)[::-1]
index = self.frame.index
df = DataFrame(rec)
tm.assert_index_equal(df.columns, pd.Index(rec.dtype.names))
df2 = DataFrame(rec, index=index)
tm.assert_index_equal(df2.columns, pd.Index(rec.dtype.names))
tm.assert_index_equal(df2.index, index)
rng = np.arange(len(rec))[::-1]
df3 = DataFrame(rec, index=rng, columns=['C', 'B'])
expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])
tm.assert_frame_equal(df3, expected)
def test_constructor_bool(self):
df = DataFrame({0: np.ones(10, dtype=bool),
1: np.zeros(10, dtype=bool)})
assert df.values.dtype == np.bool_
def test_constructor_overflow_int64(self):
# see gh-14881
values = np.array([2 ** 64 - i for i in range(1, 10)],
dtype=np.uint64)
result = DataFrame({'a': values})
assert result['a'].dtype == np.uint64
# see gh-2355
data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),
(8921811264899370420, 45),
(long(17019687244989530680), 270),
(long(9930107427299601010), 273)]
dtype = [('uid', 'u8'), ('score', 'u8')]
data = np.zeros((len(data_scores),), dtype=dtype)
data[:] = data_scores
df_crawls = DataFrame(data)
assert df_crawls['uid'].dtype == np.uint64
@pytest.mark.parametrize("values", [np.array([2**64], dtype=object),
np.array([2**65]), [2**64 + 1],
np.array([-2**63 - 4], dtype=object),
np.array([-2**64 - 1]), [-2**65 - 2]])
def test_constructor_int_overflow(self, values):
# see gh-18584
value = values[0]
result = DataFrame(values)
assert result[0].dtype == object
assert result[0][0] == value
def test_constructor_ordereddict(self):
import random
nitems = 100
nums = lrange(nitems)
random.shuffle(nums)
expected = ['A%d' % i for i in nums]
df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))
assert expected == list(df.columns)
def test_constructor_dict(self):
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2})
# col2 is padded with NaN
assert len(self.ts1) == 30
assert len(self.ts2) == 25
tm.assert_series_equal(self.ts1, frame['col1'], check_names=False)
exp = pd.Series(np.concatenate([[np.nan] * 5, self.ts2.values]),
index=self.ts1.index, name='col2')
tm.assert_series_equal(exp, frame['col2'])
frame = DataFrame({'col1': self.ts1,
'col2': self.ts2},
columns=['col2', 'col3', 'col4'])
assert len(frame) == len(self.ts2)
assert 'col1' not in frame
assert isna(frame['col3']).all()
# Corner cases
assert len(DataFrame({})) == 0
# mix dict and array, wrong size - no spec for which error should raise
# first
with pytest.raises(ValueError):
DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})
# Length-one dict micro-optimization
frame = DataFrame({'A': {'1': 1, '2': 2}})
tm.assert_index_equal(frame.index, pd.Index(['1', '2']))
# empty dict plus index
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx)
assert frame.index is idx
# empty with index and columns
idx = Index([0, 1, 2])
frame = DataFrame({}, index=idx, columns=idx)
assert frame.index is idx
assert frame.columns is idx
assert len(frame._series) == 3
# with dict of empty list and Series
frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])
tm.assert_index_equal(frame.index, Index([], dtype=np.int64))
# GH 14381
# Dict with None value
frame_none = DataFrame(dict(a=None), index=[0])
frame_none_list = DataFrame(dict(a=[None]), index=[0])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none.get_value(0, 'a') is None
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
assert frame_none_list.get_value(0, 'a') is None
tm.assert_frame_equal(frame_none, frame_none_list)
# GH10856
# dict with scalar values should raise error, even if columns passed
msg = 'If using all scalar values, you must pass an index'
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7})
with pytest.raises(ValueError, match=msg):
DataFrame({'a': 0.7}, columns=['a'])
@pytest.mark.parametrize("scalar", [2, np.nan, None, 'D'])
def test_constructor_invalid_items_unused(self, scalar):
# No error if invalid (scalar) value is in fact not used:
result = DataFrame({'a': scalar}, columns=['b'])
expected = DataFrame(columns=['b'])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [2, np.nan, None, float('nan')])
def test_constructor_dict_nan_key(self, value):
# GH 18455
cols = [1, value, 3]
idx = ['a', value]
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = DataFrame(data).sort_values(1).sort_values('a', axis=1)
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values('a', axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("value", [np.nan, None, float('nan')])
def test_constructor_dict_nan_tuple_key(self, value):
# GH 18455
cols = Index([(11, 21), (value, 22), (13, value)])
idx = Index([('a', value), (value, 2)])
values = [[0, 3], [1, 4], [2, 5]]
data = {cols[c]: Series(values[c], index=idx) for c in range(3)}
result = (DataFrame(data)
.sort_values((11, 21))
.sort_values(('a', value), axis=1))
expected = DataFrame(np.arange(6, dtype='int64').reshape(2, 3),
index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx).sort_values(('a', value), axis=1)
tm.assert_frame_equal(result, expected)
result = DataFrame(data, index=idx, columns=cols)
tm.assert_frame_equal(result, expected)
@pytest.mark.skipif(not PY36, reason='Insertion order for Python>=3.6')
def test_constructor_dict_order_insertion(self):
# GH19018
# initialization ordering: by insertion order if python>= 3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ba'))
tm.assert_frame_equal(frame, expected)
@pytest.mark.skipif(PY36, reason='order by value for Python<3.6')
def test_constructor_dict_order_by_values(self):
# GH19018
# initialization ordering: by value if python<3.6
d = {'b': self.ts2, 'a': self.ts1}
frame = DataFrame(data=d)
expected = DataFrame(data=d, columns=list('ab'))
tm.assert_frame_equal(frame, expected)
def test_constructor_multi_index(self):
# GH 4078
# construction error with mi and all-nan frame
tuples = [(2, 3), (3, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
tuples = [(3, 3), (2, 3), (3, 3)]
mi = MultiIndex.from_tuples(tuples)
df = DataFrame(index=mi, columns=mi)
assert pd.isna(df).values.ravel().all()
def test_constructor_error_msgs(self):
msg = "Empty data passed with indices specified."
# passing an empty array with columns specified.
with pytest.raises(ValueError, match=msg):
DataFrame(np.empty(0), columns=list('abc'))
msg = "Mixing dicts with non-Series may lead to ambiguous ordering."
# mix dict and array, wrong size
with pytest.raises(ValueError, match=msg):
DataFrame({'A': {'a': 'a', 'b': 'b'},
'B': ['a', 'b', 'c']})
# wrong size ndarray, GH 3105
msg = r"Shape of passed values is \(3, 4\), indices imply \(3, 3\)"
with pytest.raises(ValueError, match=msg):
DataFrame(np.arange(12).reshape((4, 3)),
columns=['foo', 'bar', 'baz'],
index=pd.date_range('2000-01-01', periods=3))
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])
# wrong size axis labels
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(3, 1\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B', 'C'], index=[1])
msg = ("Shape of passed values "
r"is \(3, 2\), indices "
r"imply \(2, 2\)")
with pytest.raises(ValueError, match=msg):
DataFrame(np.random.rand(2, 3), columns=['A', 'B'], index=[1, 2])
msg = ("If using all scalar "
"values, you must pass "
"an index")
with pytest.raises(ValueError, match=msg):
DataFrame({'a': False, 'b': True})
def test_constructor_with_embedded_frames(self):
# embedded data frames
df1 = DataFrame({'a': [1, 2, 3], 'b': [3, 4, 5]})
df2 = DataFrame([df1, df1 + 10])
df2.dtypes
str(df2)
result = df2.loc[0, 0]
tm.assert_frame_equal(result, df1)
result = df2.loc[1, 0]
tm.assert_frame_equal(result, df1 + 10)
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),
'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}
df = DataFrame(data)
refdf = DataFrame({col: dict(compat.iteritems(val))
for col, val in compat.iteritems(data)})
tm.assert_frame_equal(refdf, df)
data = tm.TestSubDict(compat.iteritems(data))
df = DataFrame(data)
tm.assert_frame_equal(refdf, df)
# try with defaultdict
from collections import defaultdict
data = {}
self.frame['B'][:10] = np.nan
for k, v in compat.iteritems(self.frame):
dct = defaultdict(dict)
dct.update(v.to_dict())
data[k] = dct
frame = DataFrame(data)
tm.assert_frame_equal(self.frame.sort_index(), frame)
def test_constructor_dict_block(self):
expected = np.array([[4., 3., 2., 1.]])
df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},
columns=['d', 'c', 'b', 'a'])
tm.assert_numpy_array_equal(df.values, expected)
def test_constructor_dict_cast(self):
# cast float tests
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 3
assert frame['B'].dtype == np.float64
assert frame['A'].dtype == np.float64
frame = DataFrame(test_data)
assert len(frame) == 3
assert frame['B'].dtype == np.object_
assert frame['A'].dtype == np.float64
# can't cast to float
test_data = {
'A': dict(zip(range(20), tm.makeStringIndex(20))),
'B': dict(zip(range(15), randn(15)))
}
frame = DataFrame(test_data, dtype=float)
assert len(frame) == 20
assert frame['A'].dtype == np.object_
assert frame['B'].dtype == np.float64
def test_constructor_dict_dont_upcast(self):
d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}
df = DataFrame(d)
assert isinstance(df['Col1']['Row2'], float)
dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])
assert isinstance(dm[1][1], int)
def test_constructor_dict_of_tuples(self):
# GH #1491
data = {'a': (1, 2, 3), 'b': (4, 5, 6)}
result = DataFrame(data)
expected = DataFrame({k: list(v) for k, v in compat.iteritems(data)})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_dict_multiindex(self):
def check(result, expected):
return tm.assert_frame_equal(result, expected, check_dtype=True,
check_index_type=True,
check_column_type=True,
check_names=True)
d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},
('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},
('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}
_d = sorted(d.items())
df = DataFrame(d)
expected = DataFrame(
[x[1] for x in _d],
index=MultiIndex.from_tuples([x[0] for x in _d])).T
expected.index = MultiIndex.from_tuples(expected.index)
check(df, expected)
d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}
_d.insert(0, ('z', d['z']))
expected = DataFrame(
[x[1] for x in _d],
index=Index([x[0] for x in _d], tupleize_cols=False)).T
expected.index = Index(expected.index, tupleize_cols=False)
df = DataFrame(d)
df = df.reindex(columns=expected.columns, index=expected.index)
check(df, expected)
def test_constructor_dict_datetime64_index(self):
# GH 10160
dates_as_str = ['1984-02-19', '1988-11-06', '1989-12-03', '1990-03-15']
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(dates_as_str)}
data_datetime64 = create_data(np.datetime64)
data_datetime = create_data(lambda x: datetime.strptime(x, '%Y-%m-%d'))
data_Timestamp = create_data(Timestamp)
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timestamp(dt) for dt in dates_as_str])
result_datetime64 = DataFrame(data_datetime64)
result_datetime = DataFrame(data_datetime)
result_Timestamp = DataFrame(data_Timestamp)
tm.assert_frame_equal(result_datetime64, expected)
tm.assert_frame_equal(result_datetime, expected)
tm.assert_frame_equal(result_Timestamp, expected)
def test_constructor_dict_timedelta64_index(self):
# GH 10160
td_as_int = [1, 2, 3, 4]
def create_data(constructor):
return {i: {constructor(s): 2 * i}
for i, s in enumerate(td_as_int)}
data_timedelta64 = create_data(lambda x: np.timedelta64(x, 'D'))
data_timedelta = create_data(lambda x: timedelta(days=x))
data_Timedelta = create_data(lambda x: Timedelta(x, 'D'))
expected = DataFrame([{0: 0, 1: None, 2: None, 3: None},
{0: None, 1: 2, 2: None, 3: None},
{0: None, 1: None, 2: 4, 3: None},
{0: None, 1: None, 2: None, 3: 6}],
index=[Timedelta(td, 'D') for td in td_as_int])
result_timedelta64 = DataFrame(data_timedelta64)
result_timedelta = DataFrame(data_timedelta)
result_Timedelta = DataFrame(data_Timedelta)
tm.assert_frame_equal(result_timedelta64, expected)
tm.assert_frame_equal(result_timedelta, expected)
tm.assert_frame_equal(result_Timedelta, expected)
def test_constructor_period(self):
# PeriodIndex
a = pd.PeriodIndex(['2012-01', 'NaT', '2012-04'], freq='M')
b = pd.PeriodIndex(['2012-02-01', '2012-03-01', 'NaT'], freq='D')
df = pd.DataFrame({'a': a, 'b': b})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
# list of periods
df = pd.DataFrame({'a': a.astype(object).tolist(),
'b': b.astype(object).tolist()})
assert df['a'].dtype == a.dtype
assert df['b'].dtype == b.dtype
def test_nested_dict_frame_constructor(self):
rng = pd.period_range('1/1/2000', periods=5)
df = DataFrame(randn(10, 5), columns=rng)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(col, {})[row] = df.get_value(row, col)
result = DataFrame(data, columns=rng)
tm.assert_frame_equal(result, df)
data = {}
for col in df.columns:
for row in df.index:
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
data.setdefault(row, {})[col] = df.get_value(row, col)
result = DataFrame(data, index=rng).T
tm.assert_frame_equal(result, df)
def _check_basic_constructor(self, empty):
# mat: 2d matrix with shape (3, 2) to input. empty - makes sized
# objects
mat = empty((2, 3), dtype=float)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
# 1-D input
frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])
assert len(frame.index) == 3
assert len(frame.columns) == 1
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# wrong size axis labels
msg = r'Shape of passed values is \(3, 2\), indices imply \(3, 1\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B', 'C'], index=[1])
msg = r'Shape of passed values is \(3, 2\), indices imply \(2, 2\)'
with pytest.raises(ValueError, match=msg):
DataFrame(mat, columns=['A', 'B'], index=[1, 2])
# higher dim raise exception
with pytest.raises(ValueError, match='Must pass 2-d input'):
DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],
index=[1])
# automatic labeling
frame = DataFrame(mat)
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, index=[1, 2])
tm.assert_index_equal(frame.columns, pd.Index(lrange(3)))
frame = DataFrame(mat, columns=['A', 'B', 'C'])
tm.assert_index_equal(frame.index, pd.Index(lrange(2)))
# 0-length axis
frame = DataFrame(empty((0, 3)))
assert len(frame.index) == 0
frame = DataFrame(empty((3, 0)))
assert len(frame.columns) == 0
def test_constructor_ndarray(self):
self._check_basic_constructor(np.ones)
frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])
assert len(frame) == 2
def test_constructor_maskedarray(self):
self._check_basic_constructor(ma.masked_all)
# Check non-masked values
mat = ma.masked_all((2, 3), dtype=float)
mat[0, 0] = 1.0
mat[1, 2] = 2.0
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert 1.0 == frame['A'][1]
assert 2.0 == frame['C'][2]
        # an all-masked frame compares elementwise unequal to itself (NaN != NaN)
mat = ma.masked_all((2, 3), dtype=float)
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert np.all(~np.asarray(frame == frame))
def test_constructor_maskedarray_nonfloat(self):
# masked int promoted to float
mat = ma.masked_all((2, 3), dtype=int)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.float64)
assert frame.values.dtype == np.float64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'][1]
assert 2 == frame['C'][2]
# masked np.datetime64 stays (use NaT as null)
mat = ma.masked_all((2, 3), dtype='M8[ns]')
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert isna(frame).values.all()
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=np.int64)
assert frame.values.dtype == np.int64
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = 1
mat2[1, 2] = 2
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert 1 == frame['A'].view('i8')[1]
assert 2 == frame['C'].view('i8')[2]
# masked bool promoted to object
mat = ma.masked_all((2, 3), dtype=bool)
# 2-D input
frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])
assert len(frame.index) == 2
assert len(frame.columns) == 3
assert np.all(~np.asarray(frame == frame))
# cast type
frame = DataFrame(mat, columns=['A', 'B', 'C'],
index=[1, 2], dtype=object)
assert frame.values.dtype == object
# Check non-masked values
mat2 = ma.copy(mat)
mat2[0, 0] = True
mat2[1, 2] = False
frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])
assert frame['A'][1] is True
assert frame['C'][2] is False
def test_constructor_mrecarray(self):
# Ensure mrecarray produces frame identical to dict of masked arrays
# from GH3479
assert_fr_equal = functools.partial(tm.assert_frame_equal,
check_index_type=True,
check_column_type=True,
check_frame_type=True)
arrays = [
('float', np.array([1.5, 2.0])),
('int', np.array([1, 2])),
('str', np.array(['abc', 'def'])),
]
for name, arr in arrays[:]:
arrays.append(('masked1_' + name,
np.ma.masked_array(arr, mask=[False, True])))
arrays.append(('masked_all', np.ma.masked_all((2,))))
arrays.append(('masked_none',
np.ma.masked_array([1.0, 2.5], mask=False)))
# call assert_frame_equal for all selections of 3 arrays
for comb in itertools.combinations(arrays, 3):
names, data = zip(*comb)
mrecs = mrecords.fromarrays(data, names=names)
# fill the comb
comb = {k: (v.filled() if hasattr(v, 'filled') else v)
for k, v in comb}
expected = DataFrame(comb, columns=names)
result = DataFrame(mrecs)
assert_fr_equal(result, expected)
# specify columns
expected = DataFrame(comb, columns=names[::-1])
result = DataFrame(mrecs, columns=names[::-1])
assert_fr_equal(result, expected)
# specify index
expected = DataFrame(comb, columns=names, index=[1, 2])
result = DataFrame(mrecs, index=[1, 2])
assert_fr_equal(result, expected)
def test_constructor_corner_shape(self):
df = DataFrame(index=[])
assert df.values.shape == (0, 0)
@pytest.mark.parametrize("data, index, columns, dtype, expected", [
(None, lrange(10), ['a', 'b'], object, np.object_),
(None, None, ['a', 'b'], 'int64', np.dtype('int64')),
(None, lrange(10), ['a', 'b'], int, np.dtype('float64')),
({}, None, ['foo', 'bar'], None, np.object_),
({'b': 1}, lrange(10), list('abc'), int, np.dtype('float64'))
])
def test_constructor_dtype(self, data, index, columns, dtype, expected):
df = DataFrame(data, index, columns, dtype)
assert df.values.dtype == expected
def test_constructor_scalar_inference(self):
data = {'int': 1, 'bool': True,
'float': 3., 'complex': 4j, 'object': 'foo'}
df = DataFrame(data, index=np.arange(10))
assert df['int'].dtype == np.int64
assert df['bool'].dtype == np.bool_
assert df['float'].dtype == np.float64
assert df['complex'].dtype == np.complex128
assert df['object'].dtype == np.object_
def test_constructor_arrays_and_scalars(self):
df = DataFrame({'a': randn(10), 'b': True})
exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})
tm.assert_frame_equal(df, exp)
with pytest.raises(ValueError, match='must pass an index'):
DataFrame({'a': False, 'b': True})
def test_constructor_DataFrame(self):
df = DataFrame(self.frame)
tm.assert_frame_equal(df, self.frame)
df_casted = DataFrame(self.frame, dtype=np.int64)
assert df_casted.values.dtype == np.int64
def test_constructor_more(self):
# used to be in test_matrix.py
arr = randn(10)
dm = DataFrame(arr, columns=['A'], index=np.arange(10))
assert dm.values.ndim == 2
arr = randn(0)
dm = DataFrame(arr)
assert dm.values.ndim == 2
assert dm.values.ndim == 2
# no data specified
dm = DataFrame(columns=['A', 'B'], index=np.arange(10))
assert dm.values.shape == (10, 2)
dm = DataFrame(columns=['A', 'B'])
assert dm.values.shape == (0, 2)
dm = DataFrame(index=np.arange(10))
assert dm.values.shape == (10, 0)
# can't cast
mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)
with pytest.raises(ValueError, match='cast'):
DataFrame(mat, index=[0, 1], columns=[0], dtype=float)
dm = DataFrame(DataFrame(self.frame._series))
tm.assert_frame_equal(dm, self.frame)
# int cast
dm = DataFrame({'A': np.ones(10, dtype=int),
'B': np.ones(10, dtype=np.float64)},
index=np.arange(10))
assert len(dm.columns) == 2
assert dm.values.dtype == np.float64
def test_constructor_empty_list(self):
df = DataFrame([], index=[])
expected = DataFrame(index=[])
tm.assert_frame_equal(df, expected)
# GH 9939
df = DataFrame([], columns=['A', 'B'])
expected = DataFrame({}, columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
# Empty generator: list(empty_gen()) == []
def empty_gen():
return
yield
df = DataFrame(empty_gen(), columns=['A', 'B'])
tm.assert_frame_equal(df, expected)
def test_constructor_list_of_lists(self):
# GH #484
df = DataFrame(data=[[1, 'a'], [2, 'b']], columns=["num", "str"])
assert is_integer_dtype(df['num'])
assert df['str'].dtype == np.object_
# GH 4851
# list of 0-dim ndarrays
expected = DataFrame({0: np.arange(10)})
data = [np.array(x) for x in range(10)]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
def test_constructor_sequence_like(self):
# GH 3783
        # collections.Sequence like
class DummyContainer(compat.Sequence):
def __init__(self, lst):
self._lst = lst
def __getitem__(self, n):
return self._lst.__getitem__(n)
            def __len__(self):
return self._lst.__len__()
lst_containers = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]
columns = ["num", "str"]
result = DataFrame(lst_containers, columns=columns)
expected = DataFrame([[1, 'a'], [2, 'b']], columns=columns)
tm.assert_frame_equal(result, expected, check_dtype=False)
# GH 4297
# support Array
import array
result = DataFrame({'A': array.array('i', range(10))})
expected = DataFrame({'A': list(range(10))})
tm.assert_frame_equal(result, expected, check_dtype=False)
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([array.array('i', range(10)),
array.array('i', range(10))])
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_iterable(self):
# GH 21987
class Iter():
def __iter__(self):
for i in range(10):
yield [1, 2, 3]
expected = DataFrame([[1, 2, 3]] * 10)
result = DataFrame(Iter())
tm.assert_frame_equal(result, expected)
def test_constructor_iterator(self):
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([range(10), range(10)])
tm.assert_frame_equal(result, expected)
def test_constructor_generator(self):
# related #2305
gen1 = (i for i in range(10))
gen2 = (i for i in range(10))
expected = DataFrame([list(range(10)), list(range(10))])
result = DataFrame([gen1, gen2])
tm.assert_frame_equal(result, expected)
gen = ([i, 'a'] for i in range(10))
result = DataFrame(gen)
expected = DataFrame({0: range(10), 1: 'a'})
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_constructor_list_of_dicts(self):
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
result = DataFrame(data)
expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),
orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result = DataFrame([{}])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_preserve_order(self):
# see gh-13304
expected = DataFrame([[2, 1]], columns=['b', 'a'])
data = OrderedDict()
data['b'] = [2]
data['a'] = [1]
result = DataFrame(data)
tm.assert_frame_equal(result, expected)
data = OrderedDict()
data['b'] = 2
data['a'] = 1
result = DataFrame([data])
tm.assert_frame_equal(result, expected)
def test_constructor_ordered_dict_conflicting_orders(self):
# the first dict element sets the ordering for the DataFrame,
# even if there are conflicting orders from subsequent ones
row_one = OrderedDict()
row_one['b'] = 2
row_one['a'] = 1
row_two = OrderedDict()
row_two['a'] = 1
row_two['b'] = 2
row_three = {'b': 2, 'a': 1}
expected = DataFrame([[2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two])
tm.assert_frame_equal(result, expected)
expected = DataFrame([[2, 1], [2, 1], [2, 1]], columns=['b', 'a'])
result = DataFrame([row_one, row_two, row_three])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series(self):
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(['x', 'y'], data))
idx = Index(['a', 'b', 'c'])
# all named
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx, name='y')]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
# some unnamed
data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result.sort_index(), expected)
# none named
data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),
OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),
OrderedDict([['a', 1.5], ['d', 6]]),
OrderedDict(),
OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),
OrderedDict([['b', 3], ['c', 4], ['d', 6]])]
data = [Series(d) for d in data]
result = DataFrame(data)
sdict = OrderedDict(zip(range(len(data)), data))
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected.reindex(result.index))
result2 = DataFrame(data, index=np.arange(6))
tm.assert_frame_equal(result, result2)
result = DataFrame([Series({})])
expected = DataFrame(index=[0])
tm.assert_frame_equal(result, expected)
data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),
OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]
sdict = OrderedDict(zip(range(len(data)), data))
idx = Index(['a', 'b', 'c'])
data2 = [Series([1.5, 3, 4], idx, dtype='O'),
Series([1.5, 3, 6], idx)]
result = DataFrame(data2)
expected = DataFrame.from_dict(sdict, orient='index')
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_series_aligned_index(self):
series = [pd.Series(i, index=['b', 'a', 'c'], name=str(i))
for i in range(3)]
result = pd.DataFrame(series)
expected = pd.DataFrame({'b': [0, 1, 2],
'a': [0, 1, 2],
'c': [0, 1, 2]},
columns=['b', 'a', 'c'],
index=['0', '1', '2'])
tm.assert_frame_equal(result, expected)
def test_constructor_list_of_derived_dicts(self):
class CustomDict(dict):
pass
d = {'a': 1.5, 'b': 3}
data_custom = [CustomDict(d)]
data = [d]
result_custom = DataFrame(data_custom)
result = DataFrame(data)
tm.assert_frame_equal(result, result_custom)
def test_constructor_ragged(self):
data = {'A': randn(10),
'B': randn(8)}
with pytest.raises(ValueError, match='arrays must all be same length'):
DataFrame(data)
def test_constructor_scalar(self):
idx = Index(lrange(3))
df = DataFrame({"a": 0}, index=idx)
expected = DataFrame({"a": [0, 0, 0]}, index=idx)
tm.assert_frame_equal(df, expected, check_dtype=False)
def test_constructor_Series_copy_bug(self):
df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])
df.copy()
def test_constructor_mixed_dict_and_Series(self):
data = {}
data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}
data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])
result = DataFrame(data)
assert result.index.is_monotonic
# ordering ambiguous, raise exception
with pytest.raises(ValueError, match='ambiguous ordering'):
DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})
# this is OK though
result = DataFrame({'A': ['a', 'b'],
'B': Series(['a', 'b'], index=['a', 'b'])})
expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},
index=['a', 'b'])
tm.assert_frame_equal(result, expected)
def test_constructor_tuples(self):
result = DataFrame({'A': [(1, 2), (3, 4)]})
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
tm.assert_frame_equal(result, expected)
def test_constructor_namedtuples(self):
# GH11181
from collections import namedtuple
named_tuple = namedtuple("Pandas", list('ab'))
tuples = [named_tuple(1, 3), named_tuple(2, 4)]
expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
result = DataFrame(tuples)
tm.assert_frame_equal(result, expected)
# with columns
expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
result = DataFrame(tuples, columns=['y', 'z'])
tm.assert_frame_equal(result, expected)
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
expected = self.mixed_frame.sort_index()
tm.assert_frame_equal(recons, expected)
# dict of sequence
a = {'hi': [32, 3, 3],
'there': [3, 5, 3]}
rs = DataFrame.from_dict(a, orient='index')
xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))
tm.assert_frame_equal(rs, xp)
def test_from_dict_columns_parameter(self):
# GH 18529
# Test new columns parameter for from_dict that was added to make
# from_items(..., orient='index', columns=[...]) easier to replicate
result = DataFrame.from_dict(OrderedDict([('A', [1, 2]),
('B', [4, 5])]),
orient='index', columns=['one', 'two'])
expected = DataFrame([[1, 2], [4, 5]], index=['A', 'B'],
columns=['one', 'two'])
tm.assert_frame_equal(result, expected)
msg = "cannot use columns parameter with orient='columns'"
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
orient='columns', columns=['one', 'two'])
with pytest.raises(ValueError, match=msg):
DataFrame.from_dict(dict([('A', [1, 2]), ('B', [4, 5])]),
columns=['one', 'two'])
def test_constructor_Series_named(self):
a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')
df = DataFrame(a)
assert df.columns[0] == 'x'
tm.assert_index_equal(df.index, a.index)
# ndarray like
arr = np.random.randn(10)
s = Series(arr, name='x')
df = DataFrame(s)
expected = DataFrame(dict(x=s))
tm.assert_frame_equal(df, expected)
s = Series(arr, index=range(3, 13))
df = DataFrame(s)
expected = DataFrame({0: s})
| tm.assert_frame_equal(df, expected) | pandas.util.testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
@author: <NAME>
This script allows for the MarketCheck Inventory Search API to be queried.
Slight wrangling allows data contained in nested lists to be retrieved,
arranged in dataframes, merged, and ultimately exported as CSV(s).
"""
import requests
import json
import pandas as pd
#####################
# Build API Query #
#####################
url = "http://api.marketcheck.com/v1/search?"
headers = {'Content-Type': 'application/json',
'host': 'marketcheck-prod.apigee.net'}
payload = {'api_key':'YOUR_KEY_HERE',
'radius':'100',
'car_type':'used',
'year':'2018,2017,2016,2015,2014,2013,2012,2011,2010,2009,2008,2007,2006,2005',
'make':'toyota',
'model':'prius',
'start': '0',
'rows': '50'
}
api_response = requests.get(url = url, headers = headers, params = payload)
##############
# Call API #
##############
def get_request_info():
if api_response.status_code == 200:
return json.loads(api_response.content.decode('utf-8'))
else:
return "API did not connect."
get_request_info()
json_response = json.loads(api_response.content.decode('utf-8'))
# See the JSON response in 'prettified' context:
print(json.dumps(json_response, indent = 4, sort_keys = True))
#################################
# Navigating the API Response #
#################################
# View the dictionary key (or 'folder') structure:
for item in json_response:
print('Key/Folder is: ' + item)
# Drill down into the 'listings' key/folder:
for item in json_response['listings']:
print(item)
# See the 'listings' folder content in 'prettified' context:
print(json.dumps(json_response['listings'], indent = 4, sort_keys = True))
###########################
# Data Injestion Set-Up #
###########################
#### VIN Numbers
# Create a dictionary with all of the VIN numbers in our response.
# As VINs are unique identifiers, we will use this variable to merge
# different dataframes we create from nested structures.
vins = []
for item in json_response['listings']:
print(item['vin']) # This isn't necessary but it's visual confirmation.
vins.append({'vin':item['vin']})
#### "Build" Features
# The car's build features are nested within our query response.
# We create a dictionary to store this feature list.
build = []
for item in json_response['listings']:
print(item['build']) # This isn't necessary but it's visual confirmation.
build.append(item['build'])
#### "Dealer" Features
# The car's dealer information is nested within our query response.
# We create a dictionary to store this feature list.
dealer = []
for item in json_response['listings']:
print(item['dealer']) # This isn't necessary but it's visual confirmation.
dealer.append(item['dealer'])
#### "Financing Options"
# This is also nested within our results. NOTE: Could not flatten this nested data...
#financing = []
#for item in json_response['listings']:
# print(item['financing_options']) # This isn't necessary but it's visual confirmation.
# financing.append(item['financing_options'])
##################
# Data Outputs #
##################
# Create our dataframes using the different dictionaries / lists we created:
# Listings
df_listings = pd.DataFrame(json_response['listings'])
df_listings = df_listings.drop(['build', 'dealer', 'media', 'financing_options'], axis = 1)
# Build
df_build = | pd.DataFrame(build) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
energy_native = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
energy_native.append(df['loss'].values[0])
energy_native = np.array(energy_native)
print(energy_native, np.mean(energy_native), np.min(energy_native), np.max(energy_native), np.std(energy_native))
def plot_3drobot(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
# pdb_list = pd.read_csv('pdb_local_rot.txt')['pdb'].values
# pdb_list = pd.read_csv('pdb_profile_diff.txt')['pdb'].values
# pdb_list = pd.read_csv(f'{root_dir}/pdb_profile_diff_match.txt')['pdb'].values
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
# data_flag = 'exp005_v2'
# data_flag = 'exp5'
# data_flag = 'exp6'
# data_flag = 'exp12'
# data_flag = 'exp14'
# data_flag = 'exp17'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp50'
# data_flag = 'exp50_relax'
# data_flag = 'exp49'
# data_flag = 'exp49_relax'
# data_flag = 'exp54'
# data_flag = 'exp61'
# data_flag = 'rosetta'
# data_flag = 'rosetta_relax'
# data_flag = 'rosetta_cen'
# if not os.path.exists(f'{root_dir}/fig_3drobot_{data_flag}'):
# os.system(f'mkdir -p {root_dir}/fig_3drobot_{data_flag}')
correct = 0
rank = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
decoy_name = df['NAME'].values
assert(decoy_name[0] == 'native.pdb')
ind = (df['loss'] != 999)
loss = df['loss'][ind].values
rmsd = df['RMSD'][ind].values
if np.argmin(loss) == 0:
correct += 1
num = np.arange(loss.shape[0]) + 1
rank_i = num[np.argsort(loss) == 0][0]
rank.append(rank_i)
if rank_i > 1:
print(pdb_id, rmsd[np.argmin(loss)])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
# pl.savefig(f'{root_dir}/fig_3drobot_{data_flag}/{pdb_id}_score.pdf')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
print(rank)
fig = pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rank.pdf')
pl.close(fig)
########################################################
def plot_casp11_loss():
# pdb_list = pd.read_csv('pdb_list_new.txt')['pdb'].values
pdb_list = pd.read_csv('pdb_no_need_copy_native.txt')['pdb'].values
flist = pd.read_csv('list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
df_tm = pd.read_csv('casp11_decoy.csv')
tm_score_dict = {x: y for x, y in zip(df_tm['Target'], df_tm['Decoys'])}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp15'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
data_flag = 'exp61'
if not os.path.exists(f'fig_casp11_{data_flag}'):
os.system(f'mkdir fig_casp11_{data_flag}')
correct = 0
rank = []
tm_score = []
for pdb_id in pdb_list:
data_path = f'data_casp11_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
tm_score.append(tm_score_dict[pdb_id])
loss = df['loss'].values
num = np.arange(loss.shape[0])
i = (decoy_name == f'{pdb_id}.native.pdb')
if num[i] == np.argmin(loss):
# print(num.shape[0] - num[i])
correct += 1
rank.append(num[np.argsort(loss) == num[i]][0] + 1)
fig = pl.figure()
pl.plot(num, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([num[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([num[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([num[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
pl.xlabel('num')
pl.ylabel('energy score')
pl.savefig(f'fig_casp11_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
tm_score = np.array(tm_score)
pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
# pl.figure()
# pl.plot(tm_score, rank, 'bo')
a = (rank <= 5)
b = (rank > 5)
pl.figure()
pl.hist(tm_score[a], bins=np.arange(9)*0.1+0.2, label='rank=1 or 2', histtype='stepfilled')
pl.hist(tm_score[b], bins=np.arange(9)*0.1+0.2, label='rank>10', histtype='step')
pl.xlabel('Best TM-score in decoys')
pl.ylabel('Num')
pl.legend(loc=2)
########################################################
def plot_casp11(data_flag):
# plot RMSD vs. loss for CASP11
root_dir = '/home/hyang/bio/erf/data/decoys/casp11'
pdb_list = pd.read_csv(f'{root_dir}/casp11_rmsd/casp11_rmsd.txt')['pdb']
flist = pd.read_csv(f'{root_dir}/list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp61'
for pdb_id in pdb_list:
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
df2 = pd.read_csv(f'{root_dir}/casp11_rmsd/{pdb_id}_rmsd.csv')
rmsd = df2['rmsd'].values
assert(rmsd.shape[0] == loss.shape[0])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([rmsd[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
a = max(12, rmsd.max())
pl.xlim(-1, a)
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rmsd_{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def prepare_casp13():
# prepare casp13 decoys
df = pd.read_csv('flist.txt')
pdb_count = df['pdb'].value_counts()
pdb_list = []
for pdb, count in zip(pdb_count.index, pdb_count.values):
if count > 1:
pdb_list.append(pdb)
else:
pdb_list.append(pdb + '-D1')
pdb_list = np.array(pdb_list)
pdb_list.sort()
df2 = pd.DataFrame({'pdb': pdb_list})
df2.to_csv('pdb_list.txt', index=False)
def plot_casp13(data_flag, casp_id='casp13', casp_score_type='GDT_TS'):
# plot results of casp13 / casp14 decoys
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}'
if casp_id == 'casp13':
pdb_list = pd.read_csv(f'{root_dir}/pdb_list_domain.txt')['pdb'].values
pdb_ids = [x.split('-')[0] for x in pdb_list]
else:
pdb_list = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
pdb_ids = pdb_list
# data_flag = 'exp61'
# if not os.path.exists(f'fig_casp13_{data_flag}'):
# os.system(f'mkdir fig_casp13_{data_flag}')
pearsonr_list = []
pearsonp_list = []
used_pdb_list = []
casp_score_max = []
casp_score_min = []
rank_1 = 0
for pdb_id, pdb_casp_name in zip(pdb_ids, pdb_list):
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['pdb'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
if not os.path.exists(f'{root_dir}/casp_score/{pdb_casp_name}.txt'):
continue
df2 = pd.read_csv(f'{root_dir}/casp_score/{pdb_casp_name}.txt', sep='\s+')
casp_model = df2['Model']
if (casp_id == 'casp13') & (pdb_casp_name.endswith('-D1')):
casp_model = df2['Model'].apply(lambda x: x[:-3])
if casp_score_type == 'GDT_TS':
casp_score_data = df2['GDT_TS'].values
elif casp_score_type == 'RMSD_CA':
casp_score_data = df2['RMS_CA'].values
else:
raise ValueError('casp score type should be GDT_TS / RMSD_CA')
casp_dict = {x: y for x, y in zip(casp_model, casp_score_data)}
casp_score = []
for x in decoy_name:
try:
casp_score.append(casp_dict[x])
except KeyError:
casp_score.append(-1)
casp_score = np.array(casp_score)
idx = (casp_score > 0) & (loss > 0)
casp_score_good = casp_score[idx]
loss_good = loss[idx]
decoy_name_good = decoy_name[idx]
# if np.argmax(casp_score_good) == np.argmin(loss_good):
# rank_1 += 1
top5_idx = np.argpartition(loss_good, 5)[:5]
best_gdt_idx = np.argmax(casp_score_good)
if best_gdt_idx in top5_idx:
print(best_gdt_idx, top5_idx)
rank_1 += 1
print(pdb_casp_name, decoy_name_good[best_gdt_idx], decoy_name_good[top5_idx])
pearsonr = stats.pearsonr(casp_score_good, loss_good)
pearsonr_list.append(pearsonr[0])
pearsonp_list.append(pearsonr[1])
used_pdb_list.append(pdb_id)
casp_score_max.append(casp_score[idx].max())
casp_score_min.append(casp_score[idx].min())
df_i = pd.DataFrame({'pdb': decoy_name_good, casp_score_type: casp_score_good, 'energy': loss_good})
df_i.to_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_casp_score_{casp_score_type}_energy.csv', index=False)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score[idx], loss[idx], 'bo')
pl.title(f'{pdb_id}')
# a = max(12, rmsd.max())
# pl.xlim(-1, a)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}.pdf')
pl.close(fig)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score_good, loss_good, 'bo')
for i in range(loss_good.shape[0]):
pl.text(casp_score_good[i], loss_good[i], decoy_name_good[i].split('S')[1][:-3], fontsize=6)
pl.title(f'{pdb_id}')
y_min = loss_good.min()
y_max = loss_good.max()
pl.ylim(y_min - (y_max - y_min) * 0.01, y_min + (y_max - y_min) * 0.15)
# a = max(12, rmsd.max())
pl.xlim(0, 100)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}_zoom.pdf')
pl.close(fig)
print(f'rank_1 = {rank_1}')
df = pd.DataFrame({'pdb': used_pdb_list, 'pearsonr': pearsonr_list, 'pearsonp': pearsonp_list,
'casp_score_max': casp_score_max, 'casp_score_min': casp_score_min})
df.to_csv(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.txt', index=False)
fig = pl.figure()
if casp_score_type == 'GDT_TS':
pearsonr_bins = np.arange(11)*0.1-1
elif casp_score_type == 'RMSD_CA':
pearsonr_bins = np.arange(11)*0.1
else:
raise ValueError('casp score type should be gdt_ts / rmsd_ca')
pl.hist(df['pearsonr'], bins=pearsonr_bins)
pl.xlabel(r'Pearson $\rho$')
pl.ylabel('N')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.pdf')
pl.close(fig)
# casp_score_max = df['casp_score_max'].values
# fig = pl.figure()
# idx = (casp_score_max >= 50)
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_1.pdf')
# pl.close(fig)
# fig = pl.figure()
# idx = (casp_score_max < 50)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_2.pdf')
# pl.close(fig)
########################################################
def plot_ru(decoy_set, decoy_loss_dir):
# decoy_set = '4state_reduced'
# decoy_set = 'lattice_ssfit'
# decoy_set = 'lmds'
# decoy_set = 'lmds_v2'
root_dir = f'/home/hyang/bio/erf/data/decoys/rudecoy/multiple/{decoy_set}'
# decoy_loss_dir = 'exp61'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
pdb_list = df['pdb'].values
loss = df['loss'].values
rmsd = df['score'].values
native_name = f'{pdb_id}.pdb'
i_native = np.arange(pdb_list.shape[0])[(pdb_list == native_name)]
i = np.argmin(loss)
print(i_native, i, pdb_list[i])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[i_native]], [loss[i_native]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def plot_md_trj(decoy_loss_dir):
# plot the MD trajectory data
root_dir = f'/home/hyang/bio/openmm/data'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pdb = df['pdb'].values
# plot RMSD vs. Energy
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.plot([rmsd[0]], [loss[0]], 'gs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'g^', markersize=12)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'g.', label='md_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'c.', label='md_T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.subplot(211)
pl.plot(rmsd[idx == 1], 'g', label='md_T300')
pl.plot(rmsd[idx == 2], 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss[idx == 1], 'g')
pl.plot(loss[idx == 2], 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_md_trj2():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/cullpdb_val_deep/'
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
# pdb_id_list = ['3KXT']
for pdb_id in pdb_id_list:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_T300_energy_rmsd.csv')
loss1 = df1['energy'].values
rmsd1 = df1['rmsd'].values
df2 = pd.read_csv(f'{root_dir}/{pdb_id}_T500_energy_rmsd.csv')
loss2 = df2['energy'].values
rmsd2 = df2['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'g.', label='T300')
pl.plot(rmsd2, loss2, 'c.', label='T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'g', label='md_T300')
pl.plot(rmsd2, 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'g')
pl.plot(loss2, 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_md_trj3():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/BPTI'
df = pd.read_csv(f'{root_dir}/BPTI_energy_rmsd.csv')
loss1 = df['energy'].values
rmsd1 = df['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot(rmsd1, loss1, 'g.', markersize=0.01)
pl.title('BPTI')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/BPTI_score.jpg')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'b.', markersize=0.01)
pl.ylabel('RMSD')
pl.title('BPTI')
pl.subplot(212)
pl.plot(loss1, 'g.', markersize=0.01)
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/BPTI_rmsd_energy_time.jpg')
pl.close(fig)
def plot_bd_trj():
# plot the mixed Langevin dynamics trajectory data
root_dir = '/home/hyang/bio/erf/data/fold/exp205dynamics_val_deep501/'
pdb_selected = pd.read_csv(f'/home/hyang/bio/erf/data/fold/cullpdb_val_deep/sample.csv')['pdb'].values
pdb_selected = np.append(np.array(['1BPI_A']), pdb_selected)
for pdb_id in pdb_selected:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_energy.csv')
loss1 = df1['sample_energy'].values
rmsd1 = df1['sample_rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'go')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'go')
pl.ylabel('RMSD')
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'bs')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_openmm2():
root_dir = f'/home/hyang/bio/openmm/data'
decoy_loss_dir = 'exp63_65'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
fig = pl.figure()
df = pd.read_csv(f'{root_dir}/exp61/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values * 15.0
rmsd = df['rmsd'].values
pl.plot(rmsd, loss, 'g.')
pl.plot([rmsd[0]], [loss[0]], 'g^', markersize=12)
df = pd.read_csv(f'{root_dir}/exp63/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'bs', markersize=12)
df = | pd.read_csv(f'{root_dir}/exp65/{pdb_id}_decoy_loss.csv') | pandas.read_csv |
import os
import SimpleITK as sitk
import numpy as np
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
age_list='raw_ages.txt'
reload=False
inpath='/mnt/data7/NCP_mp_CTs/crop/lesions'
def showing(df,name,title):
plt.figure(figsize=(7,15))
plt.subplot(5,1,1)
idx=df[(df['sex']==1)].index
sns.distplot(df['tend_cls'][idx],label='male',kde=False,bins=np.arange(-0.5,8.5,1),hist=True,norm_hist=True)
idx=df[(df['sex']==0)].index
sns.distplot(df['tend_cls'][idx],label='female',kde=False,bins=np.arange(-0.5,8.5,1),hist=True,norm_hist=True)
plt.legend()
#plt.ylim([0,1])
plt.xlim([-0.5,8.5])
plt.xlabel('')
plt.title('all')
for age in range(1,5):
plt.subplot(5,1,age+1)
        idx = df[(df['age'] == age) & (df['sex'] == 1)].index
sns.distplot(df['tend_cls'][idx],label='male',kde=False,bins=np.arange(-0.5,8.5,1),hist=True,norm_hist=True)
        idx = df[(df['age'] == age) & (df['sex'] == 0)].index
sns.distplot(df['tend_cls'][idx],label='female',kde=False,bins=np.arange(-0.5,8.5,1),hist=True,norm_hist=True)
plt.xlim([-0.5, 8.5])
#plt.ylim([0, 1])
plt.xlabel('')
plt.legend()
plt.title(str(age*20))
plt.xlabel('Tendency Class')
#plt.legend()
plt.tight_layout()
plt.suptitle(title)
plt.subplots_adjust(top=0.90)
plt.savefig(name)
plt.show()
if reload:
Lsize=[]
df= | pd.read_csv(age_list,sep='\t') | pandas.read_csv |
#!/usr/bin/env python
"""A Vector class and subclasses.
:py:class:`Vector` inherets :py:class:`~solarwindpy.core.Base`. The subclass
:py:class:`BField:` inheretes :py:class:`Vector`.
"""
import pdb # noqa: F401
import numpy as np
import pandas as pd
# We rely on views via DataFrame.xs to reduce memory size and do not
# `.copy(deep=True)`, so we want to make sure that this doesn't
# accidentally cause a problem.
| pd.set_option("mode.chained_assignment", "raise") | pandas.set_option |
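# Illustrative note (not part of the original module): with
# mode.chained_assignment set to "raise", a chained assignment such as
#
#     v = df.xs("b", axis=1, level="M")   # typically a view, not a copy
#     v["x"] = 0.0                        # raises SettingWithCopyError
#
# fails loudly instead of silently modifying -- or silently failing to
# modify -- the underlying frame, which is the safety net referred to above.
# The names "b", "M", and "x" here are hypothetical.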
#!/usr/bin/env python
# coding: utf-8
#------------------------------
# Import the needed libraries
#------------------------------
import argparse
import csv
import logging
import os
import subprocess
import pandas as pd
logging.basicConfig(level=logging.INFO,
format='%(asctime)s : %(levelname)s : %(message)s',
datefmt='%H:%M:%S')
logger = logging.getLogger('ctat_boosting')
def main():
#add options to inputs
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
description = "Adds exon splice distance annotations to vcf file (report up to len 10 distance away from splice).\n")
parser.add_argument('--input_vcf', required=True, help="input vcf file")
parser.add_argument('--gtf', required=True, help='Path to CTAT Mutations library annotations file.')
parser.add_argument('--output_vcf', required=True, help="output vcf file including annotation for distance to splice neighbor")
parser.add_argument("--temp_dir", default="/tmp", help="tmp directory")
args = parser.parse_args()
input_vcf = args.input_vcf
if input_vcf.endswith('.gz'):
if not os.path.exists(input_vcf[:len(input_vcf) - 3]):
subprocess.run(['gunzip', input_vcf])
input_vcf = input_vcf[:len(input_vcf) - 3]
gtf = args.gtf
out_file = args.output_vcf
temp_dir = args.temp_dir
# path to the temp sorted file
if not os.path.exists(temp_dir):
os.makedirs(temp_dir)
temp_sorted_vcf = os.path.join(temp_dir, "temp.sorted.vcf")
logger.info("\n################################\n Annotating VCF: Calculating DJ \n################################\n")
#~~~~~~~~~~~~~~
# Sort VCF
#~~~~~~~~~~~~~~
# Sorting the VCF file by lexicographically
## have to do this for bedtools closest
logger.info("Sorting VCF")
cmd = "grep '^#' {} > {} && grep -v '^#' {} | LC_ALL=C sort -t $'\t' -k1,1 -k2,2n >> {}".format(input_vcf, temp_sorted_vcf, input_vcf, temp_sorted_vcf)
# logger.info("CMD: {}".format(cmd))
subprocess.run(cmd, shell=True, executable='/bin/bash')
#~~~~~~~~~~~~~~
# Load VCF
#~~~~~~~~~~~~~~
# Read in the input vcf as a data frame
logger.info("Loading input VCF")
input_vcf_df = pd.read_csv(temp_sorted_vcf,
sep='\t', low_memory=False, comment='#', header =None,
names=["CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER", "INFO", "FORMAT", "ENCODING"])
#~~~~~~~~~~~~~~
# Get Exons file ready
#~~~~~~~~~~~~~~
# Load and process annotation file
# Get the path to the reference annotation file ref_annot.gtf
    if not os.path.exists(gtf):
        exit("File doesn't exist: {}".format(gtf))
# Open the file
ref_annot = pd.read_csv(gtf,
sep='\t', low_memory=False, comment='#', header =None)
# Subset the data to get only exons
ref_annot = ref_annot[ref_annot[2] == "exon"]
# Sort by locations and subset the data to get "chr start end"
ref_annot = ref_annot.sort_values(by=[0,3,4])
ref_annot = ref_annot.iloc[:,[0,3,4]]
# create temp bed file to pass to bedtools
temp_ref_annot = os.path.join(temp_dir, "temp.ref_annot.bed")
ref_annot.to_csv(temp_ref_annot, header=False, index = False, sep = "\t")
# Run BEDTools closeestBed
logger.info("Running closestBed")
cmd = "bedtools closest -header -t first -a {} -b {}".format(temp_sorted_vcf, temp_ref_annot)
# logger.info("CMD: {}".format(cmd))
distance_output = subprocess.check_output(cmd, shell=True).decode()
# ~~~~~~~~~~~~~~~
# Process Distances
# ~~~~~~~~~~~~~~~
# Convert the VCF from a string to a pandas dataframe
temp = distance_output.split('\n')
temp.remove('')
variants = [x for x in temp if x[0] != "#"]
test = pd.DataFrame(variants)
if len(test) >0:
## split the one column into many
vcf = test[0].str.split("\t",expand = True)
# find the distances
logger.info("Generating Distances")
test = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python
import argparse
import glob
import os
from abc import abstractmethod, ABC
from collections import defaultdict
import logging
import numpy as np
import pandas as pd
from sklearn.model_selection import RepeatedKFold
from qpputils import dataparser as dp
# TODO: change the functions to work with pandas methods such as idxmax
# TODO: Consider changing the folds file format to be more convenient for pandas DataFrames
parser = argparse.ArgumentParser(description='Cross Validation script',
usage='Use CV to optimize correlation',
epilog='Prints the average correlation')
parser.add_argument('-p', '--predictions', metavar='predictions_dir', default='predictions',
help='path to prediction results files directory')
parser.add_argument('--labeled', default='baseline/QLmap1000', help='path to labeled list res')
parser.add_argument('-r', '--repeats', default=30, help='number of repeats')
parser.add_argument('-k', '--splits', default=2, help='number of k-fold')
parser.add_argument('-m', '--measure', default='pearson', type=str,
help='default correlation measure type is pearson', choices=['pearson', 'spearman', 'kendall'], )
parser.add_argument("-g", "--generate", help="generate new CrossValidation sets", action="store_true")
parser.add_argument('-f', "--folds_file", metavar='CV_FILE_PATH', help="load existing CrossValidation JSON res",
default='2_folds_30_repetitions.json')
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S', level=logging.INFO)
class CrossValidation:
def __init__(self, folds_map_file=None, k=2, rep=30, predictions_dir=None, test='pearson', ap_file=None,
generate_folds=False, **kwargs):
logging.debug("testing logger")
self.k = k
self.rep = rep
self.test = test
assert predictions_dir, 'Specify predictions dir'
assert folds_map_file, 'Specify path for CV folds file'
predictions_dir = os.path.abspath(os.path.normpath(os.path.expanduser(predictions_dir)))
assert os.listdir(predictions_dir), f'{predictions_dir} is empty'
self.output_dir = dp.ensure_dir(predictions_dir.replace('predictions', 'evaluation'))
if ap_file:
self.full_set = self._build_full_set(predictions_dir, ap_file)
if '-' in ap_file:
self.ap_func = ap_file.split('-')[-1]
else:
self.ap_func = 'basic'
else:
self.full_set = self._build_full_set(predictions_dir)
if generate_folds:
self.index = self.full_set.index
self.folds_file = self._generate_k_folds()
self.__load_k_folds()
else:
try:
self.folds_file = dp.ensure_file(folds_map_file)
except FileExistsError:
print("The folds file specified doesn't exist, going to generate the file and save")
self.__load_k_folds()
# self.corr_df = NotImplemented
@abstractmethod
def calc_function(self, df: pd.DataFrame):
raise NotImplementedError
@staticmethod
def _build_full_set(predictions_dir, ap_file=None):
"""Assuming the predictions files are named : predictions-[*]"""
all_files = glob.glob(predictions_dir + "/*predictions*")
if 'uef' in predictions_dir:
# Excluding all the 5 and 10 docs predictions
if 'qf' in predictions_dir:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5+', 11, 14) and not os.path.basename(fn).endswith(
'-10+', 11, 15)]
else:
all_files = [fn for fn in all_files if
not os.path.basename(fn).endswith('-5') and not os.path.basename(fn).endswith('-10')]
list_ = []
for file_ in all_files:
fname = file_.split('-')[-1]
df = dp.ResultsReader(file_, 'predictions').data_df
df = df.rename(columns={"score": f'score_{fname}'})
list_.append(df)
if ap_file:
ap_df = dp.ResultsReader(ap_file, 'ap').data_df
list_.append(ap_df)
full_set = pd.concat(list_, axis=1, sort=True)
assert not full_set.empty, f'The Full set DF is empty, make sure that {predictions_dir} is not empty'
return full_set
def _generate_k_folds(self):
# FIXME: Need to fix it to generate a DF with folds, without redundancy
""" Generates a k-folds json res
:rtype: str (returns the saved JSON filename)
"""
rkf = RepeatedKFold(n_splits=self.k, n_repeats=self.rep)
count = 1
# {'set_id': {'train': [], 'test': []}}
results = defaultdict(dict)
for train, test in rkf.split(self.index):
train_index, test_index = self.index[train], self.index[test]
if count % 1 == 0:
results[int(count)]['a'] = {'train': train_index, 'test': test_index}
else:
results[int(count)]['b'] = {'train': train_index, 'test': test_index}
count += 0.5
temp = pd.DataFrame(results)
temp.to_json(f'{self.k}_folds_{self.rep}_repetitions.json')
return f'{self.k}_folds_{self.rep}_repetitions.json'
def __load_k_folds(self):
# self.data_sets_map = pd.read_json(self.file_name).T['a'].apply(pd.Series).rename(
# mapper={'train': 'fold-1', 'test': 'fold-2'}, axis='columns')
self.data_sets_map = pd.read_json(self.folds_file)
def _calc_eval_metric_df(self):
sets = self.data_sets_map.index
folds = self.data_sets_map.columns
corr_results = defaultdict(dict)
for set_id in sets:
for fold in folds:
train_queries = set()
# a hack to create a set out of train queries, from multiple lists
_ = {train_queries.update(i) for i in self.data_sets_map.loc[set_id, folds != fold].values}
test_queries = set(self.data_sets_map.loc[set_id, fold])
train_set = self.full_set.loc[map(str, train_queries)]
test_set = self.full_set.loc[map(str, test_queries)]
corr_results[set_id][fold] = pd.DataFrame(
{'train': self.calc_function(train_set), 'test': self.calc_function(test_set)})
corr_df = pd.DataFrame.from_dict(corr_results, orient='index')
try:
corr_df.to_pickle(
f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}.pkl')
except AttributeError:
corr_df.to_pickle(f'{self.output_dir}/correlations_for_{self.k}_folds_{self.rep}_repetitions_pageRank.pkl')
return corr_df
def calc_test_results(self):
if not hasattr(self, 'corr_df'):
self.corr_df = self._calc_eval_metric_df()
sets = self.data_sets_map.index
full_results = defaultdict(dict)
simple_results = defaultdict()
for set_id in sets:
_res_per_set = []
for fold in self.corr_df.loc[set_id].index:
max_train_param = self.corr_df.loc[set_id, fold].idxmax()['train']
train_result, test_result = self.corr_df.loc[set_id, fold].loc[max_train_param]
_res_per_set.append(test_result)
full_results[set_id, fold] = {'best_train_param': max_train_param.split('_')[1],
'best_train_val': train_result, 'test_val': test_result}
simple_results[f'set_{set_id}'] = np.mean(_res_per_set)
full_results_df = pd.DataFrame.from_dict(full_results, orient='index')
try:
full_results_df.to_json(
f'{self.output_dir}/'
f'full_results_vector_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}_{self.test}.json')
except AttributeError:
full_results_df.to_json(
f'{self.output_dir}/'
f'full_results_vector_for_{self.k}_folds_{self.rep}_repetitions_pageRank_{self.test}.json')
simple_results_df = pd.Series(simple_results)
try:
simple_results_df.to_json(
f'{self.output_dir}/'
f'simple_results_vector_for_{self.k}_folds_{self.rep}_repetitions_{self.ap_func}.json')
except AttributeError:
simple_results_df.to_json(
f'{self.output_dir}/'
f'simple_results_vector_for_{self.k}_folds_{self.rep}_repetitions_pageRank.json')
mean = simple_results_df.mean()
return f'{mean:.3f}'
@staticmethod
def read_eval_results(results_file):
# FIXME: need to fix it after changing the format of the eval files
temp_df = pd.read_json(results_file, orient='index')
# Split column of lists into several columns
res_df = pd.DataFrame(temp_df['best train a'].values.tolist(), index=temp_df.index.str.split().str[1],
columns=['a', 'train_correlation_a'])
res_df.rename_axis('set', inplace=True)
res_df[['b', 'train_correlation_b']] = pd.DataFrame(temp_df['best train b'].values.tolist(),
index=temp_df.index.str.split().str[1])
return res_df
class InterTopicCrossValidation(CrossValidation, ABC):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.calc_function = self.calc_inter_topic_corr if kwargs.get('ap_file') else self.calc_inter_topic_scores
# self.corr_df = self._calc_eval_metric_df()
def calc_inter_topic_corr(self, df):
dict_ = {}
for col in df.columns:
if col != 'ap':
dict_[col] = df[col].corr(df['ap'], method=self.test)
else:
continue
return pd.Series(dict_)
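# Illustrative sketch of what calc_inter_topic_corr computes (toy column names and values,
# not taken from the project):
#   df = pd.DataFrame({'score_a': [1, 2, 3], 'score_b': [3, 2, 1], 'ap': [1, 2, 3]})
#   calc_inter_topic_corr(df)  ->  pd.Series({'score_a': 1.0, 'score_b': -1.0})  # with self.test == 'pearson'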
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import nltk.tokenize
import psycopg2
import pandas as pd
import sys
def getDocData(PORT, documentTable = 'document'):
df = pd.DataFrame()
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 11 13:39:16 2020
@author: PicSyn
"""
import streamlit as st
import pandas as pd
import numpy as np
st.title('Uber')
DATE_COLUMN= 'date/time'
DATA_URL=('https://s3-us-west-2.amazonaws.com/'
'streamlit-demo-data/uber-raw-data-sep14.csv.gz')
@st.cache
def load_data(nrows):
data=pd.read_csv(DATA_URL,nrows=nrows)
lowercase=lambda x: str(x).lower()
data.rename(lowercase, axis='columns', inplace=True)
data[DATE_COLUMN] = pd.to_datetime(data[DATE_COLUMN])
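# Typical continuation of this Streamlit demo (a sketch of the standard tutorial flow, not part
# of the original file; it assumes load_data() ends with `return data`):
# data = load_data(10000)
# st.subheader('Raw data')
# st.write(data)
# hist_values = np.histogram(data[DATE_COLUMN].dt.hour, bins=24, range=(0, 24))[0]
# st.bar_chart(hist_values)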
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected = DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
tm.assert_frame_equal(result, expected)
def test_usecols_name_length_conflict(all_parsers):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
msg = "Number of passed names did not match number of header fields in the file"
with pytest.raises(ValueError, match=msg):
parser.read_csv(StringIO(data), names=["a", "b"], header=None, usecols=[1])
def test_usecols_single_string(all_parsers):
# see gh-20558
parser = all_parsers
data = """foo, bar, baz
1000, 2000, 3000
4000, 5000, 6000"""
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols="foo")
@pytest.mark.parametrize(
"data", ["a,b,c,d\n1,2,3,4\n5,6,7,8", "a,b,c,d\n1,2,3,4,\n5,6,7,8,"]
)
def test_usecols_index_col_false(all_parsers, data):
# see gh-9082
parser = all_parsers
usecols = ["a", "c", "d"]
expected = DataFrame({"a": [1, 5], "c": [3, 7], "d": [4, 8]})
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("index_col", ["b", 0])
@pytest.mark.parametrize("usecols", [["b", "c"], [1, 2]])
def test_usecols_index_col_conflict(all_parsers, usecols, index_col):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"c": [1, 2]}, index=Index(["a", "b"], name="b"))
result = parser.read_csv(StringIO(data), usecols=usecols, index_col=index_col)
tm.assert_frame_equal(result, expected)
def test_usecols_index_col_conflict2(all_parsers):
# see gh-4201: test that index_col as integer reflects usecols
parser = all_parsers
data = "a,b,c,d\nA,a,1,one\nB,b,2,two"
expected = DataFrame({"b": ["a", "b"], "c": [1, 2], "d": ("one", "two")})
expected = expected.set_index(["b", "c"])
result = parser.read_csv(
StringIO(data), usecols=["b", "c", "d"], index_col=["b", "c"]
)
tm.assert_frame_equal(result, expected)
def test_usecols_implicit_index_col(all_parsers):
# see gh-2654
parser = all_parsers
data = "a,b,c\n4,apple,bat,5.7\n8,orange,cow,10"
result = parser.read_csv(StringIO(data), usecols=["a", "b"])
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_regex_sep(all_parsers):
# see gh-2733
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), sep=r"\s+", usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
def test_usecols_with_whitespace(all_parsers):
parser = all_parsers
data = "a b c\n4 apple bat 5.7\n8 orange cow 10"
result = parser.read_csv(StringIO(data), delim_whitespace=True, usecols=("a", "b"))
expected = DataFrame({"a": ["apple", "orange"], "b": ["bat", "cow"]}, index=[4, 8])
tm.assert_frame_equal(result, expected)
'''
Plotter that collects all plotting functionality in one place.
Where available, it uses the simple plotting helpers included in the different classes
and merges them to create more meaningful plots.
'''
from __future__ import print_function, division
import numpy as np
import pandas as pd
import math
from warnings import warn
#from .metergroup import MeterGroup, iterate_through_submeters_of_two_metergroups
#from .electric import align_two_meters
import matplotlib as mpl
import matplotlib.pyplot as plt
import itertools
import seaborn as sns
from nilmtk import TimeFrameGroup, TimeFrame
import matplotlib.dates as mdates
#############################################################
#region Nilm Plotting
def plot_overall_power_vs_disaggregation(main_meter, disaggregations, verbose = False):
""" The plot for validating the NILM algorithm.
Plots the disaggregation below the overall powerflow together with
orientation lines.
Parameters
----------
main_meter: nilmtk.Electric
The site meter with the overall power flow.
disaggregations: nilmtk.MeterGroup
MeterGroup with all the disaggregated meters.
verbose: bool
Whether additional output is printed.
"""
# Create the main figure
fig = plt.figure() #, tight_layout=True)
# Create one bigger subplot for the overall power
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
timeframe.start = timeframe.end - pd.Timedelta("48h")
ax = fig.add_subplot(4,1,1)
if main_meter is not None:
main_meter.plot(ax, timeframe=timeframe, sample_period=2)
ax.set_xlim([timeframe.start, timeframe.end])
ax.set_xlabel('Time', fontsize=12)
ax.set_title('Disaggregation', fontsize=14)
#ax.set_ylabel('{0}'.format(i), fontsize=12)
# Create multiple smaller ones for the disaggregated flows
n = len(disaggregations.meters)
sections = math.ceil(n / 2 * 3)
size_main_figure = math.ceil(sections / 3)
for i, dis in enumerate(disaggregations.meters):
if verbose:
print(str(i) + "/" + str(n))
sub_ax = fig.add_subplot(sections, 1, size_main_figure+i+1)
dis.plot(sub_ax,timeframe=timeframe, legend = False, sample_period = 2)
ax.get_shared_x_axes().join(ax, sub_ax)
ax.get_shared_y_axes().join(ax, sub_ax)
sub_ax.set_ylim(ax.get_ylim())
if i != 2:
ax.set_ylabel("")
#sub_ax.set_xlim([timeframe.start, timeframe.end])
# Link the axis
plt.setp(ax.get_xticklabels(), visible=True)
#fig.subplots_adjust(hspace=0.0)
return fig
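# Usage sketch (added for illustration, not part of the original module): how the plot above is
# typically produced from a disaggregation output. The HDF5 path and building number are
# placeholders; the standard nilmtk DataSet/MeterGroup API is assumed.
def _example_plot_overall_power_vs_disaggregation():
    from nilmtk import DataSet
    ds = DataSet('disag_output.h5')
    elec = ds.buildings[1].elec
    fig = plot_overall_power_vs_disaggregation(elec.mains(), elec.submeters(), verbose=True)
    fig.savefig('disaggregation_overview.png')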
def plot_phases(building, interval = pd.Timedelta("1d"), verbose = False):
''' Simply plots all three phases to see the output.
This is equal to plotting the different sitemeters of the building.
Parameters
----------
building: nilmtk.building
The building for which the different phases are plottet.
interval: pd.Timedelta
The timedelta to plot.
verbose: bool
Whether to plot additional output.
'''
fig = plt.figure()
start = building.elec.sitemeters()[1].get_timeframe().start
new_timeframe = TimeFrameGroup([TimeFrame(start=start, end = start + interval)])
flows = []
for i in range(1,4):
if verbose:
print("Load {0}/{1}".format(i,3))
flows.append(building.elec.sitemeters()[i].power_series_all_data(sections=new_timeframe))
all = pd.concat(flows, axis = 1)
all.columns = ['Phase 1', 'Phase 2', 'Phase 3']
all.plot(colors=['r', 'g', 'b'], ax = fig.add_subplot(111))
return fig
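# Usage sketch (illustration only): plot the three site-meter phases of one building.
# The dataset path and building number are placeholders; the nilmtk DataSet API is assumed.
def _example_plot_phases():
    from nilmtk import DataSet
    ds = DataSet('three_phase_building.h5')
    fig = plot_phases(ds.buildings[1], interval=pd.Timedelta("6h"), verbose=True)
    fig.savefig('phases.png')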
def plot_stackplot(disaggregations, total_power = None, stacked = True, verbose = True):
""" Plots a stackplot, which stacks all disaggregation results on top of each other.
Parameters
----------
disaggregations: nilmtk.MeterGroup
Remember appliance 0 is the rest powerflow
total_power: nilmtk.Electric (optional)
An additional plot of the whole power flow, just for comparison.
It should equal all the disaggregated meters stacked together.
verbose: bool
Whether to print additional information
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
"""
timeframe = disaggregations.get_timeframe(intersection_instead_union = False)
timeframe.start = timeframe.end - pd.Timedelta("48h")
# Additional total power plot if demanded
fig = plt.figure()
if total_power is not None:
ax = fig.add_subplot(211)
total_power.power_series_all_data(sections=[timeframe], sample_period=2).plot(ax = ax)
ax = fig.add_subplot(212)
else:
ax = fig.add_subplot(111)
# The stacked plot
all = pd.DataFrame(disaggregations.meters[0].power_series_all_data(sections=[timeframe], sample_period=2).rename('Rest'))
for i, dis in enumerate(disaggregations.meters):
if i == 0:
continue
name = "Appliance " + str(i)
if verbose:
print(name)
all[name] = dis.power_series_all_data(sections=[timeframe], sample_period=2)
all = all.fillna(0)
all.plot.area(ax = ax, stacked = stacked)
ax.set_xscale("log", nonposx='clip')
ax.set_xlim([timeframe.start, timeframe.end])
return fig
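# Usage sketch (illustration only): stack all disaggregated appliances and compare them with the
# mains. Path and building id are placeholders; the nilmtk DataSet/MeterGroup API is assumed.
def _example_plot_stackplot():
    from nilmtk import DataSet
    ds = DataSet('disag_output.h5')
    elec = ds.buildings[1].elec
    fig = plot_stackplot(elec.submeters(), total_power=elec.mains(), stacked=True, verbose=False)
    fig.savefig('stackplot.png')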
def plot_segments(transitions, steady_states, ax = None):
'''
This function takes the events and plots the segments.
Parameters
----------
transitions:
The transitions with the 'segment' field set
steady_states:
The steady states with the 'segment' field set
ax: matplotlib.axes.Axes
An axis object to print to.
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
'''
# Prepare plot: create a new figure only if no axis object was passed in
if ax is None:
    fig = plt.figure()
    ax = fig.add_subplot(111)
else:
    fig = ax.get_figure()
#ax.xaxis.axis_date()
# Sort segments to always plot lower segment on top
steady_states['segment'] = transitions.set_index('starts')['segment']
steady_states.sort_index(ascending = True, inplace = True)
steady_states['starts'] = steady_states.index
firsts = steady_states.groupby('segment').first()
firsts = firsts.sort_values('starts', ascending = False).index
# Fill_between does the trick
for cur in firsts:
rows = steady_states[steady_states['segment'] == cur]
ax.fill_between(rows.index.to_pydatetime(), rows['active average'].values, 0, step='post')
ax.set_xlabel("Time", fontsize = "12")
ax.set_ylabel("Power [W]", fontsize = "12")
ax.autoscale_view()
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
return fig
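# Minimal sketch with synthetic event data (not from the original project); in practice the
# `transitions` and `steady_states` frames come from an event-based disaggregation step.
def _example_plot_segments():
    idx = pd.date_range('2017-01-01 12:00', periods=4, freq='1min')
    steady_states = pd.DataFrame({'active average': [0.0, 120.0, 0.0, 80.0]}, index=idx)
    transitions = pd.DataFrame({'starts': idx, 'segment': [0, 1, 1, 2]})
    return plot_segments(transitions, steady_states)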
def plot_evaluation_assignments(sec_ground_truth, sec_disaggregations, assignments,
gt_meters = None, timeframe = None, verbose = False):
'''
This function plots the assignments of the preassignment during the NILM evaluation.
The plot has three columns:
- The original disaggregated meters
- The ground_truth meters
- the combination of the meters assigned to the ground truth meters.
Parameters
----------
sec_ground_truth: [nilmtk.TimeFrameGroup]
The on-sections of the ground truth.
sec_disaggregations: [nilmtk.TimeFrameGroup]
The on sections of the disaggregated meters. Some of these purely
disaggregated meters might belong to the same ground truth appliance.
assignments: dict(int -> [int])
A dictionary with its entries mapping from a number of the ground_truth meters to a
list of disaggregation meters. This enables the combination of the disaggregation meters.
gt_meters: nilmtk.Electric
If set, the meters are used to get the captions for the plots
timeframe: nilmtk.Timeframe
A timeframe for which the plot shall be drawn. If kept None, the whole timeframe
of the ground_truth is plotted.
verbose: bool
If additional output is generated
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
'''
fig = plt.figure(figsize=(50,50)) #, tight_layout=True)
if timeframe is None:
timeframe = TimeFrameGroup(map(lambda cur: cur.get_timeframe(), sec_ground_truth)).get_timeframe()
limit = TimeFrameGroup([timeframe])
overall_length = max([len(sec_ground_truth), len(sec_disaggregations)])
# Plot before assignment
for i, cur_nonzero in enumerate(sec_disaggregations):
ax = fig.add_subplot(overall_length,3,1+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
# Plot the original load
for i, cur_nonzero in enumerate(sec_ground_truth):
ax = fig.add_subplot(overall_length,3,2+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
if gt_meters is not None:
ax.set_title(gt_meters.meters[i].appliances[0].metadata['type'])
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
# Plot assigned disaggregations right
for i in range(len(sec_ground_truth)):
cur_nonzero = TimeFrameGroup.union_many(map(lambda a: sec_disaggregations[a], assignments[i]))
ax = fig.add_subplot(overall_length,3,3+i*3)
limited = cur_nonzero.intersection(limit)
if verbose:
print(str(i) + ": " + str(len(limited._df)))
limited.plot(ax=ax)
ax.set_title(str(assignments[i]))
ax.set_xlim([timeframe.start, timeframe.end])
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
ax.set_xlabel("Time")
ax.set_ylabel("Activation")
return fig
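# Minimal sketch with synthetic on/off sections (illustration only): two disaggregated meters are
# assigned to a single ground-truth meter. All timestamps are placeholders.
def _example_plot_evaluation_assignments():
    start = pd.Timestamp('2017-01-01')
    gt = [TimeFrameGroup([TimeFrame(start, start + pd.Timedelta('1h'))])]
    dis = [TimeFrameGroup([TimeFrame(start, start + pd.Timedelta('30min'))]),
           TimeFrameGroup([TimeFrame(start + pd.Timedelta('30min'), start + pd.Timedelta('1h'))])]
    assignments = {0: [0, 1]}
    return plot_evaluation_assignments(gt, dis, assignments,
                                       timeframe=TimeFrame(start, start + pd.Timedelta('1h')))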
def plot_multiphase_event(original_powerflows, original_adapted, multiphase_events, section,
surrounding = 30, col = "active transition", plot_freq = "2s", verbose = False):
''' This function is used to plot multiphase events.
It shows how the multiphase events are cut out and put inside separate power flows.
Parameters
----------
original_powerflows: [pd.DataFrame]
The original transients as DataFrame one per phase
original_adapted: [pd.DataFrame]
The adapted original phases from which the multiphase
events have been removed.
multiphase_events:
The separated transients appearing in multiple phases.
section: nilmtk.TimeFrame
The section which shall be plotted.
surrounding: int
Minutes of the original power flows plotted
around the interesting section.
col: index
Which is the power transient index
plot_freq: str
The frequency with which the powerflows are resampled before being plotted.
verbose: bool
Whether to print additional information
Returns
-------
fig: matplotlib.figure.Figure
The newly plotted figure
'''
if not isinstance(surrounding, pd.Timedelta):
    surrounding = pd.Timedelta(minutes=surrounding)
import unittest
from pydre import project
from pydre import core
from pydre import filters
from pydre import metrics
import os
import glob
import contextlib
import io
from tests.sample_pydre import project as samplePD
from tests.sample_pydre import core as c
import pandas
import numpy as np
from datetime import timedelta
import logging
import sys
class WritableObject:
def __init__(self):
self.content = []
def write(self, string):
self.content.append(string)
# Test cases of following functions are not included:
# Reason: unmaintained
# in common.py:
# tbiReaction()
# tailgatingTime() & tailgatingPercentage()
# ecoCar()
# gazeNHTSA()
#
# Reason: incomplete
# in common.py:
# findFirstTimeOutside()
# brakeJerk()
class TestPydre(unittest.TestCase):
ac_diff = 0.000001
# the acceptable difference between expected & actual results when testing scipy functions
def setUp(self):
# self.whatever to access them in the rest of the script, runs before other scripts
self.projectlist = ["honda.json"]
self.datalist = ["Speedbump_Sub_8_Drive_1.dat", "ColTest_Sub_10_Drive_1.dat"]
self.zero = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
funcName = ' [ ' + self._testMethodName + ' ] ' # the name of test function that will be executed right after this setUp()
print(' ')
print (funcName.center(80,'#'))
print(' ')
def tearDown(self):
print(' ')
print('[ END ]'.center(80, '#'))
print(' ')
# ----- Helper Methods -----
def projectfileselect(self, index: int):
projectfile = self.projectlist[index]
fullpath = os.path.join("tests/test_projectfiles/", projectfile)
return fullpath
def datafileselect(self, index: int):
datafile = self.datalist[index]
fullpath = glob.glob(os.path.join(os.getcwd(), "tests/test_datfiles/", datafile))
return fullpath
def secs_to_timedelta(self, secs):
return timedelta(weeks=0, days=0, hours=0, minutes=0, seconds=secs)
def compare_cols(self, result_df, expected_df, cols):
result = True
for names in cols:
result = result and result_df[names].equals(expected_df[names])
if not result:
print(names)
print(result_df[names])
print("===")
print(expected_df[names])
return False
return result
# convert a drivedata object to a str
def dd_to_str(self, drivedata: core.DriveData):
output = ""
output += str(drivedata.PartID)
output += str(drivedata.DriveID)
output += str(drivedata.roi)
output += str(drivedata.data)
output += str(drivedata.sourcefilename)
return output
# ----- Test Cases -----
def test_datafile_exist(self):
datafiles = self.datafileselect(0)
self.assertFalse(0 == len(datafiles))
for f in datafiles:
self.assertTrue(os.path.isfile(f))
def test_reftest(self):
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
results = p.run(self.datafileselect(0))
results.Subject.astype('int64')
sample_p = samplePD.Project(desiredproj)
expected_results = (sample_p.run(self.datafileselect(0)))
self.assertTrue(self.compare_cols(results, expected_results, ['ROI', 'getTaskNum']))
def test_columnMatchException_excode(self):
f = io.StringIO()
with self.assertRaises(SystemExit) as cm:
desiredproj = self.projectfileselect(0)
p = project.Project(desiredproj)
result = p.run(self.datafileselect(1))
self.assertEqual(cm.exception.code, 1)
def test_columnMatchException_massage(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184]}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
handler = logging.FileHandler(filename='tests\\temp.log')
filters.logger.addHandler(handler)
with self.assertRaises(core.ColumnsMatchError):
result = filters.smoothGazeData(data_object)
expected_console_output = "Can't find needed columns {'FILTERED_GAZE_OBJ_NAME'} in data file ['test_file3.csv'] | function: smoothGazeData"
temp_log = open('tests\\temp.log')
msg_list = temp_log.readlines()
msg = ' '.join(msg_list)
filters.logger.removeHandler(handler)
#self.assertIn(expected_console_output, msg)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_1(self):
d = {'col1': [1, 2, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 3, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1 7\n1 2 8\n2 3 9"
self.assertEqual(result, expected_result)
#Isolate this test case No more sliceByTime Function in pydre.core
def test_core_sliceByTime_2(self):
d = {'col1': [1, 1.1, 3, 4, 5, 6], 'col2': [7, 8, 9, 10, 11, 12]}
df = pandas.DataFrame(data=d)
result = (c.sliceByTime(1, 2, "col1", df).to_string()).lstrip()
expected_result = "col1 col2\n0 1.0 7\n1 1.1 8"
self.assertEqual(result, expected_result)
def test_core_mergeBySpace(self):
d1 = {'SimTime': [1, 2], 'XPos': [1, 3], 'YPos': [4, 3]}
df1 = pandas.DataFrame(data=d1)
d2 = {'SimTime': [3, 4], 'XPos': [10, 12], 'YPos': [15, 16]}
df2 = pandas.DataFrame(data=d2)
data_object1 = core.DriveData.initV2(PartID=0,DriveID=1, data=df1, sourcefilename="test_file.csv")
data_object2 = core.DriveData.initV2(PartID=0, DriveID=2, data=df2, sourcefilename="test_file.csv")
param = []
param.append(data_object1)
param.append(data_object2)
result = self.dd_to_str(core.mergeBySpace(param))
expected_result = "01None SimTime XPos YPos\n0 1 1 4\n1 2 3 3\n0 2 10 15\n1 3 12 16test_file.csv"
self.assertEqual(result, expected_result)
def test_filter_numberSwitchBlocks_1(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
print(result.data)
print(expected_result.data)
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_2(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
'taskblocks': [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_numberSwitchBlocks_3(self):
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object3 = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.numberSwitchBlocks(drivedata=data_object3)
#print(result.to_string())
expected = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'TaskStatus': [0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0],
'taskblocks': [np.nan, np.nan, np.nan, np.nan, np.nan, 1.0, 1.0, 1.0, 1.0, np.nan, np.nan]}
expected_result_df = pandas.DataFrame(data=expected)
expected_result = core.DriveData( data=expected_result_df, sourcefilename="test_file3.csv")
self.assertEqual(len(result.data), len(expected_result.data))
self.assertTrue((self.compare_cols(expected_result.data, result.data, ['DatTime', 'TaskStatus', 'taskblocks'])))
self.assertEqual(result.sourcefilename, expected_result.sourcefilename)
def test_filter_smoothGazeData_1(self):
d3 = {'DatTime': [0.017, 0.034, 0.05, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'FILTERED_GAZE_OBJ_NAME': ['localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen', 'localCS.CSLowScreen',
'localCS.CSLowScreen', 'localCS.CSLowScreen']}
# the func should be able to identify this in-valid input and returns None after prints
# "Bad gaze data, not enough variety. Aborting"
print("expected console output: Bad gaze data, not enough variety. Aborting")
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object)
#print(result.to_string())
self.assertEqual(None, result)
def test_filter_smoothGazeData_2(self):
d3 = {'DatTime': [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4],
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
result = filters.smoothGazeData(data_object, latencyShift=0)
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.WindScreen', 'localCS.WindScreen', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane'],
'gaze': ["offroad", "offroad", "offroad", "offroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad", "onroad",
"onroad", "onroad", "onroad", "onroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad"],
'gazenum': np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_filter_smoothGazeData_3(self):
# --- Construct input ---
dat_time_col = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8,
1.9, 2.0, 2.1, 2.2, 2.3, 2.4]
gaze_col = ['localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.WindScreen', 'localCS.dashPlane',
'localCS.WindScreen', 'localCS.dashPlane', 'localCS.WindScreen',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane',
'localCS.dashPlane', 'localCS.dashPlane', 'localCS.dashPlane']
d3 = {'DatTime': dat_time_col, 'FILTERED_GAZE_OBJ_NAME': gaze_col}
df = pandas.DataFrame(data=d3)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# ----------------------
result = filters.smoothGazeData(data_object, latencyShift=0)
print(result.data)
timedelta_col = []
for t in dat_time_col:
timedelta_col.append(self.secs_to_timedelta(t))
expected = {'timedelta': timedelta_col, 'DatTime': dat_time_col,
'FILTERED_GAZE_OBJ_NAME': gaze_col,
'gaze': ["offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad",
"offroad", "offroad", "offroad", "offroad", "offroad", "offroad"],
'gazenum': np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=np.int32)}
expected_result_df = pandas.DataFrame(data=expected)
self.assertTrue(expected_result_df.equals(result.data))
#self.assertTrue(self.compare_cols(result.data[0], expected_result_df, ['DatTime', 'FILTERED_GAZE_OBJ_NAME', 'gaze', 'gazenum']))
def test_metrics_findFirstTimeAboveVel_1(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [-0.000051, -0.000051, -0.000041, -0.000066, -0.000111, -0.000158, -0.000194, -0.000207, 0.000016, 0.000107, 0.000198]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_2(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = -1
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_3(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeAboveVel_4(self):
# --- construct input ---
d = {'DatTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'Velocity': [25, 25, 25, 25, 25, 25, 25, 25, 25, 25, 25]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.findFirstTimeAboveVel(data_object)
expected_result = 0
self.assertEqual(expected_result, result)
def test_metrics_findFirstTimeOutside_1(self):
pass
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 20.1, 21.0, 22.0, 23.12, 25.1, 26.3, 27.9, 30.1036, 31.3, 32.5]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
#result = metrics.common.findFirstTimeOutside(data_object)
#expected_result = 0
#self.assertEqual(expected_result, result)
#err: NameError: name 'pos' is not defined --------------------------------------------------------!!!!!!!!!
def test_metrics_colMean_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position')
expected_result = 5
self.assertEqual(expected_result, result)
def test_metrics_colMean_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = 6.5
self.assertEqual(expected_result, result)
def test_metrics_colMean_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMean(data_object, 'position', 3)
expected_result = np.nan
#self.assertEqual(expected_result, result)
np.testing.assert_equal(expected_result, result)
def test_metrics_colSD_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 3.1622776601683795
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position', 3)
expected_result = 2.29128784747792
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colSD_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colSD(data_object, 'position')
expected_result = 0
self.assertTrue(self.ac_diff > abs(expected_result - result))
def test_metrics_colMax_1(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 10
self.assertEqual(expected_result, result)
def test_metrics_colMax_2(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]} #input
df = pandas.DataFrame(data=d)
data_object = core.DriveData( data=df, sourcefilename="test_file3.csv")
# -----------------------
result = metrics.common.colMax(data_object, 'position')
expected_result = 9
self.assertEqual(expected_result, result)
def test_metrics_colMax_3(self):
# --- construct input ---
d = {'SimTime': [0.017, 0.034, 0.050, 0.067, 0.084, 0.1, 0.117, 0.134, 0.149, 0.166, 0.184],
'position': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]} #input
df = pandas.DataFrame(data=d)
'''
Cleans and combines the census and inspection data.
'''
#Import statements
import pandas as pd
from . import cleaning_functions as cf
from . import merging_functions as mf
#Constants
NAME = 'Name'
CITY = 'City'
VIOLATIONTYPE = 'Violation Type'
ZIPCODE = 'zipcode'
ZIPCODESPACE = 'Zip Code'
LONGITITUDE = 'Longitude'
INSPECTDATE = 'Inspection Date'
RESTAURANTNAME = 'Program Identifier'
ADDRESS = 'Address'
GRADE = 'Grade'
GEOID = 'GEO.id2'
HC01 = 'HC01_EST_VC01'
HC02 = 'HC02_EST_VC01'
HC03 = 'HC03_EST_VC01'
HC04 = 'HC04_EST_VC01'
HC05 = 'HC05_EST_VC01'
HC06 = 'HC06_EST_VC01'
POPULATION = 'Population'
MARRIED = 'No_Married(%)'
WIDOWED = 'Widowed(%)'
DIVORCED = 'Divorced(%)'
SEPARATED = 'Separated(%)'
NEVERMARRIED = 'Never_Married(%)'
MARRIEDTOT = 'No_Married'
WIDOWEDTOT = 'Widowed'
DIVORCEDTOT = 'Divorced'
SEAPARATEDTOT = 'Separated'
NEVERMARRIEDTOT = 'Never_Married'
HOUSEHOLDS = 'Number_Households'
PERDISTHOUSE = 'Percent_Distribution_Housesholds'
MEDINCOME = 'Median_Income_Households'
SERIAL = 'Inspection_Serial_Num'
VIOLATIONID = 'Violation_Record_ID'
PHONE = 'Phone'
PROGID = 'Program Identifier'
MARITAL_CENSUS = './data/raw_data/Marital_ACS_17_5YR_S1201_with_ann.csv'
INCOME_CENSUS = './data/raw_data/Income_ACS_17_5YR_S1903_with_ann.csv'
FOOD_INSPECTION = './data/raw_data/Food_Establishment_INSPECTION.csv'
INSPECTION_OUTPUT = './data/clean_data/clean_inspection.csv'
CENSUS_OUTPUT = './data/clean_data/clean_census.csv'
COMBINED_OUTPUT = './data/clean_data/combined.csv'
SEATTLEZIPS = [98101, 98102, 98103, 98104, 98105, 98106, 98107, 98108, 98109,
98112, 98115, 98116, 98117, 98118, 98119, 98121, 98122, 98125,
98126, 98133, 98134, 98136, 98144, 98146, 98154, 98155, 98164,
98168, 98174, 98177, 98178, 98195, 98199]
### Marital dataframe creation
DF_MARITAL = pd.read_csv(MARITAL_CENSUS)
import logging
# Our imports
import emission.core.get_database as edb
import emission.analysis.modelling.tour_model.cluster_pipeline as pipeline
import emission.analysis.modelling.tour_model.similarity as similarity
import emission.analysis.modelling.tour_model.featurization as featurization
import emission.analysis.modelling.tour_model.representatives as representatives
import emission.storage.decorations.analysis_timeseries_queries as esda
import pandas as pd
from numpy import *
# Spanish words to English
span_eng_dict = {'revisado_bike':'test ride with bike','placas_de carro':'car plates','aseguranza':'insurance',
'iglesia':'church','curso':'course','mi_hija recién aliviada':'my daughter just had a new baby',
'servicio_comunitario':'community service','pago_de aseguranza':'insurance payment',
'grupo_comunitario':'community group','caminata_comunitaria':'community walk'}
# Convert purpose
map_pur_dict = {'course':'school','work_- lunch break':'lunch_break','on_the way home':'home',
'insurance_payment':'insurance'}
# precision_bins takes five parameters
# - all_bins_preci: the list that collects the precision of each bin; pass in an empty list
# - bins: the bins (lists of trip indices) to evaluate
# - non_empty_trips: the trips whose "user_input" is not empty
# - sp2en=None means no need to translate language
#   sp2en='True' will use span_eng_dict to change Spanish to English
#
# - cvt_purpose=None means no need to convert purposes
#   cvt_purpose='True' will use map_pur_dict to convert purposes
#   using this parameter should also set sp2en='True'
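# Example call (sketch only): collect the per-bin precision for bins produced by the similarity
# step, translating Spanish labels and normalising purposes first.
# all_bins_preci = []
# precision_bins(all_bins_preci, bins, non_empty_trips, sp2en='True', cvt_purpose='True')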
def precision_bins (all_bins_preci,bins,non_empty_trips,sp2en=None,cvt_purpose=None):
for bin in bins:
bin_user_input = (non_empty_trips[i].data["user_input"] for i in bin if
non_empty_trips[i].data["user_input"] != {})
bin_df = pd.DataFrame(data=bin_user_input)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
def train(X_train, y_train):
regr = RandomForestRegressor(max_depth=4, random_state=0)
regr.fit(X_train, y_train)
feat_imp = regr.feature_importances_
feat_labels = list(X_train.columns)
feat_importances = {}
for key, val in zip(feat_labels, feat_imp):
feat_importances[key] = val
feat_importances = dict(sorted(feat_importances.items(), key=lambda x: x[1], reverse=True))
print("Top-5 most important features:")
print(list(feat_importances.items())[0:5], "\n")
return regr
def testRegression(model, X_test, y_test):
r2_val = model.score(X_test, y_test)
n = X_test.shape[0]
p = X_test.shape[1]
adjusted_r2_val = 1 - ((1 - r2_val) * (n - 1) / (n - p - 1))
print("Regression results: ")
print("R^2 value = ", r2_val)
print("Adjusted R^2 value = ", adjusted_r2_val, "\n")
def testClassification(model, X_test, y_test):
y_pred = model.predict(X_test)
y_pred = list(y_pred)
y_test = list(y_test)
y_pred_scores = {}
y_test_scores = {}
keys = []
index = 0
for i, j in zip(y_pred, y_test):
y_pred_scores[index] = i
y_test_scores[index] = j
keys.append(index)
index += 1
tp = 0
fp = 0
tn = 0
fn = 0
n = len(keys)
num_samples = 3
for ind in range(5, n - 5):
cur_key = keys[ind]
prev_keys = np.random.choice(keys[:ind], num_samples, replace=False)
next_keys = np.random.choice(keys[ind + 1:], num_samples, replace=False)
for p_key in prev_keys:
if y_test_scores[cur_key] >= y_test_scores[p_key]:
if y_pred_scores[cur_key] >= y_pred_scores[p_key]:
tp += 1
else:
fn += 1
else:
if y_pred_scores[cur_key] < y_pred_scores[p_key]:
tn += 1
else:
fp += 1
for n_key in next_keys:
if y_test_scores[cur_key] >= y_test_scores[n_key]:
if y_pred_scores[cur_key] >= y_pred_scores[n_key]:
tp += 1
else:
fn += 1
else:
if y_pred_scores[cur_key] < y_pred_scores[n_key]:
tn += 1
else:
fp += 1
precision = tp / (tp + fp)
recall = tp / (tp + fn)
f1_score = 2 * precision * recall / (precision + recall)
print("Classification results: ")
print("Precision = ", precision)
print("Recall = ", recall)
print("F1 score = ", f1_score, "\n")
def normalizeCols(col_name, df, scaler):
out_data = scaler.fit_transform(np.array(df[col_name]).reshape(-1, 1))
out_data = np.squeeze(out_data)
return out_data
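# Usage sketch (illustration only): min-max scale a single numeric column of a toy frame.
# The column name and values are placeholders.
def _example_normalize_cols():
    toy_df = pd.DataFrame({"memory_size": [2.0, 4.0, 8.0, 16.0]})
    return normalizeCols("memory_size", toy_df, MinMaxScaler())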
if __name__ == "__main__":
regression_dataset_file = "gpu_regression_dataset.csv"
df = pd.read_csv(regression_dataset_file)
# Handle Categorical Variables:
memory_type_feat_cols = pd.get_dummies(df["memory_type"], prefix="memory_type")
# Drop low importance features:
df.drop(columns=["gpu_chip", "memory_type", "bus_info"], inplace=True)
df_total = pd.concat([df, memory_type_feat_cols], axis=1)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
gen_sgRNAs.py generates sgRNAs as part of ExcisionFinder. New Cas enzymes can be added by modifying CAS_LIST.txt.
Written in Python v 3.6.1.
<NAME> et al 2018.
Usage:
gen_sgRNAs.py [-chvrd] <bcf> <annots_file> <locus> <pams_dir> <ref_fasta> <out> <cas_types> <guide_length> [<gene_vars>] [--crispor=<ref_gen>] [--hom] [--bed] [--max_indel=<S>] [--strict]
gen_sgRNAs.py [-chvrd] <locus> <pams_dir> <ref_fasta> <out> <cas_types> <guide_length> [<gene_vars>] [--crispor=<ref_gen>] [--hom] [--bed] [--max_indel=<S>] --ref_guides [--strict]
gen_sgRNAs.py -C | --cas-list
Arguments:
bcf BCF/VCF file with genotypes.
annots_file Annotated variant for whether each generates an allele-specific sgRNA site.
locus Locus of interest in format chrom:start-stop. Put filepath to BED file here if '--bed'.
pams_dir Directory where pam locations in the reference genome are located.
ref_genome_fasta Fasta file for reference genome used, e.g. hg38.
out Directory in which to save the output files.
cas_types Cas types you would like to analyze, comma-separated (e.g. SpCas9,SaCas9).
guide_length Guide length, commonly 20 bp, comma-separated if different for different cas types.
Options:
gene_vars Optional. 1KGP originating file to add rsID and allele frequency (AF) data to variants.
-h --help Show this screen and exit.
-c Do not take the reverse complement of the guide sequence for '-' stranded guides (when the PAM is on the 5' end).
-v Run in verbose mode (especially useful for debugging, but also for knowing status of script)
--hom Use 'homozygous' mode, personalized sgRNA design. Do not use if ref_guides is specified, they are redundant and non-compatible.
--crispor=<ref_gen> Add CRISPOR specificity scores to outputted guides. From Haeussler et al. Genome Biology 2016.
Equals directory name of reference genome (complete).
--bed Design sgRNAs for multiple regions specified in a BED file.
--max_indel=<S> Maximum size for INDELS. Must be smaller than guide_length [default: 5].
-r Return guides as RNA sequences rather than DNA sequences.
-d Return dummy guides (all --- as opposed to GGG or CCC) for variants without a PAM, e.g. when variant makes or breaks a PAM.
-C --cas-list List available cas types and exits.
--ref_guides Design guides for reference genome, ignoring variants in region.
--strict Only design allele-specific guides where the variant makes or breaks a PAM site.
"""
import pandas as pd
import numpy as np
from docopt import docopt
import os
import cas_object
from pyfaidx import Fasta
from collections import Counter
import regex
import re
from Bio import SeqIO
import subprocess
from io import StringIO
import logging
__version__ = "0.0.1"
REQUIRED_BCFTOOLS_VER = "1.5"
# COLUMN_ORDER=['chrom','variant_position','ref','alt','gRNA_ref','gRNA_alt',
# 'variant_position_in_guide','start','stop','strand','cas_type','guide_id','rsID','AF']
# get rid of annoying false positive Pandas error
pd.options.mode.chained_assignment = None
def find_spec_pams(cas_obj, python_string, orient):
# orient specifies whether this is a 3prime PAM (e.g. Cas9, PAM seq 3' of sgRNA)
# or a 5prime PAM (e.g. cpf1, PAM 5' of sgRNA)
# get sequence
sequence = python_string
# get PAM sites (the five prime three prime thing will need to be reversed for cpf1)
def get_pam_fiveprime(pam_regex, sequence):
starts = []
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.append(pam.start())
return starts
def get_pam_threeprime(pam_regex, sequence):
starts = []
for pam in regex.finditer(
pam_regex, sequence, regex.IGNORECASE, overlapped=True
):
starts.append(pam.end())
return starts
if orient == "3'":
for_starts = get_pam_fiveprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_threeprime(cas_obj.reversePam_regex(), sequence)
elif orient == "5'":
for_starts = get_pam_threeprime(cas_obj.forwardPam_regex(), sequence)
rev_starts = get_pam_fiveprime(cas_obj.reversePam_regex(), sequence)
return (for_starts, rev_starts)
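# Minimal sketch of find_spec_pams on a toy sequence. The stand-in class below only mimics the two
# regex accessors used here; the patterns are SpCas9-style NGG/CCN and the sequence is made up.
def _example_find_spec_pams():
    class _FakeCas:
        def forwardPam_regex(self):
            return r"[ACGT]GG"
        def reversePam_regex(self):
            return r"CC[ACGT]"
    fwd_starts, rev_starts = find_spec_pams(_FakeCas(), "ACGTAGGTTTCCATGCAGGA", "3'")
    return fwd_starts, rev_starts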
def het(genotype):
# if genotype == '.':
# return False
if ':' in genotype:
gen1, gen2 = re.split("/|\|", genotype.split(':')[0])
else:
gen1, gen2 = re.split("/|\|", genotype)
return gen1 != gen2
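# Quick illustration (not part of the original script) of the genotype strings het() accepts:
def _example_het():
    assert het("0|1") is True        # phased heterozygote
    assert het("1/1") is False       # unphased homozygote
    assert het("0/1:0.98") is True   # extra FORMAT fields after ':' are ignored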
def check_bcftools():
"""
Checks bcftools version, and exits the program if the version is incorrect
"""
version = (
subprocess.run(
"bcftools -v | head -1 | cut -d ' ' -f2", shell=True, stdout=subprocess.PIPE
)
.stdout.decode("utf-8")
.rstrip()
)
if float(version) >= float(REQUIRED_BCFTOOLS_VER):
logging.info(f"bcftools version {version} running")
else:
logging.error(
f"Error: bcftools must be >={REQUIRED_BCFTOOLS_VER}. Current version: {version}"
)
exit(1)
def get_alt_seq(
chrom,
pam_start,
var_pos,
ref,
alt,
guide_length,
ref_genome,
strand="positive",
var_type="near_pam",
):
chrom = chrom.replace("chr", "")
if strand == "positive":
if var_type == "near_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length - 1 : pam_start - 1
]
# alt sgRNA
alt_seq = (
ref_genome["chr" + str(chrom)][
pam_start - guide_length - len(alt) : var_pos - 1
].lower()
+ alt.upper()
+ ref_genome["chr" + str(chrom)][
var_pos: pam_start - 1
].lower()
)
elif var_type == "destroys_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length - 1 : pam_start - 1
]
# in this case, variant is destroying a PAM, rendering the alternate allele no longer a CRISPR site
            # therefore, for lack of a better solution, return a placeholder alt_seq of all G's
alt_seq = "G" * guide_length
elif var_type == "makes_pam": # this might break with indels
# reference sgRNA
ref_seq = "G" * guide_length
            # in this case, the variant creates a PAM, so the reference allele is not a CRISPR site
            # therefore, for lack of a better solution, return a placeholder ref_seq of all G's
alt_seq = ref_genome["chr" + str(chrom)][
pam_start - guide_length : pam_start
]
return ref_seq.upper(), alt_seq.upper()
elif strand == "negative":
if var_type == "near_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
# alt sgRNA
alt_seq = (
ref_genome["chr" + str(chrom)][pam_start : var_pos - 1].lower()
+ alt.upper()
+ ref_genome["chr" + str(chrom)][
var_pos : pam_start + guide_length - len(alt) + 1
].lower()
)
elif var_type == "destroys_pam":
# reference sgRNA
ref_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
# in this case, variant is destroying a PAM, rendering the alternate allele no longer a CRISPR site
            # therefore, for lack of a better solution, return a placeholder alt_seq of all G's
alt_seq = "G" * guide_length
elif var_type == "makes_pam": # this might break with indels
# reference sgRNA
ref_seq = "G" * guide_length
alt_seq = ref_genome["chr" + str(chrom)][
pam_start : pam_start + guide_length
]
return ref_seq.upper(), alt_seq.upper()
else:
logging.info("Must specify strand.")
exit(1)
def make_rev_comp(s):
"""
Generates reverse comp sequences from an input sequence.
"""
return s[::-1].translate(s[::-1].maketrans("ACGT", "TGCA"))
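# Quick illustrative check (not part of the pipeline): make_rev_comp("ACCGT")
# returns "ACGGT". Only uppercase A/C/G/T are translated by the mapping above,
# so inputs are expected to be upper-cased first.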
def get_crispor_scores(out_df, outdir, ref_gen):
guide_seqs_ref = [">ref_guide_seqs\n"]
guide_seqs_alt = [">alt_guide_seqs\n"]
for index, row in out_df.iterrows():
guide_seqs_ref.append(
row["gRNA_ref"] + "GGGNN\n"
) # the NN splits things up for CRISPOR
guide_seqs_alt.append(row["gRNA_alt"] + "GGGNN\n")
with open("ref_seqs_nosave.fa", "w") as f:
for seq in guide_seqs_ref:
f.write(seq)
with open("alt_seqs_nosave.fa", "w") as f:
for seq in guide_seqs_alt:
f.write(seq)
# get script dir
scriptsdir = os.path.join(os.path.dirname(__file__), "crispor")
run_name = os.path.join(
scriptsdir, f"crispor.py --skipAlign --noEffScores -g {ref_gen} {ref_gen}"
)
print("Running crispor.")
# error_out = os.path.join(outdir, 'crispor_error.txt')
error_out = os.path.join(os.path.dirname(outdir), "crispor_error.txt")
command = f"source activate crispor; \
python2 {run_name} ref_seqs_nosave.fa nosave_ref_scores.tsv &> {error_out};\
python2 {run_name} alt_seqs_nosave.fa nosave_alt_scores.tsv &> {error_out};\
source deactivate crispor"
subprocess.run(command, shell=True)
print("crispor done")
# subprocess.run('source deactivate crispor', shell=True)
# remove seq files
os.remove("ref_seqs_nosave.fa")
os.remove("alt_seqs_nosave.fa")
# grab scores from files outputted from CRISPOR
score_dir_ref = pd.read_csv(
"nosave_ref_scores.tsv",
sep="\t",
header=None,
names=[
"seqId",
"guideId",
"targetSeq",
"mitSpecScore",
"offtargetCount",
"targetGenomeGeneLocus",
],
)
score_dir_alt = pd.read_csv(
"nosave_alt_scores.tsv",
sep="\t",
header=None,
names=[
"seqId",
"guideId",
"targetSeq",
"mitSpecScore",
"offtargetCount",
"targetGenomeGeneLocus",
],
)
# remove original score files
os.remove("nosave_ref_scores.tsv")
os.remove("nosave_alt_scores.tsv")
# merge score info with original out_df
merge_df_ref = pd.DataFrame()
merge_df_ref["scores_ref"] = score_dir_ref["mitSpecScore"]
merge_df_ref["offtargcount_ref"] = score_dir_ref["offtargetCount"]
merge_df_ref["gRNA_ref"] = score_dir_ref["targetSeq"].str[
:-3
] # get rid of added on PAM site
merge_df_alt = pd.DataFrame()
merge_df_alt["scores_alt"] = score_dir_alt["mitSpecScore"]
merge_df_alt["offtargcount_alt"] = score_dir_alt["offtargetCount"]
merge_df_alt["gRNA_alt"] = score_dir_alt["targetSeq"].str[
:-3
] # get rid of added on PAM site
# output outdir with its new score columns
outdf = out_df.merge(merge_df_ref, how="left", on="gRNA_ref")
outdf = outdf.merge(merge_df_alt, how="left", on="gRNA_alt")
return outdf
def verify_hdf_files(gen_file, annots_file, chrom, start, stop, max_indel):
"""
Compares the hdf files, and makes sure the hdf files contain
variants in the specified range.
"""
if gen_file.shape != annots_file.shape:
annots_file = annots_file.merge(gen_file, on=['chrom','pos','ref','alt'], how='right')[annots_file.columns]
return gen_file, annots_file
else:
        # True where both ref and alt alleles are within the allowed indel size
        indel_within_limit = [
            all(len(i) <= max_indel for i in (row["ref"], row["alt"]))
            for _, row in gen_file.iterrows()
        ]
        return gen_file[indel_within_limit], annots_file[indel_within_limit]
def filter_out_N_in_PAM(outdf, cas_ins):
"""
    Using the given cas list, drop guides whose variant falls at an 'N' position of the PAM (a variant at 'N' cannot make or break the PAM).
"""
filt = []
for cas in cas_ins:
current_cas = cas_object.get_cas_enzyme(cas)
if current_cas.primeness == "5'":
PAM_sequence = current_cas.forwardPam
else:
PAM_sequence = current_cas.forwardPam[::-1]
n_index = [i for i, l in enumerate(PAM_sequence) if l == "N"]
filt += [
i
for i, row in outdf.iterrows()
if row["variant_position_in_guide"] in n_index and row["cas_type"] == cas
]
outdf = outdf.drop(filt)
return outdf
def filter_out_non_N_in_PAM(outdf, cas_ins):
"""
    Using the given cas list, drop guides whose variant falls at a non-'N' (fixed) position of the PAM.
"""
filt = []
for cas in cas_ins:
current_cas = cas_object.get_cas_enzyme(cas)
if current_cas.primeness == "5'":
PAM_sequence = current_cas.forwardPam
else:
PAM_sequence = current_cas.forwardPam[::-1]
n_index = [i for i, l in enumerate(PAM_sequence) if l != "N"]
filt += [
i
for i, row in outdf.iterrows()
if row["variant_position_in_guide"] in n_index and row["cas_type"] == cas
]
outdf = outdf.drop(filt)
return outdf
def get_allele_spec_guides(args, locus="ignore"):
"""
Outputs dataframe with allele-specific guides.
"""
# load genotypes
bcf = args["<bcf>"]
# parse locus
if locus == "ignore":
chrom, start, stop = parse_locus(args["<locus>"])
else:
chrom, start, stop = parse_locus(locus)
# get location of pams directory with stored locations of PAMs in reference genome
pams_dir = args["<pams_dir>"]
# get guide length
guide_length = int(args["<guide_length>"])
# get ref_genome
ref_genome = Fasta(args["<ref_fasta>"], as_raw=True)
# figure out annotation of VCF/BCF chromosome (i.e. starts with 'chr' or not)
vcf_chrom = str(
subprocess.Popen(
f'bcftools view -H {args["<bcf>"]} | cut -f1 | head -1',
shell=True,
stdout=subprocess.PIPE,
).communicate()[0].decode('utf-8')
)
# See if chrom contains chr
if vcf_chrom.startswith("chr"):
chrstart = True
else:
chrstart = False
chrom = norm_chr(chrom, chrstart)
# eliminates rows with missing genotypes and gets those where heterozygous
# bcl_v = f"bcftools view -g ^miss -g het -r {chrom}:{start}-{stop} -H {bcf}"
bcl_view = subprocess.Popen(f'bcftools view -g ^miss -g het -r {chrom}:{start}-{stop} {bcf} -Ou | bcftools query -f"%CHROM\t%POS\t%REF\t[%TGT]\n"',
shell=True, stdout=subprocess.PIPE)
# bcl_v = f'bcftools query -f"%CHROM\t%POS\t%REF\t[%TGT]\n"'
col_names = ["chrom","pos","ref","translated_genotype"]
# bcl_view = subprocess.Popen(f'bcftools query -f"%CHROM\t%POS\t%REF\t[%TGT]\n"', shell=True, stdin=bcf_orig.stdout, stdout=subprocess.PIPE)
bcl_view.wait()
try:
gens = pd.read_csv(
StringIO(bcl_view.communicate()[0].decode("utf-8")),
sep="\t",
header=None)
except pd.io.common.EmptyDataError:
gens = pd.DataFrame()
# load variant annotations
    var_annots = pd.read_hdf(args["<annots_file>"])
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
import string
from typing import (
Any,
Callable,
ContextManager,
List,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
import warnings
import zipfile
import numpy as np
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_datetime64_dtype,
is_datetime64tz_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
)
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas._testing.asserters import ( # noqa:F401
assert_almost_equal,
assert_attr_equal,
assert_categorical_equal,
assert_class_equal,
assert_contains_all,
assert_copy,
assert_datetime_array_equal,
assert_dict_equal,
assert_equal,
assert_extension_array_equal,
assert_frame_equal,
assert_index_equal,
assert_interval_array_equal,
assert_is_sorted,
assert_is_valid_plot_return_object,
assert_numpy_array_equal,
assert_period_array_equal,
assert_series_equal,
assert_sp_array_equal,
assert_timedelta_array_equal,
raise_assert_detail,
)
from pandas._testing.contexts import ( # noqa:F401
decompress_file,
ensure_clean,
ensure_clean_dir,
ensure_safe_environment_variables,
set_timezone,
use_numexpr,
with_csv_dialect,
)
from pandas.core.arrays import DatetimeArray, PeriodArray, TimedeltaArray, period_array
from pandas.io.common import urlopen
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA]
EMPTY_STRING_PATTERN = re.compile("^$")
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
args: Tuple[Any, ...] = (data,)
mode = "wb"
method = "write"
compress_method: Callable
if compression == "zip":
compress_method = zipfile.ZipFile
mode = "w"
args = (dest, data)
method = "writestr"
elif compression == "gzip":
compress_method = gzip.GzipFile
elif compression == "bz2":
compress_method = bz2.BZ2File
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
def randbool(size=(), p: float = 0.5):
return np.random.rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
Generate an array of byte strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
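# For example, equalContents([1, 2, 2, 3], (3, 2, 1)) is True: only the sets of
# unique elements are compared, not order or multiplicity.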
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
# for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
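# Illustrative call (sketch, using the classes imported above): box_expected([1, 2, 3], pd.Series)
# returns pd.Series([1, 2, 3]), while box_expected([1, 2, 3], pd.DataFrame) returns a
# single-row DataFrame because of the transpose applied above.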
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Others
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
* name : object dtype with string names
* id : int dtype with
* x, y : float dtype
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(np.random.randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
    data = Index(data, dtype=object)
import pandas as pd
def load_payment(payment_file: str) -> pd.DataFrame:
    payments = pd.read_csv(payment_file, sep=',')
# Series of helper classes and functions specific to this project.
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
#---------- Classes ---------- #
class DroneAttributes(object):
"""Drone attributes as a function of time, derived from log file data."""
def __init__(self, log_file_name):
self._log_file_dict = None
self._load_log(log_file_name)
@property
def log_file_dict(self):
return self._log_file_dict
@property
def airspeed(self):
array_t = np.array(self.local_velocity.index)
array_airspeed_mag = np.linalg.norm(self.local_velocity, axis=1)
df = pd.DataFrame(array_airspeed_mag, index=array_t, columns=["mag"])
df.index.name = "t"
return df
@property
def airspeed_rate(self):
df = self.airspeed
# Keep only 1 row out of each 100 rows.
        # This reduces problems of diverging derivatives if dividing by a very small time step.
df = df.iloc[::100,:]
t0 = df.index[:-1].values # All values, excluding the last one.
t1 = df.index[1:].values # All values, excluding the first one.
delta_t = t1-t0
airspeed_t0 = df.mag.iloc[0:-1].values # All values, excluding the last one.
airspeed_t1 = df.mag.iloc[1:].values # All values, excluding the first one.
delta_airspeed = airspeed_t1 - airspeed_t0
data = np.array([delta_t, delta_airspeed]).T
df = pd.DataFrame(data, index=t1, columns=["delta_t", "delta_airspeed"])
df.index.name = "t"
df = df[df.delta_t != 0] # Drop all lines where delta_t equals 0 (would cause NaN or Inf values)
df["mag"] = df["delta_airspeed"] / df["delta_t"]
df = df.drop(columns=["delta_t", "delta_airspeed"])
return df
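    # Note: airspeed_rate is a finite-difference approximation of d(airspeed)/dt
    # computed on the decimated (1-in-100) samples; rows with delta_t == 0 are
    # dropped above to avoid division by zero.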
@property
def global_position(self):
l = self._log_file_dict["GLOBAL_POSITION"]
df = pd.DataFrame(l, columns=["t","lat","lon","alt"], dtype=np.float32)
df = df.set_index("t")
return(df)
@property
def local_position(self):
l = self._log_file_dict["LOCAL_POSITION"]
df = pd.DataFrame(l, columns=["t","x","y","z"], dtype=np.float32)
df = df.set_index("t")
return(df)
@property
def local_velocity(self):
l = self._log_file_dict["LOCAL_VELOCITY"]
        df = pd.DataFrame(l, columns=["t","vx","vy","vz"], dtype=np.float32)
import pandas as pd
import numpy as np
class IdleSleepModeConverter:
def __init__(self, init_fill=None, init_is_ism=None):
if init_fill is not None:
self._current_fill = init_fill
else:
self._current_fill = pd.DataFrame()
if init_is_ism is not None:
self._before_is_ism = init_is_ism
else:
self._before_is_ism = pd.DataFrame()
self._before_10s = pd.DataFrame()
self._converted = pd.DataFrame()
def get_ism(self):
return self._ism_df
def get_closest_fill(self, st):
df = self._df.set_index(self._df.columns[0])
fills = self._is_fill[self._is_fill.index <= st]
if not fills.empty:
fill_et = fills.index[-1] + pd.Timedelta(1, unit='s')
fill_st = fill_et - pd.Timedelta(10, unit='s')
current_fill = df[(df.index >= fill_st)
& (df.index < fill_et)]
if current_fill.index[-1] - current_fill.index[0] > pd.Timedelta(9, unit='s'):
self._current_fill = current_fill
return self._current_fill
def get_last_10s(self):
df = self._df.set_index('HEADER_TIME_STAMP')
et = df.index[-1] + pd.Timedelta(1, unit='s')
st = et - pd.Timedelta(10, unit='s')
return df[(df.index >= st) & (df.index < et)].reset_index(drop=False)
def detect_ism(self):
self._current_is_ism = self._df.groupby(pd.TimeGrouper(
freq='1s', key=self._df.columns[0])).apply(self._detect_ism)
def _detect_ism(self, df):
df = df.set_index(df.columns[0])
unique_counts = df.apply(lambda col: len(col.unique()), axis=0)
return np.all(unique_counts == 1)
def detect_fill(self):
if self._before_is_ism is not None:
is_ism = pd.concat(
[self._before_is_ism.iloc[-9:, ], self._current_is_ism], axis=0)
else:
is_ism = self._current_is_ism
self._is_fill = is_ism.rolling(10).apply(
lambda df: np.all(df == False)).dropna()
self._is_fill = self._is_fill[self._is_fill.values == 1]
def reverse_ism(self):
current_is_ism = self._current_is_ism.reset_index(drop=False)
self._filled_ism_counts = current_is_ism.groupby(
pd.TimeGrouper(freq='10s', key=current_is_ism.columns[0])).apply(self._reverse_ism)
def _reverse_ism(self, is_ism):
is_ism = is_ism.set_index(is_ism.columns[0])
st = is_ism.index[0]
        et = is_ism.index[-1] + pd.Timedelta(1, unit='s')
import pandas as pd
import numpy as np
def simple_ma(close, period=10):
"""
Takes the arithmetic mean of a given set of prices over the specific number of days in the past.
Usually calculated to identify the trend direction of a stock
:param close: closing price
:param period: specified period (default: 10)
:return: simple moving average
"""
return close.rolling(window=period).mean()
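# Minimal usage sketch (synthetic prices, purely illustrative):
#   close = pd.Series([10.0, 10.5, 11.0, 10.8, 11.2])
#   simple_ma(close, period=3)
# The first two values are NaN (window not yet full); the third is
# (10.0 + 10.5 + 11.0) / 3 = 10.5.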
def exp_ma(close, period=10):
"""
Gives more weight to recent prices in an attempt to make it more responsive to new information.
:param close: closing price
:param period: specified period (default: 10)
:return: exponential moving average
"""
return close.ewm(span=period).mean()
def bollinger_bands(close, period=20):
"""
Set of trendlines plotted two standard deviations (positively and negatively)
away from a simple moving average (SMA) of a price
:param close: Closing price
:param period: specified period (default: 20)
:return:
BB_MID: simple moving average,
BB_UPPER: upper band - 2 standard deviations away from sma,
BB_LOWER: upper band - 2 standard deviations away from sma
"""
BB_MID = pd.Series(simple_ma(close, period=period), name='BB_MID')
BB_UPPER = pd.Series(BB_MID + 2 * close.rolling(window=period).std(), name='BB_UPPER')
BB_LOWER = pd.Series(BB_MID - 2 * close.rolling(window=period).std(), name='BB_LOWER')
return pd.concat([BB_MID, BB_UPPER, BB_LOWER], axis=1)
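# Sanity check on the returned frame (column names as defined above):
#   bb = bollinger_bands(close, period=20)
# (bb["BB_UPPER"] - bb["BB_MID"]) equals 2 * close.rolling(20).std() up to float
# tolerance, and the first 19 rows are NaN because the rolling window is not full.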
def volume_log(volume):
"""
Converts to log scale
:param volume: volume
:return: volume in log scale
"""
return volume.apply(np.log)
def rate_of_change(volume):
"""
    Percent change between the previous and current volume.
:param volume: volume
:return: percent change
"""
return volume.pct_change()
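# Note on the two volume transforms above: rate_of_change() leaves the first
# element NaN (there is no previous value), and volume_log() assumes strictly
# positive volumes, since np.log(0) evaluates to -inf.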
def RSI(close, period=14):
"""
Calculates Relative Strength Index.
This indicator measures the magnitude of recent price changes.
Commonly used in technical analysis to evaluate overbought or oversold conditions in the price of a stock.
A stock is considered overbought when the RSI is above 70% and oversold when it is below 30%.
:param close: (pandas Series) Closing price
:param period: (int) specified period (default=14)
:return: rsi: (pandas Series) Relative Strength Index
"""
delta = close.diff(1) # Price difference
gain, loss = delta.copy(), delta.copy()
gain[gain < 0] = 0
loss[loss > 0] = 0
# EMA for gains and losses
mean_gain = gain.ewm(alpha=1.0 / period).mean()
mean_loss = loss.ewm(alpha=1.0 / period).mean()
rs = abs(mean_gain / mean_loss)
rsi = 100 - 100 / (1 + rs)
return pd.Series(rsi, name="{}-day period".format(period))
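# Reading the result: values lie in [0, 100]; the first element is NaN (no price
# difference is available) and early values are dominated by the EWM start-up.
# As noted in the docstring, readings above 70 are commonly treated as overbought
# and below 30 as oversold.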
def MACD(close, period_fast=12, period_slow=26, signal_period=9):
"""
trend-following momentum indicator that demonstrates the relationship between two moving averages
- long-term and short-term. MACD is calculated as
    MACD = EMA(12-period) - EMA(26-period)
The signal line is a 9-day EMA of the MACD Line. As a moving average of the indicator,
it trails the MACD and makes it easier to spot MACD turns.
:param close: closing price
:param period_fast: period for short-term moving average (default: 12)
:param period_slow: period for long-term moving average (default: 26)
:param signal_period: period for moving average of the indicator (default: 9)
:return:
MACD_vals: difference between long-term and short-term sma
MACD_signal_line: moving average of the indicator
"""
EMA_short_term = exp_ma(close, period=period_fast)
EMA_long_term = exp_ma(close, period=period_slow)
MACD_vals = pd.Series(EMA_short_term - EMA_long_term, name='MACD')
MACD_signal_line = pd.Series(MACD_vals.ewm(span=signal_period).mean(), name='MACD_signal')
return pd.concat([MACD_vals, MACD_signal_line], axis=1)
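# Minimal usage sketch (column names as returned above, illustrative only):
#   macd = MACD(close)
#   bullish_cross = (macd["MACD"] > macd["MACD_signal"]) & \
#                   (macd["MACD"].shift(1) <= macd["MACD_signal"].shift(1))
# flags days where the MACD line crosses above its signal line.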
def stochastic_oscillator(close, high, low, period=14):
"""
Momentum indicator that compares a specific closing price of a security to its high-low range
over a certain period of time.
:param close: closing price
:param high: highest price
:param low: lowest price
:param period: specified period (default: 14)
:return: STOCHO: stochastic oscillator
"""
max_high = high.rolling(window=period).max()
min_low = low.rolling(window=period).min()
STOCHO = pd.Series((close - min_low) / (max_high - min_low) * 100,
name="{} period stochastic oscillator".format(period))
return STOCHO
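# The returned %K value is bounded in [0, 100]: it equals 100 when the close sits
# at the period high and 0 when it sits at the period low; the first period-1 rows
# are NaN while the rolling window fills.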
def accumulation_distribution(close, low, high, volume):
"""
    Cumulative indicator that makes use of price and volume to assess
whether an asset is being accumulated or distributed.
:param close: closing price
:param low: lowest price
:param high: highest price
:param volume: daily volume
:return: ADI: Accumulation/Distribution Indicator
"""
# Calculate current money flow volume
cmfv = (((close - low) - (high - close)) / (high - low)) * volume
ADI = cmfv.cumsum()
return ADI
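# Note: cmfv above is the close location value ((close - low) - (high - close)) / (high - low)
# scaled by volume; it is +volume when the bar closes at its high, -volume at its low,
# and ADI is its running total. Bars where high == low yield NaN for that day.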
def true_range(high, low, close):
"""
:param high: highest price
:param low: lowest price
:param close: closing price
:return: TR: true range
"""
ranges = [high - low, high - close.shift(), close.shift() - low]
    TR = pd.DataFrame(ranges)
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import io
import os
import pkgutil
import unittest
from datetime import timedelta
from unittest import TestCase
import numpy as np
import pandas as pd
from kats.consts import TimeSeriesData
from kats.utils.decomposition import TimeSeriesDecomposition
from kats.utils.simulator import Simulator
def load_data(file_name):
ROOT = "kats"
if "kats" in os.getcwd().lower():
path = "data/"
else:
path = "kats/data/"
data_object = pkgutil.get_data(ROOT, path + file_name)
return pd.read_csv(io.BytesIO(data_object), encoding="utf8")
class DecompositionTest(TestCase):
def setUp(self):
data = load_data("air_passengers.csv")
data.columns = ["time", "y"]
self.ts_data = TimeSeriesData(data)
data_nonstandard_name = data.copy()
data_nonstandard_name.columns = ["ds", "y"]
self.ts_data_nonstandard_name = TimeSeriesData(
df=data_nonstandard_name, time_col_name="ds"
)
daily_data = load_data("peyton_manning.csv")
daily_data.columns = ["time", "y"]
self.ts_data_daily = TimeSeriesData(daily_data)
DATA_multi = load_data("multivariate_anomaly_simulated_data.csv")
self.TSData_multi = TimeSeriesData(DATA_multi)
def test_asserts(self) -> None:
with self.assertRaises(ValueError):
TimeSeriesDecomposition(self.TSData_multi, "additive")
def test_defaults(self) -> None:
m1 = TimeSeriesDecomposition(self.ts_data, "additive")
output1 = m1.decomposer()
m2 = TimeSeriesDecomposition(self.ts_data, "logarithmic")
output2 = m2.decomposer()
self.assertEqual(output1["trend"].value.all(), output2["trend"].value.all())
self.assertEqual(
output1["seasonal"].value.all(), output2["seasonal"].value.all()
)
self.assertEqual(output1["rem"].value.all(), output2["rem"].value.all())
m3 = TimeSeriesDecomposition(self.ts_data, "additive", "STL2")
output3 = m3.decomposer()
self.assertEqual(output1["trend"].value.all(), output3["trend"].value.all())
self.assertEqual(
output1["seasonal"].value.all(), output3["seasonal"].value.all()
)
self.assertEqual(output1["rem"].value.all(), output3["rem"].value.all())
def test_nonstandard_time_col_name(self) -> None:
m = TimeSeriesDecomposition(self.ts_data_nonstandard_name, "multiplicative")
m.decomposer()
self.assertEqual(
# pyre-fixme[16]: `TimeSeriesDecomposition` has no attribute `results`.
m.results["trend"].time_col_name,
self.ts_data_nonstandard_name.time_col_name,
)
self.assertEqual(
m.results["seasonal"].time_col_name,
self.ts_data_nonstandard_name.time_col_name,
)
self.assertEqual(
m.results["rem"].time_col_name, self.ts_data_nonstandard_name.time_col_name
)
def test_decomposition_additive(self) -> None:
m = TimeSeriesDecomposition(self.ts_data, "additive")
output = m.decomposer()
out = pd.merge(
pd.DataFrame.from_dict(
{"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
+ output["seasonal"].value
+ output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
)
m_seasonal = TimeSeriesDecomposition(
self.ts_data, "additive", "seasonal_decompose"
)
output = m_seasonal.decomposer()
out = pd.merge(
pd.DataFrame.from_dict(
{"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
+ output["seasonal"].value
+ output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
)
m2 = TimeSeriesDecomposition(self.ts_data_daily, "additive")
output = m2.decomposer()
out2 = pd.merge(
pd.DataFrame.from_dict(
{
"time": pd.DatetimeIndex(self.ts_data_daily.time),
"y": self.ts_data_daily.value,
}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
+ output["seasonal"].value
+ output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
)
m2_seasonal = TimeSeriesDecomposition(
self.ts_data_daily, "additive", "seasonal_decompose"
)
output = m2_seasonal.decomposer()
out2 = pd.merge(
pd.DataFrame.from_dict(
{
"time": pd.DatetimeIndex(self.ts_data_daily.time),
"y": self.ts_data_daily.value,
}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
+ output["seasonal"].value
+ output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
)
def test_decomposition_multiplicative(self) -> None:
m = TimeSeriesDecomposition(self.ts_data, "multiplicative")
output = m.decomposer()
out = pd.merge(
pd.DataFrame.from_dict(
{"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
* output["seasonal"].value
* output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
)
m_seas = TimeSeriesDecomposition(
self.ts_data, "multiplicative", "seasonal_decompose"
)
output = m_seas.decomposer()
out = pd.merge(
pd.DataFrame.from_dict(
{"time": pd.DatetimeIndex(self.ts_data.time), "y": self.ts_data.value}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
* output["seasonal"].value
* output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out["y_actuals"] - out["y_decomposed"]) ** 2), 0, 5
)
m2 = TimeSeriesDecomposition(self.ts_data_daily, "multiplicative")
output = m2.decomposer()
out2 = pd.merge(
pd.DataFrame.from_dict(
{
"time": pd.DatetimeIndex(self.ts_data_daily.time),
"y": self.ts_data_daily.value,
}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
* output["seasonal"].value
* output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
)
m2_seas = TimeSeriesDecomposition(
self.ts_data_daily, "multiplicative", "seasonal_decompose"
)
output = m2_seas.decomposer()
out2 = pd.merge(
pd.DataFrame.from_dict(
{
"time": pd.DatetimeIndex(self.ts_data_daily.time),
"y": self.ts_data_daily.value,
}
),
pd.DataFrame.from_dict(
{
"time": output["trend"].time,
"y": output["trend"].value
* output["seasonal"].value
* output["rem"].value,
}
),
how="inner",
on="time",
suffixes=("_actuals", "_decomposed"),
)
self.assertAlmostEqual(
np.mean((out2["y_actuals"] - out2["y_decomposed"]) ** 2), 0, 5
)
def test_plot(self) -> None:
m = TimeSeriesDecomposition(self.ts_data, "multiplicative")
m.decomposer()
m.plot()
def test_multiplicative_assert(self) -> None:
data_new = self.ts_data.to_dataframe().copy()
data_new["y"] = -1.0 * data_new["y"]
ts_data_new = TimeSeriesData(data_new)
print(ts_data_new)
with self.assertLogs(level="ERROR"):
m = TimeSeriesDecomposition(ts_data_new, "multiplicative")
m.decomposer()
def test_new_freq(self) -> None:
DATA_multi = self.TSData_multi.to_dataframe()
df_15_min = DATA_multi[["time", "1"]]
df_15_min["time"] = list(
pd.date_range(end="2020-02-01", periods=df_15_min.shape[0], freq="25T")
)
df_15_min["time"] = df_15_min["time"].astype("str")
df_15_min.columns = ["time", "y"]
df_ts = TimeSeriesData(df_15_min)
m = TimeSeriesDecomposition(df_ts, "additive", method="STL")
m.decomposer()
m2 = TimeSeriesDecomposition(df_ts, "additive", method="seasonal_decompose")
m2.decomposer()
# class KDEResidualTranslatorTest(TestCase):
# def setUp(self) -> None:
# self._y = ts_data
# yhat = pd.DataFrame(
# {"value": self._y.value.rolling(7).mean().shift(1), "time": self._y.time}
# )
# self._yhat = TimeSeriesData(yhat)
# self._residual = self._y - self._yhat
# def test_setup(self) -> None:
# self.assertEquals(self._yhat.value.isnull().sum(), 7)
# def test_illegal_truncated_fracs(self) -> None:
# with self.assertRaises(ValueError):
# KDEResidualTranslator(-0.1, 0.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(1.1, 2.0)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.1, -0.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.1, 1.9)
# with self.assertRaises(ValueError):
# KDEResidualTranslator(0.9, 0.8)
# def test_y_yhat(self) -> None:
# trn = KDEResidualTranslator()
# trn = trn.fit(y=self._y, yhat=self._yhat)
# self._test_residual_trn(trn)
# def _test_residual(self) -> None:
# trn = KDEResidualTranslator()
# for name in self._series_names:
# dataset = self._get_dataset_for_name(name)[["y", "yhat"]]
# dataset["residual"] = dataset.yhat - dataset.y
# dataset.drop(["y", "yhat"], axis=1, inplace=True)
# trn = trn.fit(dataset)
# self._test_residual_trn(trn)
# def _test_residual_trn(self, trn: KDEResidualTranslator) -> None:
# np.testing.assert_allclose(
# np.exp(trn.predict_log_proba(residual=self._residual).value),
# trn.predict_proba(residual=self._residual).value,
# )
# proba = trn.predict_proba(residual=self._residual)
# self.assertTrue(np.all((proba.value >= 0) & (proba.value <= 1)))
# ks = ks_2samp(
# trn.kde_.sample(len(self._residual)).flatten(), self._residual.value
# )
# self.assertTrue(ks.statistic < 0.1 or ks.pvalue >= 0.2)
class SimulatorTest(TestCase):
def test_arima_sim(self) -> None:
sim = Simulator(n=10, freq="MS", start=pd.to_datetime("2011-01-01 00:00:00"))
np.random.seed(100)
ts = sim.arima_sim(ar=[0.1, 0.05], ma=[0.04, 0.1], d=1)
expected_value = pd.Series(
[
0.797342,
1.494317,
1.608064,
1.186103,
2.147635,
1.772615,
0.750320,
2.159774,
3.744138,
3.944730,
]
)
self.assertEqual(True, (ts.value - expected_value).all())
self.assertEqual(len(ts.time), 10)
def test_stl_sim_additive(self) -> None:
# Create a STL-based simulated object
sim = Simulator(n=100, freq="1D", start=pd.to_datetime("2011-01-01"))
np.random.seed(614)
sim.add_trend(magnitude=10)
sim.add_seasonality(5, period=timedelta(days=7))
sim.add_noise(magnitude=2)
sim_ts = sim.stl_sim()
# Compare the obtained simulated time series to
# the original simulated data
generator1 = Simulator(n=100, freq="D", start="2011-01-01")
generator1.add_trend(magnitude=10)
np.random.seed(614)
generator1.add_seasonality(magnitude=5, period=timedelta(days=7))
generator1.add_noise(magnitude=2)
gen_ts_series = generator1.stl_sim()
# pyre-fixme[16]: `bool` has no attribute `all`.
self.assertEqual(True, (gen_ts_series.value == sim_ts.value).all())
self.assertEqual(True, (gen_ts_series.time == sim_ts.time).all())
def test_stl_sim_multiplicative(self) -> None:
# Create a STL-based simulated object
        sim = Simulator(n=100, freq="1D", start=pd.to_datetime("2011-01-01"))
"""Electric grid models module."""
import cvxpy as cp
import itertools
from multimethod import multimethod
import natsort
import numpy as np
import opendssdirect
import pandas as pd
import scipy.sparse as sp
import scipy.sparse.linalg
import typing
import mesmo.config
import mesmo.data_interface
import mesmo.utils
logger = mesmo.config.get_logger(__name__)
class ElectricGridModel(mesmo.utils.ObjectBase):
"""Electric grid model object.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
timesteps (pd.Index): Index set of time steps of the current scenario. This is needed for optimization problem
definitions within linear electric grid models (see ``LinearElectricGridModel``).
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
is_single_phase_equivalent (bool): Singe-phase-equivalent modelling flag. If true, electric grid is modelled
as single-phase-equivalent of three-phase balanced system.
"""
timesteps: pd.Index
phases: pd.Index
node_names: pd.Index
node_types: pd.Index
line_names: pd.Index
transformer_names: pd.Index
branch_names: pd.Index
branch_types: pd.Index
der_names: pd.Index
der_types: pd.Index
nodes: pd.Index
branches: pd.Index
lines: pd.Index
transformers: pd.Index
ders: pd.Index
node_voltage_vector_reference: np.ndarray
branch_power_vector_magnitude_reference: np.ndarray
der_power_vector_reference: np.ndarray
is_single_phase_equivalent: bool
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# Process overhead line type definitions.
# - This is implemented as direct modification on the electric grid data object and therefore done first.
electric_grid_data = self.process_line_types_overhead(electric_grid_data)
# Obtain index set for time steps.
# - This is needed for optimization problem definitions within linear electric grid models.
self.timesteps = electric_grid_data.scenario_data.timesteps
# Obtain index sets for phases / node names / node types / line names / transformer names /
# branch types / DER names.
self.phases = (
pd.Index(
np.unique(np.concatenate(
electric_grid_data.electric_grid_nodes.apply(
mesmo.utils.get_element_phases_array,
axis=1
).values
))
)
)
self.node_names = pd.Index(electric_grid_data.electric_grid_nodes['node_name'])
self.node_types = pd.Index(['source', 'no_source'])
self.line_names = pd.Index(electric_grid_data.electric_grid_lines['line_name'])
self.transformer_names = pd.Index(electric_grid_data.electric_grid_transformers['transformer_name'])
self.branch_types = pd.Index(['line', 'transformer'])
self.der_names = pd.Index(electric_grid_data.electric_grid_ders['der_name'])
self.der_types = pd.Index(electric_grid_data.electric_grid_ders['der_type'].unique())
# Obtain nodes index set, i.e., collection of all phases of all nodes
# for generating indexing functions for the admittance matrix.
# - The admittance matrix has one entry for each phase of each node in both dimensions.
# - There cannot be "empty" dimensions for missing phases of nodes, because the matrix would become singular.
# - Therefore the admittance matrix must have the exact number of existing phases of all nodes.
node_dimension = (
int(electric_grid_data.electric_grid_nodes.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.nodes = (
pd.DataFrame(
None,
index=range(node_dimension),
columns=[
'node_type',
'node_name',
'phase'
]
)
)
# Fill `node_name`.
self.nodes['node_name'] = (
pd.concat([
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1,
'node_name'
],
electric_grid_data.electric_grid_nodes.loc[
electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1,
'node_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.nodes['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_nodes['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_nodes['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_nodes['is_phase_3_connected'] == 1))
])
)
# Fill `node_type`.
self.nodes['node_type'] = 'no_source'
# Set `node_type` for source node.
self.nodes.loc[
self.nodes['node_name'] == (electric_grid_data.electric_grid['source_node_name']),
'node_type'
] = 'source'
# Sort by `node_name`.
self.nodes = (
self.nodes.reindex(index=natsort.order_by_index(
self.nodes.index,
natsort.index_natsorted(self.nodes.loc[:, 'node_name'])
))
)
self.nodes = pd.MultiIndex.from_frame(self.nodes)
# Obtain branches index set, i.e., collection of phases of all branches
# for generating indexing functions for the branch admittance matrices.
# - Branches consider all power delivery elements, i.e., lines as well as transformers.
# - The second dimension of the branch admittance matrices is the number of phases of all nodes.
# - Transformers must have same number of phases per winding and exactly two windings.
line_dimension = (
int(electric_grid_data.electric_grid_lines.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
transformer_dimension = (
int(electric_grid_data.electric_grid_transformers.loc[
:,
[
'is_phase_1_connected',
'is_phase_2_connected',
'is_phase_3_connected'
]
].sum().sum())
)
self.branches = (
pd.DataFrame(
None,
index=range(line_dimension + transformer_dimension),
columns=[
'branch_type',
'branch_name',
'phase'
]
)
)
# Fill `branch_name`.
self.branches['branch_name'] = (
pd.concat([
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_lines.loc[
electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1,
'line_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1,
'transformer_name'
],
electric_grid_data.electric_grid_transformers.loc[
electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1,
'transformer_name'
]
], ignore_index=True)
)
# Fill `phase`.
self.branches['phase'] = (
np.concatenate([
np.repeat(1, sum(electric_grid_data.electric_grid_lines['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_lines['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_lines['is_phase_3_connected'] == 1)),
np.repeat(1, sum(electric_grid_data.electric_grid_transformers['is_phase_1_connected'] == 1)),
np.repeat(2, sum(electric_grid_data.electric_grid_transformers['is_phase_2_connected'] == 1)),
np.repeat(3, sum(electric_grid_data.electric_grid_transformers['is_phase_3_connected'] == 1))
])
)
# Fill `branch_type`.
self.branches['branch_type'] = (
np.concatenate([
np.repeat('line', line_dimension),
np.repeat('transformer', transformer_dimension)
])
)
# Sort by `branch_type` / `branch_name`.
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_name'])
))
)
self.branches = (
self.branches.reindex(index=natsort.order_by_index(
self.branches.index,
natsort.index_natsorted(self.branches.loc[:, 'branch_type'])
))
)
self.branches = pd.MultiIndex.from_frame(self.branches)
# Obtain index sets for lines / transformers corresponding to branches.
self.lines = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='line')
]
)
self.transformers = (
self.branches[
mesmo.utils.get_index(self.branches, raise_empty_index_error=False, branch_type='transformer')
]
)
# Obtain index set for DERs.
self.ders = pd.MultiIndex.from_frame(electric_grid_data.electric_grid_ders[['der_type', 'der_name']])
# Obtain reference / no load voltage vector.
self.node_voltage_vector_reference = np.zeros(len(self.nodes), dtype=complex)
voltage_phase_factors = (
np.array([
np.exp(0 * 1j), # Phase 1.
np.exp(- 2 * np.pi / 3 * 1j), # Phase 2.
np.exp(2 * np.pi / 3 * 1j) # Phase 3.
])
)
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain phases index & node index for positioning the node voltage in the voltage vector.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
node_index = mesmo.utils.get_index(self.nodes, node_name=node_name)
# Insert voltage into voltage vector.
self.node_voltage_vector_reference[node_index] = (
voltage_phase_factors[phases_index]
* node.at['voltage'] / np.sqrt(3)
)
# Obtain reference / rated branch power vector.
self.branch_power_vector_magnitude_reference = np.zeros(len(self.branches), dtype=float)
for line_name, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='line', branch_name=line_name)
# Insert rated power into branch power vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
line.at['maximum_current']
* electric_grid_data.electric_grid_nodes.at[line.at['node_1_name'], 'voltage']
/ np.sqrt(3)
)
for transformer_name, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain branch index.
branch_index = mesmo.utils.get_index(self.branches, branch_type='transformer', branch_name=transformer_name)
# Insert rated power into branch flow vector.
self.branch_power_vector_magnitude_reference[branch_index] = (
transformer.at['apparent_power']
/ len(branch_index) # Divide total capacity by number of phases.
)
# Obtain reference / nominal DER power vector.
self.der_power_vector_reference = (
(
electric_grid_data.electric_grid_ders.loc[:, 'active_power_nominal']
+ 1.0j * electric_grid_data.electric_grid_ders.loc[:, 'reactive_power_nominal']
).values
)
# Obtain flag for single-phase-equivalent modelling.
if electric_grid_data.electric_grid.at['is_single_phase_equivalent'] == 1:
if len(self.phases) != 1:
raise ValueError(f"Cannot model electric grid with {len(self.phases)} phase as single-phase-equivalent.")
self.is_single_phase_equivalent = True
else:
self.is_single_phase_equivalent = False
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.branch_power_vector_magnitude_reference[mesmo.utils.get_index(self.branches, branch_type='line')] *= 3
@staticmethod
def process_line_types_overhead(
electric_grid_data: mesmo.data_interface.ElectricGridData
) -> mesmo.data_interface.ElectricGridData:
"""Process overhead line type definitions in electric grid data object."""
# Process over-head line type definitions.
for line_type, line_type_data in electric_grid_data.electric_grid_line_types_overhead.iterrows():
# Obtain data shorthands.
# - Only for phases which have `conductor_id` defined in `electric_grid_line_types_overhead`.
phases = (
pd.Index([
1 if pd.notnull(line_type_data.at['phase_1_conductor_id']) else None,
2 if pd.notnull(line_type_data.at['phase_2_conductor_id']) else None,
3 if pd.notnull(line_type_data.at['phase_3_conductor_id']) else None,
'n' if pd.notnull(line_type_data.at['neutral_conductor_id']) else None
]).dropna()
)
phase_conductor_id = (
pd.Series({
1: line_type_data.at['phase_1_conductor_id'],
2: line_type_data.at['phase_2_conductor_id'],
3: line_type_data.at['phase_3_conductor_id'],
'n': line_type_data.at['neutral_conductor_id']
}).loc[phases]
)
phase_y = (
pd.Series({
1: line_type_data.at['phase_1_y'],
2: line_type_data.at['phase_2_y'],
3: line_type_data.at['phase_3_y'],
'n': line_type_data.at['neutral_y']
}).loc[phases]
)
phase_xy = (
pd.Series({
1: np.array([line_type_data.at['phase_1_x'], line_type_data.at['phase_1_y']]),
2: np.array([line_type_data.at['phase_2_x'], line_type_data.at['phase_2_y']]),
3: np.array([line_type_data.at['phase_3_x'], line_type_data.at['phase_3_y']]),
'n': np.array([line_type_data.at['neutral_x'], line_type_data.at['neutral_y']])
}).loc[phases]
)
phase_conductor_diameter = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_diameter'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_geometric_mean_radius = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_geometric_mean_radius'
]
for phase in phases
], index=phases)
* 1e-3 # mm to m.
)
phase_conductor_resistance = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_resistance'
]
for phase in phases
], index=phases)
)
phase_conductor_maximum_current = (
pd.Series([
electric_grid_data.electric_grid_line_types_overhead_conductors.at[
phase_conductor_id.at[phase], 'conductor_maximum_current'
]
for phase in phases
], index=phases)
)
# Obtain shorthands for neutral / non-neutral phases.
# - This is needed for Kron reduction.
phases_neutral = phases[phases.isin(['n'])]
phases_non_neutral = phases[~phases.isin(['n'])]
# Other parameter shorthands.
frequency = electric_grid_data.electric_grid.at['base_frequency'] # In Hz.
earth_resistivity = line_type_data.at['earth_resistivity'] # In Ωm.
air_permittivity = line_type_data.at['air_permittivity'] # In nF/km.
g_factor = 1e-4 # In Ω/km from 0.1609347e-3 Ω/mile from Kersting <https://doi.org/10.1201/9781315120782>.
# Obtain impedance matrix in Ω/km based on Kersting <https://doi.org/10.1201/9781315120782>.
z_matrix = pd.DataFrame(index=phases, columns=phases, dtype=complex)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
s_angle = np.pi / 2 - np.arcsin((phase_y.at[phase_row] + phase_y.at[phase_col]) / s_distance)
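                # - `d_distance` is the distance between the two conductors; `s_distance` is the distance between
                #   the first conductor and the image of the second conductor mirrored at the earth surface;
                #   `s_angle` is the corresponding angle measured from the vertical.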
# Calculate Kersting / Carson parameters.
k_factor = (
8.565e-4 * s_distance * np.sqrt(frequency / earth_resistivity)
)
p_factor = (
np.pi / 8
- (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(s_angle)
- k_factor ** 2 / 16 * np.cos(2 * s_angle) * (0.6728 + np.log(2 / k_factor))
)
q_factor = (
-0.0386
+ 0.5 * np.log(2 / k_factor)
+ (3 * np.sqrt(2)) ** -1 * k_factor * np.cos(2 * s_angle)
)
x_factor = (
2 * np.pi * frequency * g_factor
* np.log(
phase_conductor_diameter[phase_row]
/ phase_conductor_geometric_mean_radius.at[phase_row]
)
)
                # Calculate impedance according to Kersting / Carson <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
z_matrix.at[phase_row, phase_col] = (
phase_conductor_resistance.at[phase_row]
+ 4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
x_factor
+ 2 * np.pi * frequency * g_factor
* np.log(s_distance / phase_conductor_diameter[phase_row])
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
else:
z_matrix.at[phase_row, phase_col] = (
4 * np.pi * frequency * p_factor * g_factor
+ 1j * (
2 * np.pi * frequency * g_factor
* np.log(s_distance / d_distance)
+ 4 * np.pi * frequency * q_factor * g_factor
)
)
# Apply Kron reduction.
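            # - Eliminates the neutral conductor from the impedance matrix:
            #   Z_red = Z_pp - Z_pn @ Z_nn^-1 @ Z_np, with `p` denoting the non-neutral phases and `n` the neutral.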
z_matrix = (
pd.DataFrame(
(
z_matrix.loc[phases_non_neutral, phases_non_neutral].values
- z_matrix.loc[phases_non_neutral, phases_neutral].values
@ z_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ z_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
# Obtain potentials matrix in km/nF based on Kersting <https://doi.org/10.1201/9781315120782>.
p_matrix = pd.DataFrame(index=phases, columns=phases, dtype=float)
for phase_row, phase_col in itertools.product(phases, phases):
# Calculate geometric parameters.
d_distance = np.linalg.norm(phase_xy.at[phase_row] - phase_xy.at[phase_col])
s_distance = np.linalg.norm(phase_xy.at[phase_row] - np.array([1, -1]) * phase_xy.at[phase_col])
# Calculate potential according to Kersting <https://doi.org/10.1201/9781315120782>.
if phase_row == phase_col:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / phase_conductor_diameter.at[phase_row])
)
else:
p_matrix.at[phase_row, phase_col] = (
1 / (2 * np.pi * air_permittivity)
* np.log(s_distance / d_distance)
)
# Apply Kron reduction.
p_matrix = (
pd.DataFrame(
(
p_matrix.loc[phases_non_neutral, phases_non_neutral].values
- p_matrix.loc[phases_non_neutral, phases_neutral].values
@ p_matrix.loc[phases_neutral, phases_neutral].values ** -1 # Inverse of scalar value.
@ p_matrix.loc[phases_neutral, phases_non_neutral].values
),
index=phases_non_neutral,
columns=phases_non_neutral
)
)
# Obtain capacitance matrix in nF/km.
c_matrix = pd.DataFrame(np.linalg.inv(p_matrix), index=phases_non_neutral, columns=phases_non_neutral)
# Obtain final element matrices.
resistance_matrix = z_matrix.apply(np.real) # In Ω/km.
reactance_matrix = z_matrix.apply(np.imag) # In Ω/km.
capacitance_matrix = c_matrix # In nF/km.
# Add to line type matrices definition.
for phase_row in phases_non_neutral:
for phase_col in phases_non_neutral[phases_non_neutral <= phase_row]:
electric_grid_data.electric_grid_line_types_matrices = (
electric_grid_data.electric_grid_line_types_matrices.append(
pd.Series({
'line_type': line_type,
'row': phase_row,
'col': phase_col,
'resistance': resistance_matrix.at[phase_row, phase_col],
'reactance': reactance_matrix.at[phase_row, phase_col],
'capacitance': capacitance_matrix.at[phase_row, phase_col]
}),
ignore_index=True
)
)
# Obtain number of phases.
electric_grid_data.electric_grid_line_types.loc[line_type, 'n_phases'] = len(phases_non_neutral)
# Obtain maximum current.
# TODO: Validate this.
electric_grid_data.electric_grid_line_types.loc[line_type, 'maximum_current'] = (
phase_conductor_maximum_current.loc[phases_non_neutral].mean()
)
return electric_grid_data
class ElectricGridModelDefault(ElectricGridModel):
"""Electric grid model object consisting of the index sets for node names / branch names / der names / phases /
node types / branch types, the nodal admittance / transformation matrices, branch admittance /
incidence matrices and DER incidence matrices.
:syntax:
- ``ElectricGridModelDefault(electric_grid_data)``: Instantiate electric grid model for given
`electric_grid_data`.
- ``ElectricGridModelDefault(scenario_name)``: Instantiate electric grid model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
Arguments:
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
scenario_name (str): MESMO scenario name.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, the electric grid is
            modelled as a single-phase equivalent of a balanced three-phase system.
node_admittance_matrix (sp.spmatrix): Nodal admittance matrix.
node_transformation_matrix (sp.spmatrix): Nodal transformation matrix.
branch_admittance_1_matrix (sp.spmatrix): Branch admittance matrix in the 'from' direction.
branch_admittance_2_matrix (sp.spmatrix): Branch admittance matrix in the 'to' direction.
branch_incidence_1_matrix (sp.spmatrix): Branch incidence matrix in the 'from' direction.
branch_incidence_2_matrix (sp.spmatrix): Branch incidence matrix in the 'to' direction.
der_incidence_wye_matrix (sp.spmatrix): Load incidence matrix for 'wye' DERs.
der_incidence_delta_matrix (sp.spmatrix): Load incidence matrix for 'delta' DERs.
node_admittance_matrix_no_source (sp.spmatrix): Nodal admittance matrix from no-source to no-source nodes.
        node_admittance_matrix_source_to_no_source (sp.spmatrix): Nodal admittance matrix from source to no-source nodes.
        node_transformation_matrix_no_source (sp.spmatrix): Nodal transformation matrix for no-source nodes.
der_incidence_wye_matrix_no_source (sp.spmatrix): Incidence matrix from wye-conn. DERs to no-source nodes.
der_incidence_delta_matrix_no_source (sp.spmatrix): Incidence matrix from delta-conn. DERs to no-source nodes.
        node_voltage_vector_reference_no_source (np.ndarray): Nodal reference voltage vector for no-source nodes.
        node_voltage_vector_reference_source (np.ndarray): Nodal reference voltage vector for source nodes.
node_admittance_matrix_no_source_inverse (sp.spmatrix): Inverse of no-source nodal admittance matrix.
"""
node_admittance_matrix: sp.spmatrix
node_transformation_matrix: sp.spmatrix
branch_admittance_1_matrix: sp.spmatrix
branch_admittance_2_matrix: sp.spmatrix
branch_incidence_1_matrix: sp.spmatrix
branch_incidence_2_matrix: sp.spmatrix
der_incidence_wye_matrix: sp.spmatrix
der_incidence_delta_matrix: sp.spmatrix
node_admittance_matrix_no_source: sp.spmatrix
node_admittance_matrix_source_to_no_source: sp.spmatrix
node_transformation_matrix_no_source: sp.spmatrix
der_incidence_wye_matrix_no_source: sp.spmatrix
der_incidence_delta_matrix_no_source: sp.spmatrix
    node_voltage_vector_reference_no_source: np.ndarray
    node_voltage_vector_reference_source: np.ndarray
node_admittance_matrix_no_source_inverse: sp.spmatrix
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = mesmo.data_interface.ElectricGridData(scenario_name)
# Instantiate electric grid model object.
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData,
):
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Define sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrix entries.
self.node_admittance_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=complex)
)
self.node_transformation_matrix = (
sp.dok_matrix((len(self.nodes), len(self.nodes)), dtype=int)
)
self.branch_admittance_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_admittance_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=complex)
)
self.branch_incidence_1_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.branch_incidence_2_matrix = (
sp.dok_matrix((len(self.branches), len(self.nodes)), dtype=int)
)
self.der_incidence_wye_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
self.der_incidence_delta_matrix = (
sp.dok_matrix((len(self.nodes), len(self.ders)), dtype=float)
)
# Add lines to admittance, transformation and incidence matrices.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(line)
# Obtain line resistance / reactance / capacitance matrix entries for the line.
matrices_index = (
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type'] == line['line_type']
)
resistance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'resistance'].values
)
reactance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'reactance'].values
)
capacitance_matrix = (
electric_grid_data.electric_grid_line_types_matrices.loc[matrices_index, 'capacitance'].values
)
# Obtain the full line resistance and reactance matrices.
# Data only contains upper half entries.
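            # - The index map below expands the up to 6 stored unique entries of the symmetric matrix into the
            #   full matrix, e.g. the 4th stored entry (zero-based index 3) fills both positions (1, 3) and (3, 1).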
matrices_full_index = (
np.array([
[1, 2, 4],
[2, 3, 5],
[4, 5, 6]
]) - 1
)
matrices_full_index = (
matrices_full_index[:len(phases_vector), :len(phases_vector)]
)
resistance_matrix = resistance_matrix[matrices_full_index]
reactance_matrix = reactance_matrix[matrices_full_index]
capacitance_matrix = capacitance_matrix[matrices_full_index]
# Construct line series admittance matrix.
series_admittance_matrix = (
np.linalg.inv(
(resistance_matrix + 1j * reactance_matrix)
* line['length']
)
)
# Construct line shunt admittance.
            # Note: Shunt susceptance B = 2π * f * C, with 1e-9 converting nF to F; the 0.5 factor assigns half of
            # the total shunt admittance to each line end.
# TODO: Check line shunt admittance.
shunt_admittance_matrix = (
capacitance_matrix
* 2 * np.pi * electric_grid_data.electric_grid.at['base_frequency'] * 1e-9
* 0.5j
* line['length']
)
# Construct line element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
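            # - Π-equivalent two-port model: the diagonal blocks add the series admittance and half of the total
            #   shunt admittance (the 0.5 factor is already included in `shunt_admittance_matrix` above); the
            #   off-diagonal blocks are the negated series admittance.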
admittance_matrix_11 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
admittance_matrix_12 = (
- series_admittance_matrix
)
admittance_matrix_21 = (
- series_admittance_matrix
)
admittance_matrix_22 = (
series_admittance_matrix
+ shunt_admittance_matrix
)
# Obtain indexes for positioning the line element matrices
# in the full admittance matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=line['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='line',
branch_name=line['line_name']
)
)
# Add line element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add line element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Add transformers to admittance, transformation and incidence matrices.
# - Note: This setup only works for transformers with exactly two windings
# and identical number of phases at each winding / side.
# Define transformer factor matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
transformer_factors_1 = (
np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
)
transformer_factors_2 = (
1 / 3
* np.array([
[2, -1, -1],
[-1, 2, -1],
[-1, -1, 2]
])
)
transformer_factors_3 = (
1 / np.sqrt(3)
* np.array([
[-1, 1, 0],
[0, -1, 1],
[1, 0, -1]
])
)
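        # - These correspond to the per-unit factor matrices of the referenced two-winding transformer model
        #   (often denoted Y_I, Y_II, Y_III): identity coupling for wye windings, the delta-related factor and
        #   the wye-delta coupling factor, respectively.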
# Add transformers to admittance matrix.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Raise error if transformer nominal power is not valid.
if not (transformer.at['apparent_power'] > 0):
raise ValueError(
f"At transformer '{transformer.at['transformer_name']}', "
f"found invalid value for `apparent_power`: {transformer.at['apparent_power']}`"
)
# Calculate transformer admittance.
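            # - Series admittance in S referred to the secondary side: y = 1 / (z_pu * Z_base), with
            #   Z_base = V_2^2 / S_rated and the per-unit impedance assembled from the resistance / reactance
            #   percentages. (The factor 2 on the resistance presumably accounts for `resistance_percentage`
            #   being defined per winding.)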
admittance = (
(
(
2 * transformer.at['resistance_percentage'] / 100
+ 1j * transformer.at['reactance_percentage'] / 100
)
* (
electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage'] ** 2
/ transformer.at['apparent_power']
)
) ** -1
)
# Calculate turn ratio.
turn_ratio = (
(
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_1_name'], 'voltage']
)
/ (
1.0 # TODO: Replace `1.0` with actual tap position.
* electric_grid_data.electric_grid_nodes.at[transformer.at['node_2_name'], 'voltage']
)
)
# Construct transformer element admittance matrices according to:
# https://doi.org/10.1109/TPWRS.2017.2728618
if transformer.at['connection'] == "wye-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_1
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "delta-wye":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_1
)
elif transformer.at['connection'] == "wye-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_1
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* - 1 * transformer_factors_3
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* - 1 * np.transpose(transformer_factors_3)
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
elif transformer.at['connection'] == "delta-delta":
admittance_matrix_11 = (
admittance
* transformer_factors_2
/ turn_ratio ** 2
)
admittance_matrix_12 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_21 = (
- 1 * admittance
* transformer_factors_2
/ turn_ratio
)
admittance_matrix_22 = (
admittance
* transformer_factors_2
)
else:
raise ValueError(f"Unknown transformer type: {transformer.at['connection']}")
# Obtain phases vector.
phases_vector = mesmo.utils.get_element_phases_array(transformer)
# Obtain element admittance matrices for correct phases.
admittance_matrix_11 = (
admittance_matrix_11[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_12 = (
admittance_matrix_12[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_21 = (
admittance_matrix_21[np.ix_(phases_vector - 1, phases_vector - 1)]
)
admittance_matrix_22 = (
admittance_matrix_22[np.ix_(phases_vector - 1, phases_vector - 1)]
)
# Obtain indexes for positioning the transformer element
# matrices in the full matrices.
node_index_1 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_1_name'],
phase=phases_vector
)
)
node_index_2 = (
mesmo.utils.get_index(
self.nodes,
node_name=transformer.at['node_2_name'],
phase=phases_vector
)
)
branch_index = (
mesmo.utils.get_index(
self.branches,
branch_type='transformer',
branch_name=transformer['transformer_name']
)
)
# Add transformer element matrices to the nodal admittance matrix.
self.node_admittance_matrix[np.ix_(node_index_1, node_index_1)] += admittance_matrix_11
self.node_admittance_matrix[np.ix_(node_index_1, node_index_2)] += admittance_matrix_12
self.node_admittance_matrix[np.ix_(node_index_2, node_index_1)] += admittance_matrix_21
self.node_admittance_matrix[np.ix_(node_index_2, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch admittance matrices.
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_11
self.branch_admittance_1_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_12
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_1)] += admittance_matrix_21
self.branch_admittance_2_matrix[np.ix_(branch_index, node_index_2)] += admittance_matrix_22
# Add transformer element matrices to the branch incidence matrices.
self.branch_incidence_1_matrix[np.ix_(branch_index, node_index_1)] += (
np.identity(len(branch_index), dtype=int)
)
self.branch_incidence_2_matrix[np.ix_(branch_index, node_index_2)] += (
np.identity(len(branch_index), dtype=int)
)
# Define transformation matrix according to:
# https://doi.org/10.1109/TPWRS.2018.2823277
transformation_entries = (
np.array([
[1, -1, 0],
[0, 1, -1],
[-1, 0, 1]
])
)
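        # - The transformation matrix maps phase-to-ground to phase-to-phase (delta) quantities,
        #   e.g. V_12 = V_1 - V_2 for the first row.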
for node_name, node in electric_grid_data.electric_grid_nodes.iterrows():
# Obtain node phases index.
phases_index = mesmo.utils.get_element_phases_array(node) - 1
# Construct node transformation matrix.
transformation_matrix = transformation_entries[np.ix_(phases_index, phases_index)]
# Obtain index for positioning node transformation matrix in full transformation matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=node['node_name']
)
)
# Add node transformation matrix to full transformation matrix.
self.node_transformation_matrix[np.ix_(node_index, node_index)] = transformation_matrix
# Add DERs to der incidence matrix.
for der_name, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain der connection type.
connection = der['connection']
# Obtain indexes for positioning the DER in the incidence matrix.
node_index = (
mesmo.utils.get_index(
self.nodes,
node_name=der['node_name'],
phase=mesmo.utils.get_element_phases_array(der)
)
)
der_index = (
mesmo.utils.get_index(
self.ders,
der_name=der['der_name']
)
)
if connection == "wye":
# Define incidence matrix entries.
# - Wye ders are represented as balanced ders across all
# their connected phases.
incidence_matrix = (
np.ones((len(node_index), 1), dtype=float)
/ len(node_index)
)
self.der_incidence_wye_matrix[np.ix_(node_index, der_index)] = incidence_matrix
elif connection == "delta":
# Obtain phases of the delta der.
phases_list = mesmo.utils.get_element_phases_array(der).tolist()
# Select connection node based on phase arrangement of delta der.
# TODO: Why no multi-phase delta DERs?
# - Delta DERs must be single-phase.
if phases_list in ([1, 2], [2, 3]):
node_index = [node_index[0]]
elif phases_list == [1, 3]:
node_index = [node_index[1]]
else:
raise ValueError(f"Unknown delta phase arrangement: {phases_list}")
# Define incidence matrix entry.
# - Delta ders are assumed to be single-phase.
incidence_matrix = np.array([1])
self.der_incidence_delta_matrix[np.ix_(node_index, der_index)] = incidence_matrix
else:
raise ValueError(f"Unknown der connection type: {connection}")
# Make modifications for single-phase-equivalent modelling.
if self.is_single_phase_equivalent:
self.der_incidence_wye_matrix /= 3
# Note that there won't be any delta loads in the single-phase-equivalent grid.
# Convert sparse matrices for nodal admittance, nodal transformation,
# branch admittance, branch incidence and der incidence matrices.
# - Converting from DOK to CSR format for more efficient calculations
# according to <https://docs.scipy.org/doc/scipy/reference/sparse.html>.
self.node_admittance_matrix = self.node_admittance_matrix.tocsr()
self.node_transformation_matrix = self.node_transformation_matrix.tocsr()
self.branch_admittance_1_matrix = self.branch_admittance_1_matrix.tocsr()
self.branch_admittance_2_matrix = self.branch_admittance_2_matrix.tocsr()
self.branch_incidence_1_matrix = self.branch_incidence_1_matrix.tocsr()
self.branch_incidence_2_matrix = self.branch_incidence_2_matrix.tocsr()
self.der_incidence_wye_matrix = self.der_incidence_wye_matrix.tocsr()
self.der_incidence_delta_matrix = self.der_incidence_delta_matrix.tocsr()
# Define shorthands for no-source variables.
# TODO: Add in class documentation.
# TODO: Replace local variables in power flow / linear models.
self.node_admittance_matrix_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.node_admittance_matrix_source_to_no_source = (
self.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='source')
)]
)
self.node_transformation_matrix_no_source = (
self.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
mesmo.utils.get_index(self.nodes, node_type='no_source')
)]
)
self.der_incidence_wye_matrix_no_source = (
self.der_incidence_wye_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.der_incidence_delta_matrix_no_source = (
self.der_incidence_delta_matrix[
np.ix_(
mesmo.utils.get_index(self.nodes, node_type='no_source'),
range(len(self.ders))
)
]
)
self.node_voltage_vector_reference_no_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='no_source')
]
)
self.node_voltage_vector_reference_source = (
self.node_voltage_vector_reference[
mesmo.utils.get_index(self.nodes, node_type='source')
]
)
# Calculate inverse of no-source node admittance matrix.
# - Raise error if not invertible.
# - Only checking invertibility of no-source node admittance matrix, because full node admittance matrix may
# be non-invertible, e.g. zero entries when connecting a multi-phase line at three-phase source node.
try:
self.node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(self.node_admittance_matrix_no_source.tocsc())
)
assert not np.isnan(self.node_admittance_matrix_no_source_inverse.data).any()
except (RuntimeError, AssertionError) as exception:
raise (
ValueError(f"Node admittance matrix could not be inverted. Please check electric grid definition.")
) from exception
class ElectricGridModelOpenDSS(ElectricGridModel):
"""OpenDSS electric grid model object.
    - Instantiate OpenDSS circuit by generating and running OpenDSS commands corresponding to given `electric_grid_data`,
utilizing the `OpenDSSDirect.py` package.
- The OpenDSS circuit can be accessed with the API of
`OpenDSSDirect.py`: http://dss-extensions.org/OpenDSSDirect.py/opendssdirect.html
- Due to dependency on `OpenDSSDirect.py`, creating multiple objects of this type may result in erroneous behavior.
:syntax:
- ``ElectricGridModelOpenDSS(electric_grid_data)``: Initialize OpenDSS circuit model for given
`electric_grid_data`.
    - ``ElectricGridModelOpenDSS(scenario_name)``: Initialize OpenDSS circuit model for given `scenario_name`.
The required `electric_grid_data` is obtained from the database.
    Arguments:
scenario_name (str): MESMO scenario name.
electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object.
Attributes:
phases (pd.Index): Index set of the phases.
node_names (pd.Index): Index set of the node names.
node_types (pd.Index): Index set of the node types.
line_names (pd.Index): Index set of the line names.
transformer_names (pd.Index): Index set of the transformer names.
branch_names (pd.Index): Index set of the branch names, i.e., all line names and transformer names.
branch_types (pd.Index): Index set of the branch types.
der_names (pd.Index): Index set of the DER names.
der_types (pd.Index): Index set of the DER types.
nodes (pd.Index): Multi-level / tuple index set of the node types, node names and phases
corresponding to the dimension of the node admittance matrices.
branches (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
corresponding to the dimension of the branch admittance matrices.
lines (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the lines only.
transformers (pd.Index): Multi-level / tuple index set of the branch types, branch names and phases
for the transformers only.
ders (pd.Index): Index set of the DER names, corresponding to the dimension of the DER power vector.
node_voltage_vector_reference (np.ndarray): Node voltage reference / no load vector.
branch_power_vector_magnitude_reference (np.ndarray): Branch power reference / rated power vector.
der_power_vector_reference (np.ndarray): DER power reference / nominal power vector.
        is_single_phase_equivalent (bool): Single-phase-equivalent modelling flag. If true, the electric grid is
            modelled as a single-phase equivalent of a balanced three-phase system.
circuit_name (str): Circuit name, stored for validation that the correct OpenDSS model is being accessed.
        electric_grid_data (mesmo.data_interface.ElectricGridData): Electric grid data object, stored for
possible reinitialization of the OpenDSS model.
"""
circuit_name: str
electric_grid_data: mesmo.data_interface.ElectricGridData
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid data.
electric_grid_data = (
mesmo.data_interface.ElectricGridData(scenario_name)
)
self.__init__(
electric_grid_data
)
@multimethod
def __init__(
self,
electric_grid_data: mesmo.data_interface.ElectricGridData
):
# TODO: Add reset method to ensure correct circuit model is set in OpenDSS when handling multiple models.
# Obtain electric grid indexes, via `ElectricGridModel.__init__()`.
super().__init__(electric_grid_data)
# Obtain circuit name.
self.circuit_name = electric_grid_data.electric_grid.at['electric_grid_name']
# Store electric grid data.
self.electric_grid_data = electric_grid_data
# Clear OpenDSS.
opendss_command_string = "clear"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain source voltage.
source_voltage = (
electric_grid_data.electric_grid_nodes.at[
electric_grid_data.electric_grid.at['source_node_name'],
'voltage'
]
)
# Adjust source voltage for single-phase, non-single-phase-equivalent modelling.
if (len(self.phases) == 1) and not self.is_single_phase_equivalent:
source_voltage /= np.sqrt(3)
# Add circuit info to OpenDSS command string.
opendss_command_string = (
f"set defaultbasefrequency={electric_grid_data.electric_grid.at['base_frequency']}"
+ f"\nnew circuit.{self.circuit_name}"
+ f" phases={len(self.phases)}"
+ f" bus1={electric_grid_data.electric_grid.at['source_node_name']}"
+ f" basekv={source_voltage / 1000}"
+ f" mvasc3=9999999999 9999999999" # Set near-infinite power limit for source node.
)
# Create circuit in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define line codes.
for line_type_index, line_type in electric_grid_data.electric_grid_line_types.iterrows():
# Obtain line resistance and reactance matrix entries for the line.
matrices = (
electric_grid_data.electric_grid_line_types_matrices.loc[
(
electric_grid_data.electric_grid_line_types_matrices.loc[:, 'line_type']
== line_type.at['line_type']
),
['resistance', 'reactance', 'capacitance']
]
)
# Obtain number of phases.
            # - Only define line types for as many phases as needed for the current grid.
n_phases = min(line_type.at['n_phases'], len(self.phases))
# Add line type name and number of phases to OpenDSS command string.
opendss_command_string = (
f"new linecode.{line_type.at['line_type']}"
+ f" nphases={n_phases}"
)
# Add resistance and reactance matrix entries to OpenDSS command string,
# with formatting depending on number of phases.
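            # - OpenDSS expects the lower-triangular entries of the symmetric matrices row by row, separated
            #   by '|', e.g. rmatrix = [r11 | r21 r22 | r31 r32 r33].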
if n_phases == 1:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 2:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ " xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ " cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
elif n_phases == 3:
opendss_command_string += (
" rmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'resistance'])
+ f" xmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'reactance'])
+ f" cmatrix = "
+ "[{:.8f} | {:.8f} {:.8f} | {:.8f} {:.8f} {:.8f}]".format(*matrices.loc[:, 'capacitance'])
)
# Create line code in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define lines.
for line_index, line in electric_grid_data.electric_grid_lines.iterrows():
# Obtain number of phases for the line.
n_phases = len(mesmo.utils.get_element_phases_array(line))
# Add line name, phases, node connections, line type and length
# to OpenDSS command string.
opendss_command_string = (
f"new line.{line['line_name']}"
+ f" phases={n_phases}"
+ f" bus1={line['node_1_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" bus2={line['node_2_name']}{mesmo.utils.get_element_phases_string(line)}"
+ f" linecode={line['line_type']}"
+ f" length={line['length']}"
)
# Create line in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define transformers.
for transformer_index, transformer in electric_grid_data.electric_grid_transformers.iterrows():
# Obtain number of phases.
n_phases = len(mesmo.utils.get_element_phases_array(transformer))
# Add transformer name, number of phases / windings and reactances to OpenDSS command string.
opendss_command_string = (
f"new transformer.{transformer.at['transformer_name']}"
+ f" phases={n_phases}"
+ f" windings=2"
+ f" xscarray=[{transformer.at['reactance_percentage']}]"
)
# Add windings to OpenDSS command string.
windings = [1, 2]
for winding in windings:
# Obtain nominal voltage level for each winding.
voltage = electric_grid_data.electric_grid_nodes.at[transformer.at[f'node_{winding}_name'], 'voltage']
# Obtain node phases connection string for each winding.
connection = transformer.at['connection'].split('-')[winding - 1]
if connection == "wye":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
+ ".0" # Enforce wye-grounded connection.
)
elif connection == "delta":
node_phases_string = (
mesmo.utils.get_element_phases_string(transformer)
)
else:
raise ValueError(f"Unknown transformer connection type: {connection}")
# Add node connection, nominal voltage / power, resistance and maximum / minimum tap level
# to OpenDSS command string for each winding.
opendss_command_string += (
f" wdg={winding}"
+ f" bus={transformer.at[f'node_{winding}_name']}" + node_phases_string
+ f" conn={connection}"
+ f" kv={voltage / 1000}"
+ f" kva={transformer.at['apparent_power'] / 1000}"
+ f" %r={transformer.at['resistance_percentage']}"
+ f" maxtap="
+ f"{transformer.at['tap_maximum_voltage_per_unit']}"
+ f" mintap="
+ f"{transformer.at['tap_minimum_voltage_per_unit']}"
)
# Create transformer in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Define DERs.
# TODO: At the moment, all DERs are modelled as loads in OpenDSS.
for der_index, der in electric_grid_data.electric_grid_ders.iterrows():
# Obtain number of phases for the DER.
n_phases = len(mesmo.utils.get_element_phases_array(der))
# Obtain nominal voltage level for the DER.
voltage = electric_grid_data.electric_grid_nodes.at[der['node_name'], 'voltage']
# Convert to line-to-neutral voltage for single-phase DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/9c9e0efb/
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and not self.is_single_phase_equivalent:
voltage /= np.sqrt(3)
# Add explicit ground-phase connection for single-phase, wye DERs, according to:
# https://sourceforge.net/p/electricdss/discussion/861976/thread/d420e8fb/
# - This does not seem to make a difference if omitted, but is kept here to follow the recommendation.
# - Not needed for single-phase-equivalent modelling.
if (n_phases == 1) and (der['connection'] == 'wye') and not self.is_single_phase_equivalent:
ground_phase_string = ".0"
else:
ground_phase_string = ""
# Add node connection, model type, voltage, nominal power to OpenDSS command string.
opendss_command_string = (
f"new load.{der['der_name']}"
+ f" bus1={der['node_name']}{ground_phase_string}{mesmo.utils.get_element_phases_string(der)}"
+ f" phases={n_phases}"
+ f" conn={der['connection']}"
# All loads are modelled as constant P/Q according to:
# OpenDSS Manual April 2018, page 150, "Model"
+ f" model=1"
+ f" kv={voltage / 1000}"
+ f" kw={- der['active_power_nominal'] / 1000}"
+ f" kvar={- der['reactive_power_nominal'] / 1000}"
# Set low V_min to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vminpu"
+ f" vminpu=0.6"
# Set high V_max to avoid switching to impedance model according to:
# OpenDSS Manual April 2018, page 150, "Vmaxpu"
+ f" vmaxpu=1.4"
)
# Create DER in OpenDSS.
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Obtain voltage bases.
voltage_bases = (
np.unique(
electric_grid_data.electric_grid_nodes.loc[:, 'voltage'].values / 1000
).tolist()
)
# Set control mode and voltage bases.
opendss_command_string = (
f"set voltagebases={voltage_bases}"
+ f"\nset controlmode=off"
+ f"\ncalcvoltagebases"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Set solution mode to "single snapshot power flow" according to:
# OpenDSSComDoc, November 2016, page 1
opendss_command_string = "set mode=0"
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
class ElectricGridDEROperationResults(mesmo.utils.ResultsBase):
der_active_power_vector: pd.DataFrame
der_active_power_vector_per_unit: pd.DataFrame
der_reactive_power_vector: pd.DataFrame
der_reactive_power_vector_per_unit: pd.DataFrame
class ElectricGridOperationResults(ElectricGridDEROperationResults):
electric_grid_model: ElectricGridModel
node_voltage_magnitude_vector: pd.DataFrame
node_voltage_magnitude_vector_per_unit: pd.DataFrame
node_voltage_angle_vector: pd.DataFrame
branch_power_magnitude_vector_1: pd.DataFrame
branch_power_magnitude_vector_1_per_unit: pd.DataFrame
branch_active_power_vector_1: pd.DataFrame
branch_active_power_vector_1_per_unit: pd.DataFrame
branch_reactive_power_vector_1: pd.DataFrame
branch_reactive_power_vector_1_per_unit: pd.DataFrame
branch_power_magnitude_vector_2: pd.DataFrame
branch_power_magnitude_vector_2_per_unit: pd.DataFrame
branch_active_power_vector_2: pd.DataFrame
branch_active_power_vector_2_per_unit: pd.DataFrame
branch_reactive_power_vector_2: pd.DataFrame
branch_reactive_power_vector_2_per_unit: pd.DataFrame
loss_active: pd.DataFrame
loss_reactive: pd.DataFrame
class ElectricGridDLMPResults(mesmo.utils.ResultsBase):
electric_grid_energy_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_active_power: pd.DataFrame
electric_grid_congestion_dlmp_node_active_power: pd.DataFrame
electric_grid_loss_dlmp_node_active_power: pd.DataFrame
electric_grid_total_dlmp_node_active_power: pd.DataFrame
electric_grid_voltage_dlmp_node_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_node_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_node_reactive_power: pd.DataFrame
electric_grid_total_dlmp_node_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_active_power: pd.DataFrame
electric_grid_congestion_dlmp_der_active_power: pd.DataFrame
electric_grid_loss_dlmp_der_active_power: pd.DataFrame
electric_grid_total_dlmp_der_active_power: pd.DataFrame
electric_grid_voltage_dlmp_der_reactive_power: pd.DataFrame
electric_grid_congestion_dlmp_der_reactive_power: pd.DataFrame
electric_grid_loss_dlmp_der_reactive_power: pd.DataFrame
electric_grid_energy_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_der_reactive_power: pd.DataFrame
electric_grid_total_dlmp_price_timeseries: pd.DataFrame
class PowerFlowSolution(mesmo.utils.ObjectBase):
"""Power flow solution object consisting of DER power vector and the corresponding solution for
nodal voltage vector / branch power vector and total loss (all complex valued).
"""
der_power_vector: np.ndarray
node_voltage_vector: np.ndarray
branch_power_vector_1: np.ndarray
branch_power_vector_2: np.ndarray
loss: complex
class PowerFlowSolutionFixedPoint(PowerFlowSolution):
"""Fixed point power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelDefault(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model,
self.der_power_vector,
**kwargs
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power(
electric_grid_model,
self.node_voltage_vector
)
)
# Obtain loss solution.
self.loss = (
self.get_loss(
electric_grid_model,
self.node_voltage_vector
)
)
@staticmethod
def check_solution_conditions(
electric_grid_model: ElectricGridModelDefault,
node_power_vector_wye_initial_no_source: np.ndarray,
node_power_vector_delta_initial_no_source: np.ndarray,
node_power_vector_wye_candidate_no_source: np.ndarray,
node_power_vector_delta_candidate_no_source: np.ndarray,
node_voltage_vector_initial_no_source: np.ndarray
) -> bool:
"""Check conditions for fixed-point solution existence, uniqueness and non-singularity for
given power vector candidate and initial point.
- Conditions are formulated according to: <https://arxiv.org/pdf/1702.03310.pdf>
- Note the performance issues of this condition check algorithm due to the
requirement for matrix inversions / solving of linear equations.
"""
# Calculate norm of the initial nodal power vector.
xi_initial = (
np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* node_power_vector_wye_initial_no_source
)
)
),
axis=1
))
+ np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
)
* node_power_vector_delta_initial_no_source
)
)
),
axis=1
))
)
# Calculate norm of the candidate nodal power vector.
xi_candidate = (
np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
)
),
axis=1
))
+ np.max(np.sum(
np.abs(
(electric_grid_model.node_voltage_vector_reference_no_source ** -1)
* scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
electric_grid_model.node_transformation_matrix_no_source
* (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
@ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
) ** -1
) * (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
)
),
axis=1
))
)
# Calculate norm of the initial nodal voltage vector.
gamma = (
np.min([
np.min(
np.abs(node_voltage_vector_initial_no_source)
/ np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
),
np.min(
np.abs(
electric_grid_model.node_transformation_matrix_no_source
* node_voltage_vector_initial_no_source
)
/ (
np.abs(electric_grid_model.node_transformation_matrix_no_source)
* np.abs(electric_grid_model.node_voltage_vector_reference_no_source)
)
)
])
)
# Obtain conditions for solution existence, uniqueness and non-singularity.
condition_initial = (
xi_initial
<
(gamma ** 2)
)
condition_candidate = (
xi_candidate
<
(0.25 * (((gamma ** 2) - xi_initial) / gamma) ** 2)
)
is_valid = (
condition_initial
& condition_candidate
)
# If `condition_initial` is violated, the given initial nodal voltage vector and power vectors are not valid.
# This suggests an error in the problem setup and hence triggers a warning.
if ~condition_initial:
logger.warning("Fixed point solution condition is not satisfied for the provided initial point.")
return is_valid
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
outer_iteration_limit=100,
outer_solution_algorithm='check_solution', # Choices: `check_conditions`, `check_solution`.
power_candidate_iteration_limit=100,
power_candidate_reduction_factor=0.5,
voltage_iteration_limit=100,
voltage_tolerance=1e-2
) -> np.ndarray:
"""Get nodal voltage vector by solving with the fixed point algorithm.
- Initial DER power vector / node voltage vector must be a valid
        solution to the fixed-point equation, e.g., a previous solution from a past
operation point.
- Fixed point equation according to: <https://arxiv.org/pdf/1702.03310.pdf>
"""
# TODO: Add proper documentation.
# TODO: Validate fixed-point solution conditions.
# Debug message.
logger.debug("Starting fixed point solution algorithm...")
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain initial nodal power and voltage vectors, assuming no power conditions.
# TODO: Enable passing previous solution for fixed-point initialization.
node_power_vector_wye_initial_no_source = np.zeros(node_power_vector_wye_no_source.shape, dtype=complex)
node_power_vector_delta_initial_no_source = np.zeros(node_power_vector_delta_no_source.shape, dtype=complex)
node_voltage_vector_initial_no_source = electric_grid_model.node_voltage_vector_reference_no_source.copy()
# Define nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Instantiate outer iteration variables.
is_final = False
outer_iteration = 0
# Outer iteration between power vector candidate selection and fixed point voltage solution algorithm
# until a final solution is found.
while (
~is_final
& (outer_iteration < outer_iteration_limit)
):
# Outer solution algorithm based on fixed-point solution conditions check.
# - Checks solution conditions and adjust power vector candidate if necessary, before solving for voltage.
if outer_solution_algorithm == 'check_conditions':
# Reset nodal power vector candidate to the desired nodal power vector.
node_power_vector_wye_candidate_no_source = node_power_vector_wye_no_source.copy()
node_power_vector_delta_candidate_no_source = node_power_vector_delta_no_source.copy()
# Check solution conditions for nodal power vector candidate.
is_final = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source
)
)
# Instantiate power candidate iteration variable.
power_candidate_iteration = 0
is_valid = is_final.copy()
# If solution conditions are violated, iteratively reduce power to find a power vector candidate
# which satisfies the solution conditions.
while (
~is_valid
& (power_candidate_iteration < power_candidate_iteration_limit)
):
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_wye_candidate_no_source
- node_power_vector_wye_initial_no_source
)
)
node_power_vector_delta_candidate_no_source -= (
power_candidate_reduction_factor
* (
node_power_vector_delta_candidate_no_source
- node_power_vector_delta_initial_no_source
)
)
is_valid = (
PowerFlowSolutionFixedPoint.check_solution_conditions(
electric_grid_model,
node_power_vector_wye_initial_no_source,
node_power_vector_delta_initial_no_source,
node_power_vector_wye_candidate_no_source,
node_power_vector_delta_candidate_no_source,
node_voltage_vector_initial_no_source,
)
)
power_candidate_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if power_candidate_iteration >= power_candidate_iteration_limit:
logger.warning(
"Power vector candidate selection algorithm for fixed-point solution reached "
f"maximum limit of {power_candidate_iteration_limit} iterations."
)
# Store current candidate power vectors as initial power vectors
# for next round of computation of solution conditions.
node_power_vector_wye_initial_no_source = (
node_power_vector_wye_candidate_no_source.copy()
)
node_power_vector_delta_initial_no_source = (
node_power_vector_delta_candidate_no_source.copy()
)
# Instantiate fixed point iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate fixed point equation.
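                # - Fixed-point iteration:
                #   v(k+1) = v_ref + Y_nn^-1 @ (conj(s_wye / v(k)) + T^T @ conj(s_delta / (T @ v(k)))),
                #   where Y_nn is the no-source nodal admittance matrix and T the nodal transformation matrix.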
node_voltage_vector_estimate_no_source = (
np.transpose([electric_grid_model.node_voltage_vector_reference_no_source])
+ np.transpose([
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source,
(
(
(
np.conj(np.transpose([node_voltage_vector_initial_no_source])) ** -1
)
* np.conj(np.transpose([node_power_vector_wye_candidate_no_source]))
)
+ (
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
@ (
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(np.transpose([node_voltage_vector_initial_no_source]))
) ** -1
)
* np.conj(np.transpose([node_power_vector_delta_candidate_no_source]))
)
)
)
)
])
).ravel()
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage solution as initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Outer solution algorithm based on voltage solution check.
# - Checks if voltage solution exceeded iteration limit and adjusts power vector candidate if needed.
if outer_solution_algorithm == 'check_solution':
# If voltage solution exceeds iteration limit, reduce power and re-try voltage solution.
if voltage_iteration >= voltage_iteration_limit:
# Reduce nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor
# Reset initial nodal voltage vector.
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Otherwise, if power has previously been reduced, raise back power and re-try voltage solution.
else:
if (
(node_power_vector_wye_candidate_no_source != node_power_vector_wye_no_source).any()
or (node_power_vector_delta_candidate_no_source != node_power_vector_delta_no_source).any()
):
# Increase nodal power vector candidate.
node_power_vector_wye_candidate_no_source *= power_candidate_reduction_factor ** -1
node_power_vector_delta_candidate_no_source *= power_candidate_reduction_factor ** -1
else:
is_final = True
# For fixed-point algorithm, reaching the iteration limit is considered undesired and triggers a warning
elif voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Fixed point voltage solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Increment outer iteration counter.
outer_iteration += 1
# Reaching the outer iteration limit is considered undesired and triggers a warning.
if outer_iteration >= outer_iteration_limit:
logger.warning(
"Outer wrapper algorithm for fixed-point solution reached "
f"maximum limit of {outer_iteration_limit} iterations."
)
# Debug message.
logger.debug(
"Completed fixed point solution algorithm. "
f"Outer wrapper iterations: {outer_iteration}"
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
@staticmethod
def get_branch_power(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get branch power vectors by calculating power flow with given nodal voltage.
- Returns two branch power vectors, where `branch_power_vector_1` represents the
"from"-direction and `branch_power_vector_2` represents the "to"-direction.
"""
# Obtain branch admittance and incidence matrices.
branch_admittance_1_matrix = (
electric_grid_model.branch_admittance_1_matrix
)
branch_admittance_2_matrix = (
electric_grid_model.branch_admittance_2_matrix
)
branch_incidence_1_matrix = (
electric_grid_model.branch_incidence_1_matrix
)
branch_incidence_2_matrix = (
electric_grid_model.branch_incidence_2_matrix
)
# Calculate branch power vectors.
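        # - Elementwise: s_branch = (K @ v) * conj(Y_branch @ v), where K is the branch incidence matrix selecting
        #   the voltage at the respective branch terminal and Y_branch the corresponding branch admittance matrix.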
branch_power_vector_1 = (
(
branch_incidence_1_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_1_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
branch_power_vector_2 = (
(
branch_incidence_2_matrix
@ np.transpose([node_voltage_vector])
)
* np.conj(
branch_admittance_2_matrix
@ np.transpose([node_voltage_vector])
)
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
branch_power_vector_1 *= 3
branch_power_vector_2 *= 3
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss(
electric_grid_model: ElectricGridModelDefault,
node_voltage_vector: np.ndarray
):
"""Get total electric losses with given nodal voltage."""
# Calculate total losses.
# TODO: Check if summing up branch power is faster.
# loss = (
# np.sum(
# branch_power_vector_1
# + branch_power_vector_2
# )
# )
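        # - Total complex power loss as the sum of all nodal power injections: loss = v^T @ conj(Y) @ conj(v).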
loss = (
np.array([node_voltage_vector])
@ np.conj(electric_grid_model.node_admittance_matrix)
@ np.transpose([np.conj(node_voltage_vector)])
).ravel()
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
loss *= 3
return loss
class PowerFlowSolutionZBus(PowerFlowSolutionFixedPoint):
"""Implicit Z-bus power flow solution object."""
# Overwrite `check_solution_conditions`, which is invalid for the Z-bus power flow.
@staticmethod
def check_solution_conditions(*args, **kwargs):
raise NotImplementedError("This method is invalid for the Z-bus power flow.")
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelDefault,
der_power_vector: np.ndarray,
voltage_iteration_limit=100,
voltage_tolerance=1e-2,
**kwargs
) -> np.ndarray:
"""Get nodal voltage vector by solving with the implicit Z-bus method."""
# Implicit Z-bus power flow solution (<NAME>).
# - “Can, Can, Lah!” (literal meaning, can accomplish)
# - <https://www.financialexpress.com/opinion/singapore-turns-50-the-remarkable-nation-that-can-lah/115775/>
# Obtain nodal power vectors.
node_power_vector_wye_no_source = (
electric_grid_model.der_incidence_wye_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
node_power_vector_delta_no_source = (
electric_grid_model.der_incidence_delta_matrix_no_source
@ np.transpose([der_power_vector])
).ravel()
# Obtain utility variables.
node_admittance_matrix_no_source_inverse = (
scipy.sparse.linalg.inv(electric_grid_model.node_admittance_matrix_no_source.tocsc())
)
node_admittance_matrix_source_to_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')
)]
)
node_voltage_vector_initial_no_source = (
electric_grid_model.node_voltage_vector_reference_no_source.copy()
)
# Instantiate implicit Z-bus power flow iteration variables.
voltage_iteration = 0
voltage_change = np.inf
while (
(voltage_iteration < voltage_iteration_limit)
& (voltage_change > voltage_tolerance)
):
# Calculate current injections.
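            # - Wye current injection: i_wye = conj(s_wye / v(k)); delta current injection mapped to equivalent
            #   wye currents: i_delta = T^T @ conj(s_delta / (T @ v(k))), with T the nodal transformation matrix.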
node_current_injection_delta_in_wye_no_source = (
electric_grid_model.node_transformation_matrix_no_source.transpose()
@ np.conj(
np.linalg.inv(np.diag((
electric_grid_model.node_transformation_matrix_no_source
@ node_voltage_vector_initial_no_source
).ravel()))
                    @ node_power_vector_delta_no_source
)
)
node_current_injection_wye_no_source = (
                np.conj(node_power_vector_wye_no_source)
/ np.conj(node_voltage_vector_initial_no_source)
)
node_current_injection_no_source = (
node_current_injection_delta_in_wye_no_source
+ node_current_injection_wye_no_source
)
# Calculate voltage.
node_voltage_vector_estimate_no_source = (
node_admittance_matrix_no_source_inverse @ (
- node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ node_current_injection_no_source
)
)
# node_voltage_vector_estimate_no_source = (
# electric_grid_model.node_voltage_vector_reference_no_source
# + node_admittance_matrix_no_source_inverse @ node_current_injection_no_source
# )
# Calculate voltage change from previous iteration.
voltage_change = (
np.max(np.abs(
node_voltage_vector_estimate_no_source
- node_voltage_vector_initial_no_source
))
)
# Set voltage estimate as new initial voltage for next iteration.
node_voltage_vector_initial_no_source = node_voltage_vector_estimate_no_source.copy()
# Increment voltage iteration counter.
voltage_iteration += 1
# Reaching the iteration limit is considered undesired and triggers a warning.
if voltage_iteration >= voltage_iteration_limit:
logger.warning(
"Z-bus solution algorithm reached "
f"maximum limit of {voltage_iteration_limit} iterations."
)
# Get full voltage vector.
node_voltage_vector = np.zeros(len(electric_grid_model.nodes), dtype=complex)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='source')] += (
electric_grid_model.node_voltage_vector_reference_source
)
node_voltage_vector[mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')] += (
node_voltage_vector_initial_no_source # Takes value of `node_voltage_vector_estimate_no_source`.
)
return node_voltage_vector
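# Illustrative usage sketch for `PowerFlowSolutionZBus` (kept as a comment, not executed):
# the constructor overloads are inherited from `PowerFlowSolutionFixedPoint`; the scenario name
# below is a hypothetical placeholder.
# power_flow_solution = PowerFlowSolutionZBus('example_scenario')
# # Alternatively, the voltage solver can be called directly with an explicit DER power vector:
# node_voltage_vector = PowerFlowSolutionZBus.get_voltage(
#     electric_grid_model,
#     der_power_vector,
#     voltage_iteration_limit=100,
#     voltage_tolerance=1e-2
# )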
class PowerFlowSolutionOpenDSS(PowerFlowSolution):
"""OpenDSS power flow solution object."""
@multimethod
def __init__(
self,
scenario_name: str,
**kwargs
):
# Obtain `electric_grid_model`.
electric_grid_model = ElectricGridModelOpenDSS(scenario_name)
self.__init__(
electric_grid_model,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
**kwargs
):
# Obtain `der_power_vector`, assuming nominal power conditions.
der_power_vector = electric_grid_model.der_power_vector_reference
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelOpenDSS,
der_power_vector: np.ndarray,
**kwargs
):
# Store DER power vector.
self.der_power_vector = der_power_vector.ravel()
# Check if correct OpenDSS circuit is initialized, otherwise reinitialize.
if opendssdirect.Circuit.Name() != electric_grid_model.circuit_name:
electric_grid_model.__init__(electric_grid_model.electric_grid_data)
# Set DER power vector in OpenDSS model.
for der_index, der_name in enumerate(electric_grid_model.der_names):
# TODO: For OpenDSS, all DERs are assumed to be loads.
opendss_command_string = (
f"load.{der_name}.kw = {- np.real(self.der_power_vector[der_index]) / 1000.0}"
+ f"\nload.{der_name}.kvar = {- np.imag(self.der_power_vector[der_index]) / 1000.0}"
)
logger.debug(f"opendss_command_string = \n{opendss_command_string}")
opendssdirect.run_command(opendss_command_string)
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain voltage solution.
self.node_voltage_vector = (
self.get_voltage(
electric_grid_model
)
)
# Obtain branch flow solution.
(
self.branch_power_vector_1,
self.branch_power_vector_2
) = (
self.get_branch_power()
)
# Obtain loss solution.
self.loss = (
self.get_loss()
)
@staticmethod
def get_voltage(
electric_grid_model: ElectricGridModelOpenDSS
):
"""Get nodal voltage vector by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Create index for OpenDSS nodes.
opendss_nodes = pd.Series(opendssdirect.Circuit.AllNodeNames()).str.split('.', expand=True)
opendss_nodes.columns = ['node_name', 'phase']
opendss_nodes.loc[:, 'phase'] = opendss_nodes.loc[:, 'phase'].astype(int)
opendss_nodes = pd.MultiIndex.from_frame(opendss_nodes)
# Extract nodal voltage vector and reindex to match MESMO nodes order.
node_voltage_vector_solution = (
pd.Series(
(
np.array(opendssdirect.Circuit.AllBusVolts()[0::2])
+ 1j * np.array(opendssdirect.Circuit.AllBusVolts()[1::2])
),
index=opendss_nodes
).reindex(
electric_grid_model.nodes.droplevel('node_type')
).values
)
# Make modifications for single-phase-equivalent modelling.
if electric_grid_model.is_single_phase_equivalent:
node_voltage_vector_solution /= np.sqrt(3)
return node_voltage_vector_solution
@staticmethod
def get_branch_power():
"""Get branch power vectors by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Instantiate branch vectors.
branch_power_vector_1 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
branch_power_vector_2 = (
np.full(((opendssdirect.Lines.Count() + opendssdirect.Transformers.Count()), 3), np.nan, dtype=complex)
)
# Instantiate iteration variables.
branch_vector_index = 0
line_index = opendssdirect.Lines.First()
# Obtain line branch power vectors.
while line_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0 + (branch_phase_count * 2)::2]
+ 1.0j * branch_power_opendss[1 + (branch_phase_count * 2)::2]
)
branch_vector_index += 1
line_index = opendssdirect.Lines.Next()
# Obtain transformer branch power vectors.
transformer_index = opendssdirect.Transformers.First()
while transformer_index > 0:
branch_power_opendss = np.array(opendssdirect.CktElement.Powers()) * 1000.0
branch_phase_count = opendssdirect.CktElement.NumPhases()
skip_phase = 2 if 0 in opendssdirect.CktElement.NodeOrder() else 0 # Ignore ground nodes.
branch_power_vector_1[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0:(branch_phase_count * 2):2]
+ 1.0j * branch_power_opendss[1:(branch_phase_count * 2):2]
)
branch_power_vector_2[branch_vector_index, :branch_phase_count] = (
branch_power_opendss[0 + (branch_phase_count * 2) + skip_phase:-skip_phase:2]
+ 1.0j * branch_power_opendss[1 + (branch_phase_count * 2) + skip_phase:-skip_phase:2]
)
branch_vector_index += 1
transformer_index = opendssdirect.Transformers.Next()
# Reshape branch power vectors to appropriate size and remove entries for nonexistent phases.
# TODO: Sort vector by branch name if not in order.
branch_power_vector_1 = branch_power_vector_1.flatten()
branch_power_vector_2 = branch_power_vector_2.flatten()
branch_power_vector_1 = branch_power_vector_1[~np.isnan(branch_power_vector_1)]
branch_power_vector_2 = branch_power_vector_2[~np.isnan(branch_power_vector_2)]
return (
branch_power_vector_1,
branch_power_vector_2
)
@staticmethod
def get_loss():
"""Get total loss by solving OpenDSS model.
- OpenDSS model must be readily set up, with the desired power being set for all DERs.
"""
# Solve OpenDSS model.
opendssdirect.run_command("solve")
# Obtain loss.
loss = opendssdirect.Circuit.Losses()[0] + 1.0j * opendssdirect.Circuit.Losses()[1]
return loss
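# Illustrative usage sketch for `PowerFlowSolutionOpenDSS` (kept as a comment, not executed;
# requires an OpenDSS-compatible scenario definition). The scenario name is a hypothetical placeholder.
# power_flow_solution = PowerFlowSolutionOpenDSS('example_scenario')
# print(power_flow_solution.node_voltage_vector)
# print(power_flow_solution.branch_power_vector_1)
# print(power_flow_solution.loss)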
class PowerFlowSolutionSet(mesmo.utils.ObjectBase):
power_flow_solutions: typing.Dict[pd.Timestamp, PowerFlowSolution]
electric_grid_model: ElectricGridModelDefault
der_power_vector: pd.DataFrame
timesteps: pd.Index
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_operation_results: ElectricGridDEROperationResults,
**kwargs
):
der_power_vector = (
der_operation_results.der_active_power_vector
+ 1.0j * der_operation_results.der_reactive_power_vector
)
self.__init__(
electric_grid_model,
der_power_vector,
**kwargs
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
der_power_vector: pd.DataFrame,
power_flow_solution_method=PowerFlowSolutionFixedPoint
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.der_power_vector = der_power_vector
self.timesteps = self.electric_grid_model.timesteps
# Obtain power flow solutions.
power_flow_solutions = (
mesmo.utils.starmap(
power_flow_solution_method,
zip(
itertools.repeat(self.electric_grid_model),
der_power_vector.values
)
)
)
self.power_flow_solutions = dict(zip(self.timesteps, power_flow_solutions))
def get_results(self) -> ElectricGridOperationResults:
# Instantiate results variables.
der_power_vector = (
pd.DataFrame(columns=self.electric_grid_model.ders, index=self.timesteps, dtype=complex)
)
node_voltage_vector = (
pd.DataFrame(columns=self.electric_grid_model.nodes, index=self.timesteps, dtype=complex)
)
branch_power_vector_1 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
branch_power_vector_2 = (
pd.DataFrame(columns=self.electric_grid_model.branches, index=self.timesteps, dtype=complex)
)
loss = pd.DataFrame(columns=['total'], index=self.timesteps, dtype=complex)
# Obtain results.
for timestep in self.timesteps:
power_flow_solution = self.power_flow_solutions[timestep]
der_power_vector.loc[timestep, :] = power_flow_solution.der_power_vector
node_voltage_vector.loc[timestep, :] = power_flow_solution.node_voltage_vector
branch_power_vector_1.loc[timestep, :] = power_flow_solution.branch_power_vector_1
branch_power_vector_2.loc[timestep, :] = power_flow_solution.branch_power_vector_2
loss.loc[timestep, :] = power_flow_solution.loss
der_active_power_vector = der_power_vector.apply(np.real)
der_reactive_power_vector = der_power_vector.apply(np.imag)
node_voltage_magnitude_vector = np.abs(node_voltage_vector)
branch_power_magnitude_vector_1 = np.abs(branch_power_vector_1)
branch_power_magnitude_vector_2 = np.abs(branch_power_vector_2)
loss_active = loss.apply(np.real)
loss_reactive = loss.apply(np.imag)
# Obtain per-unit values.
der_active_power_vector_per_unit = (
der_active_power_vector
* mesmo.utils.get_inverse_with_zeros(np.real(self.electric_grid_model.der_power_vector_reference))
)
der_reactive_power_vector_per_unit = (
der_reactive_power_vector
* mesmo.utils.get_inverse_with_zeros(np.imag(self.electric_grid_model.der_power_vector_reference))
)
node_voltage_magnitude_vector_per_unit = (
node_voltage_magnitude_vector
* mesmo.utils.get_inverse_with_zeros(np.abs(self.electric_grid_model.node_voltage_vector_reference))
)
branch_power_magnitude_vector_1_per_unit = (
branch_power_magnitude_vector_1
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
branch_power_magnitude_vector_2_per_unit = (
branch_power_magnitude_vector_2
* mesmo.utils.get_inverse_with_zeros(self.electric_grid_model.branch_power_vector_magnitude_reference)
)
# Store results.
return ElectricGridOperationResults(
electric_grid_model=self.electric_grid_model,
der_active_power_vector=der_active_power_vector,
der_active_power_vector_per_unit=der_active_power_vector_per_unit,
der_reactive_power_vector=der_reactive_power_vector,
der_reactive_power_vector_per_unit=der_reactive_power_vector_per_unit,
node_voltage_magnitude_vector=node_voltage_magnitude_vector,
node_voltage_magnitude_vector_per_unit=node_voltage_magnitude_vector_per_unit,
branch_power_magnitude_vector_1=branch_power_magnitude_vector_1,
branch_power_magnitude_vector_1_per_unit=branch_power_magnitude_vector_1_per_unit,
branch_power_magnitude_vector_2=branch_power_magnitude_vector_2,
branch_power_magnitude_vector_2_per_unit=branch_power_magnitude_vector_2_per_unit,
loss_active=loss_active,
loss_reactive=loss_reactive
)
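# Illustrative usage sketch for `PowerFlowSolutionSet` (kept as a comment, not executed): it assumes
# that `der_operation_results` has been obtained from a prior optimization run; names are placeholders.
# electric_grid_model = ElectricGridModelDefault('example_scenario')
# power_flow_solution_set = PowerFlowSolutionSet(
#     electric_grid_model,
#     der_operation_results,  # `ElectricGridDEROperationResults` object from a previous solution.
#     power_flow_solution_method=PowerFlowSolutionFixedPoint
# )
# results = power_flow_solution_set.get_results()
# print(results.node_voltage_magnitude_vector_per_unit)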
class LinearElectricGridModel(mesmo.utils.ObjectBase):
"""Abstract linear electric model object, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
Note:
This abstract class only defines the expected variables of linear electric grid model objects,
but does not implement any functionality.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
electric_grid_model: ElectricGridModelDefault
power_flow_solution: PowerFlowSolution
sensitivity_voltage_by_power_wye_active: sp.spmatrix
sensitivity_voltage_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_by_power_delta_active: sp.spmatrix
sensitivity_voltage_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_by_der_power_active: sp.spmatrix
sensitivity_voltage_by_der_power_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_voltage_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_active: sp.spmatrix
sensitivity_voltage_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_magnitude_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_1_squared_by_der_power_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_wye_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_power_delta_reactive: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_active: sp.spmatrix
sensitivity_branch_power_2_squared_by_der_power_reactive: sp.spmatrix
sensitivity_loss_active_by_power_wye_active: sp.spmatrix
sensitivity_loss_active_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_active_by_power_delta_active: sp.spmatrix
sensitivity_loss_active_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_active_by_der_power_active: sp.spmatrix
sensitivity_loss_active_by_der_power_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_active: sp.spmatrix
sensitivity_loss_reactive_by_power_wye_reactive: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_active: sp.spmatrix
sensitivity_loss_reactive_by_power_delta_reactive: sp.spmatrix
sensitivity_loss_reactive_by_der_power_active: sp.spmatrix
sensitivity_loss_reactive_by_der_power_reactive: sp.spmatrix
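# Illustrative sketch (kept as a comment, not executed): the sensitivity matrices declared above are
# intended as first-order approximations around the reference power flow solution, e.g. for the
# voltage magnitude as a function of DER active / reactive power changes. The variable names are
# hypothetical placeholders and the expression is a sketch, not the library's own implementation.
# voltage_magnitude_approximate = (
#     np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)
#     + linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
#     @ (der_active_power_vector - np.real(linear_electric_grid_model.power_flow_solution.der_power_vector))
#     + linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
#     @ (der_reactive_power_vector - np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector))
# )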
class LinearElectricGridModelGlobal(LinearElectricGridModel):
"""Linear electric grid model object based on global approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelGlobal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelGlobal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# TODO: Validate linear model with delta DERs.
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate voltage sensitivity matrices.
# TODO: Document the change in sign in the reactive part compared to Hanif.
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
sp.diags(np.conj(node_voltage_no_source) ** -1, format='csc')
)
)
self.sensitivity_voltage_by_power_delta_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
@ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
1.0j * electric_grid_model.node_admittance_matrix_no_source.tocsc(),
np.transpose(electric_grid_model.node_transformation_matrix_no_source)
)
@ sp.diags(
(
(
electric_grid_model.node_transformation_matrix_no_source
                        @ np.conj(node_voltage_no_source)
) ** -1
).ravel()
)
)
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
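# Illustrative usage sketch for `LinearElectricGridModelGlobal` (kept as a comment, not executed):
# instantiation per the syntax documented above; the scenario name is a hypothetical placeholder.
# linear_electric_grid_model = LinearElectricGridModelGlobal('example_scenario')
# print(linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active.shape)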
class LinearElectricGridModelLocal(LinearElectricGridModel):
"""Linear electric grid model object based on local approximations, consisting of the sensitivity matrices for
voltage / voltage magnitude / squared branch power / active loss / reactive loss by changes in nodal wye power /
nodal delta power.
:syntax:
- ``LinearElectricGridModelLocal(electric_grid_model, power_flow_solution)``: Instantiate linear electric grid
model object for given `electric_grid_model` and `power_flow_solution`.
- ``LinearElectricGridModelLocal(scenario_name)``: Instantiate linear electric grid model for given
`scenario_name`. The required `electric_grid_model` is obtained for given `scenario_name` and the
`power_flow_solution` is obtained for nominal power conditions.
Parameters:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Power flow solution object.
scenario_name (str): MESMO scenario name.
Attributes:
electric_grid_model (ElectricGridModelDefault): Electric grid model object.
power_flow_solution (PowerFlowSolution): Reference power flow solution object.
sensitivity_voltage_by_power_wye_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active wye power vector.
sensitivity_voltage_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive wye power vector.
sensitivity_voltage_by_power_delta_active (sp.spmatrix): Sensitivity matrix for complex voltage vector
by active delta power vector.
sensitivity_voltage_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for complex voltage
vector by reactive delta power vector.
sensitivity_voltage_by_der_power_active (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER active power vector.
sensitivity_voltage_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
complex voltage vector by DER reactive power vector.
sensitivity_voltage_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for voltage
magnitude vector by active wye power vector.
sensitivity_voltage_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive wye power vector.
sensitivity_voltage_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by active delta power vector.
sensitivity_voltage_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by reactive delta power vector.
sensitivity_voltage_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER active power vector.
sensitivity_voltage_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
voltage magnitude vector by DER reactive power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER active power vector.
sensitivity_branch_power_1_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 1 by DER reactive power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_magnitude_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_magnitude_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_magnitude_by_der_power_active (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER active power vector.
sensitivity_branch_power_2_magnitude_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
branch flow power magnitude vector 2 by DER reactive power vector.
sensitivity_branch_power_1_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active wye power vector.
sensitivity_branch_power_1_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive wye power vector.
sensitivity_branch_power_1_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by active delta power vector.
sensitivity_branch_power_1_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 ('from' direction) by reactive delta power vector.
sensitivity_branch_power_1_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER active power vector.
sensitivity_branch_power_1_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 1 by DER reactive power vector.
sensitivity_branch_power_2_squared_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active wye power vector.
sensitivity_branch_power_2_squared_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive wye power vector.
sensitivity_branch_power_2_squared_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by active delta power vector.
sensitivity_branch_power_2_squared_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 ('to' direction) by reactive delta power vector.
sensitivity_branch_power_2_squared_by_der_power_active (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER active power vector.
sensitivity_branch_power_2_squared_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
squared branch flow power vector 2 by DER reactive power vector.
sensitivity_loss_active_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
active loss by active wye power vector.
sensitivity_loss_active_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive wye power vector.
sensitivity_loss_active_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
active loss by active delta power vector.
sensitivity_loss_active_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
active loss by reactive delta power vector.
sensitivity_loss_active_by_der_power_active (sp.spmatrix): Sensitivity matrix for
active loss by DER active power vector.
sensitivity_loss_active_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
active loss by DER reactive power vector.
sensitivity_loss_reactive_by_power_wye_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active wye power vector.
sensitivity_loss_reactive_by_power_wye_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive wye power vector.
sensitivity_loss_reactive_by_power_delta_active (sp.spmatrix): Sensitivity matrix for
reactive loss by active delta power vector.
sensitivity_loss_reactive_by_power_delta_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by reactive delta power vector.
sensitivity_loss_reactive_by_der_power_active (sp.spmatrix): Sensitivity matrix for
reactive loss by DER active power vector.
sensitivity_loss_reactive_by_der_power_reactive (sp.spmatrix): Sensitivity matrix for
reactive loss by DER reactive power vector.
"""
@multimethod
def __init__(
self,
scenario_name: str,
):
# Obtain electric grid model.
electric_grid_model = (
ElectricGridModelDefault(scenario_name)
)
# Obtain der power vector.
der_power_vector = (
electric_grid_model.der_power_vector_reference
)
# Obtain power flow solution.
power_flow_solution = (
PowerFlowSolutionFixedPoint(
electric_grid_model,
der_power_vector
)
)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution
):
# Store power flow solution.
self.power_flow_solution = power_flow_solution
# Store electric grid model.
self.electric_grid_model = electric_grid_model
# Obtain shorthands for no-source matrices and vectors.
electric_grid_model.node_admittance_matrix_no_source = (
electric_grid_model.node_admittance_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
electric_grid_model.node_transformation_matrix_no_source = (
electric_grid_model.node_transformation_matrix[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)]
)
node_voltage_no_source = (
self.power_flow_solution.node_voltage_vector[
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
]
)
# Instantiate voltage sensitivity matrices.
self.sensitivity_voltage_by_power_wye_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_wye_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_active = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
self.sensitivity_voltage_by_power_delta_reactive = (
sp.dok_matrix(
(len(electric_grid_model.nodes), len(electric_grid_model.nodes)),
dtype=complex
)
)
# Calculate utility matrices.
A_matrix_inverse = (
sp.diags((
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
) ** -1)
)
A_matrix_conjugate = (
sp.diags(np.conj(
electric_grid_model.node_admittance_matrix_source_to_no_source
@ electric_grid_model.node_voltage_vector_reference_source
+ electric_grid_model.node_admittance_matrix_no_source
@ node_voltage_no_source
))
)
B_matrix = (
A_matrix_conjugate
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.diags(np.conj(node_voltage_no_source))
@ electric_grid_model.node_admittance_matrix_no_source
)
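# - Note: A_matrix_inverse / A_matrix_conjugate and B_matrix arise from implicitly differentiating the
#   no-source nodal power balance s = diag(v) @ conj(Y_source_to_no_source @ v_source + Y_no_source @ v)
#   at the given power flow solution. B_matrix is the coefficient matrix of the linear systems solved below.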
# Calculate voltage sensitivity matrices.
# - TODO: Consider delta loads.
self.sensitivity_voltage_by_power_wye_active[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
sp.identity(len(node_voltage_no_source))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ sp.identity(len(node_voltage_no_source))
).tocsc()
)
)
self.sensitivity_voltage_by_power_wye_reactive[np.ix_(
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
)] = (
scipy.sparse.linalg.spsolve(
B_matrix.tocsc(),
(
(1.0j * sp.identity(len(node_voltage_no_source)))
- sp.diags(node_voltage_no_source)
@ np.conj(electric_grid_model.node_admittance_matrix_no_source)
@ A_matrix_inverse
@ (-1.0j * sp.identity(len(node_voltage_no_source)))
).tocsc()
)
)
# self.sensitivity_voltage_by_power_delta_active[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
# self.sensitivity_voltage_by_power_delta_reactive[np.ix_(
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source'),
# mesmo.utils.get_index(electric_grid_model.nodes, node_type='no_source')
# )] = (
# ???
# )
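# Compose DER power sensitivities via the chain rule: DER active / reactive power maps to nodal
# wye / delta power through the respective DER incidence matrices.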
self.sensitivity_voltage_by_der_power_active = (
self.sensitivity_voltage_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_by_der_power_reactive = (
self.sensitivity_voltage_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
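# Calculate voltage magnitude sensitivity matrices.
# - Based on d|v| / dx = Re(diag(conj(v) / |v|) @ dv / dx), evaluated at the power flow solution.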
self.sensitivity_voltage_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_voltage_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_voltage_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_voltage_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.node_voltage_vector) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.node_voltage_vector))
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_voltage_magnitude_by_der_power_active = (
self.sensitivity_voltage_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_voltage_magnitude_by_der_power_reactive = (
self.sensitivity_voltage_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_voltage_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate branch power sensitivity matrices.
sensitivity_branch_power_1_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_1_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_1_matrix
+ sp.diags((
electric_grid_model.branch_incidence_1_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_1_matrix
* np.sqrt(3)
)
sensitivity_branch_power_2_by_voltage = (
sp.diags((
np.conj(electric_grid_model.branch_admittance_2_matrix)
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_incidence_2_matrix
+ sp.diags((
electric_grid_model.branch_incidence_2_matrix
@ np.conj(self.power_flow_solution.node_voltage_vector)
).ravel())
@ electric_grid_model.branch_admittance_2_matrix
* np.sqrt(3)
)
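# - The branch power magnitude sensitivities below follow d|s| / dx = Re(diag(conj(s) / |s|) @ ds / dx),
#   with ds / dv given by the two matrices above and dv / dx by the voltage sensitivity matrices.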
self.sensitivity_branch_power_1_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_1) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_1))
@ sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_active = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive = (
sp.diags(abs(self.power_flow_solution.branch_power_vector_2) ** -1)
@ np.real(
sp.diags(np.conj(self.power_flow_solution.branch_power_vector_2))
@ sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
self.sensitivity_branch_power_1_magnitude_by_der_power_active = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_1_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_active = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_magnitude_by_der_power_reactive = (
self.sensitivity_branch_power_2_magnitude_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_magnitude_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_1_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_1))
@ np.real(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_1))
@ np.imag(
sensitivity_branch_power_1_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_wye_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_active = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
)
)
self.sensitivity_branch_power_2_squared_by_power_delta_reactive = (
(
sp.diags(np.real(self.power_flow_solution.branch_power_vector_2))
@ np.real(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
+ (
sp.diags(np.imag(self.power_flow_solution.branch_power_vector_2))
@ np.imag(
sensitivity_branch_power_2_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
)
)
self.sensitivity_branch_power_1_squared_by_der_power_active = (
self.sensitivity_branch_power_1_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_1_squared_by_der_power_reactive = (
self.sensitivity_branch_power_1_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_1_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_active = (
self.sensitivity_branch_power_2_squared_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_branch_power_2_squared_by_der_power_reactive = (
self.sensitivity_branch_power_2_squared_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_branch_power_2_squared_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
# Calculate loss sensitivity matrices.
# sensitivity_loss_by_voltage = (
# np.array([self.power_flow_solution.node_voltage_vector])
# @ np.conj(electric_grid_model.node_admittance_matrix)
# + np.transpose(
# electric_grid_model.node_admittance_matrix
# @ np.transpose([self.power_flow_solution.node_voltage_vector])
# )
# )
sensitivity_loss_by_voltage = (
sum(np.transpose(
np.transpose(sensitivity_branch_power_1_by_voltage)
+ np.transpose(sensitivity_branch_power_2_by_voltage)
))
)
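# - The loss-by-voltage sensitivity is the column sum of the two branch-power-by-voltage sensitivity
#   matrices, consistent with total losses being the sum of branch power flows at both branch ends.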
self.sensitivity_loss_active_by_power_wye_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_wye_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_active = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_active_by_power_delta_reactive = (
np.real(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
/ (2 * np.sqrt(3))
)
self.sensitivity_loss_reactive_by_power_wye_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_wye_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_wye_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_active = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_active
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_reactive_by_power_delta_reactive = (
np.imag(
sensitivity_loss_by_voltage
@ self.sensitivity_voltage_by_power_delta_reactive
)
* -1 * np.sqrt(3)
)
self.sensitivity_loss_active_by_der_power_active = (
self.sensitivity_loss_active_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_active_by_der_power_reactive = (
self.sensitivity_loss_active_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_active_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_active = (
self.sensitivity_loss_reactive_by_power_wye_active
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_active
@ electric_grid_model.der_incidence_delta_matrix
)
self.sensitivity_loss_reactive_by_der_power_reactive = (
self.sensitivity_loss_reactive_by_power_wye_reactive
@ electric_grid_model.der_incidence_wye_matrix
+ self.sensitivity_loss_reactive_by_power_delta_reactive
@ electric_grid_model.der_incidence_delta_matrix
)
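# Linear electric grid model set: collects one linear electric grid model per timestep and defines the
# corresponding optimization variables, parameters, constraints and objective terms.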
class LinearElectricGridModelSet(mesmo.utils.ObjectBase):
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
electric_grid_model: ElectricGridModelDefault
timesteps: pd.Index
@multimethod
def __init__(
self,
scenario_name: str
):
# Obtain electric grid model & reference power flow solution.
electric_grid_model = ElectricGridModelDefault(scenario_name)
power_flow_solution = PowerFlowSolutionFixedPoint(electric_grid_model)
self.__init__(
electric_grid_model,
power_flow_solution
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution: PowerFlowSolution,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelGlobal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_model = linear_electric_grid_model_method(electric_grid_model, power_flow_solution)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, itertools.repeat(linear_electric_grid_model)))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
power_flow_solution_set: PowerFlowSolutionSet,
linear_electric_grid_model_method: typing.Type[LinearElectricGridModel] = LinearElectricGridModelLocal
):
self.check_linear_electric_grid_model_method(linear_electric_grid_model_method)
# Obtain linear electric grid models.
linear_electric_grid_models = (
mesmo.utils.starmap(
linear_electric_grid_model_method,
zip(
itertools.repeat(electric_grid_model),
power_flow_solution_set.power_flow_solutions.values()
)
)
)
linear_electric_grid_models = (
dict(zip(electric_grid_model.timesteps, linear_electric_grid_models))
)
self.__init__(
electric_grid_model,
linear_electric_grid_models
)
@multimethod
def __init__(
self,
electric_grid_model: ElectricGridModelDefault,
linear_electric_grid_models: typing.Dict[pd.Timestamp, LinearElectricGridModel]
):
# Store attributes.
self.electric_grid_model = electric_grid_model
self.timesteps = self.electric_grid_model.timesteps
self.linear_electric_grid_models = linear_electric_grid_models
@staticmethod
def check_linear_electric_grid_model_method(linear_electric_grid_model_method):
if not issubclass(linear_electric_grid_model_method, LinearElectricGridModel):
raise ValueError(f"Invalid linear electric grid model method: {linear_electric_grid_model_method}")
def define_optimization_problem(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None,
**kwargs
):
# Define optimization problem elements through the respective sub-methods.
self.define_optimization_variables(optimization_problem, scenarios=scenarios)
self.define_optimization_parameters(
optimization_problem,
price_data,
scenarios=scenarios,
**kwargs
)
self.define_optimization_constraints(optimization_problem, scenarios=scenarios)
self.define_optimization_objective(optimization_problem, scenarios=scenarios)
def define_optimization_variables(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define DER power vector variables.
optimization_problem.define_variable(
'der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
optimization_problem.define_variable(
'der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)
# Define node voltage magnitude variable.
optimization_problem.define_variable(
'node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)
# Define branch power magnitude variables.
optimization_problem.define_variable(
'branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
optimization_problem.define_variable(
'branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)
# Define loss variables.
optimization_problem.define_variable(
'loss_active', scenario=scenarios, timestep=self.timesteps
)
optimization_problem.define_variable(
'loss_reactive', scenario=scenarios, timestep=self.timesteps
)
def define_optimization_parameters(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
node_voltage_magnitude_vector_minimum: np.ndarray = None,
node_voltage_magnitude_vector_maximum: np.ndarray = None,
branch_power_magnitude_vector_maximum: np.ndarray = None,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Obtain timestep interval in hours, for conversion of power to energy.
timestep_interval_hours = (self.timesteps[1] - self.timesteps[0]) / pd.Timedelta('1h')
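# - For example, 30-minute timesteps yield timestep_interval_hours = 0.5.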
# Define voltage variable terms.
optimization_problem.define_parameter(
'voltage_active_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'voltage_reactive_term',
sp.block_diag([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage constant term.
optimization_problem.define_parameter(
'voltage_constant',
np.concatenate([
sp.diags(np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference) ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.node_voltage_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_voltage_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
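# - Together with the variable terms above, this defines the affine per-unit voltage model used in
#   define_optimization_constraints(): node_voltage_magnitude_vector = voltage_constant
#   + voltage_active_term @ der_active_power_vector + voltage_reactive_term @ der_reactive_power_vector.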
# Define branch flow (direction 1) variable terms.
optimization_problem.define_parameter(
'branch_power_1_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_1_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 1) constant terms.
optimization_problem.define_parameter(
'branch_power_1_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_1)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_1_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) variable terms.
optimization_problem.define_parameter(
'branch_power_2_active_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'branch_power_2_reactive_term',
sp.block_diag([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define branch flow (direction 2) constant term.
optimization_problem.define_parameter(
'branch_power_2_constant',
np.concatenate([
sp.diags(linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference ** -1)
@ (
np.transpose([np.abs(linear_electric_grid_model.power_flow_solution.branch_power_vector_2)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_branch_power_2_magnitude_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
) for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss variable terms.
optimization_problem.define_parameter(
'loss_active_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_active_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define active loss constant term.
optimization_problem.define_parameter(
'loss_active_constant',
np.concatenate([
np.real(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_active_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define reactive loss variable terms.
optimization_problem.define_parameter(
'loss_reactive_active_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ sp.diags(np.real(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
optimization_problem.define_parameter(
'loss_reactive_reactive_term',
sp.block_diag([
linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ sp.diags(np.imag(linear_electric_grid_model.electric_grid_model.der_power_vector_reference))
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define reactive loss constant term.
optimization_problem.define_parameter(
'loss_reactive_constant',
np.concatenate([
np.imag(linear_electric_grid_model.power_flow_solution.loss)
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_active
@ np.transpose([np.real(linear_electric_grid_model.power_flow_solution.der_power_vector)])
- linear_electric_grid_model.sensitivity_loss_reactive_by_der_power_reactive
@ np.transpose([np.imag(linear_electric_grid_model.power_flow_solution.der_power_vector)])
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
)
# Define voltage limits.
optimization_problem.define_parameter(
'voltage_limit_minimum',
np.concatenate([
node_voltage_magnitude_vector_minimum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_minimum is not None
else -np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'voltage_limit_maximum',
np.concatenate([
node_voltage_magnitude_vector_maximum.ravel()
/ np.abs(linear_electric_grid_model.electric_grid_model.node_voltage_vector_reference)
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if node_voltage_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.nodes) * len(self.timesteps), ))
)
# Define branch flow limits.
optimization_problem.define_parameter(
'branch_power_minimum',
np.concatenate([
- branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else -np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
optimization_problem.define_parameter(
'branch_power_maximum',
np.concatenate([
branch_power_magnitude_vector_maximum.ravel()
/ linear_electric_grid_model.electric_grid_model.branch_power_vector_magnitude_reference
for linear_electric_grid_model in self.linear_electric_grid_models.values()
])
if branch_power_magnitude_vector_maximum is not None
else +np.inf * np.ones((len(self.electric_grid_model.branches) * len(self.timesteps), ))
)
# Define objective parameters.
optimization_problem.define_parameter(
'electric_grid_active_power_cost',
np.array([price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.real(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_active_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.real(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost',
np.array([price_data.price_timeseries.loc[:, ('reactive_power', 'source', 'source')].values])
* -1.0 * timestep_interval_hours # In Wh.
@ sp.block_diag(
[np.array([np.imag(self.electric_grid_model.der_power_vector_reference)])] * len(self.timesteps)
)
)
optimization_problem.define_parameter(
'electric_grid_reactive_power_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
* np.concatenate([np.imag(self.electric_grid_model.der_power_vector_reference) ** 2] * len(self.timesteps))
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost',
price_data.price_timeseries.loc[:, ('active_power', 'source', 'source')].values
* timestep_interval_hours # In Wh.
)
optimization_problem.define_parameter(
'electric_grid_loss_active_cost_sensitivity',
price_data.price_sensitivity_coefficient
* timestep_interval_hours # In Wh.
)
def define_optimization_constraints(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Define voltage equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'==',
('variable', 'voltage_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'voltage_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'voltage_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 1) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_1_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_1_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_1_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define branch flow (direction 2) equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'==',
('variable', 'branch_power_2_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'branch_power_2_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'branch_power_2_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define active loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_active', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_active_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_active_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_active_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define reactive loss equation.
optimization_problem.define_constraint(
('variable', 1.0, dict(name='loss_reactive', scenario=scenarios, timestep=self.timesteps)),
'==',
('variable', 'loss_reactive_active_term', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'loss_reactive_reactive_term', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('constant', 'loss_reactive_constant', dict(scenario=scenarios, timestep=self.timesteps)),
broadcast='scenario'
)
# Define voltage limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'>=',
('constant', 'voltage_limit_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='node_voltage_magnitude_vector', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
)),
'<=',
('constant', 'voltage_limit_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='voltage_magnitude_vector_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
node=self.electric_grid_model.nodes
),
broadcast='scenario'
)
# Define branch flow limits.
# Add dedicated keys to enable retrieving dual variables.
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_1', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_1_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'>=',
('constant', 'branch_power_minimum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_minimum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
optimization_problem.define_constraint(
('variable', 1.0, dict(
name='branch_power_magnitude_vector_2', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
)),
'<=',
('constant', 'branch_power_maximum', dict(scenario=scenarios, timestep=self.timesteps)),
keys=dict(
name='branch_power_magnitude_vector_2_maximum_constraint', scenario=scenarios, timestep=self.timesteps,
branch=self.electric_grid_model.branches
),
broadcast='scenario'
)
def define_optimization_objective(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
scenarios: typing.Union[list, pd.Index] = None
):
# If no scenarios given, obtain default value.
if scenarios is None:
scenarios = [None]
# Set objective flag.
optimization_problem.flags['has_electric_grid_objective'] = True
# Define objective for electric loads.
# - Defined as cost of electric supply at electric grid source node.
# - Only defined here, if not yet defined as cost of electric power supply at the DER node
# in `mesmo.der_models.DERModel.define_optimization_objective`.
if not optimization_problem.flags.get('has_der_objective'):
# Active power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_active_power_cost', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_active_power_cost_sensitivity', dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_active_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Reactive power cost / revenue.
# - Cost for load / demand, revenue for generation / supply.
optimization_problem.define_objective(
('variable', 'electric_grid_reactive_power_cost', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
('variable', 'electric_grid_reactive_power_cost_sensitivity', dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
), dict(
name='der_reactive_power_vector', scenario=scenarios, timestep=self.timesteps,
der=self.electric_grid_model.ders
)),
broadcast='scenario'
)
# Define active loss cost.
optimization_problem.define_objective(
('variable', 'electric_grid_loss_active_cost', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
('variable', 'electric_grid_loss_active_cost_sensitivity', dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
), dict(
name='loss_active', scenario=scenarios, timestep=self.timesteps
)),
broadcast='scenario'
)
def evaluate_optimization_objective(
self,
results: ElectricGridOperationResults,
price_data: mesmo.data_interface.PriceData
) -> float:
# Instantiate optimization problem.
optimization_problem = mesmo.utils.OptimizationProblem()
self.define_optimization_parameters(optimization_problem, price_data)
self.define_optimization_variables(optimization_problem)
self.define_optimization_objective(optimization_problem)
# Instantiate variable vector.
x_vector = np.zeros((len(optimization_problem.variables), 1))
# Set variable vector values.
objective_variable_names = [
'der_active_power_vector_per_unit',
'der_reactive_power_vector_per_unit',
'loss_active'
]
for variable_name in objective_variable_names:
index = mesmo.utils.get_index(optimization_problem.variables, name=variable_name.replace('_per_unit', ''))
x_vector[index, 0] = results[variable_name].values.ravel()
# Obtain objective value.
objective = optimization_problem.evaluate_objective(x_vector)
return objective
def get_optimization_dlmps(
self,
optimization_problem: mesmo.utils.OptimizationProblem,
price_data: mesmo.data_interface.PriceData,
scenarios: typing.Union[list, pd.Index] = None
) -> ElectricGridDLMPResults:
# Obtain results index sets, depending on if / if not scenarios given.
if scenarios in [None, [None]]:
scenarios = [None]
ders = self.electric_grid_model.ders
nodes = self.electric_grid_model.nodes
branches = self.electric_grid_model.branches
else:
ders = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.ders.to_flat_index()),
names=['scenario', 'der']
)
)
nodes = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.nodes.to_flat_index()),
names=['scenario', 'node']
)
)
branches = (
pd.MultiIndex.from_product(
(scenarios, self.electric_grid_model.branches.to_flat_index()),
names=['scenario', 'branch']
)
)
# Obtain individual duals.
voltage_magnitude_vector_minimum_dual = (
optimization_problem.duals['voltage_magnitude_vector_minimum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
voltage_magnitude_vector_maximum_dual = (
-1.0 * optimization_problem.duals['voltage_magnitude_vector_maximum_constraint'].loc[
self.electric_grid_model.timesteps, nodes
]
/ np.concatenate([np.abs(self.electric_grid_model.node_voltage_vector_reference)] * len(scenarios))
)
branch_power_magnitude_vector_1_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_1_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_1_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_1_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_minimum_dual = (
optimization_problem.duals['branch_power_magnitude_vector_2_minimum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
branch_power_magnitude_vector_2_maximum_dual = (
-1.0 * optimization_problem.duals['branch_power_magnitude_vector_2_maximum_constraint'].loc[
self.electric_grid_model.timesteps, branches
]
/ np.concatenate([self.electric_grid_model.branch_power_vector_magnitude_reference] * len(scenarios))
)
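# - The duals of the per-unit constraints are rescaled by the respective reference magnitudes
#   (and sign-flipped for the maximum constraints) to obtain shadow prices in physical units.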
# Instantiate DLMP variables.
# TODO: Consider delta connections in nodal DLMPs.
# TODO: Consider single-phase DLMPs.
electric_grid_energy_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_congestion_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_loss_dlmp_node_active_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_energy_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_congestion_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_loss_dlmp_node_reactive_power = (
pd.DataFrame(columns=nodes, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_energy_dlmp_der_active_power = (
pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
)
electric_grid_voltage_dlmp_der_active_power = (
pd.DataFrame(columns=ders, index=self.electric_grid_model.timesteps, dtype=float)
)
import streamlit as st
from streamlit.report_thread import get_report_ctx
import pandas as pd
import pickle
# essential libraries
import math
import random
import string
from datetime import timedelta, date
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
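# Color codes for plot traces; the variable names suggest confirmed, deaths, recovered and active cases.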
cnf, dth, rec, act = '#393e46', '#ff2e63', '#21bf73', '#fe9801'
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
import numpy as np
import pandas as pd
import decorators
from scipy import optimize
import settings
import utility_functions as utilfunc
import agent_mutation
import PySAM.Battwatts as battery
import PySAM.BatteryTools as batt_tools
import PySAM.Utilityrate5 as utility
import PySAM.Cashloan as cashloan
#==============================================================================
# Load logger
logger = utilfunc.get_logger()
#==============================================================================
#%%
def calc_system_performance(kw, pv, utilityrate, loan, batt, costs, agent, en_batt=True, batt_simple_dispatch=0):
"""
Executes Battwatts, Utilityrate5, and Cashloan PySAM modules with system sizes (kw) as input
Parameters
----------
kw: Capacity (in kW)
pv: Dictionary with generation_hourly and consumption_hourly
utilityrate: PySAM Utilityrate5 module
loan: PySAM Cashloan module
batt: PySAM Battwatts module
costs: Dictionary with system costs
agent: pd.Series with agent attributes
en_batt: Enable battery
batt_simple_dispatch: batt.Battery.batt_simple_dispatch
- batt_simple_dispatch = 0 (peak shaving look ahead)
- batt_simple_dispatch = 1 (peak shaving look behind)
Returns
-------
-loan.Outputs.npv: the negative net present value of system + storage to be optimized for system sizing
"""
inv_eff = 0.96 # default SAM inverter efficiency for PV
gen_hourly = pv['generation_hourly']
load_hourly = pv['consumption_hourly'] # same field as 'load_kwh_per_customer_in_bin_initial' when summed
dc = [(i * kw) * 1000 for i in gen_hourly] # W
ac = [i * inv_eff for i in dc] # W
gen = [i / 1000 for i in ac] # W to kW
# Set up battery, with system generation conditional on the battery generation being included
if en_batt:
batt.Battery.dc = dc
batt.Battery.ac = ac
batt.Battery.batt_simple_enable = 1
batt.Battery.batt_simple_chemistry = 1 # default value is 1: li ion for residential
batt.Battery.batt_simple_dispatch = batt_simple_dispatch
batt.Battery.batt_simple_meter_position = 0 # default value
batt.Battery.inverter_efficiency = 100 # recommended by Darice for dc-connected
batt.Battery.load = load_hourly
# PV to Battery ratio (kW) - From Ashreeta, 02/08/2020
pv_to_batt_ratio = 1.31372
batt_capacity_to_power_ratio = 2 # hours of operation
desired_size = kw / pv_to_batt_ratio # Default SAM value for residential systems is 10
desired_power = desired_size / batt_capacity_to_power_ratio
batt_inputs = {
'batt_chem': batt.Battery.batt_simple_chemistry,
'batt_Qfull': 2.5, # default SAM value
'batt_Vnom_default': 3.6, # default SAM value
'batt_ac_or_dc': 0, # dc-connected
'desired_power': desired_power,
'desired_capacity': desired_size,
'desired_voltage': 500,
'size_by_ac_not_dc': 0, # dc-connected
'inverter_eff': batt.Battery.inverter_efficiency
# 'batt_dc_dc_efficiency': (optional)
}
# Default values for lead acid batteries
if batt.Battery.batt_simple_chemistry == 0:
batt_inputs['LeadAcid_q10'] = 93.2
batt_inputs['LeadAcid_q20'] = 100
batt_inputs['LeadAcid_qn'] = 58.12
# batt_inputs['LeadAcid_tn']: (optional)
# PySAM.BatteryTools.size_li_ion_battery is the same as dGen_battery_sizing_battwatts.py
batt_outputs = batt_tools.size_li_ion_battery(batt_inputs)
computed_size = batt_outputs['batt_computed_bank_capacity']
computed_power = batt_outputs['batt_power_discharge_max_kwdc']
batt.Battery.batt_simple_kwh = computed_size
batt.Battery.batt_simple_kw = computed_power
batt.execute()
# declare value for net billing sell rate
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
utilityrate.SystemOutput.gen = batt.Outputs.gen
loan.BatterySystem.en_batt = 1
loan.BatterySystem.batt_computed_bank_capacity = batt.Outputs.batt_bank_installed_capacity
loan.BatterySystem.batt_bank_replacement = batt.Outputs.batt_bank_replacement
# Battery capacity-based System Costs amount [$/kWhcap]
loan.BatterySystem.battery_per_kWh = costs['batt_capex_per_kwh']
# specify number of O&M types (1 = PV+batt)
loan.SystemCosts.add_om_num_types = 1
# specify O&M variables
loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
loan.SystemCosts.om_capacity1 = [costs['batt_om_per_kw']]
loan.SystemCosts.om_production1 = [costs['batt_om_per_kwh'] * 1000]
loan.SystemCosts.om_replacement_cost1 = [0.]
# Battery capacity for System Costs values [kW]
loan.SystemCosts.om_capacity1_nameplate = batt.Battery.batt_simple_kw
# Battery production for System Costs values [kWh]
loan.SystemCosts.om_production1_values = [batt.Battery.batt_simple_kwh]
batt_costs = ((costs['batt_capex_per_kw']*batt.Battery.batt_simple_kw) +
(costs['batt_capex_per_kwh'] * batt.Battery.batt_simple_kwh))
else:
batt.Battery.batt_simple_enable = 0
loan.BatterySystem.en_batt = 0
computed_power = computed_size = 0
# declare value for net billing sell rate
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
utilityrate.SystemOutput.gen = gen
# specify number of O&M types (0 = PV only)
loan.SystemCosts.add_om_num_types = 0
# specify O&M variables
loan.SystemCosts.om_capacity = [costs['system_om_per_kw'] + costs['system_variable_om_per_kw']]
loan.SystemCosts.om_replacement_cost1 = [0.]
system_costs = costs['system_capex_per_kw'] * kw
batt_costs = 0
# Execute utility rate module
utilityrate.Load.load = load_hourly
utilityrate.execute()
# Process payment incentives
loan = process_incentives(loan, kw, computed_power, computed_size, gen_hourly, agent)
# Specify final Cashloan parameters
loan.FinancialParameters.system_capacity = kw
loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
loan.SystemOutput.gen = utilityrate.SystemOutput.gen
loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system
# Calculate system costs
direct_costs = (system_costs + batt_costs) * costs['cap_cost_multiplier']
sales_tax = 0
loan.SystemCosts.total_installed_cost = direct_costs + sales_tax
# Execute financial module
loan.execute()
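# Return negative NPV so that a scalar minimizer (e.g. scipy.optimize) can be used to find the
# NPV-maximizing system size.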
return -loan.Outputs.npv
def calc_system_size_and_performance_pv(agent, sectors, rate_switch_table=None):
"""
Calculate the optimal system and battery size and generation profile, and resulting bill savings and financial metrics.
Parameters
----------
agent : 'pd.Series'
individual agent object.
Returns
-------
agent: 'pd.Series'
Adds several features to the agent object:
- **agent_id**
- **system_kw** - system capacity selected by agent
- **batt_kw** - battery capacity selected by agent
- **batt_kwh** - battery energy capacity
- **npv** - net present value of system + storage
- **cash_flow** - array of annual cash flows from system adoption
- **batt_dispatch_profile** - array of hourly battery dispatch
- **annual_energy_production_kwh** - annual energy production (kwh) of system
- **naep** - normalized annual energy production (kwh/kW) of system
- **capacity_factor** - annual capacity factor
- **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
- **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
- **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
- **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
- **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
- **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
- **cbi** - ndarray of capacity-based incentives applicable to agent
- **ibi** - ndarray of investment-based incentives applicable to agent
- **pbi** - ndarray of performance-based incentives applicable to agent
- **cash_incentives** - ndarray of cash-based incentives applicable to agent
- **export_tariff_result** - summary of structure of retail tariff applied to agent
"""
# Initialize new DB connection
model_settings = settings.init_model_settings()
con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)
# PV
pv = dict()
# Extract load profile after scaling hourly load to annual total
load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
pv['consumption_hourly'] = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
del load_profile_df
# Using the scale offset factor of 1E6 for capacity factors
norm_scaled_pv_cf_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_solar(con, agent)
pv['generation_hourly'] = pd.Series(norm_scaled_pv_cf_profiles_df['solar_cf_profile'].iloc[0]) / 1e6
del norm_scaled_pv_cf_profiles_df
# Calculate normalized annual energy production
agent.loc['naep'] = float(np.sum(pv['generation_hourly']))
# Battwatts
if agent.loc['sector_abbr'] == 'res':
batt = battery.default("PVWattsBatteryResidential")
else:
batt = battery.default("PVWattsBatteryCommercial")
# Utilityrate5
if agent.loc['sector_abbr'] == 'res':
utilityrate = utility.default("PVWattsBatteryResidential")
else:
utilityrate = utility.default("PVWattsBatteryCommercial")
######################################
###--------- UTILITYRATE5 ---------###
###--- SYSTEM LIFETIME SETTINGS ---###
######################################
# Inflation rate [%]
utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
# Number of years in analysis [years]
utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
# Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
utilityrate.Lifetime.system_use_lifetime_output = 0
######################################
###--------- UTILITYRATE5 ---------###
###---- DEGRADATION/ESCALATION ----###
######################################
# Annual energy degradation [%]
utilityrate.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100] # convert decimal to %
# Annual electricity rate escalation [%/year]
utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100] # convert decimal to %
######################################
###--------- UTILITYRATE5 ---------###
###---- NET METERING SETTINGS -----###
######################################
# Dictionary to map dGen compensation styles to PySAM options
nem_options = {'net metering':0, 'net billing':2, 'buy all sell all':4, 'none':2}
# Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
# Year end sell rate [$/kWh]
utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
######################################
###--------- UTILITYRATE5 ---------###
###-------- BUY/SELL RATES --------###
######################################
# Enable time step sell rates [0/1]
utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
# Time step sell rates [0/1]
utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
# Set sell rate equal to buy rate [0/1]
utilityrate.ElectricityRates.ur_sell_eq_buy = 0
######################################
###--------- UTILITYRATE5 ---------###
###-------- MISC. SETTINGS --------###
######################################
# Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
utilityrate.ElectricityRates.TOU_demand_single_peak = 0 # ?
# Optionally enable/disable electricity_rate [years]
utilityrate.ElectricityRates.en_electricity_rates = 1
######################################
###--------- UTILITYRATE5 ---------###
###----- TARIFF RESTRUCTURING -----###
######################################
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
######################################
###----------- CASHLOAN -----------###
###----- FINANCIAL PARAMETERS -----###
######################################
# Initiate cashloan model and set market-specific variables
# Assume res agents do not evaluate depreciation at all
# Assume non-res agents only evaluate federal depreciation (not state)
if agent.loc['sector_abbr'] == 'res':
loan = cashloan.default("PVWattsBatteryResidential")
loan.FinancialParameters.market = 0
else:
loan = cashloan.default("PVWattsBatteryCommercial")
loan.FinancialParameters.market = 1
loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7] # SAM default
loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
loan.FinancialParameters.insurance_rate = 0
loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
loan.FinancialParameters.mortgage = 0 # default value - standard loan (no mortgage)
loan.FinancialParameters.prop_tax_assessed_decline = 5 # PySAM default
loan.FinancialParameters.prop_tax_cost_assessed_percent = 95 # PySAM default
loan.FinancialParameters.property_tax_rate = 0 # PySAM default
loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
loan.FinancialParameters.salvage_percentage = 0
loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3] # SAM default
loan.FinancialParameters.system_heat_rate = 0
######################################
###----------- CASHLOAN -----------###
###--------- SYSTEM COSTS ---------###
######################################
# System costs that are input to loan.SystemCosts will depend on system configuration (PV, batt, PV+batt)
# and are therefore specified in calc_system_performance()
system_costs = dict()
system_costs['system_capex_per_kw'] = agent.loc['system_capex_per_kw']
system_costs['system_om_per_kw'] = agent.loc['system_om_per_kw']
system_costs['system_variable_om_per_kw'] = agent.loc['system_variable_om_per_kw']
system_costs['cap_cost_multiplier'] = agent.loc['cap_cost_multiplier']
system_costs['batt_capex_per_kw'] = agent.loc['batt_capex_per_kw']
system_costs['batt_capex_per_kwh'] = agent.loc['batt_capex_per_kwh']
system_costs['batt_om_per_kw'] = agent.loc['batt_om_per_kw']
system_costs['batt_om_per_kwh'] = agent.loc['batt_om_per_kwh']
######################################
###----------- CASHLOAN -----------###
###---- DEPRECIATION PARAMETERS ---###
######################################
if agent.loc['sector_abbr'] == 'res':
loan.Depreciation.depr_fed_type = 0
loan.Depreciation.depr_sta_type = 0
else:
loan.Depreciation.depr_fed_type = 1
loan.Depreciation.depr_sta_type = 0
######################################
###----------- CASHLOAN -----------###
###----- TAX CREDIT INCENTIVES ----###
######################################
loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100
######################################
###----------- CASHLOAN -----------###
###-------- BATTERY SYSTEM --------###
######################################
loan.BatterySystem.batt_replacement_option = 2 # user schedule
batt_replacement_schedule = [0 for i in range(0, agent.loc['batt_lifetime_yrs'] - 1)] + [1]
loan.BatterySystem.batt_replacement_schedule = batt_replacement_schedule
######################################
###----------- CASHLOAN -----------###
###-------- SYSTEM OUTPUT ---------###
######################################
loan.SystemOutput.degradation = [agent.loc['pv_degradation_factor'] * 100]
######################################
###----------- CASHLOAN -----------###
###----------- LIFETIME -----------###
######################################
loan.Lifetime.system_use_lifetime_output = 0
# From dGen - calc_system_size_and_financial_performance()
max_size_load = agent.loc['load_kwh_per_customer_in_bin'] / agent.loc['naep']
max_size_roof = agent.loc['developable_roof_sqft'] * agent.loc['pv_kw_per_sqft']
max_system_kw = min(max_size_load, max_size_roof)
# set tolerance for minimize_scalar based on max_system_kw value
tol = min(0.25 * max_system_kw, 0.5)
    # Calculate the PV system size that maximizes the agent's NPV, to a tolerance of at most 0.5 kW.
    # Note that the optimization technically minimizes the negative NPV returned by calc_system_performance.
    # ! Because of the tolerance, this search will not necessarily return a size of exactly 0 kW or the maximum PV size even when one of those is optimal.
res_with_batt = optimize.minimize_scalar(calc_system_performance,
args = (pv, utilityrate, loan, batt, system_costs, True, 0),
bounds = (0, max_system_kw),
method = 'bounded',
tol = tol)
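    # The PySAM objects (loan, utilityrate, batt) are mutated in place by calc_system_performance,
    # so the Outputs exported below come from the optimizer's final evaluation, which the code
    # treats as corresponding to the optimal size res_with_batt.x.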
# PySAM Module outputs with battery
batt_loan_outputs = loan.Outputs.export()
batt_util_outputs = utilityrate.Outputs.export()
batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)
batt_kw = batt.Battery.batt_simple_kw
batt_kwh = batt.Battery.batt_simple_kwh
batt_dispatch_profile = batt.Outputs.batt_power # ?
# Run without battery
res_no_batt = optimize.minimize_scalar(calc_system_performance,
args = (pv, utilityrate, loan, batt, system_costs, False, 0),
bounds = (0, max_system_kw),
method = 'bounded',
tol = tol)
# PySAM Module outputs without battery
no_batt_loan_outputs = loan.Outputs.export()
no_batt_util_outputs = utilityrate.Outputs.export()
no_batt_annual_energy_kwh = np.sum(utilityrate.SystemOutput.gen)
# Retrieve NPVs of system with batt and system without batt
npv_w_batt = batt_loan_outputs['npv']
npv_no_batt = no_batt_loan_outputs['npv']
# Choose the system with the higher NPV
if npv_w_batt >= npv_no_batt:
system_kw = res_with_batt.x
annual_energy_production_kwh = batt_annual_energy_kwh
first_year_elec_bill_with_system = batt_util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = batt_util_outputs['elec_cost_without_system_year1']
npv = npv_w_batt
payback = batt_loan_outputs['payback']
cash_flow = list(batt_loan_outputs['cf_payback_with_expenses']) # ?
cbi_total = batt_loan_outputs['cbi_total']
cbi_total_fed = batt_loan_outputs['cbi_total_fed']
cbi_total_oth = batt_loan_outputs['cbi_total_oth']
cbi_total_sta = batt_loan_outputs['cbi_total_sta']
cbi_total_uti = batt_loan_outputs['cbi_total_uti']
ibi_total = batt_loan_outputs['ibi_total']
ibi_total_fed = batt_loan_outputs['ibi_total_fed']
ibi_total_oth = batt_loan_outputs['ibi_total_oth']
ibi_total_sta = batt_loan_outputs['ibi_total_sta']
ibi_total_uti = batt_loan_outputs['ibi_total_uti']
cf_pbi_total = batt_loan_outputs['cf_pbi_total']
pbi_total_fed = batt_loan_outputs['cf_pbi_total_fed']
pbi_total_oth = batt_loan_outputs['cf_pbi_total_oth']
pbi_total_sta = batt_loan_outputs['cf_pbi_total_sta']
pbi_total_uti = batt_loan_outputs['cf_pbi_total_uti']
else:
system_kw = res_no_batt.x
annual_energy_production_kwh = no_batt_annual_energy_kwh
first_year_elec_bill_with_system = no_batt_util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = no_batt_util_outputs['elec_cost_without_system_year1']
npv = npv_no_batt
payback = no_batt_loan_outputs['payback']
cash_flow = list(no_batt_loan_outputs['cf_payback_with_expenses'])
batt_kw = 0
batt_kwh = 0
batt_dispatch_profile = np.nan
cbi_total = no_batt_loan_outputs['cbi_total']
cbi_total_fed = no_batt_loan_outputs['cbi_total_fed']
cbi_total_oth = no_batt_loan_outputs['cbi_total_oth']
cbi_total_sta = no_batt_loan_outputs['cbi_total_sta']
cbi_total_uti = no_batt_loan_outputs['cbi_total_uti']
ibi_total = no_batt_loan_outputs['ibi_total']
ibi_total_fed = no_batt_loan_outputs['ibi_total_fed']
ibi_total_oth = no_batt_loan_outputs['ibi_total_oth']
ibi_total_sta = no_batt_loan_outputs['ibi_total_sta']
ibi_total_uti = no_batt_loan_outputs['ibi_total_uti']
cf_pbi_total = no_batt_loan_outputs['cf_pbi_total']
pbi_total_fed = no_batt_loan_outputs['cf_pbi_total_fed']
pbi_total_oth = no_batt_loan_outputs['cf_pbi_total_oth']
pbi_total_sta = no_batt_loan_outputs['cf_pbi_total_sta']
pbi_total_uti = no_batt_loan_outputs['cf_pbi_total_uti']
# change 0 value to 1 to avoid divide by zero errors
if first_year_elec_bill_without_system == 0:
first_year_elec_bill_without_system = 1.0
# Add outputs to agent df
naep = annual_energy_production_kwh / system_kw
first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']
agent.loc['system_kw'] = system_kw
agent.loc['npv'] = npv
agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
agent.loc['cash_flow'] = cash_flow
agent.loc['annual_energy_production_kwh'] = annual_energy_production_kwh
agent.loc['naep'] = naep
agent.loc['capacity_factor'] = agent.loc['naep'] / 8760
agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
agent.loc['max_system_kw'] = max_system_kw
agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
agent.loc['batt_kw'] = batt_kw
agent.loc['batt_kwh'] = batt_kwh
agent.loc['batt_dispatch_profile'] = batt_dispatch_profile
# Financial outputs (find out which ones to include):
agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
'cbi_total_fed': cbi_total_fed,
'cbi_total_oth': cbi_total_oth,
'cbi_total_sta': cbi_total_sta,
'cbi_total_uti': cbi_total_uti
})
agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
'ibi_total_fed': ibi_total_fed,
'ibi_total_oth': ibi_total_oth,
'ibi_total_sta': ibi_total_sta,
'ibi_total_uti': ibi_total_uti
})
agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
'pbi_total_fed': pbi_total_fed,
'pbi_total_oth': pbi_total_oth,
'pbi_total_sta': pbi_total_sta,
'pbi_total_uti': pbi_total_uti
})
agent.loc['cash_incentives'] = ''
agent.loc['export_tariff_results'] = ''
out_cols = ['agent_id',
'system_kw',
'batt_kw',
'batt_kwh',
'npv',
'payback_period',
'cash_flow',
'batt_dispatch_profile',
'annual_energy_production_kwh',
'naep',
'capacity_factor',
'first_year_elec_bill_with_system',
'first_year_elec_bill_savings',
'first_year_elec_bill_savings_frac',
'max_system_kw',
'first_year_elec_bill_without_system',
'avg_elec_price_cents_per_kwh',
'cbi',
'ibi',
'pbi',
'cash_incentives',
'export_tariff_results'
]
return agent[out_cols]
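# A hypothetical usage sketch (illustrative only; the surrounding solver code is not shown here):
# the function above is applied to one agent row at a time, e.g.
#   solved_agent = calc_system_size_and_performance_pv(agent_row, sectors)
# or row-wise over an agent dataframe with
#   agent_df.apply(calc_system_size_and_performance_pv, axis=1, sectors=sectors)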
#%%
def calc_financial_performance_wind(agent, sectors, rate_switch_table=None):
"""
Calculate bill savings and financial metrics based on pre-selected wind system size.
Parameters
----------
agent : 'pd.df'
individual agent object.
Returns
-------
agent: 'pd.df'
Adds several features to the agent dataframe:
- **agent_id**
- **system_kw** - system capacity selected by agent
- **npv** - net present value of system + storage
- **cash_flow** - array of annual cash flows from system adoption
- **batt_dispatch_profile** - array of hourly battery dispatch
- **annual_energy_production_kwh** - annual energy production (kwh) of system
- **naep** - normalized annual energy production (kwh/kW) of system
- **capacity_factor** - annual capacity factor
- **first_year_elec_bill_with_system** - first year electricity bill with adopted system ($/yr)
- **first_year_elec_bill_savings** - first year electricity bill savings with adopted system ($/yr)
- **first_year_elec_bill_savings_frac** - fraction of savings on electricity bill in first year of system adoption
- **max_system_kw** - maximum system size allowed as constrained by roof size or not exceeding annual consumption
- **first_year_elec_bill_without_system** - first year electricity bill without adopted system ($/yr)
- **avg_elec_price_cents_per_kwh** - first year electricity price (c/kwh)
- **cbi** - ndarray of capacity-based incentives applicable to agent
- **ibi** - ndarray of investment-based incentives applicable to agent
- **pbi** - ndarray of performance-based incentives applicable to agent
- **cash_incentives** - ndarray of cash-based incentives applicable to agent
- **export_tariff_result** - summary of structure of retail tariff applied to agent
"""
# Initialize new DB connection
model_settings = settings.init_model_settings()
con, cur = utilfunc.make_con(model_settings.pg_conn_string, model_settings.role)
# Extract load profile after scaling hourly load to annual total
load_profile_df = agent_mutation.elec.get_and_apply_agent_load_profiles(con, agent)
consumption_hourly = pd.Series(load_profile_df['consumption_hourly']).iloc[0]
del load_profile_df
# Using the scale offset factor of 1E6 for capacity factors
norm_scaled_wind_profiles_df = agent_mutation.elec.get_and_apply_normalized_hourly_resource_wind(con, agent)
generation_hourly = pd.Series(norm_scaled_wind_profiles_df['generation_hourly']).iloc[0]
del norm_scaled_wind_profiles_df
# Instantiate utilityrate5 model based on agent sector
if agent.loc['sector_abbr'] == 'res':
utilityrate = utility.default('WindPowerResidential')
else:
utilityrate = utility.default('WindPowerCommercial')
######################################
###--------- UTILITYRATE5 ---------###
###------- ELECTRICITYRATES -------###
######################################
# Use single monthly peak for TOU demand charge; options: 0=use TOU peak,1=use flat peak
utilityrate.ElectricityRates.TOU_demand_single_peak = 0 # ?
# Optionally enable/disable electricity_rate [years]
utilityrate.ElectricityRates.en_electricity_rates = 1
# Annual electricity rate escalation [%/year]
utilityrate.ElectricityRates.rate_escalation = [agent.loc['elec_price_escalator'] * 100] # convert decimal to %
# Enable time step sell rates [0/1]
utilityrate.ElectricityRates.ur_en_ts_sell_rate = 0
# Time step sell rates [0/1]
utilityrate.ElectricityRates.ur_ts_sell_rate = [0.]
# Set sell rate equal to buy rate [0/1]
utilityrate.ElectricityRates.ur_sell_eq_buy = 0
# Dictionary to map dGen compensation styles to PySAM options
nem_options = {'net metering':0, 'net billing':2, 'buy all sell all':4, 'none':2}
# Metering options [0=net energy metering,1=net energy metering with $ credits,2=net billing,3=net billing with carryover to next month,4=buy all - sell all]
utilityrate.ElectricityRates.ur_metering_option = nem_options[agent.loc['compensation_style']]
# Year end sell rate [$/kWh]
utilityrate.ElectricityRates.ur_nm_yearend_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
if agent.loc['compensation_style']=='none':
net_billing_sell_rate = 0.
else:
net_billing_sell_rate = agent.loc['wholesale_elec_price_dollars_per_kwh'] * agent.loc['elec_price_multiplier']
# Restructure tariff object for PySAM compatibility
utilityrate = process_tariff(utilityrate, agent.loc['tariff_dict'], net_billing_sell_rate)
######################################
###--------- UTILITYRATE5 ---------###
###----------- LIFETIME -----------###
######################################
# Number of years in analysis [years]
utilityrate.Lifetime.analysis_period = agent.loc['economic_lifetime_yrs']
# Inflation rate [%]
utilityrate.Lifetime.inflation_rate = agent.loc['inflation_rate'] * 100
# Lifetime hourly system outputs [0/1]; Options: 0=hourly first year,1=hourly lifetime
utilityrate.Lifetime.system_use_lifetime_output = 0
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
utilityrate.SystemOutput.degradation = [0.]
# System power generated [kW]
utilityrate.SystemOutput.gen = generation_hourly
######################################
###--------- UTILITYRATE5 ---------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Electricity load (year 1) [kW]
utilityrate.Load.load = consumption_hourly
######################################
###--------- UTILITYRATE5 ---------###
###------------ EXECUTE -----------###
######################################
utilityrate.execute()
######################################
###----------- CASHLOAN -----------###
###----- FINANCIAL PARAMETERS -----###
######################################
# Initiate cashloan model and set market-specific variables
if agent.loc['sector_abbr'] == 'res':
loan = cashloan.default('WindPowerResidential')
loan.FinancialParameters.market = 0
else:
loan = cashloan.default('WindPowerCommercial')
loan.FinancialParameters.market = 1
loan.FinancialParameters.analysis_period = agent.loc['economic_lifetime_yrs']
loan.FinancialParameters.debt_fraction = 100 - (agent.loc['down_payment_fraction'] * 100)
loan.FinancialParameters.federal_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.7] # SAM default
loan.FinancialParameters.inflation_rate = agent.loc['inflation_rate'] * 100
loan.FinancialParameters.insurance_rate = 0
loan.FinancialParameters.loan_rate = agent.loc['loan_interest_rate'] * 100
loan.FinancialParameters.loan_term = agent.loc['loan_term_yrs']
loan.FinancialParameters.mortgage = 0 # default value - standard loan (no mortgage)
loan.FinancialParameters.prop_tax_assessed_decline = 5 # PySAM default
loan.FinancialParameters.prop_tax_cost_assessed_percent = 95 # PySAM default
loan.FinancialParameters.property_tax_rate = 0 # PySAM default
loan.FinancialParameters.real_discount_rate = agent.loc['real_discount_rate'] * 100
loan.FinancialParameters.salvage_percentage = 0
loan.FinancialParameters.state_tax_rate = [(agent.loc['tax_rate'] * 100) * 0.3] # SAM default
loan.FinancialParameters.system_heat_rate = 0
loan.FinancialParameters.system_capacity = agent.loc['system_size_kw']
######################################
###----------- CASHLOAN -----------###
###--------- SYSTEM COSTS ---------###
######################################
# specify number of O&M types (0 = system only)
loan.SystemCosts.add_om_num_types = 0
# specify O&M variables
loan.SystemCosts.om_capacity = [agent.loc['system_om_per_kw'] + agent.loc['system_variable_om_per_kw']]
# Calculate and specify system costs
system_costs = agent.loc['system_capex_per_kw'] * agent.loc['system_size_kw']
batt_costs = 0
sales_tax = 0
direct_costs = (system_costs + batt_costs) * agent.loc['cap_cost_multiplier']
loan.SystemCosts.total_installed_cost = direct_costs + sales_tax
######################################
###----------- CASHLOAN -----------###
###---- DEPRECIATION PARAMETERS ---###
######################################
# Federal and State depreciation type
# Options: 0=none, 1=MACRS half year, 2=straight-line, 3=custom
if agent.loc['sector_abbr'] == 'res':
loan.Depreciation.depr_fed_type = 0
loan.Depreciation.depr_sta_type = 0
else:
loan.Depreciation.depr_fed_type = 1
loan.Depreciation.depr_sta_type = 0
######################################
###----------- CASHLOAN -----------###
###----- TAX CREDIT INCENTIVES ----###
######################################
# Federal percentage-based ITC percent [%]
loan.TaxCreditIncentives.itc_fed_percent = agent.loc['itc_fraction_of_capex'] * 100
######################################
###----------- CASHLOAN -----------###
###------ PAYMENT INCENTIVES ------###
######################################
# Specify payment incentives within Cashloan object
loan = process_incentives(loan, agent.loc['system_size_kw'], 0, 0, generation_hourly, agent)
######################################
###----------- CASHLOAN -----------###
###-------- BATTERY SYSTEM --------###
######################################
# Enable battery storage model [0/1]
loan.BatterySystem.en_batt = 0
######################################
###----------- CASHLOAN -----------###
###-------- SYSTEM OUTPUT ---------###
######################################
# Energy value [$] -- i.e. "bill savings"
loan.SystemOutput.annual_energy_value = utilityrate.Outputs.annual_energy_value
# Annual energy degradation [%] -- Wind degradation already applied via 'derate_factor'
loan.SystemOutput.degradation = [0.]
# Power generated by renewable resource [kW]
loan.SystemOutput.gen = utilityrate.SystemOutput.gen
######################################
###----------- CASHLOAN -----------###
###----------- LIFETIME -----------###
######################################
loan.Lifetime.system_use_lifetime_output = 0
######################################
###----------- CASHLOAN -----------###
###----- THIRD PARTY OWNERSHIP ----###
######################################
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_with_system = utilityrate.Outputs.elec_cost_with_system
# Energy value [$]
loan.ThirdPartyOwnership.elec_cost_without_system = utilityrate.Outputs.elec_cost_without_system
######################################
###-------- POSTPROCESSING --------###
###------------ RESULTS -----------###
######################################
# Get outputs from Utilityrate5 model
util_outputs = utilityrate.Outputs.export()
# Assign variables from Utilityrate5 outputs, others
system_kw = agent.loc['system_size_kw']
first_year_elec_bill_with_system = util_outputs['elec_cost_with_system_year1']
first_year_elec_bill_without_system = util_outputs['elec_cost_without_system_year1']
# PySAM cannot evaluate system sizes of 0 kW -- check and manually assign values if system_size_kw = 0
if system_kw > 0:
# Execute Cashloan model
loan.execute()
loan_outputs = loan.Outputs.export()
npv = loan_outputs['npv']
payback = loan_outputs['payback']
cash_flow = list(loan_outputs['cf_payback_with_expenses'])
cbi_total = loan_outputs['cbi_total']
cbi_total_fed = loan_outputs['cbi_total_fed']
cbi_total_oth = loan_outputs['cbi_total_oth']
cbi_total_sta = loan_outputs['cbi_total_sta']
cbi_total_uti = loan_outputs['cbi_total_uti']
ibi_total = loan_outputs['ibi_total']
ibi_total_fed = loan_outputs['ibi_total_fed']
ibi_total_oth = loan_outputs['ibi_total_oth']
ibi_total_sta = loan_outputs['ibi_total_sta']
ibi_total_uti = loan_outputs['ibi_total_uti']
cf_pbi_total = loan_outputs['cf_pbi_total']
pbi_total_fed = loan_outputs['cf_pbi_total_fed']
pbi_total_oth = loan_outputs['cf_pbi_total_oth']
pbi_total_sta = loan_outputs['cf_pbi_total_sta']
pbi_total_uti = loan_outputs['cf_pbi_total_uti']
else:
npv = 0.
payback = 30.1
cash_flow = [0.] * (agent.loc['economic_lifetime_yrs'] + 1)
cbi_total = cbi_total_fed = cbi_total_oth = cbi_total_sta = cbi_total_uti = 0.
ibi_total = ibi_total_fed = ibi_total_oth = ibi_total_sta = ibi_total_uti = 0.
cf_pbi_total = pbi_total_fed = pbi_total_oth = pbi_total_sta = pbi_total_uti = 0.
# change 0 value to 1 to avoid divide by zero errors
if first_year_elec_bill_without_system == 0:
first_year_elec_bill_without_system = 1.0
# Add outputs to agent df
first_year_elec_bill_savings = first_year_elec_bill_without_system - first_year_elec_bill_with_system
first_year_elec_bill_savings_frac = first_year_elec_bill_savings / first_year_elec_bill_without_system
avg_elec_price_cents_per_kwh = first_year_elec_bill_without_system / agent.loc['load_kwh_per_customer_in_bin']
# Specify variables to write to agent df -- also write placeholder batt values
agent.loc['system_kw'] = system_kw
agent.loc['npv'] = npv
agent.loc['payback_period'] = np.round(np.where(np.isnan(payback), 30.1, payback), 1).astype(float)
agent.loc['cash_flow'] = cash_flow
agent.loc['first_year_elec_bill_with_system'] = first_year_elec_bill_with_system
agent.loc['first_year_elec_bill_savings'] = first_year_elec_bill_savings
agent.loc['first_year_elec_bill_savings_frac'] = first_year_elec_bill_savings_frac
agent.loc['first_year_elec_bill_without_system'] = first_year_elec_bill_without_system
agent.loc['avg_elec_price_cents_per_kwh'] = avg_elec_price_cents_per_kwh
agent.loc['batt_kw'] = 0.
agent.loc['batt_kwh'] = 0.
agent.loc['batt_dispatch_profile'] = np.nan
# Specify incentive outputs
agent.loc['cbi'] = np.array({'cbi_total': cbi_total,
'cbi_total_fed': cbi_total_fed,
'cbi_total_oth': cbi_total_oth,
'cbi_total_sta': cbi_total_sta,
'cbi_total_uti': cbi_total_uti
})
agent.loc['ibi'] = np.array({'ibi_total': ibi_total,
'ibi_total_fed': ibi_total_fed,
'ibi_total_oth': ibi_total_oth,
'ibi_total_sta': ibi_total_sta,
'ibi_total_uti': ibi_total_uti
})
agent.loc['pbi'] = np.array({'pbi_total': cf_pbi_total,
'pbi_total_fed': pbi_total_fed,
'pbi_total_oth': pbi_total_oth,
'pbi_total_sta': pbi_total_sta,
'pbi_total_uti': pbi_total_uti
})
agent.loc['cash_incentives'] = ''
agent.loc['export_tariff_results'] = ''
out_cols = ['agent_id',
'system_kw',
'npv',
'payback_period',
'cash_flow',
'first_year_elec_bill_with_system',
'first_year_elec_bill_savings',
'first_year_elec_bill_savings_frac',
'first_year_elec_bill_without_system',
'avg_elec_price_cents_per_kwh',
'cbi',
'ibi',
'pbi',
'cash_incentives',
'export_tariff_results',
'batt_kw',
'batt_kwh',
'batt_dispatch_profile'
]
return agent[out_cols]
#%%
def process_tariff(utilityrate, tariff_dict, net_billing_sell_rate):
"""
    Process the agent's tariff dict and configure the Utilityrate5 PySAM model so that it conforms with PySAM input formatting.
    Parameters
    ----------
    utilityrate : 'PySAM.Utilityrate5'
        Utilityrate5 model instance to configure.
    tariff_dict : 'dict'
        The agent's rate structure (agent.loc['tariff_dict']).
    net_billing_sell_rate : 'float'
        Sell rate [$/kWh] used for the sell-rate column of the energy rate table.
Returns
-------
utilityrate: 'PySAM.Utilityrate5'
"""
######################################
###--------- UTILITYRATE5 ---------###
###--- FIXED AND ANNUAL CHARGES ---###
######################################
# Monthly fixed charge [$]
utilityrate.ElectricityRates.ur_monthly_fixed_charge = tariff_dict['fixed_charge']
# Annual minimum charge [$]
utilityrate.ElectricityRates.ur_annual_min_charge = 0. # not currently tracked in URDB rate attribute downloads
# Monthly minimum charge [$]
utilityrate.ElectricityRates.ur_monthly_min_charge = 0. # not currently tracked in URDB rate attribute downloads
######################################
###--------- UTILITYRATE5 ---------###
###-------- DEMAND CHARGES --------###
######################################
# Enable demand charge
utilityrate.ElectricityRates.ur_dc_enable = (tariff_dict['d_flat_exists']) | (tariff_dict['d_tou_exists'])
if utilityrate.ElectricityRates.ur_dc_enable:
if tariff_dict['d_flat_exists']:
# Reformat demand charge table from dGen format
n_periods = len(tariff_dict['d_flat_levels'][0])
n_tiers = len(tariff_dict['d_flat_levels'])
ur_dc_flat_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period, tier+1, tariff_dict['d_flat_levels'][tier][period], tariff_dict['d_flat_prices'][tier][period]]
ur_dc_flat_mat.append(row)
# Demand rates (flat) table
utilityrate.ElectricityRates.ur_dc_flat_mat = ur_dc_flat_mat
if tariff_dict['d_tou_exists']:
# Reformat demand charge table from dGen format
n_periods = len(tariff_dict['d_tou_levels'][0])
n_tiers = len(tariff_dict['d_tou_levels'])
ur_dc_tou_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period+1, tier+1, tariff_dict['d_tou_levels'][tier][period], tariff_dict['d_tou_prices'][tier][period]]
ur_dc_tou_mat.append(row)
# Demand rates (TOU) table
utilityrate.ElectricityRates.ur_dc_tou_mat = ur_dc_tou_mat
# Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
d_wkday_12by24 = []
for m in range(len(tariff_dict['d_wkday_12by24'])):
row = [x+1 for x in tariff_dict['d_wkday_12by24'][m]]
d_wkday_12by24.append(row)
d_wkend_12by24 = []
for m in range(len(tariff_dict['d_wkend_12by24'])):
row = [x+1 for x in tariff_dict['d_wkend_12by24'][m]]
d_wkend_12by24.append(row)
# Demand charge weekday schedule
utilityrate.ElectricityRates.ur_dc_sched_weekday = d_wkday_12by24
# Demand charge weekend schedule
utilityrate.ElectricityRates.ur_dc_sched_weekend = d_wkend_12by24
######################################
###--------- UTILITYRATE5 ---------###
###-------- ENERGY CHARGES --------###
######################################
if tariff_dict['e_exists']:
# Dictionary to map dGen max usage units to PySAM options
max_usage_dict = {'kWh':0, 'kWh/kW':1, 'kWh daily':2, 'kWh/kW daily':3}
# If max usage units are 'kWh daily', divide max usage by 30 -- rate download procedure converts daily to monthly
modifier = 30. if tariff_dict['energy_rate_unit'] == 'kWh daily' else 1.
# Reformat energy charge table from dGen format
n_periods = len(tariff_dict['e_levels'][0])
n_tiers = len(tariff_dict['e_levels'])
ur_ec_tou_mat = []
for period in range(n_periods):
for tier in range(n_tiers):
row = [period+1, tier+1, tariff_dict['e_levels'][tier][period]/modifier, max_usage_dict[tariff_dict['energy_rate_unit']], tariff_dict['e_prices'][tier][period], net_billing_sell_rate]
ur_ec_tou_mat.append(row)
# Energy rates table
utilityrate.ElectricityRates.ur_ec_tou_mat = ur_ec_tou_mat
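        # Each ur_ec_tou_mat row built above is [period, tier, max usage, max usage units, buy rate ($/kWh), sell rate ($/kWh)],
        # i.e. the 6-column energy-rate table layout PySAM expects; the sell-rate column is filled with net_billing_sell_rate.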
# Reformat 12x24 tables - original are indexed to 0, PySAM needs index starting at 1
e_wkday_12by24 = []
for m in range(len(tariff_dict['e_wkday_12by24'])):
row = [x+1 for x in tariff_dict['e_wkday_12by24'][m]]
e_wkday_12by24.append(row)
e_wkend_12by24 = []
for m in range(len(tariff_dict['e_wkend_12by24'])):
row = [x+1 for x in tariff_dict['e_wkend_12by24'][m]]
e_wkend_12by24.append(row)
# Energy charge weekday schedule
utilityrate.ElectricityRates.ur_ec_sched_weekday = e_wkday_12by24
# Energy charge weekend schedule
utilityrate.ElectricityRates.ur_ec_sched_weekend = e_wkend_12by24
return utilityrate
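# For reference, a minimal tariff_dict that process_tariff can consume might look like the sketch
# below (hypothetical values; real dicts come from agent.loc['tariff_dict']):
#   {'fixed_charge': 10.0,
#    'd_flat_exists': False, 'd_tou_exists': False,
#    'e_exists': True, 'energy_rate_unit': 'kWh',
#    'e_levels': [[1e9]], 'e_prices': [[0.12]],
#    'e_wkday_12by24': [[0]*24]*12, 'e_wkend_12by24': [[0]*24]*12}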
#%%
def process_incentives(loan, kw, batt_kw, batt_kwh, generation_hourly, agent):
######################################
###----------- CASHLOAN -----------###
###------ PAYMENT INCENTIVES ------###
######################################
# Read incentive dataframe from agent attributes
incentive_df = agent.loc['state_incentives']
# Check dtype of incentive_df - process incentives if pd.DataFrame, otherwise do not assign incentive values to cashloan
if isinstance(incentive_df, pd.DataFrame):
# Fill NaNs in incentive_df - assume max incentive duration of 5 years and max incentive value of $10,000
incentive_df = incentive_df.fillna(value={'incentive_duration_yrs' : 5, 'max_incentive_usd' : 10000})
# Filter for CBI's in incentive_df
cbi_df = (incentive_df.loc[pd.notnull(incentive_df['cbi_usd_p_w'])]
.sort_values(['cbi_usd_p_w'], axis=0, ascending=False)
.reset_index(drop=True)
)
# For multiple CBIs that are applicable to the agent, cap at 2 and use PySAM's "state" and "other" option
if len(cbi_df) == 1:
loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.cbi_sta_tax_fed = 0
loan.PaymentIncentives.cbi_sta_tax_sta = 0
elif len(cbi_df) >= 2:
loan.PaymentIncentives.cbi_sta_amount = cbi_df['cbi_usd_p_w'].iloc[0]
loan.PaymentIncentives.cbi_sta_deprbas_fed = 0
loan.PaymentIncentives.cbi_sta_deprbas_sta = 0
loan.PaymentIncentives.cbi_sta_maxvalue = cbi_df['max_incentive_usd'].iloc[0]
loan.PaymentIncentives.cbi_sta_tax_fed = 1
loan.PaymentIncentives.cbi_sta_tax_sta = 1
loan.PaymentIncentives.cbi_oth_amount = cbi_df['cbi_usd_p_w'].iloc[1]
loan.PaymentIncentives.cbi_oth_deprbas_fed = 0
loan.PaymentIncentives.cbi_oth_deprbas_sta = 0
loan.PaymentIncentives.cbi_oth_maxvalue = cbi_df['max_incentive_usd'].iloc[1]
loan.PaymentIncentives.cbi_oth_tax_fed = 1
loan.PaymentIncentives.cbi_oth_tax_sta = 1
else:
pass
# Filter for PBI's in incentive_df
pbi_df = (incentive_df.loc[ | pd.notnull(incentive_df['pbi_usd_p_kwh']) | pandas.notnull |
#!/usr/bin/python
# -*- coding:utf-8 -*-
import sys, getopt
import pandas
import csv
#import statsmodels.formula.api as smf
from sklearn import preprocessing
import math
import time
from heapq import *
import operator
sys.path.append('./')
sys.path.append('../')
from similarity_calculation.category_similarity_matrix import *
from similarity_calculation.category_network_embedding import *
from utils import *
from constraint_definition.LocalRegressionConstraint import *
DEFAULT_RESULT_PATH = './input/query_res.csv'
DEFAULT_QUESTION_PATH = './input/user_question.csv'
DEFAULT_CONSTRAINT_PATH = './input/CONSTRAINTS'
EXAMPLE_NETWORK_EMBEDDING_PATH = './input/NETWORK_EMBEDDING'
EXAMPLE_SIMILARITY_MATRIX_PATH = './input/SIMILARITY_DEFINITION'
DEFAULT_AGGREGATE_COLUMN = 'count'
DEFAULT_CONSTRAINT_EPSILON = 0.05
TOP_K = 5
def build_local_regression_constraint(data, column_index, t, con, epsilon, agg_col, regression_package):
"""Build local regression constraint from Q(R), t, and global regression constraint
Args:
data: result of Q(R)
column_index: index for values in each column
t: target tuple in Q(R)
con: con[0] is the list of fixed attributes in Q(R), con[1] is the list of variable attributes in Q(R)
epsilon: threshold for local regression constraint
regression_package: which package is used to compute regression
Returns:
A LocalRegressionConstraint object whose model is trained on \pi_{con[1]}(Q_{t[con[0]]}(R))
"""
tF = get_F_value(con[0], t)
local_con = LocalRegressionConstraint(con[0], tF, con[1], agg_col, epsilon)
train_data = {agg_col: []}
for v in con[1]:
train_data[v] = []
# for index, row in data['df'].iterrows():
# if get_F_value(con[0], row) == tF:
# for v in con[1]:
# train_data[v].append(row[v])
# train_data[agg_col].append(row[agg_col])
for idx in column_index[con[0][0]][tF[0]]:
row = data['df'].loc[data['df']['index'] == idx]
row = row.to_dict('records')[0]
#print row
if get_F_value(con[0], row) == tF:
for v in con[1]:
train_data[v].append(row[v])
train_data[agg_col].append(row[agg_col])
if regression_package == 'scikit-learn':
train_x = {}
for v in con[1]:
if v in data['le']:
train_data[v] = data['le'][v].transform(train_data[v])
train_data[v] = data['ohe'][v].transform(train_data[v].reshape(-1, 1))
#print data['ohe'][v].transform(train_data[v].reshape(-1, 1))
train_x[v] = train_data[v]
else:
if v != agg_col:
train_x[v] = np.array(train_data[v]).reshape(-1, 1)
train_y = np.array(train_data[agg_col]).reshape(-1, 1)
train_x = np.concatenate(list(train_x.values()), axis=-1)
local_con.train_sklearn(train_x, train_y)
else:
#train_data = pandas.DataFrame(train_data)
formula = agg_col + ' ~ ' + ' + '.join(con[1])
        print(formula)  # log the regression formula being fitted
local_con.train(train_data, formula)
return local_con
def validate_local_regression_constraint(data, local_con, t, dir, agg_col, regression_package):
"""Check the validicity of the user question under a local regression constraint
Args:
data: data['df'] is the data frame storing Q(R)
data['le'] is the label encoder, data['ohe'] is the one-hot encoder
local_con: a LocalRegressionConstraint object
t: target tuple in Q(R)
dir: whether user thinks t[agg(B)] is high or low
agg_col: the column of aggregated value
regression_package: which package is used to compute regression
Returns:
the actual direction that t[agg(B)] compares to its expected value, and the expected value from local_con
"""
test_tuple = {}
for v in local_con.var_attr:
test_tuple[v] = [t[v]]
if regression_package == 'scikit-learn':
for v in local_con.var_attr:
if v in data['le']:
test_tuple[v] = data['le'][v].transform(test_tuple[v])
test_tuple[v] = data['ohe'][v].transform(test_tuple[v].reshape(-1, 1))
else:
test_tuple[v] = np.array(test_tuple[v]).reshape(-1, 1)
test_tuple = np.concatenate(list(test_tuple.values()), axis=-1)
predictY = local_con.predict_sklearn(test_tuple)
else:
predictY = local_con.predict( | pandas.DataFrame(test_tuple) | pandas.DataFrame |
import nose
import os
import numpy as np
import pandas as pd
from pandas import (merge_asof, read_csv,
to_datetime, Timedelta)
from pandas.tools.merge import MergeError
from pandas.util import testing as tm
from pandas.util.testing import assert_frame_equal
class TestAsOfMerge(tm.TestCase):
_multiprocess_can_split_ = True
def read_data(self, name, dedupe=False):
path = os.path.join(tm.get_data_path(), name)
x = read_csv(path)
if dedupe:
x = (x.drop_duplicates(['time', 'ticker'], keep='last')
.reset_index(drop=True)
)
x.time = to_datetime(x.time)
return x
def setUp(self):
self.trades = self.read_data('trades.csv')
self.quotes = self.read_data('quotes.csv', dedupe=True)
self.asof = self.read_data('asof.csv')
self.tolerance = self.read_data('tolerance.csv')
self.allow_exact_matches = self.read_data('allow_exact_matches.csv')
self.allow_exact_matches_and_tolerance = self.read_data(
'allow_exact_matches_and_tolerance.csv')
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({'a': [1, 5, 10],
'left_val': ['a', 'b', 'c']})
right = pd.DataFrame({'a': [1, 2, 3, 6, 7],
'right_val': [1, 2, 3, 6, 7]})
pd.merge_asof(left, right, on='a')
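        # For illustration: with the default backward direction, each left 'a' is matched to the
        # closest right 'a' that is <= it, so right_val comes out as [1, 3, 7].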
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.038',
'20160525 13:30:00.048',
'20160525 13:30:00.048',
'20160525 13:30:00.048']),
'ticker': ['MSFT', 'MSFT',
'GOOG', 'GOOG', 'AAPL'],
'price': [51.95, 51.95,
720.77, 720.92, 98.00],
'quantity': [75, 155,
100, 100, 100]},
columns=['time', 'ticker', 'price', 'quantity'])
quotes = pd.DataFrame({
'time': pd.to_datetime(['20160525 13:30:00.023',
'20160525 13:30:00.023',
'20160525 13:30:00.030',
'20160525 13:30:00.041',
'20160525 13:30:00.048',
'20160525 13:30:00.049',
'20160525 13:30:00.072',
'20160525 13:30:00.075']),
'ticker': ['GOOG', 'MSFT', 'MSFT',
'MSFT', 'GOOG', 'AAPL', 'GOOG',
'MSFT'],
'bid': [720.50, 51.95, 51.97, 51.99,
720.50, 97.99, 720.50, 52.01],
'ask': [720.93, 51.96, 51.98, 52.00,
720.93, 98.01, 720.88, 52.03]},
columns=['time', 'ticker', 'bid', 'ask'])
pd.merge_asof(trades, quotes,
on='time',
by='ticker')
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('2ms'))
pd.merge_asof(trades, quotes,
on='time',
by='ticker',
tolerance=pd.Timedelta('10ms'),
allow_exact_matches=False)
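        # The three calls above show: a plain by-ticker asof join; the same join where a quote is
        # only used if it is at most 2ms older than the trade (tolerance); and a 10ms-tolerance join
        # that also excludes quotes with exactly equal timestamps (allow_exact_matches=False).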
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes,
on='time',
by='ticker')
| assert_frame_equal(result, expected) | pandas.util.testing.assert_frame_equal |
# Lint as: python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for google3.corp.gtech.ads.ds_utils.feature_graph_visualization."""
import networkx as nx
import numpy as np
import pandas as pd
import pandas.util.testing as pandas_testing
from sklearn import datasets
from gps_building_blocks.analysis import feature_graph_visualization as fgv
from absl.testing import absltest
class FeatureGraphVisualizationTest(absltest.TestCase):
def setUp(self):
super().setUp()
iris = datasets.load_iris()
iris_df = pd.DataFrame(data=np.c_[iris['data'], iris['target']],
columns=iris['feature_names']+['target'])
self.correlation = iris_df[iris['feature_names']].corr()
features = self.correlation.columns
self.threshold = 0.3
corr_matrix = np.array(self.correlation)
inds = np.argwhere(abs(np.tril(corr_matrix, -1)) > self.threshold)
linked_features = [(features[i1], features[i2]) for i1, i2 in inds]
self.num_edges = len(linked_features)
self.feature_graph = nx.Graph()
self.feature_graph.add_edges_from(linked_features)
def test_edge_colors(self):
expected_edge_colors = [
'red' if self.correlation.loc[edge[0], edge[1]] > 0 else 'blue'
for edge in self.feature_graph.edges()
]
fig = fgv.feature_graph_visualization(self.correlation,
threshold=self.threshold)
self.assertEqual(
expected_edge_colors,
[fig.data[i]['line']['color'] for i in range(self.num_edges)])
def test_edge_widths(self):
expected_edge_widths = [
abs(self.correlation.loc[edge[0], edge[1]]) * 5
for edge in self.feature_graph.edges()
]
fig = fgv.feature_graph_visualization(self.correlation,
threshold=self.threshold)
self.assertEqual(
expected_edge_widths,
[fig.data[i]['line']['width'] for i in range(self.num_edges)])
def test_correlation_matrix_error(self):
wrong_correlation = self.correlation.drop(self.correlation.index[0])
with self.assertRaises(ValueError):
fgv.feature_graph_visualization(wrong_correlation,
threshold=self.threshold)
# TODO (): parameterize and test output colormap
def test_other_colormaps(self):
fgv.feature_graph_visualization(
self.correlation, threshold=self.threshold, color_map='coolwarm')
def test_cluster_to_sim(self):
cluster = {'A': 1,
'B': 2,
'C': 1,
'D': 3,
'E': 2}
similarity = fgv.cluster_to_sim(cluster)
expected_output = pd.DataFrame(
np.array([[1, 0, 1, 0, 0], [0, 1, 0, 0, 1], [1, 0, 1, 0, 0],
[0, 0, 0, 1, 0], [0, 1, 0, 0, 1]]),
columns=['A', 'B', 'C', 'D', 'E'],
index=['A', 'B', 'C', 'D', 'E'])
| pandas_testing.assert_frame_equal(similarity, expected_output) | pandas.util.testing.assert_frame_equal |
import ntpath
import dask.bag as db
import pandas as pd
from dask import dataframe as dd
import optimus.helpers.functions_spark
from optimus.engines.base.io.load import BaseLoad
from optimus.helpers.core import val_to_list
from optimus.helpers.functions import prepare_path
from optimus.helpers.logger import logger
class Load(BaseLoad):
@staticmethod
def json(path, multiline=False, *args, **kwargs):
"""
Return a dask dataframe from a json file.
:param path: path or location of the file.
:param multiline:
:return:
"""
file, file_name = prepare_path(path, "json")[0]
try:
# TODO: Check a better way to handle this Spark.instance.spark. Very verbose.
df = dd.read_json(path, lines=multiline, *args, **kwargs)
df.ext.reset()
df.meta.set("file_name", file_name)
except IOError as error:
logger.print(error)
raise
return df
@staticmethod
def tsv(path, header=True, infer_schema=True, *args, **kwargs):
"""
Return a dataframe from a tsv file.
:param path: path or location of the file.
:param header: tell the function whether dataset has a header row. True default.
:param infer_schema: infers the input schema automatically from data.
It requires one extra pass over the data. True default.
:return:
"""
return Load.csv(path, sep='\t', header=header, infer_schema=infer_schema, *args, **kwargs)
@staticmethod
def csv(path, sep=',', header=True, infer_schema=True, na_values=None, encoding="utf-8", n_rows=-1, cache=False,
quoting=0, lineterminator=None, error_bad_lines=False, engine="python", keep_default_na=False,
na_filter=False, *args,
**kwargs):
"""
        Return a dask dataframe from a csv file. It mirrors the Spark read.csv interface with some predefined
        params
:param path: path or location of the file.
:param sep: usually delimiter mark are ',' or ';'.
:param header: tell the function whether dataset has a header row. True default.
:param infer_schema: infers the input schema automatically from data.
:param encoding:
:param na_values:
:param n_rows:
:param quoting:
        :param engine: 'python' or 'c'. 'python' is slower but supports better error handling
:param lineterminator:
:param error_bad_lines:
:param keep_default_na:
        :param cache: If loading from a url, cache the path to the temp file so the file does not need to be downloaded again
"""
if cache is False:
prepare_path.cache_clear()
# file, file_name = prepare_path(path, "csv")[0]
try:
# From the panda docs using na_filter
# Detect missing value markers (empty strings and the value of na_values). In data without any NAs,
# passing na_filter=False can improve the performance of reading a large file.
df = dd.read_csv(path, sep=sep, header=0 if header else None, encoding=encoding,
quoting=quoting, lineterminator=lineterminator, error_bad_lines=error_bad_lines,
keep_default_na=True, na_values=None, engine=engine, na_filter=na_filter,
*args, **kwargs)
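            # Note: the dd.read_csv call above is made with keep_default_na=True and na_values=None,
            # so this method's keep_default_na / na_values arguments are not forwarded to dask.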
# print(len(df))
if n_rows > -1:
df = dd.from_pandas(df.head(n_rows), npartitions=1)
df.ext.reset()
df.meta.set("file_name", path)
except IOError as error:
logger.print(error)
raise
return df
@staticmethod
def parquet(path, columns=None, engine="pyarrow", *args, **kwargs):
"""
Return a dataframe from a parquet file.
:param path: path or location of the file. Must be string dataType
:param columns: select the columns that will be loaded. In this way you do not need to load all the dataframe
:param engine:
        :param args: custom arguments to be passed to the dask parquet function
        :param kwargs: custom keyword arguments to be passed to the dask parquet function
        :return: Dask DataFrame
"""
try:
df = dd.read_parquet(path, columns=columns, engine=engine, *args, **kwargs)
df.ext.reset()
df.meta.set("file_name", path)
except IOError as error:
logger.print(error)
raise
return df
@staticmethod
def zip(path, sep=',', header=True, infer_schema=True, charset="UTF-8", null_value="None", n_rows=-1, *args,
**kwargs):
file, file_name = prepare_path(path, "zip")
from zipfile import ZipFile
import dask.dataframe as dd
import os
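        # NOTE: the block below builds `ddf` from every zip file under the hard-coded `wd` path,
        # but `ddf` is not used by the dataframe that is actually returned at the end of this method.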
wd = '/path/to/zip/files'
file_list = os.listdir(wd)
destdir = '/extracted/destination/'
        ddf = dd.from_pandas(pd.DataFrame(), npartitions=1)  # dd.from_pandas requires npartitions (or chunksize)
for f in file_list:
with ZipFile(wd + f, "r") as zip:
zip.extractall(destdir, None, None)
df = dd.read_csv(zip.namelist(), usecols=['Enter', 'Columns', 'Here'], parse_dates=['Date'])
ddf = optimus.helpers.functions_spark.append(df)
ddf.compute()
# print("---",path, file, file_name)
try:
df = dd.read_csv(file, sep=sep, header=0 if header else None, encoding=charset, na_values=null_value,
compression="gzip", *args,
**kwargs)
if n_rows > -1:
df = df.head(n_rows)
df.meta.set("file_name", file_name)
except IOError as error:
logger.print(error)
raise
df.ext.reset()
return df
@staticmethod
def avro(path, *args, **kwargs):
"""
        Return a dataframe from an avro file.
:param path: path or location of the file. Must be string dataType
:param args: custom argument to be passed to the avro function
:param kwargs: custom keyword arguments to be passed to the avro function
        :return: Dask DataFrame
"""
file, file_name = prepare_path(path, "avro")
try:
df = db.read_avro(path, *args, **kwargs).to_dataframe()
df.ext.reset()
df.meta.set("file_name", file_name)
except IOError as error:
logger.print(error)
raise
return df
@staticmethod
def excel(path, sheet_name=0, merge_sheets=False, skiprows=1, n_rows=-1, n_partitions=1, *args, **kwargs):
"""
        Return a dataframe from an excel file.
:param path: Path or location of the file. Must be string dataType
:param sheet_name: excel sheet name
:param merge_sheets:
:param args: custom argument to be passed to the excel function
:param kwargs: custom keyword arguments to be passed to the excel function
"""
file, file_name = prepare_path(path)
header = None
if merge_sheets is True:
skiprows = -1
else:
header = 0
skiprows = 0
if n_rows == -1:
n_rows = None
pdfs = pd.read_excel(file, sheet_name=sheet_name, header=header, skiprows=skiprows, nrows=n_rows, *args,
**kwargs)
sheet_names = list( | pd.read_excel(file, None) | pandas.read_excel |
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning Engineer Nanodegree (Trial)
# ## Project 0: Predict Your Next World Cuisine
#
#
# Welcome to the machine learning cuisine-prediction project! Some example code has already been provided for you in this file, but you still need to implement more functionality to make the project run successfully. Unless explicitly asked to, you do not need to modify any of the code that is already given. Headings that begin with **Programming Exercise** indicate that the content that follows contains functionality you must implement. Each part comes with detailed guidance, and the pieces that need to be implemented are also marked with **TODO** in the comments. Please read all the hints carefully!
#
# - **Task**: given the ingredient names, predict which cuisine the dish belongs to.
# - **Steps**: load the dish data; preprocess the ingredient names and preview the dataset structure; load and train a logistic regression model; test and submit the results and check your score.
#
# >**Hint:** Code and Markdown cells can be run with the **Shift + Enter** shortcut. In addition, Markdown cells can be edited by double-clicking them.
# In[1]:
### Run the following code to set up the environment
get_ipython().system('python -c "import nltk; nltk.download(\'wordnet\')"')
# ---
# ## Step 1. Download and import the data
# In this project, you will use the dataset provided by [Yummly](https://www.yummly.com/) to train and test a model and to evaluate its performance and predictive power. A good model trained on this data can be used to predict cuisines.
#
# The dataset for this project comes from the [Kaggle What's Cooking competition](https://www.kaggle.com/c/whats-cooking/data). It contains 39774/9944 training and test data points, covering Chinese, Vietnamese, French and other cuisines. The dataset has the following features:
# - 'id': 24717, the record id
# - 'cuisine': "indian", the cuisine
# - 'ingredients': ["tumeric", "vegetable stock", ...] the ingredients contained in the dish
#
# First, go to the [cuisine dataset](https://www.kaggle.com/c/whats-cooking/data) page and download it (choose **Download All**). If the download does not work, refer to the download tutorial in the classroom. Then run the code cell below to load the dataset and some Python libraries required for this project. If the dataset sizes are printed successfully, the dataset has been loaded.
# ### 1.1 Set up the environment
# First, follow the first part of the `README.md` file in this directory to set up the development environment and the required libraries.
# ### 1.2 Load the data
# Next, after downloading the dataset, unzip it into the current directory (i.e. under the `MLND-cn-trial\` directory), then run the code below to load the training and test sets for this experiment.
# In[2]:
## Please do not modify the code below
# Import dependencies
import json
import codecs
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
# Load the datasets
train_filename='train.json'
train_content = pd.read_json(codecs.open(train_filename, mode='r', encoding='utf-8'))
test_filename = 'test.json'
test_content = pd.read_json(codecs.open(test_filename, mode='r', encoding='utf-8'))
# Print the number of loaded samples
print("The cuisine dataset contains {} training samples and {} test samples.\n".format(len(train_content), len(test_content)))
if len(train_content)==39774 and len(test_content)==9944:
    print("Data loaded successfully!")
else:
    print("There is a problem loading the data, please check the file paths!")
# ### 1.3 Preview the data
# To inspect the distribution of the dataset and the full set of cuisine categories, we print a few sample records.
# In[3]:
## Please do not modify the code below
pd.set_option('display.max_colwidth',120)
# ### Programming Exercise
# Use the `head()` function to preview the training set `train_content` (print the first 5 rows).
# In[4]:
### TODO: print the first 5 samples of train_content to preview the data
train_content.head()
# In[5]:
## Please do not modify the code below
## List all cuisine categories
categories=np.unique(train_content['cuisine'])
print("There are {} cuisines in total, namely:\n{}".format(len(categories),categories))
# ---
# ## Step 2. Analyze the data
# In the second part of the project, you will take an initial look at the cuisine data and give your analysis. Exploring the data to become familiar with it helps you better understand and interpret your results.
#
# Since the final goal of this project is to build a model that predicts world cuisines, we need to split the dataset into **Features** and **Target Variables**.
# - **Features**: `'ingredients'`, which gives the names of the ingredients contained in each dish.
# - **Target variable**: `'cuisine'`, the cuisine category we want to predict.
#
# They are stored in the variables `train_ingredients` and `train_targets`, respectively.
# ### Programming Exercise: extract the data
# * Assign the `ingredients` column of `train_content` to `train_ingredients`
# * Assign the `cuisine` column of `train_content` to `train_targets`
# In[6]:
### TODO: assign the features and the target variable
train_ingredients = train_content['ingredients']
train_targets = train_content['cuisine']
### TODO: print the results to check that the assignment is correct
print("The ingredient names are:\n",train_ingredients.head())
print("The cuisine labels are:\n",train_targets.head(10))
# ### Programming Exercise: basic statistics
# Your first programming exercise is to compute statistics about the cuisine ingredients. `numpy` has been imported for you; use it to perform the necessary calculations. These statistics are very important for analyzing the model's predictions.
# In the code below, you need to find:
# - the 10 most frequently used ingredients overall;
# - the 10 most common ingredients in Italian cuisine.
# In[7]:
## TODO: count how often each ingredient appears and store the counts in the sum_ingredients dict
sum_ingredients = {}
for index,g in enumerate(train_ingredients):
for j in train_ingredients[index]:
if j in sum_ingredients:
sum_ingredients[j] +=1
else:
sum_ingredients[j] = 1
print(sum_ingredients)
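# (For reference, an equivalent and more idiomatic way to build this count is
#  collections.Counter(ingredient for recipe in train_ingredients for ingredient in recipe);
#  the explicit loop above is kept as written.)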
# In[8]:
## Please do not modify the code below
# Finally, plot the 10 most used ingredients
plt.style.use(u'ggplot')
fig = pd.DataFrame(sum_ingredients, index=[0]).transpose()[0].sort_values(ascending=False, inplace=False)[:10].plot(kind='barh')
fig.invert_yaxis()
fig = fig.get_figure()
fig.tight_layout()
fig.show()
# In[9]:
## TODO: count how often each ingredient appears in Italian cuisine and store the counts in the italian_ingredients dict
italian_ingredients = {}
for cu,name in enumerate(train_targets):
if name == 'italian':
for j in train_ingredients[cu]:
if j in italian_ingredients:
italian_ingredients[j] += 1
else:
italian_ingredients[j] = 1
print(italian_ingredients)
# In[10]:
## Please do not modify the code below
# Finally, plot the 10 most used ingredients
fig = pd.DataFrame(italian_ingredients, index=[0]).transpose()[0].sort_values(ascending=False, inplace=False)[:10].plot(kind='barh')
fig.invert_yaxis()
fig = fig.get_figure()
fig.tight_layout()
fig.show()
# For a deeper dive into data analysis, see the [Data Analyst Nanodegree](https://cn.udacity.com/dand) or the [AI Programming with Python Nanodegree](https://www.udacity.com/legal/ai-programming).
# ---
# ## Step 3. Build the model
# In the third step of the project, you need to know the tools and techniques required to make your model produce predictions. Measuring each model's performance precisely with these tools greatly increases your confidence in your predictions.
# ### 3.1 Word cleaning
# Since dishes contain many ingredients, and the same ingredient can appear in singular/plural or different tense forms, we filter **ingredients** to remove this kind of variation.
# In[11]:
## Please do not modify the code below
import re
from nltk.stem import WordNetLemmatizer
import numpy as np
def text_clean(ingredients):
    # Remove punctuation from the words, keeping only the characters a..z A..Z
    ingredients= np.array(ingredients).tolist()
    print("Dish ingredients:\n{}".format(ingredients[9]))
    ingredients=[[re.sub('[^A-Za-z]', ' ', word) for word in component]for component in ingredients]
    print("Result after removing punctuation:\n{}".format(ingredients[9]))
    # Remove plural and tense variations, keeping only each word's lemma
    lemma=WordNetLemmatizer()
    ingredients=[" ".join([ " ".join([lemma.lemmatize(w) for w in words.split(" ")]) for words in component]) for component in ingredients]
    print("Result after removing tense and plural forms:\n{}".format(ingredients[9]))
return ingredients
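# Quick illustrative check (not part of the original notebook): WordNetLemmatizer reduces
# simple plural forms to their singular lemma, which is the behavior text_clean relies on.
print(WordNetLemmatizer().lemmatize("eggs"))  # expected output: "egg"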
print("\nProcessing the training set...")
train_ingredients = text_clean(train_content['ingredients'])
print("\nProcessing the test set...")
test_ingredients = text_clean(test_content['ingredients'])
# ### 3.2 Feature Extraction
# In this step we convert the dish ingredients into numeric feature vectors. Since the vast majority of dishes contain `salt, water, sugar, butter`, etc., a plain one-hot encoding would not distinguish cuisines well. We therefore weight ingredients by how often they occur: the more often an ingredient appears, the less discriminative it is. The features we use are TF-IDF; for background see [TF-IDF and cosine similarity (part 1): automatic keyword extraction](http://www.ruanyifeng.com/blog/2013/03/tf-idf.html).
# In[12]:
## Please do not modify the code below
from sklearn.feature_extraction.text import TfidfVectorizer
# Convert the ingredients into feature vectors
# Process the training set
vectorizer = TfidfVectorizer(stop_words='english', ngram_range=(1, 1),
analyzer='word', max_df=.57, binary=False,
token_pattern=r"\w+",sublinear_tf=False)
train_tfidf = vectorizer.fit_transform(train_ingredients).todense()
## Process the test set
test_tfidf = vectorizer.transform(test_ingredients)
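# Illustrative sketch (not required by the exercise): inspect a few of the learned idf
# weights -- very common ingredients receive a lower idf, and therefore a lower tf-idf,
# than rare, cuisine-specific ones.
terms_by_index = sorted(vectorizer.vocabulary_, key=vectorizer.vocabulary_.get)
print(list(zip(terms_by_index[:10], np.round(vectorizer.idf_[:10], 2))))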
# In[13]:
## Please do not modify the code below
train_targets=np.array(train_content['cuisine']).tolist()
train_targets[:10]
# ### Coding Exercise
# To prevent errors accumulated in the previous steps from breaking the steps below, we check here that the processed data is correct: print the first five rows of `train_tfidf` and `train_targets`.
# In[15]:
# Use the head() function to preview the training data train_tfidf and train_targets
train_tfidf=pd.DataFrame(train_tfidf)
print(train_tfidf.head())
train_targets=pd.DataFrame(train_targets)
train_targets.head()
# ### 3.3 Validation Split
# To get a rough estimate of the model's accuracy during the experiment, we split `20%` of the original `train_ingredients` off as `valid_ingredients`.
# ### Coding Exercise: Splitting and Shuffling the Data
# Call `train_test_split` to divide the training set into a new training set and a validation set, so that model accuracy can be monitored later.
# * Import `train_test_split` from `sklearn.model_selection`
# * Pass `train_tfidf` and `train_targets` as the inputs to `train_test_split`
# * Set `test_size` to 0.2 so that 20% of the data becomes the validation set and 80% remains as the new training set.
# * Set the `random_state` seed so that every run produces the same split. (With a fixed seed, the generated random sequence is deterministic.)
# In[16]:
### TODO: split off the validation set
from sklearn.model_selection import train_test_split
X_train , X_valid , y_train, y_valid = train_test_split(train_tfidf,train_targets,test_size = 0.2,random_state = 42)
# ### 3.4 Build the Model
# Use the Logistic Regression model from `sklearn`.
# ### Coding Exercise: Train the Model
# * Import `LogisticRegression` from `sklearn.linear_model`
# * Import `GridSearchCV` from `sklearn.model_selection`. It searches the given parameters automatically and returns the best result and parameters; this method is suitable for small datasets.
# * Define the variable `parameters`: a dictionary for the `C` parameter whose values are the integers from 1 to 10;
# * Define the variable `classifier`: create a classifier using the imported `LogisticRegression`;
# * Define the variable `grid`: create a grid-search object using the imported `GridSearchCV`, passing the variables 'classifier' and 'parameters' to its constructor;
# In[17]:
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
## TODO: build the logistic regression model
parameters = {'C':[1,2,3,4,5,6,7,8,9,10]}
classifier = LogisticRegression()
grid = GridSearchCV(classifier, parameters, cv=5)
## Please do not modify the code below
grid = grid.fit(X_train, y_train)
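# Illustrative sketch (not required by the notebook): after fitting, the regularization
# strength selected by the grid search can be inspected directly.
print("Best parameters found by GridSearchCV:", grid.best_params_)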
# After training finishes, we compute the model's predictions on the validation set `X_valid` and its prediction accuracy (by comparing element-wise with `y_valid`).
# In[18]:
## Please do not modify the code below
from sklearn.metrics import accuracy_score ## used to compute the model's accuracy
valid_predict = grid.predict(X_valid)
valid_score=accuracy_score(y_valid,valid_predict)
print("Accuracy score on the validation set: {}".format(valid_score))
# ---
# ## Step 4. Model Prediction (Optional)
# ### 4.1 Predict on the Test Set
#
# ### Coding Exercise
# * Use the model `grid` to predict on the test set `test_tfidf`, then inspect the predictions.
# In[20]:
### TODO: predict on the test set
predictions = grid.predict(test_tfidf)
## Please do not modify the code below
print("Number of predictions on the test set: {}".format(len(predictions)))
test_content['cuisine']=predictions
test_content.head(10)
# ### 4.2 Submit the Results
# To better evaluate the model, and to compare against other participants, we submit the test-set predictions to [kaggle What's Cooking?](https://www.kaggle.com/c/whats-cooking/submit) (a Kaggle account must be registered in advance).
#
# **Note**: when submitting the assignment, please include a screenshot of your leaderboard score in the archive.
#
# In[21]:
## Load the submission format
submit_frame = pd.read_csv("sample_submission.csv")
## Save the results
result = | pd.merge(submit_frame, test_content, on="id", how='left') | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 6 09:54:15 2020
@author: dhulse
"""
## This file shows different data visualizations for the trade-off analysis of the cost models over different design variables
# (battery, rotor configuration, operational height) at a given level of resilience policy.
# The plots give a general understanding of the design space, the trade-offs between cost models (objective functions), the sensitivity of
# each subsystem w.r.t. the models, and the effect of subsystem configuration and operational variables on the different cost models.
# A few examples are provided for interpretation; however, plots other than the ones shown here can be produced depending
# on the analysis question or for better visualization.
import sys
sys.path.append('../../')
import fmdtools.faultsim.propagate as propagate
import fmdtools.resultdisp as rd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import numpy as np
import seaborn as sns; sns.set(style="ticks", color_codes=True)
# from drone_mdl import *
# import time
# from drone_opt import *
# import pandas as pd
# import numpy as np
#
# # Design Model
# xdes1 = [0, 1]
# desC1 = x_to_dcost(xdes1)
# print(desC1)
#
# # Operational Model
# xoper1 = [122] #in m or ft?
# desO1 = x_to_ocost(xdes1, xoper1)
# print(desO1)
#
# #Resilience Model
# xres1 = [0, 0]
# desR1 = x_to_rcost(xdes1, xoper1, xres1)
# print(desR1)
#
# #all-in-one model
# xdes1 = [3,2]
# xoper1 = [65]
# xres1 = [0,0]
#
# a,b,c,d = x_to_ocost(xdes1, xoper1)
#
# mdl = x_to_mdl([0,2,100,0,0])
#
#
# endresults, resgraph, mdlhist = propagate.nominal(mdl)
#
# rd.plot.mdlhistvals(mdlhist, fxnflowvals={'StoreEE':'soc'})
# Read the dataset of cost model values and constraint validation for a large grid of design variables
grid_results= pd.read_csv('grid_results_new.csv')
#print(grid_results.head())
#print(grid_results.shape)
# Portion of feasible data among the whole dataset
feasible_DS =(grid_results['c_cum'].isin([0]).sum())/len(grid_results)
#print("The portion of feasible design space from the grid results")
#print(feasible_DS)
#Subsetting only feasible data
grid_results_FS = grid_results[(grid_results['c_cum']==0)]
g = sns.pairplot(grid_results_FS, hue="ResPolBat", vars=["Bat", "Rotor","Height","desC","operC","resC"], corner=True, diag_kind="kde",kind="reg")
plt.show()
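# (Descriptive note, added for interpretation) The corner pair plot above compares the design
# variables (Bat, Rotor, Height) and the three cost objectives (desC, operC, resC) for feasible
# designs only, colored by "ResPolBat" (presumably the battery-related resilience policy);
# correlated objectives show up as tight bands in the off-diagonal panels.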
########################## Optimization results from different framework#################################
# Optimization framework involved: Bi-level, Two-Stage and Single MOO (Weighted Tchebycheff)
opt_results= pd.read_csv('opt_results.csv')
#print(opt_results.head())
#print(opt_results.shape)
obj1 = pd.Series.tolist(opt_results['Obj1'])
obj2 = pd.Series.tolist(opt_results['Obj2'])
index= ['Bi-LevelP1000', 'Bi-LevelP100', 'Bi-LevelP10/1', 'Two-Stage', 'MOO:w1=0','MOO:w1=[0.1,0.2,0.3]','MOO:w1=0.4','MOO:w1=[0.5,0.6,..,1]']
df_y = | pd.DataFrame({'Obj1:DesC+OperC':obj1, 'Obj2:FailureC': obj2}, index=index) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sys
cardData = pd.read_csv('CardData.csv', header=0, encoding='utf-8-sig')
coinData = pd.read_csv('CoinData.csv', header=0, encoding='utf-8-sig')
baseCost = | pd.read_csv('BaseCost.csv', header=0, encoding='utf-8-sig') | pandas.read_csv |
# this script is the same as the others, but has all of the functions, etc. in one .py script in case there are issues
# using the other versions. Primary use case for this would be debugging what is going on, or understanding
# the overall pipeline.
"""
Pipeline for Zero-shot transcription of a lecture video file to text using facebook's wav2vec2 model
This script is the 'single-file' edition
<NAME>
large model link / doc from host website (huggingface)
https://huggingface.co/facebook/wav2vec2-large-960h-lv60-self
sections in this file:
- define model parameters (pretrained model)
- basic user inputs (vid file name / directory)
- convert video to audio chunks of duration X*
- pass all X audio chunks through wav2vec2model, store results in a list
- write all results of the list into a text file, store various runtime metrics
- pass created text file through a spell checker and autocorrect spelling. save as new file
- run basic keyword extraction from (via YAKE) on spell-corrected file, save in the same directory as other results
- cleanup tasks (delete the X .wav files created for audio transcription, etc), report runtime, and exit
* (where X is some duration that does not overload your computer or crash your IDE)
"""
import math
import os
import pprint as pp
import re
import shutil
import sys
import time
from datetime import datetime
from io import StringIO
from os import listdir
from os.path import basename, dirname, isfile, join
import GPUtil as GPU
import humanize
import librosa
import moviepy.editor as mp
import neuspell
import pandas as pd
import pkg_resources
import plotly.express as px
import psutil
import pysbd
import torch
import wordninja
import yake
from cleantext import clean
from natsort import natsorted
from symspellpy import SymSpell
from tqdm.auto import tqdm
from transformers import Wav2Vec2ForCTC, Wav2Vec2Tokenizer
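# --------------------------------------------------------------------------
# Illustrative sketch (not part of the original pipeline; the chunk path argument is a
# hypothetical placeholder): how a single 16 kHz audio chunk can be run through the
# wav2vec2 model and greedily decoded to text.
def transcribe_chunk_sketch(chunk_wav_path, tokenizer, model):
    """Transcribe one .wav chunk with an already-loaded wav2vec2 tokenizer + model."""
    speech, _ = librosa.load(chunk_wav_path, sr=16000)  # wav2vec2 expects 16 kHz mono audio
    inputs = tokenizer(speech, return_tensors="pt", padding="longest")
    with torch.no_grad():
        logits = model(inputs.input_values).logits  # shape: (batch, time, vocab)
    pred_ids = torch.argmax(logits, dim=-1)  # greedy CTC decoding
    return tokenizer.batch_decode(pred_ids)[0]
# --------------------------------------------------------------------------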
# --------------------------------------------------------------------------
# Function Definitions
# --------------------------------------------------------------------------
# General Utilities
def corr(s):
# adds space after period if there isn't one
# removes extra spaces
return re.sub(r'\.(?! )', '. ', re.sub(r' +', ' ', s))
def shorten_title(title_text, max_no=20):
if len(title_text) < max_no:
return title_text
else:
return title_text[:max_no] + "..."
class NullIO(StringIO):
# used to redirect system output for things that print a lot to console
def write(self, txt):
pass
def cleantxt_wrap(ugly_text):
# a wrapper for clean text with options different than default
# https://pypi.org/project/clean-text/
cleaned_text = clean(ugly_text,
fix_unicode=True, # fix various unicode errors
to_ascii=True, # transliterate to closest ASCII representation
lower=True, # lowercase text
no_line_breaks=True, # fully strip line breaks as opposed to only normalizing them
no_urls=True, # replace all URLs with a special token
no_emails=True, # replace all email addresses with a special token
no_phone_numbers=True, # replace all phone numbers with a special token
no_numbers=False, # replace all numbers with a special token
no_digits=False, # replace all digits with a special token
no_currency_symbols=True, # replace all currency symbols with a special token
no_punct=True, # remove punctuations
replace_with_punct="", # instead of removing punctuations you may replace them
replace_with_url="<URL>",
replace_with_email="<EMAIL>",
replace_with_phone_number="<PHONE>",
replace_with_number="<NUM>",
replace_with_digit="0",
lang="en" # set to 'de' for German special handling
)
return cleaned_text
def beautify_filename(filename, num_words=20, start_reverse=False,
word_separator="_"):
    # takes a filename stored as text, removes the extension, and splits it into at most num_words words,
    # then returns a cleaned-up filename with the words joined by word_separator.
    # Useful when reading files, processing them, and writing new files.
filename = str(filename)
index_file_Ext = filename.rfind('.')
current_name = str(filename)[:index_file_Ext] # get rid of extension
clean_name = cleantxt_wrap(current_name) # wrapper with custom defs
file_words = wordninja.split(clean_name)
# splits concatenated text into a list of words based on common word freq
if len(file_words) <= num_words:
num_words = len(file_words)
if start_reverse:
t_file_words = file_words[-num_words:]
else:
t_file_words = file_words[:num_words]
pretty_name = word_separator.join(t_file_words) # see function argument
# NOTE IT DOES NOT RETURN THE EXTENSION
return pretty_name[: (len(pretty_name) - 1)] # there is a space always at the end, so -1
def quick_keys(filename, filepath, max_ngrams=3, num_keywords=20, save_db=False,
verbose=False, txt_lang='en', ddup_thresh=0.3):
    # uses YAKE to quickly determine keywords in a text file. Saves the keywords and their YAKE scores (0 means very important) in a DataFrame.
with open(join(filepath, filename), 'r', encoding="utf-8", errors='ignore') as file:
text = file.read()
custom_kw_extractor = yake.KeywordExtractor(lan=txt_lang, n=max_ngrams, dedupLim=ddup_thresh,
top=num_keywords, features=None)
yake_keywords = custom_kw_extractor.extract_keywords(text)
phrase_db = pd.DataFrame(yake_keywords)
if len(phrase_db) == 0:
print("warning - no phrases were able to be extracted... ")
return None
if verbose:
print("YAKE keywords are: \n", yake_keywords)
print("dataframe structure: \n")
pp.pprint(phrase_db.head())
phrase_db.columns = ['key_phrase', 'YAKE_score']
# add a column for how many words the phrases contain
yake_kw_len = []
yake_kw_freq = []
for entry in yake_keywords:
entry_wordcount = len(str(entry).split(" ")) - 1
yake_kw_len.append(entry_wordcount)
for index, row in phrase_db.iterrows():
search_term = row["key_phrase"]
entry_freq = text.count(str(search_term))
yake_kw_freq.append(entry_freq)
word_len_series = pd.Series(yake_kw_len, name='No. Words in Phrase')
word_freq_series = pd.Series(yake_kw_freq, name='Phrase Freq. in Text')
phrase_db2 = | pd.concat([phrase_db, word_len_series, word_freq_series], axis=1) | pandas.concat |
import pandas as pd
import requests
import datetime
import collections
import pandas_datareader as dr
import matplotlib as plt
import numpy as np
from matplotlib.ticker import FuncFormatter
from yahoo_fin.stock_info import get_data
from datetime import datetime as dt
from dateutil.relativedelta import relativedelta
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import yfinance as yf
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
DESKTOP_PATH = r'C:/Users/<NAME>/Desktop'
CSV = DESKTOP_PATH + '/Proof_Of_Concept/2020/NYSE_2020_10KBySection3.csv'
if __name__ == "__main__":
test_df = collections.defaultdict(list)
Scrap_File = pd.read_csv(CSV, sep=',')
Scrap_File = Scrap_File.iloc[:, 1:4]
date_list = pd.date_range(start='2007-01-01', end='2021-08-01', freq='d')
for dte in date_list:
string_date = dte.strftime('%Y-%m-%d')
test_df['Date'].append(string_date)
frame1 = pd.DataFrame.from_dict(test_df)
# 65 + 1 + 1 + 9 + 26 + 19 + 7 + 8 + 15 + 13 + 22 + 3 + 5 + 2 + 7 + 6 + 15 + 67 + 21 + 15 + 2 + 13 + 43 + 6 + 18 + 3 + 3 + 9 + 10
start = 0
end = -1
ticker_list = []
for num, company in enumerate(Scrap_File['Company Name'][start:end]):
company_ticker = Scrap_File['Ticker'][num + start]
if company_ticker == 'UTX':
company_ticker = 'RTX'
if company_ticker == 'UNT':
company_ticker = 'UNTC'
if company_ticker == 'TMK':
company_ticker = 'GL'
if company_ticker == 'POL':
company_ticker = 'AVNT'
if company_ticker == 'POL':
company_ticker = 'AVNT'
if company_ticker == 'PJC':
company_ticker = 'PIPR'
if company_ticker == 'NOVS':
company_ticker = 'NOVC'
if company_ticker == 'CAP':
company_ticker = 'CAI'
if company_ticker == 'CBL':
company_ticker = 'CBLAQ'
if company == 'Acco Brands Corp':
company_ticker = 'ACCO'
if company == 'Bridgepoint Education Inc':
company_ticker = 'ZVO'
if company_ticker == 'CBP':
company_ticker = 'CBPI'
if company_ticker == 'CTL':
company_ticker = 'LUMN'
if company_ticker == 'DLPH':
company_ticker = 'APTV'
if company_ticker == 'DSW':
company_ticker = 'DBI'
if company_ticker == 'ECT':
company_ticker = 'ECTM'
if company_ticker == 'FGP':
company_ticker = 'FGPR'
if company_ticker == 'GNV':
company_ticker = 'SAR'
if company_ticker == 'HCP':
company_ticker = 'PEAK'
if company_ticker == 'HHS':
company_ticker = 'HRTH'
if company_ticker == 'HK':
company_ticker = 'BATL'
if company_ticker == 'HTZ':
company_ticker = 'HTZZ'
if company_ticker == 'INB':
company_ticker = 'INBP'
if company_ticker in ['AVP', 'CKH', 'EV', 'KEM', 'OZM', 'QTS', 'TIF', 'VAR', 'WDR', 'WRI']:
df2 = | pd.DataFrame(columns=['Date', 'High', 'Low', 'Open', 'Close', 'Volume']) | pandas.DataFrame |
import unittest
from enda.timeseries import TimeSeries
import pandas as pd
import pytz
class TestTimeSeries(unittest.TestCase):
def test_collapse_dt_series_into_periods(self):
# periods is a list of (start, end) pairs.
periods = [
(pd.to_datetime('2018-01-01 00:15:00+01:00'), pd.to_datetime('2018-01-01 00:45:00+01:00')),
(pd.to_datetime('2018-01-01 10:15:00+01:00'), pd.to_datetime('2018-01-01 15:45:00+01:00')),
(pd.to_datetime('2018-01-01 20:15:00+01:00'), pd.to_datetime('2018-01-01 21:45:00+01:00')),
]
# expand periods to build a time-series with gaps
dti = pd.DatetimeIndex([])
for s, e in periods:
dti = dti.append(pd.date_range(s, e, freq="30min"))
self.assertEqual(2+12+4, dti.shape[0])
# now find periods in the time-series
# should work with 2 types of freq arguments
for freq in ["30min", pd.to_timedelta("30min")]:
computed_periods = TimeSeries.collapse_dt_series_into_periods(dti, freq)
self.assertEqual(len(computed_periods), len(periods))
for i in range(len(periods)):
self.assertEqual(computed_periods[i][0], periods[i][0])
self.assertEqual(computed_periods[i][1], periods[i][1])
def test_collapse_dt_series_into_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, freq="30min")
def test_collapse_dt_series_into_periods_3(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00')
])
with self.assertRaises(ValueError):
# should raise an error because 15min gaps are not multiples of freq=30min
TimeSeries.collapse_dt_series_into_periods(dti, "30min")
def test_find_missing_and_extra_periods_1(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:50:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00'),
pd.to_datetime('2018-01-01 02:00:00+01:00'),
pd.to_datetime('2018-01-01 02:20:00+01:00')
])
freq, missing_periods, extra_points = TimeSeries.find_missing_and_extra_periods(dti, expected_freq="15min")
self.assertEqual(len(missing_periods), 2) # (01:15:00 -> 01:45:00), (02:15:00 -> 02:15:00)
self.assertEqual(len(extra_points), 2) # [00:50:00, 02:20:00]
def test_find_missing_and_extra_periods_2(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01 00:00:00+01:00'),
pd.to_datetime('2018-01-01 00:15:00+01:00'),
pd.to_datetime('2018-01-01 00:30:00+01:00'),
pd.to_datetime('2018-01-01 00:45:00+01:00'),
pd.to_datetime('2018-01-01 00:50:00+01:00'),
pd.to_datetime('2018-01-01 01:00:00+01:00'),
pd.to_datetime('2018-01-01 02:00:00+01:00'),
pd.to_datetime('2018-01-01 02:20:00+01:00')
])
# should work when we infer "expected_freq"
freq, missing_periods, extra_points = TimeSeries.find_missing_and_extra_periods(dti, expected_freq=None)
self.assertEqual(freq, pd.Timedelta("15min")) # inferred a 15min freq
self.assertEqual(len(missing_periods), 2) # (01:15:00 -> 01:45:00), (02:15:00 -> 02:15:00)
self.assertEqual(len(extra_points), 2) # [00:50:00, 02:20:00]
def test_find_missing_and_extra_periods_3(self):
dti = pd.DatetimeIndex([
pd.to_datetime('2018-01-01'),
pd.to_datetime('2018-01-02'),
pd.to_datetime('2018-01-03'),
| pd.to_datetime('2018-01-03 12:00:00') | pandas.to_datetime |
#!/usr/bin/env python3.6
import pandas as pd
from collections import defaultdict, Counter
import argparse
import sys
import os
import subprocess
import re
import numpy as np
from datetime import datetime
from itertools import chain
from pyranges import PyRanges
from SV_modules import *
pd.set_option('display.max_columns', None)
pd.set_option('display.expand_frame_repr', False)
pd.set_option('max_colwidth', None)
pd.options.display.max_rows = 999
class Namespace:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
def createGeneSyndromeDict(database_df):
dict = defaultdict(list)
for var, hpo in database_df.itertuples(index=False): # var can either be gene or syndrome
dict[var].append(hpo)
return(dict)
def createWeightDict(weights):
try:
w_df = pd.read_csv(weights, sep = ' ', names=["HPO_id", "weight"], comment = '#')
except OSError:
        print("Could not open/read the input file:" + weights)
sys.exit()
weightDict = dict(zip(w_df.HPO_id, w_df.weight))
return(weightDict)
def getClinicalPhenome(args):
# Get the clinical phenome and store as a set
try:
clinical_phenome = set(open("./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt").read().splitlines())
except OSError:
        print("Could not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_hpo_inexact.txt")
sys.exit()
return(clinical_phenome)
def calculateGeneSumScore(args, hpo_gene_dict, weightDict, clinical_phenome, omim_gene):
# Go through genes in genelist found in the patients
try:
genes = open("./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt", 'r')
except OSError:
        print("Could not open/read the input file:" + "./results/" + args.sampleid + "/" + args.sampleid + "_gene_list.txt")
sys.exit()
with genes:
gene = genes.read().splitlines()
gene_sum_score = 0
gene_score_result = pd.DataFrame(columns=['gene', 'score'])
for query in gene:
#print(query)
hpo_pheno = set(hpo_gene_dict[query]) # To get the phenotypic features for a given gene
overlap = hpo_pheno.intersection(clinical_phenome) # overlap all the phenotypic features with the clinical phenomes
for term in overlap:
gene_sum_score += weightDict[term]
gene_score_result = gene_score_result.append({'gene':query, 'score':gene_sum_score}, ignore_index=True)
gene_score_result_r = gene_score_result.iloc[::-1]
gene_score_result_r = pd.concat([gene_score_result_r, omim_gene])
gene_score_result_r = normalizeRawScore(args, gene_score_result_r, 'gene')
return(gene_score_result_r)
def getParentsGeno(filtered_intervar, inheritance_mode, ov_allele):
# Create two new columns and initialize to 0
filtered_intervar[inheritance_mode] = 0
filtered_intervar = filtered_intervar.reset_index(drop=True)
for idx, row in enumerate(filtered_intervar.itertuples(index=False)):
if int(getattr(row, 'Start')) in set(ov_allele['Start']):
#parents_geno = ov_allele.loc[ov_allele['Start'] == getattr(row, 'Start'), 'geno'].head(1)
#print(parents_geno)
parents_geno = ov_allele.loc[ov_allele['Start']==getattr(row,'Start'),'geno'].head(1).item()
filtered_intervar.loc[idx, inheritance_mode] = parents_geno
return(filtered_intervar)
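# (Descriptive note on getParentsGeno above) The 'geno' value supplied by the caller is the sum
# of the two GT alleles parsed from the parent's VCF (e.g. "0/1" -> 1, "1/1" -> 2), so the
# 'paternal'/'maternal' columns record how many copies of the variant allele that parent carries;
# the columns keep their initial value of -1 when no parental VCF is available.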
def rerankSmallVariant(df):
df['Clinvar_idx'] = df.Clinvar.str[9:-1]
df['InterVar_idx'] = df.InterVar_InterVarandEvidence.str[10:].str.split('PVS1').str[0]
df[['Clinvar_idx', 'InterVar_idx']] = df[['Clinvar_idx', 'InterVar_idx']].apply(lambda x:x.astype(str).str.lower())
df['Clinvar_score'], df['InterVar_score'] = 3, 3
# Calculate Clinvar score
df.loc[(df['Clinvar_idx'].str.contains('benign')), 'Clinvar_score'] = 1
df.loc[((df['Clinvar_idx'].str.contains('benign')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 2
df.loc[(df['Clinvar_idx'].str.contains('pathogenic')), 'Clinvar_score'] = 5
df.loc[((df['Clinvar_idx'].str.contains('pathogenic')) & (df['Clinvar_idx'].str.contains('likely'))), 'Clinvar_score'] = 4
df.loc[(df['Clinvar_idx'].str.contains('conflicting')), 'Clinvar_score'] = 3
# Calculate Intervar score
df.loc[(df['InterVar_idx'].str.contains('benign')), 'InterVar_score'] = 1
df.loc[((df['InterVar_idx'].str.contains('benign')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 2
df.loc[(df['InterVar_idx'].str.contains('pathogenic')), 'InterVar_score'] = 5
df.loc[((df['InterVar_idx'].str.contains('pathogenic')) & (df['InterVar_idx'].str.contains('likely'))), 'InterVar_score'] = 4
# Add them up
df['Patho_score'] = df['Clinvar_score'] + df['InterVar_score']
# Sort by the total patho_score
df = df.sort_values(by=['Patho_score', 'score'], ascending=False)
df = df.drop(['Clinvar_idx', 'InterVar_idx', 'Clinvar_score', 'InterVar_score', 'Patho_score'], axis=1)
return df
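# (Descriptive note on rerankSmallVariant above) Each variant receives a 1-5 pathogenicity score
# from ClinVar and from InterVar (benign=1 ... pathogenic=5, conflicting/default=3); the two are
# summed into Patho_score, so a variant called pathogenic by both tools scores 10 and sorts to
# the top, while one called benign by both scores 2 and sinks to the bottom.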
def smallVariantGeneOverlapCheckInheritance(args, smallVariantFile, interVarFinalFile, gene_score_result_r, famid):
# Overlap gene_score_result_r with small variants genes found in the proband
gene_score_result_r = gene_score_result_r[gene_score_result_r.gene.isin(smallVariantFile.gene)]
# Subset the intervar files further to store entries relevant to these set of genes
filtered_intervar = pd.merge(interVarFinalFile, gene_score_result_r, left_on='Ref_Gene', right_on='gene',how='inner')
# Remove common artifacts
try:
artifacts = pd.read_csv("./common_artifacts_20.txt", names = ["gene"])
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Ref_Gene'].isin(artifacts['gene'])]
except OSError:
print("Could not open/read the input file: common_artifacts_20.txt")
sys.exit()
# If custom artifact bed file is provided, filter dataframe
if os.path.exists(args.artifact):
#print(filtered_intervar)
custom_artifact = pd.read_csv(args.artifact, sep='\t', usecols=[0, 2] ,names=["Chr", "End"])
keys = list(custom_artifact.columns.values)
i1 = filtered_intervar.set_index(keys).index
i2 = custom_artifact.set_index(keys).index
filtered_intervar = filtered_intervar.loc[~i1.isin(i2)]
# Create a bed file and write it out
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariant_candidates.txt', index=False, sep='\t',header=False) # Write out a subset of the variant first
filtered_intervar_bed = filtered_intervar[['Chr', 'Start', 'End']]
filtered_intervar_bed.loc[:,'Chr'] = 'chr' + filtered_intervar_bed.loc[:,'Chr'].astype(str)
filtered_intervar_bed.loc[:,'Start'] -= 1
pd.DataFrame(filtered_intervar_bed).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_target.bed', index=False, sep='\t', header=False)
# Create two new columns and initialize to -1
# will later get overwritten to 0/1/2 if parents vcf files are provided
filtered_intervar['paternal'] = -1
filtered_intervar['maternal'] = -1
if args.type != 'singleton':
# Get overlapping variants from the parents so we know which variants are inherited
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Comparing small variants (SNPs/indels) inheritance')
cmd1 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.fathervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf"
cmd2 = "bcftools view -R ./results/" + args.sampleid + "/" + args.sampleid + "_target.bed " + args.mothervcf + " > ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf"
if args.type == 'duo':
if args.father_duo:
cmds = [cmd1]
else:
cmds = [cmd2]
else:
cmds = [cmd1, cmd2]
for cmd in cmds:
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Go through every row in filtered_intervar and see if the same variant is found in either of the parents
# We will only compare allele start position (we always assume the alt allele is the same)
if args.type=='trio' or args.father_duo:
try:
paternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
paternal_ov_allele['geno'] = paternal_ov_allele['geno'].str[:1].astype(int) + paternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'paternal', paternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_paternal_inherited_smallVariants.vcf")
sys.exit()
if args.type=="trio" or args.mother_duo:
try:
maternal_ov_allele = pd.read_csv("./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf", sep='\t',usecols=[1,9], names=["Start", "geno"], comment='#')
maternal_ov_allele['geno'] = maternal_ov_allele['geno'].str[:1].astype(int) + maternal_ov_allele['geno'].str[2:3].astype(int)
filtered_intervar = getParentsGeno(filtered_intervar, 'maternal', maternal_ov_allele)
except OSError:
print("Could not open/read the input file: ./results/" + args.sampleid + "/" + args.sampleid + "_maternal_inherited_smallVariants.vcf")
sys.exit()
# Rerank variants based on reported or predicted pathogeneicity
filtered_intervar = rerankSmallVariant(filtered_intervar)
if args.type=='trio':
# Divide the dataset into recessive, dominant, de novo, compound het
## Recessive
recessive = filtered_intervar[(filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 1) & (filtered_intervar['Otherinfo'] == 'hom')]
## Dominant
dominant_inherited = filtered_intervar[((filtered_intervar['paternal'] == 1) & (filtered_intervar['maternal'] == 0)) | ((filtered_intervar['maternal'] == 1) & (filtered_intervar['paternal'] == 0))]
## De novo
denovo = filtered_intervar[(filtered_intervar['paternal'] == 0) & (filtered_intervar['maternal'] == 0)]
#Compound het
filtered_intervar_compoundhet = filtered_intervar[(filtered_intervar['Otherinfo'] == 'het')]
filtered_intervar_compoundhet = filtered_intervar_compoundhet[(filtered_intervar_compoundhet['maternal'] != 2) & (filtered_intervar_compoundhet['paternal'] != 2) & ((filtered_intervar_compoundhet['paternal'] == 1) & (filtered_intervar_compoundhet['maternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 1) & (filtered_intervar_compoundhet['paternal'] == 0)) | ((filtered_intervar_compoundhet['maternal'] == 0) & (filtered_intervar_compoundhet['paternal'] == 0))]
count = Counter(filtered_intervar_compoundhet['Ref_Gene'])
compoundhet_genes = [x for x, cnt in count.items() if cnt > 1]
compoundhet = filtered_intervar_compoundhet[filtered_intervar_compoundhet['Ref_Gene'].isin(compoundhet_genes)]
discard = []
for gene in compoundhet_genes:
df = compoundhet[compoundhet['Ref_Gene'].str.contains(gene)]
row_count = len(df.index)
col_list = ['paternal', 'maternal']
res = df[col_list].sum(axis=0)
if ((res[0] == 0) & (res[1] == row_count)) or (res[1] == 0 & (res[0] == row_count)):
discard.append(gene)
compoundhet = compoundhet[~compoundhet['Ref_Gene'].isin(discard)]
# Print all the variants according to inheritance mode
# Recessive
pd.DataFrame(recessive).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_recessive_candidates.txt', index=False, sep='\t', header=True)
# Dominant
pd.DataFrame(dominant_inherited).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_dominant_inherited_smallVariants_candidates.txt', index=False, sep='\t', header=True)
# De novo
pd.DataFrame(denovo).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_denovo_candidates.txt', index=False, sep='\t', header=True)
# Compound het
pd.DataFrame(compoundhet).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_compoundhet_candidates.txt', index=False, sep='\t', header=True)
if args.xlink:
xlink = filtered_intervar.loc[(filtered_intervar['maternal']!=2) & (filtered_intervar['paternal']==0) & (filtered_intervar['Chr'] == 'X')]
pd.DataFrame(xlink).to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_smallVariants_xlink_candidates.txt', index=False, sep='\t', header=True)
# All
filtered_intervar = rerankSmallVariant(filtered_intervar)
pd.DataFrame(filtered_intervar).to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_smallVariants_ALL_candidates.txt', index=False, sep='\t', header=True)
if args.type=='trio':
# We want to return everything except recessive variants
filtered_intervar = filtered_intervar.loc[~filtered_intervar['Start'].isin(recessive['Start'])] # don't have recessive if singleton or duo
return filtered_intervar
def differentialDiangosis(hpo_syndrome_dict, weightSyndromeDict, clinical_phenome, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup,hpo_syndromes_mim_df):
syndrome_score_result = pd.DataFrame(columns=['syndrome', 'score'])
# Check every syndrome and its overlapping hpo terms
for syndrome in hpo_syndrome_dict:
hpo_terms = set(hpo_syndrome_dict[syndrome])
score = 0
for term in hpo_terms:
if term in clinical_phenome:
score += weightSyndromeDict[term]
if score != 0:
syndrome_score_result = syndrome_score_result.append({'syndrome': syndrome, 'score': score}, ignore_index=True)
syndrome_score_result_r = syndrome_score_result.sort_values(by='score', ascending=False)
syndrome_score_result_r['syndrome'] = syndrome_score_result_r['syndrome'].str.upper()
# Add a normalized score column
syndrome_score_result_r = normalizeRawScore(args, syndrome_score_result_r, 'syndrome')
# Specifically look for deletion/duplication syndrome
delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df)
return(syndrome_score_result_r)
def findGenomicLocation(cytoband_key, cytobandDict):
#print(cytoband_key)
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
#print(keys)
if len(keys)==0:
cytoband_key = cytoband_key[:-1]
keys = [key for key in cytobandDict if key.startswith(cytoband_key)]
genomic_coords_list = []
for key in keys:
genomic_coords_list.append(str(cytobandDict[key]).split('-'))
#print(genomic_coords_list)
genomic_coords_list = list(chain.from_iterable(genomic_coords_list))
min_coords = min(genomic_coords_list)
max_coords = max(genomic_coords_list)
genomic_range = str(min_coords) + '-' + str(max_coords)
return genomic_range
def parseSyndromeNameToCytoband(df, cytobandDict, type, hpo_syndromes_mim_df,args):
if type=='deldup':
df['cytoband'] = float('Nan')
regex = r'((^|\W)[0-9XY]{1,2}[PQ]{1}[\w\\.\\-]{1,15}[\s$])'
for index, row in df.iterrows():
m = re.search(regex, str(row))
if m is not None:
df.loc[index, 'cytoband'] = m.group(1)
df.dropna(subset=['cytoband'], inplace=True)
if df.empty: # df can be empty after dropping NA
return pd.DataFrame()
if type=='all':
df = df.merge(hpo_syndromes_mim_df, on=['syndrome'])
try:
morbid = pd.read_csv(args.workdir + '/morbidmap.txt', sep='\t', usecols=[2, 3], names=["MIM", "cytoband"], comment='#')
df = df.merge(morbid, on='MIM')
df = df.loc[~df['cytoband'].astype(str).str.contains("Chr")]
end_string = ('p','q')
df = df.loc[~df['cytoband'].str.endswith(end_string)] #Remove cytoband entries that span the whole chromosomal arm like 2p
except OSError:
print("Could not open/read the input file: " + args.workdir + '/morbidmap.txt')
sys.exit()
df['cytoband'] = df['cytoband'].astype(str).str.lower()
df['cytoband'] = df['cytoband'].str.replace('x', 'X')
df['cytoband'] = df['cytoband'].str.replace('y', 'Y')
df['cytoband'] = df['cytoband'].str.strip('\(\)')
df[['Chromosome', 'discard']] = df.cytoband.str.split('p|q', 1, expand=True)
df = df.drop('discard', axis=1)
if df.cytoband.str.contains('-').any():
df[['cytoband_start', 'cytoband_stop']] = df.cytoband.str.split('-', expand=True)
else:
df['cytoband_start'] = df.cytoband
df['cytoband_stop'] = None
df['arm'] = np.where(df['cytoband_start'].str.contains('p'), 'p', 'q')
df['cytoband_stop'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['arm'] + df['cytoband_start'].str.split('p|q').str[2], df['cytoband_stop'])
df['cytoband_start'] = np.where(df['cytoband_start'].str.count('p|q')>1, df['cytoband_start'].str.split('p|q').str[0] + df['arm'] + df['cytoband_start'].str.split('p|q').str[1], df['cytoband_start'])
for idx, row in df.iterrows():
cytoband_start_key = row['cytoband_start'].replace(" ","")
if cytoband_start_key in cytobandDict:
coords_start = cytobandDict[cytoband_start_key]
else:
genomic_range = findGenomicLocation(cytoband_start_key, cytobandDict)
coords_start = genomic_range
if row['cytoband_stop'] is not None: # Fix cytoband_stop column for quick cytobandDict lookup
current_chr = np.where(('p' in str(row['cytoband_stop'])) or ('q' in str(row['cytoband_stop'])), str(row['Chromosome']), str(row['Chromosome']) + str(row['arm']))
edited_cytoband_stop = str(current_chr) + row['cytoband_stop']
edited_cytoband_stop = edited_cytoband_stop.replace(" ", "")
df.at[idx, 'cytoband_stop'] = edited_cytoband_stop
if edited_cytoband_stop in cytobandDict:
coords_stop = cytobandDict[edited_cytoband_stop]
else:
genomic_range = findGenomicLocation(edited_cytoband_stop, cytobandDict)
coords_stop = genomic_range
# New coords will be the the beginning of coords_start and end of coords_stop
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_stop.split('-')[1]
else:
df.at[idx, 'Start'] = coords_start.split('-')[0]
df.at[idx, 'End'] = coords_start.split('-')[1]
return df
def createCytobandDict(args):
try:
cyto = pd.read_csv(args.workdir + '/cytoband.txt', sep = '\t', names=["cytoband", "coords"], comment = '#')
except OSError:
        print("Could not open/read the input file:" + args.workdir + '/cytoband.txt')
sys.exit()
cytobandDict = dict(zip(cyto.cytoband, cyto.coords))
return(cytobandDict)
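# Illustrative sketch (assumed input format, inferred from how the dict is used elsewhere in this
# script): cytoband.txt is expected to map a cytoband name to a "start-end" coordinate string,
# e.g. a tab-separated line "1p36.33  1-2300000" yields cytobandDict["1p36.33"] == "1-2300000",
# which findGenomicLocation() splits on '-' to recover the genomic interval.
def _example_cytoband_interval(cytobandDict, key):
    """Return (start, end) integer coordinates for one cytoband key (illustration only)."""
    start, end = str(cytobandDict[key]).split('-')
    return int(start), int(end)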
def delDupSyndrome(syndrome_score_result_r, args, cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, cyto_BN_del, cyto_BN_dup, hpo_syndromes_mim_df):
#print(syndrome_score_result_r)
syndrome_score_result_r.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_syndrome_score_result_r.txt', sep='\t', index=False)
# Create cytoband <-> genomic coordinates dict
cytobandDict = createCytobandDict(args)
del_cond = syndrome_score_result_r['syndrome'].str.contains('DELETION')
dup_cond = syndrome_score_result_r['syndrome'].str.contains('DUPLICATION')
del_df = syndrome_score_result_r[del_cond]
dup_df = syndrome_score_result_r[dup_cond]
del_df = parseSyndromeNameToCytoband(del_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
dup_df = parseSyndromeNameToCytoband(dup_df, cytobandDict,'deldup',hpo_syndromes_mim_df, args)
all_omim_syndromes = parseSyndromeNameToCytoband(syndrome_score_result_r, cytobandDict,'all', hpo_syndromes_mim_df, args)
if args.bionano:
cols = ['Chromosome', 'Start', 'End', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# Overlap with del/dup syndromes
if cyto_BN_dup is not None: # It can be None because old Bionano pipeline doesn't call duplications...
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_BN_dup.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t',index=False)
overlap_dup_BN = delDupSyndromeSVOverlap(dup_df, cyto_BN_dup, cols)
overlap_dup_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
else:
overlap_dup_BN = None
pd.DataFrame().to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_duplication_syndrome.txt', sep='\t', index=False)
overlap_del_BN = delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols)
overlap_del_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_deletion_syndrome.txt', sep='\t', index=False)
all_BN = pd.concat([cyto_BN_dup, cyto_BN_del], ignore_index=True)
overlap_all_BN = delDupSyndromeSVOverlap(all_omim_syndromes, all_BN, cols)
overlap_all_BN.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_Bionano_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'score', 'normalized_score']
# dup_df.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input1.txt', sep='\t', index=False)
# cyto_10x_dup_largeSV.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_input2.txt', sep='\t', index=False)
overlap_dup_largeSV_10x = delDupSyndromeSVOverlap(dup_df, cyto_10x_dup_largeSV, cols)
overlap_dup_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_duplication_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_largeSV_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del_largeSV, cols)
overlap_del_largeSV_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_largeSV_syndrome.txt', sep='\t', index=False)
overlap_del_10x = delDupSyndromeSVOverlap(del_df, cyto_10x_del, cols)
overlap_del_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_deletion_syndrome.txt', sep='\t', index=False)
all_10x = pd.concat([cyto_10x_dup_largeSV, cyto_10x_del_largeSV, cyto_10x_del], ignore_index=True)
overlap_all_10x = delDupSyndromeSVOverlap(all_omim_syndromes, all_10x, cols)
overlap_all_10x.to_csv('./results/' + args.sampleid + "/" + args.sampleid + '_10x_all_syndrome.txt', sep='\t', index=False)
if args.linkedreadSV and args.bionano:
cols = ['Chromosome', 'Start', 'End', 'ID', 'REF', 'ALT_1', 'QUAL', 'FILTER_PASS', 'SVLEN', 'Found_in_Father', 'Found_in_Mother', 'syndrome', 'cytoband', 'SmapEntryID', 'Confidence', 'Type', 'Zygosity', 'Genotype', 'SV_size', 'Found_in_Father_b', 'Found_in_Mother_b', 'score', 'normalized_score',]
# syndrome appearing in both 10x and bionano --> confident set
## for duplications
if ((overlap_dup_BN is not None) and (not overlap_dup_BN.empty) and (not overlap_dup_largeSV_10x.empty)):
overlap_dup_largeSV_10x = overlap_dup_largeSV_10x.loc[overlap_dup_largeSV_10x['SVLEN'] >= 1000]
confident_dup_syndrome = delDupSyndromeSVOverlap(overlap_dup_largeSV_10x, overlap_dup_BN, cols)
if not confident_dup_syndrome.empty:
confident_dup_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
else: # Write an empty dataframe
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_syndrome.txt', sep='\t',index=False)
## for deletions
del_10x = pd.concat([overlap_del_largeSV_10x, overlap_del_10x])
if ((not overlap_del_BN.empty) and (not del_10x.empty)):
del_10x = del_10x.loc[del_10x['SVLEN'] <= (-1000)]
confidnet_del_syndrome = delDupSyndromeSVOverlap(del_10x, overlap_del_BN, cols)
#confidnet_del_syndrome = pd.merge(del_10x, overlap_del_BN, on='syndrome', how='inner')
if not confidnet_del_syndrome.empty:
confidnet_del_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_syndrome.txt', sep='\t',index=False)
# for all omim syndromes
if ((not overlap_all_BN.empty) and (not overlap_all_10x.empty)):
overlap_all_10x = overlap_all_10x.loc[(overlap_all_10x['SVLEN'] <= (-1000)) | (overlap_all_10x['SVLEN'] >=1000)]
confident_all_syndrome = delDupSyndromeSVOverlap(overlap_all_10x, overlap_all_BN, cols)
if not confident_all_syndrome.empty:
confident_all_syndrome.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_all_syndrome.txt', sep='\t',index=False)
def delDupSyndromeSVOverlap(del_df, cyto_BN_del, cols):
if del_df.empty:
return pd.DataFrame()
del_df['Chromosome'] = del_df['Chromosome'].str.strip()
if 'cytoband_stop' in list(del_df.columns):
del_df = del_df.drop(['cytoband_start','cytoband_stop'], axis=1)
del_df.dropna( inplace=True)
overlap_del_BN = PyRanges(cyto_BN_del).join(PyRanges(del_df))
if not overlap_del_BN.df.empty:
overlap_del_BN = overlap_del_BN.df
overlap_del_BN['overlap_len'] = np.maximum(0, np.minimum(overlap_del_BN.End, overlap_del_BN.End_b) - np.maximum(overlap_del_BN.Start,overlap_del_BN.Start_b))
#overlap_del_BN = overlap_del_BN.drop(like="_b")
overlap_del_BN = overlap_del_BN.sort_values(by='score', ascending=False)
overlap_del_BN = overlap_del_BN.loc[overlap_del_BN['overlap_len'] > 0]
# print(overlap_del_BN)
#overlap_del_BN = overlap_del_BN.df.sort_values(by='score', ascending=False)
# Rearrange the column
overlap_del_BN = overlap_del_BN[cols].drop_duplicates()
return overlap_del_BN
else:
return overlap_del_BN.df
def normalizeRawScore(args, raw_score, mode):
# Normalize all the scores to 1-100
max_score = max(raw_score['score'])
raw_score.loc[:,'normalized_score'] = raw_score.loc[:,'score']/max_score * 100
return(raw_score)
def compileControlFiles(control_files_path, famid):
full_paths = []
for path in control_files_path:
control_files = os.listdir(path)
for file in control_files:
if not (re.match('BC...0[34]{1}', file) or re.match(rf"BC{famid}..", file)): # Discard trio of interest and all probands
full_paths.append(os.path.join(path, file))
full_paths.append(os.path.join(path, file))
return full_paths
def bionanoSV(args, famid, gene_score_result_r, all_small_variants):
# Generate controls files (1KGP BN samples + CIAPM parents (excluding parents of the proband of interest)
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Generating bionano control file...')
control_files_path = [args.workdir + "/bionano_sv/controls/DLE", args.workdir + "/bionano_sv/controls/BspQI", args.workdir + "/bionano_sv/cases/DLE", args.workdir + "/bionano_sv/cases/BspQI"]
full_paths = compileControlFiles(control_files_path, famid)
## Write an empty file
with open(args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz", 'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "cat " + path + "/exp_refineFinal1_merged_filter.smap | gzip >> " + args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Create a BN arg object
BN_args = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/" + args.sampleid + "/exp_refineFinal1_merged_filter.smap",
fpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "01/exp_refineFinal1_merged_filter.smap",
mpath = args.workdir + "/bionano_sv/cases/" + args.enzyme + "/BC" + famid + "02/exp_refineFinal1_merged_filter.smap",
referencepath = args.workdir + "/results/" + args.sampleid + "/bionano_control.smap.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes=args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
# Call bionano translocation
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano translocations on ' + args.sampleid + '...')
BN_translocation(BN_args)
# Call bionano deletion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano deletions on ' + args.sampleid + '...')
cyto_BN_del, exon_calls_BN_del = BN_deletion(BN_args)
# Call bionano insertion
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano insertions on ' + args.sampleid + '...')
BN_insertion(BN_args)
# Call bionano duplications
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano duplications on ' + args.sampleid + '...')
cyto_BN_dup, exon_calls_BN_dup = BN_duplication(BN_args)
# Call bionano inversions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting bionano inversions on ' + args.sampleid + '...')
BN_inversion(BN_args)
# Check potential compoundhets with SNPs and indels
BN_exons = pd.concat([exon_calls_BN_del, exon_calls_BN_dup])
if BN_exons.empty:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_Bionano_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
else:
BN_exons = pd.merge(BN_exons, all_small_variants, left_on='gene', right_on='Ref_Gene', how='inner')
BN_exons.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_Bionano_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
return cyto_BN_del, cyto_BN_dup, exon_calls_BN_del, exon_calls_BN_dup
def linkedreadSV(args, famid, gene_score_result_r, all_small_variants):
# Need to generate a reference file for all the medium size deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Generating linked-reads control files...')
control_files_path = [args.workdir + "/linkedRead_sv/controls", args.workdir + "/linkedRead_sv/cases"]
full_paths = compileControlFiles(control_files_path, famid)
## Write an empty file
with open(args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz",'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "zcat " + path + "/dels.vcf.gz | gzip >> " + args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
# Need to generate another reference file for large SVs
with open(args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz",'w'): # So it will overwrite the old file
pass
for path in full_paths:
cmd = "zcat " + path + "/large_svs.vcf.gz | gzip >> " + args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz"
p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode != 0:
raise Exception(stderr)
tenx_args_del = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/linkedRead_sv/cases/" + args.sampleid + "/dels.vcf.gz",
fpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "01/dels.vcf.gz",
mpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "02/dels.vcf.gz",
referencepath = args.workdir + "/results/" + args.sampleid + "/10x_del_control.vcf.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes = args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
tenx_args_largeSV = Namespace(sampleID = args.sampleid,
samplepath = args.workdir + "/linkedRead_sv/cases/" + args.sampleid + "/large_svs.vcf.gz",
fpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "01/large_svs.vcf.gz",
mpath = args.workdir + "/linkedRead_sv/cases/BC" + famid + "02/large_svs.vcf.gz",
referencepath = args.workdir + "/results/" + args.sampleid + "/10x_largeSV_control.vcf.gz",
outputdirectory = args.workdir + '/results/' + args.sampleid,
exons = args.workdir + '/annotatedExon.bed',
genes=args.workdir + '/annotatedGene.bed',
genelist = gene_score_result_r,
type = args.type,
father_duo = args.father_duo,
mother_duo = args.mother_duo)
# Call medium size deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads medium deletions on ' + args.sampleid + '...')
cyto_10x_del, exon_calls_10x_del = tenxdeletions(tenx_args_del)
# Call large deletions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large deletions on ' + args.sampleid + '...')
cyto_10x_del_largeSV, exon_calls_10x_largeSV_del = tenxlargesvdeletions(tenx_args_largeSV)
# Call large duplications
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large duplications on ' + args.sampleid + '...')
cyto_10x_dup_largeSV, exon_calls_10x_largeSV_dup = tenxlargesvduplications(tenx_args_largeSV)
# Call large inversions
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large inversions on ' + args.sampleid + '...')
tenxlargesvinversions(tenx_args_largeSV)
# Call large breakends
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large breakends on ' + args.sampleid + '...')
tenxlargesvbreakends(tenx_args_largeSV)
# Call large unknwon calls
print('[run_clinical_interpretor.py]: ' + datetime.now().strftime("%d/%m/%Y %H:%M:%S") + ' Detecting linked-reads large unknown on ' + args.sampleid + '...')
tenxlargesvunknown(tenx_args_largeSV)
# Check potential compoundhets with SNPs and indels
tenx_exons = pd.concat([exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup])
if tenx_exons.empty:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_10x_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
else:
tenx_exons = pd.merge(tenx_exons, all_small_variants, left_on='gene', right_on='Ref_Gene', how='inner')
tenx_exons.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_10x_SV_SNPsIndels_compoundhet_candidates.txt', sep='\t', index=False)
return cyto_10x_del, cyto_10x_del_largeSV, cyto_10x_dup_largeSV, exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup
def pyrangeJoin(df1_10x, df2_BN):
if df1_10x.empty or df2_BN.empty:
return pd.DataFrame()
df1_10x['Chromosome'], df1_10x['Start'], df1_10x['End'] = [df1_10x['CHROM'], df1_10x['POS'], df1_10x['END']]
df2_BN['Chromosome'], df2_BN['Start'], df2_BN['End'] = [df2_BN['RefcontigID1'], df2_BN['RefStartPos'], df2_BN['RefEndPos']]
overlap = PyRanges(df1_10x).join(PyRanges(df2_BN))
#print(overlap)
if not overlap.df.empty:
overlap = overlap.df
overlap['overlap_len'] = np.maximum(0, np.minimum(overlap.End, overlap.End_b) - np.maximum(overlap.Start,overlap.Start_b))
#overlap = overlap.drop(like="_b")
overlap = overlap.drop(['Chromosome', 'Start', 'End'], axis = 1)
overlap = overlap.loc[overlap['overlap_len'] > 0]
return overlap
else:
return overlap.df
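# (Descriptive note on pyrangeJoin above) max(0, min(End, End_b) - max(Start, Start_b)) is the
# length of the intersection of the two intervals in each joined pair; filtering on
# overlap_len > 0 keeps only calls that genuinely overlap between the linked-read and Bionano
# callsets rather than merely touching end-to-end.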
def findConfDelDup(args, exon_calls_10x_del, exon_calls_10x_largeSV_del, exon_calls_10x_largeSV_dup, exon_calls_BN_del, exon_calls_BN_dup):
tenx_del = pd.concat([exon_calls_10x_del, exon_calls_10x_largeSV_del])
overlap_del_10x_BN = pyrangeJoin(tenx_del, exon_calls_BN_del)
overlap_del_10x_BN.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_deletion_exons.txt', sep='\t', index=False)
if exon_calls_BN_dup is not None: # some bionano assemblies were generated with old pipelines
overlap_dup_10x_BN = pyrangeJoin(exon_calls_10x_largeSV_dup, exon_calls_BN_dup)
overlap_dup_10x_BN.to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_exons.txt', sep='\t', index=False)
else:
pd.DataFrame().to_csv('./results/' + args.sampleid + "/confident_set/" + args.sampleid + '_confident_duplication_exons.txt', sep='\t', index=False)
def main():
# Parse argument
parser = argparse.ArgumentParser(description="This software ranks genes based on the clinical phenome.")
parser.add_argument("-s", "--sampleid",help="Sample ID",dest="sampleid", type=str, required = True)
parser.add_argument("-w", "--workdir", help="This is the base work directory.", dest="workdir", type=str, required = True)
parser.add_argument("-i", "--intervar", help="Path to InterVar output folder", dest="intervar", type=str, required = True)
parser.add_argument("-b", "--bionano", help="Set this flag to evaluate bionano SVs.", dest="bionano", action='store_true')
parser.add_argument("-l", "--linkedreadSV", help="Set this flag to evaluate linkedread SVs.", dest="linkedreadSV", action='store_true')
parser.add_argument("-e", "--enzyme", help="Bionano enzyme used (BspQI or DLE). Only set this flag if -b is set", dest="enzyme", type=str)
parser.add_argument("-f", "--fathervcf", help="Path to father SNP VCF file. Only set this flag if -S is not set", dest="fathervcf", type=str)
    parser.add_argument("-m", "--mothervcf", help="Path to mother SNP VCF file. Only set this flag if -S is not set", dest="mothervcf", type=str)
parser.add_argument("-r", "--ref", help="Reference version. Either hg19 or hg38", dest="ref", type=str)
parser.add_argument("-a", "--artifact", help="Custom artifact tab-delimited bed file. Can be None", dest="artifact", type=str)
parser.add_argument("-t", "--type", help="Specify whether this is a trio, duo, or singleton case", dest="type", type=str)
parser.add_argument("-F", help="Set this flag if this is a duo case AND only father is sequenced", dest="father_duo", action='store_true')
parser.add_argument("-M", help="Set this flag if this is a duo case AND only mother is sequenced", dest="mother_duo", action='store_true')
parser.add_argument("-X", help="Set this flag if the proband is male AND users would like to output potential X-linked recessive SNPs/indels", dest="xlink", action='store_true')
args = parser.parse_args()
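    # Example invocation (hypothetical script name and paths):
    #   python confident_set.py -s SAMPLE01 -w /path/to/workdir -i /path/to/InterVar \
    #       -b -e DLE -l -r hg38 -t trio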
# Change work dir
os.chdir(args.workdir)
# Define variables
## Read the database files
hpo_genes = args.workdir + "/human_pheno_ontology_b1270/genes_to_phenotype.txt"
hpo_syndromes = args.workdir + "/human_pheno_ontology_b1270/phenotype_annotation.tab"
omim_gene = args.workdir + "/annotatedGene.bed"
smallVariantFileName = args.intervar + "/example/" + args.sampleid + "_smallVariant_geneList.txt"
interVarFinalFileName = args.intervar + "/example/" + args.sampleid + "." + args.ref + "_multianno.txt.intervar.FINAL"
try:
hpo_genes_df = | pd.read_csv(hpo_genes, sep='\t', usecols=[1, 2], names=["gene_name", "HPO_id"], comment='#') | pandas.read_csv |
"""
Command for bandwidth testing.
"""
import json
import os
import random
from subprocess import call
import click
import pandas as pd
from netbench.config import Config
from netbench.types import IPaddress
from netbench.utils import write_results
@click.command()
@click.pass_obj
@click.option('-t', default=10, show_default=True, help='Duration of the benchmark in seconds')
@click.option('-s', default=1, show_default=True, help='Number of streams to use.')
@click.option('-u', default=False, is_flag=True, help='Use UDP instead of TCP.')
@click.argument('server_addr', type=IPaddress())
def bandwidth(config: Config, server_addr: str, t: int, s: int, u: bool):
"""
Bandwidth benchmarking using iperf3.
The remote server's IP address must be specified.
"""
print('Starting iperf3 benchmark.')
output_file = f'/tmp/iperf-test-{random.randint(0, 10000)}.json'
call(f'iperf3 -c {server_addr} {"-u" if u else ""} -t {t} -P {s}' +
f' -J > {output_file}', shell=True)
print('Benchmark finished, saving results.')
df = pd.DataFrame(columns=['bytes', 'seconds', 'bits_per_second'])
with open(output_file, 'r') as output:
results = json.load(output)
for interval in results['intervals']:
df = df.append(pd.DataFrame({
'bytes': pd.Series([interval['sum']['bytes']], dtype='int'),
'seconds': | pd.Series([interval['sum']['seconds']], dtype='float') | pandas.Series |
"""
(C) Copyright 2019 IBM Corp.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Created on Jun 21, 2017
"""
from __future__ import division
import warnings
import networkx as nx
import numpy as np
import pandas as pd
import scipy.stats as stats
from ..utils.stat_utils import robust_lookup
# TODO: support categorical (non-numeric) data predecessors.
COVARIATE = "covariate"
HIDDEN = "hidden"
TREATMENT = "treatment"
OUTCOME = "outcome"
CENSOR = "censor"
EFFECT_MODIFIER = "effect_modifier"
VALID_VAR_TYPES = {COVARIATE, HIDDEN, TREATMENT, OUTCOME, CENSOR, EFFECT_MODIFIER}
CATEGORICAL = "categorical"
SURVIVAL = "survival"
CONTINUOUS = "continuous"
PROBABILITY = "probability"
DEFAULT_LINK_TYPE = "linear"
BASELINE_SURVIVAL_PARAM = 1.0
class CausalSimulator3(object):
TREATMENT_METHODS = {"random": lambda x, p, snr, params: CausalSimulator3._treatment_random(x, p),
"odds_ratio": lambda x, p, snr, params: CausalSimulator3._treatment_odds_ratio(x, p, snr),
"quantile_gauss_fit": lambda x, p, snr, params: CausalSimulator3._treatment_quantile_gauss_fit(
x, p, snr),
"logistic": lambda x, p, snr, params: CausalSimulator3._treatment_logistic_dichotomous(x, p,
params=params),
"gaussian": lambda x, p, snr, params: CausalSimulator3._treatment_gaussian_dichotomous(x, p,
snr)}
# G for general - applicable to all types of variables
G_LINKING_METHODS = {"linear": lambda x, beta=None: CausalSimulator3._linear_link(x, beta),
"affine": lambda x, beta=None: CausalSimulator3._affine_link(x, beta),
"exp": lambda x, beta=None: CausalSimulator3._exp_linking(x, beta),
"log": lambda x, beta=None: CausalSimulator3._log_linking(x, beta),
"poly": lambda x, beta=None: CausalSimulator3._poly_linking(x, beta)}
# O for outcome - outcome specific linking
O_LINKING_METHODS = {
"marginal_structural_model": lambda x, t, m, beta=None: CausalSimulator3._marginal_structural_model_link(
x, t, m, beta=beta),
None: lambda x, beta=None: x
}
def __init__(self, topology, var_types, prob_categories, link_types, snr, treatment_importances,
treatment_methods="gaussian", outcome_types=CATEGORICAL, effect_sizes=None,
survival_distribution="expon", survival_baseline=1, params=None):
"""
Constructor
Args:
topology (np.ndarray): A boolean adjacency matrix for variables (including covariates, treatment and outcome
variables of the model).
Every row is a binary vector for a variable, where v[i, j] = 1 iff j is a parent of i
var_types (Sequence[str]): Vector the size of variables stating every variable to be "covariate",
"hidden", "outcome", "treatment", "censor".
                                       **Notes**: if a pd.Series is supplied, variable names will be taken from
                                       var_types.index; otherwise, variable names will simply be range(num-of-variables).
prob_categories (Sequence[float|None]): vector the size of the number of variables.
if prob_categories[i] = None -> than variable i is considered continuous.
otherwise -> prob_categories[i] should be a list (or any iterable) which
size specifies number of categories variable i has, and it contains
multinomial probabilities for those categories (i.e. list non negative and
sums to 1).
            link_types (str|Sequence[str]): A single string or a sequence of strings (one per variable) specifying
                                            the relation between a covariate's parents and the covariate itself
snr (float|Sequence[float]): Signal to noise ratio (use 1.0 to eliminate noise in the system).
May be a vector the size of number of variables for stating different snr
values for different variables.
treatment_importances (float|Sequence[float]): The effect of treatment on the outcome. A float between 0
                                                           and 1.0 stating how much weight the treatment variable has
vs. the other parents of an outcome variable.
*To support multi-treatment* - place a list the size of the
number of treatment variables (as stated in var_types).
The matching between treatment variable and its importance
will be according to the order of the treatment variables
                                                           and the order of the list. If all treatment variables have
the same importance - pass the float value.
treatment_methods (str|Sequence[str]): method for creating treatment assignment and propensities, can be
one of {"random", "gaussian", "logistic"}.
*To support multi-treatment* - place a list the size of the number of
treatment variables. The matching between treatment variable and its
creation method will be according to the order of the treatment
                                                   variables and the order of the list. If all treatment variables have the
same type - pass the str value.
            outcome_types (str|Sequence[str]): outcome type; one of 'categorical', 'continuous', 'probability' or 'survival'.
*To support multi-outcome* - place a list the size of the number of outcome
variables (as stated in var_types). The matching between outcome variable and
its type will be according to the order of the outcome variables and the order
                                               of the list. If all outcome variables have the same type - pass the str value.
effect_sizes (float|Sequence[float|None]|None): The wanted mean effect size between two counterfactuals.
If None - The mean effect size will not be adjusted, but will be
whatever generated.
If float - The mean effect size will be adjusted to be approximately
the given number (considering the noise)
*To support multi-outcome* - a list the size the number of the outcome
variables (as stated in var_types). The matching between outcome
variable and its effect size will be according to the order of the
outcome variables and the order of the list.
survival_distribution (Sequence[str] or str): The distribution family from which to generate the outcome
                                                          values of outcome variables whose corresponding outcome_type is
"survival".
Default value is exponent distribution.
The same survival distribution will be used for the corresponding
censoring variable as well.
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
                                                          the list. If all outcome variables have the same survival distribution -
pass the str value (if present).
*Ignore if no outcome variable is of type survival*
survival_baseline (Sequence[float] or float): The survival baseline from the CoxPH model that will be the
                                                      basis for the parameters of the corresponding survival_distribution.
The same survival baseline will be used for the corresponding censoring
variable as well (if present).
Default value is 1 (no multiplicative meaning for baseline value).
*To support multi-outcome* - place a list the size of the number of
outcome variables of type "survival" (as stated in outcome_types). The
matching between survival outcome variable and its survival distribution
will be according to the order of the outcome variables and the order of
                                                      the list. If all outcome variables have the same survival distribution -
pass the str value.
*Ignore if no outcome variable is of type survival*
params (dict | None): Various parameters related to the generation process (e.g. the slope for
sigmoid-based functions etc.).
The form of: {var_name: {param_name: param_value, ...}, ...}
"""
# Find the indices of each type of variable:
var_types = pd.Series(var_types)
self.var_names = var_types.index.to_series().reset_index(drop=True)
self.var_types = var_types
self.treatment_indices = var_types[var_types == TREATMENT].index
self.outcome_indices = var_types[var_types == OUTCOME].index
self.covariate_indices = var_types[(var_types == COVARIATE) | (var_types == HIDDEN)].index
self.hidden_indices = var_types[var_types == HIDDEN].index
self.censor_indices = var_types[var_types == CENSOR].index
self.effmod_indices = var_types[var_types == EFFECT_MODIFIER].index
self.linking_coefs = {} # will accumulate the generated coefficients. {var: Series(coef, predecessors)}
# COMPLETE topology INTO A SQUARE ADJACENCY MATRIX:
# # let M be number of total variables, H number of variables to generate and L=M-H number of variables in a
# # given baseline dataset (that generated variables can be based on). Given Topology matrix can have either a
# # shape of MxM or HxM - in the latter case the matrix is completed into MxM by adding zero rows (since L
        # # given variables would not be re-generated anyway, they will be considered independent variables).
# if topology.shape[0] != topology.shape[1]:
# rows, cols = topology.shape
# if cols > rows:
# null_submatrix = np.zeros((cols - rows, cols), dtype=bool)
# topology = np.row_stack((topology, null_submatrix))
# else:
# raise ValueError("Topology matrix has {rows} rows and {cols} columns. This is not supported since"
# "T[i,j] = 1 iff j is parent of i. ")
if topology.shape[0] != len(var_types):
raise ValueError("Number of variables in topology graph do not correspond to the number of variables states"
" in the variable types")
self.m = len(var_types) # number of variables
# Create a graph out of matrix topology:
self.topology = topology
self.graph_topology = nx.from_numpy_matrix(topology.transpose(), create_using=nx.DiGraph()) # type:nx.DiGraph
self.graph_topology = nx.relabel_nodes(self.graph_topology,
dict(list(zip(list(range(self.m)), self.var_names))))
# check that outcome variable is not dependant on more than 1 treatment variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
treatment_predecessors = self.treatment_indices.intersection(predecessors)
if len(treatment_predecessors) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome {outcome} should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(outcome=i, n_parent_treat=len(treatment_predecessors),
treatment_parents=treatment_predecessors))
            elif len(treatment_predecessors) == 0:  # outcome variable has no treatment affecting it
warnings.warn("Outcome variable {} has no treatment effecting it".format(i), UserWarning)
# check that outcome variable is dependant on most 1 censor variable
for i in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(predecessors)
            if len(censor_predecessors) > 1:  # outcome variable is dependent on more than one censor variable
raise ValueError(
"Outcome {outcome} should have at most one censor variable affecting it. The current topology has "
"outcome variable dependant on {n_parent_cens} treatment parents which are: "
"{cens_parents}".format(outcome=i, n_parent_cens=len(censor_predecessors),
cens_parents=censor_predecessors))
# check that effect modifier is independent on treatment and affects only the outcome:
for i in self.effmod_indices:
            successors = list(self.graph_topology.successors(i))
if len(successors) == 0 or self.outcome_indices.intersection(successors).size < 1:
raise ValueError("Effect modifier variable {name} must affect an outcome variable".format(name=i))
ancestors = nx.ancestors(self.graph_topology, i)
if self.treatment_indices.intersection(ancestors).size > 0:
raise ValueError("Effect modifier variable {name} must not be affected by "
"treatment variable (which is one of {ances})".format(name=i, ances=ancestors))
# convert scalars to vectors if necessary.
self.prob_categories = self._convert_scalars_to_vectors(x=prob_categories, default_value=None,
x_type="prob_categories")
self.prob_categories = self.prob_categories.map(lambda x: pd.Series(x) if x is not None else x)
if self.prob_categories.isnull().all():
warnings.warn("Got all Nones in prob_categories. If simulation has Treatment variables in it, "
"this will throw an exception, as treatment variables must be categorical", UserWarning)
# Check that all treatment variables are categorical:
for i in self.treatment_indices:
if self.prob_categories[i] is None:
raise ValueError("Only categorical treatment is currently supported. However, treatment variable {t} "
"is not categorical. Please specify corresponding category_probabilities".format(t=i))
self.snr = self._convert_scalars_to_vectors(x=snr, default_value=1, x_type="snr")
self.link_types = self._convert_scalars_to_vectors(x=link_types, default_value=DEFAULT_LINK_TYPE,
x_type="link_type")
# if not all([x in self.VALID_LINK_TYPES for x in self.link_types]):
all_linking_types = list(self.G_LINKING_METHODS.keys()) + list(self.O_LINKING_METHODS.keys())
if not self.link_types.isin(all_linking_types).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(all_linking_types),
list(set(link_types) - set(all_linking_types))))
self.treatment_methods = self._map_properties_to_variables(values=treatment_methods,
keys=self.treatment_indices, var_type="treatment",
value_type="methods")
# if not all([x in TREATMENT_METHODS.keys() for x in self.treatment_methods.values()]):
if not self.treatment_methods.isin(list(self.TREATMENT_METHODS.keys())).all():
raise ValueError("link type must be one of {}, "
"got {} instead.".format(list(self.TREATMENT_METHODS.keys()),
list(
set(treatment_methods) - set(self.TREATMENT_METHODS.keys()))))
self.treatment_importances = self._map_properties_to_variables(values=treatment_importances,
keys=self.treatment_indices,
var_type="treatment", value_type="importance")
self.outcome_types = self._map_properties_to_variables(values=outcome_types, keys=self.outcome_indices,
var_type="outcome", value_type="type")
for i in self.outcome_indices:
if self.outcome_types[i] is CONTINUOUS and self.prob_categories[i] is not None:
raise ValueError("Continuous outcome must be associated with None category probability. "
"This was not the case in variable {outcome_var}. "
"Might lead to undefined behaviour.".format(outcome_var=i))
if self.outcome_types[i] is CATEGORICAL and self.prob_categories[i] is None:
raise ValueError("Categorical outcome must be associated with category probability. However, None was"
"associated with variable {outcome_var}".format(outcome_var=i))
self.effect_sizes = self._map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices,
var_type="outcome", value_type="effect size")
# map survival_related properties to survival outcome and their corresponding censor variables.
survival_outcome_variables = self.outcome_types[self.outcome_types.eq("survival")].index
self.survival_distribution = self._map_properties_to_variables(values=survival_distribution,
keys=survival_outcome_variables,
var_type="outcome",
value_type="survival_distribution")
self.survival_distribution[self.survival_distribution.isnull()] = "expon" # default is exponent distribution
self.survival_baseline = self._map_properties_to_variables(values=survival_baseline,
keys=survival_outcome_variables, var_type="outcome",
value_type="survival_baseline")
self.survival_baseline[self.survival_baseline.isnull()] = np.abs(np.random.normal(
loc=0.0, scale=1.0, size=self.survival_baseline.isnull().sum()))
for i in survival_outcome_variables:
topology_predecessors = list(self.graph_topology.predecessors(i))
censor_predecessors = self.censor_indices.intersection(topology_predecessors)
if len(censor_predecessors) > 0:
censor_predecessors = censor_predecessors[0]
# match between the outcome value and it's matching censor variable:
self.survival_distribution[censor_predecessors] = self.survival_distribution[i]
self.survival_baseline[censor_predecessors] = self.survival_baseline[i]
# self.params = params if params is not None else dict(zip(self.var_names, [None] * self.var_names.size))
self.params = params if params is not None else {}
# ### Initializing helper functions ### #
def _convert_scalars_to_vectors(self, x, default_value, x_type):
"""
        Converts scalars (e.g. float, int, str, etc.) into vectors, mapping each variable name to the desired value.
In context: If arguments given to the class init are scalar (i.e. float, int, str, etc.), converts them into
vector shape - mapping every variable to the given value
Args:
x (Any): the value wished to map to the variables.
if supplied with some sequence (e.g. list, array, Series, etc.) it will map the sequence to
variable names. if supplied with a scalar - it will duplicate the single value to all vars.
default_value (str|float|int|None): in case x=None (no value is supplied), map default_value to all vars
x_type (str): The type of value that currently being processed (e.g. the variable name in the python code),
so in case there is an error, it can display the python-variable that caused the error.
Returns:
x (pd.Series): A Series mapping between variable name and a some wanted value.
Raises:
ValueError: If a sequence is given, but its length doesn't match the number of variables in topology.
"""
if np.isscalar(x) or x is None: # a scalar, not a sequence
if x is None: # put default value
x = pd.Series(data=default_value, index=self.var_names)
else: # a scalar is given, map it to all variables
x = pd.Series(data=x, index=self.var_names)
else:
# a sequence has been provided:
if len(x) != self.m:
raise ValueError("{x_type} should have same size as number of variables."
"Got {emp} instead of {sup}".format(x_type=x_type, emp=len(x), sup=self.m))
if isinstance(x, pd.Series) and x.index.difference(self.var_names).empty:
                # if supplied with a Series which has its own indexing, and it matches the topology variables, then
# keep it as is.
x = x
else:
# either a simpler sequence or a Series with bad indexing, map to variable names.
x = pd.Series(data=x, index=self.var_names)
return x
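    # Illustrative behaviour of _convert_scalars_to_vectors (hypothetical variable names):
    # with self.var_names == ['x_0', 'x_1', 't', 'y'], passing x=0.9 returns
    # pd.Series(0.9, index=['x_0', 'x_1', 't', 'y']), while x=None maps default_value to
    # every variable instead.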
@staticmethod
def _map_properties_to_variables(values, keys, var_type, value_type):
"""
Maps between covariate variables properties to these properties.
Args:
values (Any): some property of some variable (e.g. 0.7 for treatment_importance or
"binary" for outcome_type)
keys (Sequence[Any]): The names indices to map the given properties (values) (e.g. treatment_indices)
var_type (str {"covariate", "hidden", "treatment", "outcome", "censor"}): The type of variable the
properties being mapped to (e.g. "treatment", "outcome", "covariate")
value_type (str): The name type that the property belongs to. (e.g. the variable name in the python code),
so in case there's an error, it can display the python-variable that caused the error.
Returns:
res (pd.Series): A map between the given keys (some covariate variable names indices) to the given values
Raises:
ValueError: When a Sequence is given as values (e.g. list of properties) but it does not match the length
of the keys.
Warnings:
            UserWarning: If values is a dict, it is kept as-is; if its keys do not match the variable names,
                         a warning is issued.
Examples:
Where effect_sizes is a Sequence or a float, outcome_indices are the indices names of the outcome variables
in the graph. the variable type discussed is "outcome" (since it is effect-size). The python variable name
is effect_size, thus the value_type is effect_size.
map_properties_to_variables(values=effect_sizes, keys=self.outcome_indices, var_type="outcome",
value_type="effect size")
"""
if np.isscalar(values) or values is None:
# values is a single value (i.e. int ot string), map its value to all given treatment variables:
res = dict(list(zip(keys, [values] * len(keys))))
else:
# some sequence provided
if len(keys) != len(values):
raise ValueError("The number of {var_t} variables: {n_keys} does not match the size of the list "
"depicting the {val_t} of creating each {var_t} variable: "
"{n_vals}".format(var_t=var_type, n_keys=len(keys),
val_t=value_type, n_vals=len(values)))
# values = values.values() if isinstance(values, dict) else values
if isinstance(values, dict):
# if given property is given by a dictionary, make sure this dict keys matches to the indices it
# suppose to map to:
res = values
if list(values.keys()) != keys:
warnings.warn("{var_t} {val_t} was given as dictionary but its keys ({val}) does not match the "
"{var_t} indices provided in topology ({keys}). You may expect "
"undefined behaviour".format(var_t=var_type, val_t=value_type,
val=list(values.keys()), keys=keys), UserWarning)
else:
res = dict(list(zip(keys, values)))
res = pd.Series(res, dtype=np.dtype(object))
res = res.infer_objects()
return res
# ### Main functionality ### #
def generate_data(self, X_given=None, num_samples=None, random_seed=None):
"""
Generates tables of dataset given the object's initial parameters.
Args:
num_samples (int): Number of samples that will be in the dataset.
X_given (pd.DataFrame): A baseline dataset to generate from. This dataset may contain only some of variables
stated in the initialized topology. The rest of the dataset (variables which are
stated in the topology and not in this dataset) will be generated.
                                     **Notes**: The data given will not be overwritten and will be taken as is. It is
                                                the user's responsibility to ensure that the given table has no dependent
                                                variables, since they will not be re-generated according to the graph.
random_seed (int): A seed for the pseudo-random-number-generator in order to reproduce results.
Returns:
(pd.DataFrame, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **X** (*pd.DataFrame*): A (num_samples x num_covariates) matrix of all covariates
(including treatments and outcomes) over samples.
- **propensities** (*pd.DataFrame*): A (num_samples x num_treatments) matrix (or vector) of propensity
values of every treatment.
- **counterfactuals** (*pd.DataFrame*): A (num_samples x num_outcomes) matrix -
"""
if random_seed is not None:
np.random.seed(random_seed)
if num_samples is None and X_given is None:
raise ValueError("Must supply either a dataset (X) or number of samples to generate")
if num_samples is not None and X_given is not None:
warnings.warn("Got both number of samples (num_samples) and a baseline dataset (X_given). "
"Number of samples will be ignored and only X_given will be used.", UserWarning)
if X_given is None:
num_samples = num_samples
patients_index = list(range(num_samples))
else:
num_samples = X_given.index.size
patients_index = X_given.index
# generate latent continuous covariates - every variable is guaranteed to have a population variance of 1.0
# X_latent = pd.DataFrame(index=patients_index, columns=self.var_types.index)
X = pd.DataFrame(index=patients_index, columns=self.var_types.index)
if X_given is not None: # if a dataset is given, integrate it to the current dataset being build.
X.loc[:, X_given.columns] = X_given
for col in X_given.columns:
X.loc[:, col] = X[col].astype(X_given.dtypes[col]) # insist of keeping original types.
propensities = pd.DataFrame(index=patients_index,
columns=pd.MultiIndex.from_tuples([(i, j) for i in self.treatment_indices
for j in self.prob_categories[i].index]))
cf_columns = []
for outcome in self.outcome_indices:
predecessors = list(self.graph_topology.predecessors(outcome))
treatment_predecessor = self.treatment_indices.intersection(predecessors)
if not treatment_predecessor.empty:
treatment_predecessor = treatment_predecessor[0]
for j in self.prob_categories[treatment_predecessor].index:
cf_columns.append((outcome, j))
else:
cf_columns.append((outcome, "null"))
counterfactuals = pd.DataFrame(index=patients_index, columns=pd.MultiIndex.from_tuples(cf_columns))
# create the variables according to their topological order to avoid creating variables before their
# dependencies are created:
for i in nx.topological_sort(self.graph_topology):
# i = self.var_names[i] # get the name corresponding to the i'th location in topology
if X.loc[:, i].notnull().any():
# current column has non-NAN values meaning it has some data in it so it will not be overwritten
continue
var_type = self.var_types[i]
X_parents = X.loc[:, self.topology[self.var_names[self.var_names == i].index[0], :]]
if var_type == COVARIATE or var_type == HIDDEN or var_type == EFFECT_MODIFIER:
X_signal, beta = self.generate_covariate_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
num_samples=num_samples, var_name=i)
elif var_type == TREATMENT:
X_signal, propensity, beta = self.generate_treatment_col(X_parents=X_parents,
link_type=self.link_types[i],
snr=self.snr[i],
method=self.treatment_methods[i],
prob_category=self.prob_categories[i],
var_name=i)
propensities[i] = propensity
elif var_type == OUTCOME:
X_signal, cf, beta = self.generate_outcome_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
effect_size=self.effect_sizes[i],
outcome_type=self.outcome_types[i],
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
counterfactuals[i] = cf
# print 'mean treatment effect: %0.3f' % (np.mean(cf1 - cf0))
elif var_type == CENSOR:
outcome_successor = self.outcome_indices.intersection(self.graph_topology.successors(i))[0]
treatment_predecessor = self.treatment_indices.intersection(self.graph_topology.predecessors(i))
treatment_predecessor = treatment_predecessor[0] if len(treatment_predecessor) > 0 else None
X_signal, beta = self.generate_censor_col(X_parents=X_parents, link_type=self.link_types[i],
snr=self.snr[i], prob_category=self.prob_categories[i],
outcome_type=self.outcome_types[outcome_successor],
treatment_importance=self.treatment_importances.
get(treatment_predecessor),
survival_distribution=self.survival_distribution.get(i),
survival_baseline=self.survival_baseline.get(i),
var_name=i)
else:
raise ValueError("{c_type} is not supported type of variable. "
"Supported types are {s_types}".format(c_type=var_type, s_types=VALID_VAR_TYPES))
X.loc[:, i] = X_signal
self.linking_coefs[i] = beta
# print X_latent.var(axis=0, ddof=1)
# print X.var(axis=0, ddof=1)
return X, propensities, counterfactuals
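    # Minimal usage sketch (hypothetical 3-variable graph covariate -> treatment -> outcome,
    # where topology[i, j] = 1 iff j is a parent of i):
    #   topology = np.array([[0, 0, 0],   # x has no parents
    #                        [1, 0, 0],   # t depends on x
    #                        [1, 1, 0]])  # y depends on x and t
    #   sim = CausalSimulator3(topology=topology,
    #                          var_types=["covariate", "treatment", "outcome"],
    #                          prob_categories=[None, [0.5, 0.5], None],
    #                          link_types="linear", snr=0.9, treatment_importances=0.8,
    #                          outcome_types="continuous")
    #   X, propensities, counterfactuals = sim.generate_data(num_samples=1000)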
def generate_covariate_col(self, X_parents, link_type, snr, prob_category, num_samples, var_name=None):
"""
Generates a single signal (covariate) column
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
link_type (str): How the parents variables (parents covariate columns) influence the current generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A vector which length states the number of classes (number of discrete
values) and every value is fractional - the probability of the corresponding
class.
**Notes**: vector must sum to 1 If None - the covariate column is left
untouched (i.e. continuous)
num_samples (int): number of samples to generate
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.Series): 2-element tuple containing:
- **X_final** (*pd.Series*): The final (i.e. noised and discretize [if needed]) covariate column.
            - **beta** (*pd.Series*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
"""
# if variable has no parents - just sample from normal Gaussian distribution:
if X_parents.empty:
X_new = pd.Series(np.random.normal(loc=0.0, scale=1.0, size=num_samples), index=X_parents.index)
beta = pd.Series(dtype=np.float64)
else:
# generate covariate column based on the parents' variables
linking_method = self.G_LINKING_METHODS.get(link_type)
if linking_method is None:
raise KeyError("link type must be one of {},got {} instead.".format(list(self.G_LINKING_METHODS.keys()),
link_type))
beta = self.linking_coefs.get(var_name)
X_new, beta = linking_method(X_parents, beta=beta)
# noise the sample
X_noised_cont, _, _ = self._noise_col(X_new, snr=snr)
# discretize variables if required:
X_final = self._discretize_col(X_noised_cont, prob_category)
return X_final, beta
def generate_treatment_col(self, X_parents, link_type, snr, prob_category, method="logistic", var_name=None):
"""
Generates a single treatment variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
            link_type (str): How the parent variables (parent covariate columns) influence the currently generated
column. What relation is there between them.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities corresponds to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
method (str): A type of method to generate the treatment signal and the corresponding propensities.
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.Series): 3-element tuple containing:
- **treatment** (*pd.Series*): Treatment assignment to each sample.
- **propensity** (*pd.DataFrame*): The marginal conditional probability of treatment given covariates.
A DataFrame shaped (num_samples x num_of_possible_treatment_categories).
            - **beta** (*pd.Series*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if prob_category is None (treatment must be categorical)
ValueError: If prob_category is not a legitimate probability vector (non negative, sums to 1)
"""
# Check input validity:
if prob_category is None:
raise ValueError("Treatment variable must be categorical, therefore it must have a legitimate distribution "
"over its possible values. Got None instead.")
CausalSimulator3._check_for_legitimate_probabilities(prob_category)
# generate only the continuous signal since it is later processed (therefore prob_category = None)
x_continuous, beta = self.generate_covariate_col(X_parents=X_parents, link_type=link_type, snr=snr,
prob_category=None, num_samples=X_parents.index.size,
var_name=var_name)
generation_method = self.TREATMENT_METHODS.get(method)
if generation_method is None:
raise KeyError("The given method {method} is not supported, "
"only {valid_methods}.".format(valid_methods=list(self.TREATMENT_METHODS.keys()),
method=method))
else:
params = self.params.get(var_name, {})
propensity, treatment = generation_method(x_continuous, prob_category, snr=snr, params=params)
return treatment.astype(int), propensity.astype(float), beta
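    # Illustrative call (assuming `sim` is an initialized CausalSimulator3 and X_parents holds the
    # treatment's parent columns): prob_category=pd.Series([0.7, 0.3]) asks for ~70% untreated
    # (category 0) and ~30% treated (category 1); the returned propensity DataFrame holds one
    # column per category with each sample's assignment probability.
    #   treatment, propensity, beta = sim.generate_treatment_col(
    #       X_parents, link_type="linear", snr=0.9,
    #       prob_category=pd.Series([0.7, 0.3]), method="logistic")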
def generate_outcome_col(self, X_parents, link_type, snr, prob_category, outcome_type, treatment_importance=None,
effect_size=None, survival_distribution=None, survival_baseline=None, var_name=None):
"""
Generates a single outcome variable column.
Args:
X_parents (pd.DataFrame): Sub-dataset containing only the relevant columns (features which are topological
parents to the current covariate being created)
            link_type (str): How the parent variables (parent covariate columns) influence the currently generated
column. What relation is there between them.
treatment_importance (float): The effect power of the treatment on the current generated outcome variable,
                                          as opposed to other variables that may influence it.
snr (float): Signal to noise ratio that controls the amount of noise to add (value of 1.0 will not generate
noise)
prob_category (pd.Series|None): A k-length distribution vector over k-1 treatments with the probability
of being untreated in prob_category[0] (prob_category.iloc[0]) and all
other k-1 probabilities corresponds to k-1 treatments.
**Notes**: vector must sum to 1. If None - the covariate column is left
untouched (i.e. continuous)
effect_size (float): wanted mean effect size.
            outcome_type (str): Type of outcome variable. One of 'categorical', 'continuous', 'probability' or 'survival'
            survival_distribution (str): The distribution from which to sample the survival time.
                                         Relevant only if outcome_type is "survival"
            survival_baseline: The baseline value of the Cox PH model. Relevant only if outcome_type is "survival"
var_name (int|str): The name of the variable currently being generated. Optional.
Returns:
(pd.Series, pd.DataFrame, pd.DataFrame): 3-element tuple containing:
- **x_outcome** (*pd.Series*): Outcome assignment for each sample.
- **cf** (*pd.DataFrame*): Holding the counterfactuals for every possible treatment category of the
outcome's treatment predecessor variable.
            - **beta** (*pd.DataFrame*): The coefficients used to generate the current variable from its predecessors.
Raises:
ValueError: if the given link_type is not a valid link_type. (Supported link types are placed in
self.G_LINKING_METHODS)
ValueError: if prob_category is neither None nor a legitimate distribution vector.
"""
# drop censor indices as they do not affect the actual values of the outcome, only the masking later:
X_parents = X_parents.drop(self.censor_indices, axis='columns') # type: pd.DataFrame
if X_parents.columns.size == 0:
raise ValueError("Outcome variable cannot be independent variable (i.e. have no parent in graph topology)")
# get effect modifiers:
effect_modifier = self.effmod_indices.intersection(X_parents.columns)
X_effmod = X_parents.loc[:, effect_modifier] # type: pd.DataFrame
X_covariates = X_parents.drop(effect_modifier, axis="columns") # type: pd.DataFrame
# get the treatment variable that affect current outcome.
treatment_parent = self.treatment_indices.intersection(X_covariates.columns)
if len(treatment_parent) > 1: # outcome variable is dependent on more than one treatment
raise ValueError(
"Outcome should have only one treatment affecting it. The current topology has outcome"
" variable dependant on {n_parent_treat} treatment parents which are: "
"{treatment_parents}".format(n_parent_treat=len(treatment_parent),
treatment_parents=treatment_parent))
else:
            try:  # len(treatment_parent) == 1: outcome variable is dependent on exactly one treatment
treatment_parent = treatment_parent[0]
X_treatment = X_covariates.loc[:, treatment_parent] # type: pd.Series
X_covariates = X_covariates.drop(treatment_parent, axis="columns") # type: pd.DataFrame
            except IndexError:  # len(treatment_parent) == 0: outcome variable is independent of treatment variables
treatment_parent = None
X_treatment = pd.Series(dtype=np.float64)
has_treatment_parent = not X_treatment.empty
treatment_importance = treatment_importance or self.treatment_importances.get(treatment_parent)
original_treatment_categories = X_treatment.unique().astype(int) # before being manipulated
# convexly re-weight variables according if treatment has different importance than the covariates:
if treatment_importance is not None:
# !knowingly not weighting (especially weighting-down) effect modifiers! (so only re-weighting covariates)
X_treatment *= treatment_importance # how much the treatment affects the outcome
if not X_covariates.columns.empty: # how much non-treatments (regular covariates) affect outcome
X_covariates *= float(float(1 - treatment_importance) / X_covariates.columns.size)
X_parents = pd.concat([X_covariates, X_effmod, X_treatment], axis="columns", ignore_index=False)
if link_type in list(self.G_LINKING_METHODS.keys()):
# generate counterfactuals
treatment_importance = 1 if treatment_importance is None else treatment_importance
cf = {}
for treatment_cat in original_treatment_categories:
cf[treatment_cat] = X_parents.drop(treatment_parent, axis="columns")
cf[treatment_cat].loc[:, treatment_parent] = treatment_cat * treatment_importance
linking_method = self.G_LINKING_METHODS.get(link_type)
beta = self.linking_coefs.get(var_name)
x_outcome, beta = linking_method(X_parents, beta=beta)
cf = {i: linking_method(cf[i], beta=beta)[0] for i in list(cf.keys())}
elif link_type in self.O_LINKING_METHODS:
linking_method = self.O_LINKING_METHODS.get(link_type)
beta = self.linking_coefs.get(var_name)
x_outcome, cf, beta = linking_method(X_covariates, X_effmod, X_treatment, beta=beta)
cf = {col: cf[col] for col in cf.columns}
else:
raise KeyError("link type: {lt} is not a supported type of linking".format(lt=link_type))
# noise the sample:
x_outcome, cov_std, noise = self._noise_col(x_outcome, snr=snr)
cf = {i: self._noise_col(cf[i], snr, cov_std, noise)[0] for i in list(cf.keys())}
if effect_size is not None:
warnings.warn("Stating effect size is not yet supported. Supplying it has no effect on results",
UserWarning)
# TODO: support given effect size
pass
# aggregate according to type:
if outcome_type == CATEGORICAL:
x_outcome, bins = self._discretize_col(x_outcome, prob_category, retbins=True)
# redefine bins edges so it could accommodate for values in the cfs that weren't present in the outcome:
bins.iloc[0] = -np.inf
bins.iloc[-1] = np.inf
cf = {i: self._discretize_col(cf[i], prob_category, bins=bins) if has_treatment_parent else cf[i]
for i in list(cf.keys())}
elif outcome_type == CONTINUOUS:
pass
elif outcome_type == PROBABILITY:
x_outcome = self._sigmoid(x_outcome)
cf = {i: self._sigmoid(cf[i]) for i in list(cf.keys())}
elif outcome_type == SURVIVAL:
if survival_distribution == "expon":
rnd_state = np.random.randint(low=0, high=999999)
param = survival_baseline * np.exp(x_outcome)
x_outcome = pd.Series(
stats.expon(loc=0.0, scale=(1.0 / param)).rvs(x_outcome.size, random_state=rnd_state),
index=x_outcome.index)
cf = {i: pd.Series(
stats.expon(loc=0.0, scale=(1 / (survival_baseline * np.exp(cf[i])))).rvs(x_outcome.size,
random_state=rnd_state),
index=x_outcome.index)
if has_treatment_parent else cf[i] for i in list(cf.keys())}
# Supplying the random state assures that the resulting outcome and cfs is consistent while sampling rvs
else:
raise ValueError("survival distribution: {0}, is not supported".format(survival_distribution))
else:
raise ValueError("outcome type: {0}, is not supported outcome type".format(outcome_type))
if not cf: # dictionary is empty - outcome variable has no treatment parent
cf = {"null": pd.DataFrame(data=None, index=X_parents.index, columns=["null"])}
cf = | pd.DataFrame(cf) | pandas.DataFrame |
import concurrent
from concurrent.futures.thread import ThreadPoolExecutor
from datetime import datetime
import pandas as pd
import time
import requests
from threading import Lock, Thread
MAX_EPSILON = 10**10
def log_debug(*args, **kwargs):
    pass  # print(*args, **kwargs)
class ProxyupRetriever:
URL = "https://api.proxyscrape.com/?request=getproxies&proxytype={}&timeout={}&country={}&ssl=all&anonymity=all"
CHECK_URL = "https://www.google.com/"
def __init__(self, proxy_type="http", proxy_country="all", proxy_timeout=500, pool_njobs=5, check_url=CHECK_URL,
update_interval_seconds=120, check_interval_seconds=60, auto_start=True, proxy_cache_size=1000):
self._proxy_type = proxy_type
self._pool_checker = ThreadPoolExecutor(pool_njobs, thread_name_prefix="proxyup")
self._update_interval_seconds = update_interval_seconds
self._check_interval_seconds = check_interval_seconds
self._proxy_cache_size = proxy_cache_size
self._proxy_timeout = proxy_timeout
self._proxy_country = proxy_country
self._check_url = check_url
self._num_proxies_to_deliver_simultaneously = 1
self._timeout_iteration_seconds = 0
self._finish = False
self._thread = None
self._lock = Lock()
self._proxies = pd.DataFrame([], columns=["Confirmed", "LastCheck"], index= | pd.Index([], name="Address") | pandas.Index |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data loading and preprocessing functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import zipfile
from PIL import Image
import numpy as np
import pandas as pd
from six.moves import urllib
from sklearn import preprocessing
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1.keras import backend
from tensorflow.compat.v1.keras import datasets
from sklearn.model_selection import train_test_split
import dvrl_utils
def load_tabular_data(data_name, dict_no, noise_rate):
"""Loads Adult Income and Blog Feedback datasets.
This module loads the two tabular datasets and saves train.csv, valid.csv and
test.csv files under data_files directory.
UCI Adult data link: https://archive.ics.uci.edu/ml/datasets/Adult
UCI Blog data link: https://archive.ics.uci.edu/ml/datasets/BlogFeedback
  If noise_rate > 0.0, adds noise to the datasets.
Then, saves train.csv, valid.csv, test.csv on './data_files/' directory
Args:
data_name: 'adult' or 'blog'
dict_no: training and validation set numbers
noise_rate: label corruption ratio
Returns:
noise_idx: indices of noisy samples
"""
# Loads datasets from links
uci_base_url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/'
# Adult Income dataset
if data_name == 'adult':
train_url = uci_base_url + 'adult/adult.data'
test_url = uci_base_url + 'adult/adult.test'
data_train = pd.read_csv(train_url, header=None)
data_test = pd.read_csv(test_url, skiprows=1, header=None)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['Age', 'WorkClass', 'fnlwgt', 'Education', 'EducationNum',
'MaritalStatus', 'Occupation', 'Relationship', 'Race',
'Gender', 'CapitalGain', 'CapitalLoss', 'HoursPerWeek',
'NativeCountry', 'Income']
# Creates binary labels
df['Income'] = df['Income'].map({' <=50K': 0, ' >50K': 1,
' <=50K.': 0, ' >50K.': 1})
# Changes string to float
df.Age = df.Age.astype(float)
df.fnlwgt = df.fnlwgt.astype(float)
df.EducationNum = df.EducationNum.astype(float)
df.CapitalGain = df.CapitalGain.astype(float)
df.CapitalLoss = df.CapitalLoss.astype(float)
# One-hot encoding
df = pd.get_dummies(df, columns=['WorkClass', 'Education', 'MaritalStatus',
'Occupation', 'Relationship',
'Race', 'Gender', 'NativeCountry'])
# Sets label name as Y
df = df.rename(columns={'Income': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# Blog Feedback dataset
elif data_name == 'blog':
resp = urllib.request.urlopen(uci_base_url + '00304/BlogFeedback.zip')
zip_file = zipfile.ZipFile(io.BytesIO(resp.read()))
# Loads train dataset
train_file_name = 'blogData_train.csv'
data_train = pd.read_csv(zip_file.open(train_file_name), header=None)
# Loads test dataset
data_test = []
for i in range(29):
if i < 9:
file_name = 'blogData_test-2012.02.0'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.02.'+ str(i+1) + '.00_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
if i == 0:
data_test = temp_data
else:
data_test = pd.concat((data_test, temp_data), axis=0)
for i in range(31):
if i < 9:
file_name = 'blogData_test-2012.03.0'+ str(i+1) + '.00_00.csv'
elif i < 25:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.00_00.csv'
else:
file_name = 'blogData_test-2012.03.'+ str(i+1) + '.01_00.csv'
temp_data = pd.read_csv(zip_file.open(file_name), header=None)
data_test = pd.concat((data_test, temp_data), axis=0)
df = pd.concat((data_train, data_test), axis=0)
# Removes rows with missing data
df = df.dropna()
# Sets label and named as Y
df.columns = df.columns.astype(str)
df['280'] = 1*(df['280'] > 0)
df = df.rename(columns={'280': 'Y'})
df['Y'] = df['Y'].astype(int)
# Resets index
df = df.reset_index()
df = df.drop(columns=['index'])
# load california housing dataset (./data_files/california_housing_train.csv
# and ./data_files/california_housing_test.csv)
elif data_name == 'cali':
train_url = './data_files/california_housing_train.csv'
test_url = './data_files/california_housing_test.csv'
data_train = pd.read_csv(train_url, header=0)
data_test = pd.read_csv(test_url, header=0)
df = pd.concat((data_train, data_test), axis=0)
# Column names
df.columns = ['longitude', 'latitude', 'housing_median_age', 'total_rooms', 'total_bedrooms',
'population', 'households', 'median_income', 'median_house_value']
df['longitude'] = pd.to_numeric(df['longitude'], downcast="float")
df['latitude'] = pd.to_numeric(df['latitude'], downcast="float")
df['housing_median_age'] = | pd.to_numeric(df['housing_median_age'], downcast="float") | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""Unit tests for cartoframes.data.Dataset"""
import json
import os
import sys
import unittest
import warnings
import numpy as np
import pandas as pd
import geopandas as gpd
from carto.exceptions import CartoException
from cartoframes.auth import Credentials
from cartoframes.data import Dataset
from cartoframes.data.clients import SQLClient
from cartoframes.data.dataset.registry.base_dataset import BaseDataset
from cartoframes.data.dataset.registry.strategies_registry import StrategiesRegistry
from cartoframes.data.dataset.registry.dataframe_dataset import (
DataFrameDataset, _rows)
from cartoframes.data.dataset.registry.query_dataset import QueryDataset
from cartoframes.data.dataset.registry.table_dataset import TableDataset
from cartoframes.lib import context
from cartoframes.utils.columns import DataframeColumnsInfo, normalize_name
from cartoframes.utils.geom_utils import setting_value_exception
from cartoframes.utils.utils import load_geojson
from tests.e2e.helpers import _UserUrlLoader
from tests.unit.mocks.context_mock import ContextMock
from tests.unit.mocks.dataset_mock import DatasetMock, QueryDatasetMock
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock
WILL_SKIP = False
warnings.filterwarnings('ignore')
class TestDataset(unittest.TestCase, _UserUrlLoader):
"""Tests for cartoframes.CARTOframes"""
def setUp(self):
if (os.environ.get('APIKEY') is None or
os.environ.get('USERNAME') is None):
try:
creds = json.loads(open('tests/e2e/secret.json').read())
self.apikey = creds['APIKEY']
self.username = creds['USERNAME']
except Exception:
warnings.warn("Skipping Context tests. To test it, "
"create a `secret.json` file in test/ by "
"renaming `secret.json.sample` to `secret.json` "
"and updating the credentials to match your "
"environment.")
self.apikey = None
self.username = None
else:
self.apikey = os.environ['APIKEY']
self.username = os.environ['USERNAME']
# table naming info
has_mpl = 'mpl' if os.environ.get('MPLBACKEND') else 'nonmpl'
has_gpd = 'gpd' if os.environ.get('USE_GEOPANDAS') else 'nongpd'
pyver = sys.version[0:3].replace('.', '_')
buildnum = os.environ.get('TRAVIS_BUILD_NUMBER') or 'none'
test_slug = '{ver}_{num}_{mpl}_{gpd}'.format(
ver=pyver, num=buildnum, mpl=has_mpl, gpd=has_gpd
)
# for writing to carto
self.test_write_table = normalize_name(
'cf_test_table_{}'.format(test_slug)
)
self.base_url = self.user_url().format(username=self.username)
self.credentials = Credentials(self.username, self.apikey, self.base_url)
self.sql_client = SQLClient(self.credentials)
self.test_geojson = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
-3.1640625,
42.032974332441405
]
}
}
]
}
self.tearDown()
def tearDown(self):
"""restore to original state"""
tables = (self.test_write_table, )
sql_drop = 'DROP TABLE IF EXISTS {};'
for table in tables:
try:
Dataset(table, credentials=self.credentials).delete()
self.sql_client.query(sql_drop.format(table))
except CartoException:
warnings.warn('Error deleting tables')
StrategiesRegistry.instance = None
def test_dataset_upload_validation_fails_only_with_table_name(self):
table_name = 'fake_table'
dataset = Dataset(table_name, credentials=self.credentials)
err_msg = 'Nothing to upload. We need data in a DataFrame or GeoDataFrame or a query to upload data to CARTO.'
with self.assertRaises(ValueError, msg=err_msg):
dataset.upload()
def test_dataset_upload_validation_query_fails_without_table_name(self):
query = 'SELECT 1'
dataset = Dataset(query, credentials=self.credentials)
with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'):
dataset.upload()
def test_dataset_upload_validation_df_fails_without_table_name_and_context(self):
df = load_geojson(self.test_geojson)
dataset = Dataset(df)
with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'):
dataset.upload()
def test_dataset_upload_validation_df_fails_without_context(self):
df = load_geojson(self.test_geojson)
dataset = Dataset(df)
with self.assertRaises(ValueError, msg='You should provide a table_name and context to upload data.'):
dataset.upload(table_name=self.test_write_table)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_upload_into_existing_table_fails_without_replace_property(self):
query = 'SELECT 1'
dataset = Dataset(query, credentials=self.credentials)
dataset.upload(table_name=self.test_write_table)
dataset = Dataset(query, credentials=self.credentials)
        err_msg = ('Table with name {t} and schema {s} already exists in CARTO. Please choose a different `table_name` '
                   'or use if_exists="replace" to overwrite it').format(t=self.test_write_table, s='public')
with self.assertRaises(CartoException, msg=err_msg):
dataset.upload(table_name=self.test_write_table)
dataset.upload(table_name=self.test_write_table, if_exists=Dataset.IF_EXISTS_REPLACE)
def test_dataset_upload_validation_fails_with_query_and_append(self):
query = 'SELECT 1'
dataset = Dataset(query, credentials=self.credentials)
err_msg = 'Error using append with a query Dataset. It is not possible to append data to a query'
with self.assertRaises(CartoException, msg=err_msg):
dataset.upload(table_name=self.test_write_table, if_exists=Dataset.IF_EXISTS_APPEND)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_download_validations(self):
self.assertNotExistsTable(self.test_write_table)
df = load_geojson(self.test_geojson)
dataset = Dataset(df)
error_msg = 'You should provide a context and a table_name or query to download data.'
with self.assertRaises(ValueError, msg=error_msg):
dataset.download()
query = 'SELECT 1 as fakec'
dataset = Dataset(query, credentials=self.credentials)
dataset.upload(table_name=self.test_write_table)
dataset._table_name = 'non_used_table'
df = dataset.download()
self.assertEqual('fakec' in df.columns, True)
dataset = Dataset(self.test_write_table, credentials=self.credentials)
df = dataset.download()
self.assertEqual('fakec' in df.columns, True)
def test_dataset_download_and_upload(self):
self.assertNotExistsTable(self.test_write_table)
query = 'SELECT 1 as fakec'
dataset = Dataset(query, credentials=self.credentials)
df = dataset.download()
dataset = Dataset(df)
dataset.upload(table_name=self.test_write_table,
credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
dataset = Dataset(self.test_write_table, credentials=self.credentials)
df = dataset.download()
dataset = Dataset(df)
dataset.upload(table_name=self.test_write_table,
credentials=self.credentials,
if_exists=Dataset.IF_EXISTS_REPLACE)
def test_dataset_upload_and_download_special_values(self):
self.assertNotExistsTable(self.test_write_table)
orig_df = pd.DataFrame({
'lat': [0, 1, 2],
'lng': [0, 1, 2],
'svals': [np.inf, -np.inf, np.nan]
})
dataset = Dataset(orig_df)
dataset.upload(table_name=self.test_write_table,
with_lnglat=('lng', 'lat'),
credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
dataset = Dataset(self.test_write_table, credentials=self.credentials)
df = dataset.download()
assert df.lat.equals(orig_df.lat)
assert df.lng.equals(orig_df.lng)
assert df.svals.equals(orig_df.svals)
assert df.the_geom.notnull().all()
def test_dataset_download_bool_null(self):
self.assertNotExistsTable(self.test_write_table)
query = 'SELECT * FROM (values (true, true), (false, false), (false, null)) as x(fakec_bool, fakec_bool_null)'
dataset = Dataset(query, credentials=self.credentials)
dataset.upload(table_name=self.test_write_table)
dataset = Dataset(self.test_write_table, credentials=self.credentials)
df = dataset.download()
self.assertEqual(df['fakec_bool'].dtype, 'bool')
self.assertEqual(df['fakec_bool_null'].dtype, 'object')
self.assertEqual(list(df['fakec_bool']), [True, False, False])
self.assertEqual(list(df['fakec_bool_null']), [True, False, None])
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_points_dataset(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_mcdonalds_nyc
df = read_mcdonalds_nyc(limit=100)
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 100)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_lines_dataset(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_ne_50m_graticules_15
df = read_ne_50m_graticules_15()
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 35)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_polygons_dataset(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_lnglat_dataset(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_taxi
df = read_taxi(limit=50)
lnglat = ('dropoff_longitude', 'dropoff_latitude')
Dataset(df).upload(with_lnglat=lnglat, table_name=self.test_write_table, credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 50)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_null_geometry_column(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_taxi
df = read_taxi(limit=10)
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom_webmercator IS NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 10)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_with_different_geometry_column(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
df.rename(columns={'the_geom': 'geometry'}, inplace=True)
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_with_different_geom_column(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
df.rename(columns={'the_geom': 'geom'}, inplace=True)
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_geopandas(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_taxi
import shapely
df = read_taxi(limit=50)
df.drop(['the_geom'], axis=1, inplace=True)
gdf = gpd.GeoDataFrame(df.drop(['dropoff_longitude', 'dropoff_latitude'], axis=1),
crs={'init': 'epsg:4326'},
geometry=[shapely.geometry.Point(xy) for xy in
zip(df.dropoff_longitude, df.dropoff_latitude)])
# TODO: use from_geodataframe
dataset = Dataset(gdf).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 50)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_wkt(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_taxi
df = read_taxi(limit=50)
df['the_geom'] = df.apply(lambda x: 'POINT ({x} {y})'
.format(x=x['dropoff_longitude'], y=x['dropoff_latitude']), axis=1)
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 50)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_if_exists_fail_by_default(self):
self.assertNotExistsTable(self.test_write_table)
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
        err_msg = ('Table with name {t} and schema {s} already exists in CARTO. Please choose a different `table_name` '
                   'or use if_exists="replace" to overwrite it').format(t=self.test_write_table, s='public')
with self.assertRaises(CartoException, msg=err_msg):
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_if_exists_append(self):
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
# avoid uploading the same index or cartodb_id
df.index += df.index.max() + 1
df['cartodb_id'] += df['cartodb_id'].max() + 1
Dataset(df).upload(if_exists=Dataset.IF_EXISTS_APPEND,
table_name=self.test_write_table,
credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052 * 2)
@unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
def test_dataset_write_if_exists_replace(self):
from cartoframes.examples import read_brooklyn_poverty
df = read_brooklyn_poverty()
dataset = Dataset(df).upload(table_name=self.test_write_table, credentials=self.credentials)
self.test_write_table = dataset.table_name
dataset = Dataset(df).upload(
if_exists=Dataset.IF_EXISTS_REPLACE, table_name=self.test_write_table, credentials=self.credentials)
self.assertExistsTable(self.test_write_table)
query = 'SELECT cartodb_id FROM {} WHERE the_geom IS NOT NULL'.format(self.test_write_table)
result = self.sql_client.query(query, verbose=True)
self.assertEqual(result['total_rows'], 2052)
def test_dataset_schema_from_parameter(self):
schema = 'fake_schema'
dataset = Dataset('fake_table', schema=schema, credentials=self.credentials)
self.assertEqual(dataset.schema, schema)
def test_dataset_schema_from_non_org_context(self):
dataset = Dataset('fake_table', credentials=self.credentials)
self.assertEqual(dataset.schema, 'public')
def test_dataset_schema_from_org_context(self):
pass
# dataset = DatasetMock('fake_table', credentials=self.credentials)
# self.assertEqual(dataset.schema, 'fake_username')
# FIXME does not work in python 2.7 (COPY stucks and blocks the table, fix after
# https://github.com/CartoDB/CartoDB-SQL-API/issues/579 is fixed)
# @unittest.skipIf(WILL_SKIP, 'no carto credentials, skipping this test')
# def test_dataset_write_with_encoding(self):
# df = pd.DataFrame({'vals': [1, 2], 'strings': ['a', 'ô']})
# dataset = self.con.write(df, self.test_write_table)
# self.test_write_table = dataset.table_name
# self.assertExistsTable(self.test_write_table)
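    # Helper assertions: table existence is probed with a cheap SELECT ... LIMIT 0 query
    # against the SQL API, so a missing relation surfaces as a CartoException.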
def assertExistsTable(self, table_name):
resp = self.sql_client.query('''
SELECT *
FROM {table}
LIMIT 0
'''.format(table=table_name))
self.assertIsNotNone(resp)
def assertNotExistsTable(self, table_name):
try:
self.sql_client.query('''
SELECT *
FROM {table}
LIMIT 0
'''.format(table=table_name))
except CartoException as e:
self.assertTrue('relation "{}" does not exist'.format(table_name) in str(e))
class TestDatasetInfo(unittest.TestCase):
def setUp(self):
self.username = 'fake_username'
self.api_key = 'fake_api_key'
self.credentials = Credentials(username=self.username, api_key=self.api_key)
self._context_mock = ContextMock()
# Mock create_context method
self.original_create_context = context.create_context
context.create_context = lambda c: self._context_mock
def tearDown(self):
context.create_context = self.original_create_context
def test_dataset_info_should_work_from_table(self):
table_name = 'fake_table'
dataset = DatasetMock(table_name, credentials=self.credentials)
self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE)
def test_dataset_get_privacy_from_new_table(self):
query = 'SELECT 1'
dataset = DatasetMock(query, credentials=self.credentials)
dataset.upload(table_name='fake_table')
dataset = DatasetMock('fake_table', credentials=self.credentials)
self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE)
def test_dataset_set_privacy_to_new_table(self):
query = 'SELECT 1'
dataset = DatasetMock(query, credentials=self.credentials)
dataset.upload(table_name='fake_table')
dataset = DatasetMock('fake_table', credentials=self.credentials)
dataset.update_dataset_info(privacy=Dataset.PRIVACY_PUBLIC)
self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PUBLIC)
def test_dataset_set_privacy_with_wrong_parameter(self):
query = 'SELECT 1'
dataset = DatasetMock(query, credentials=self.credentials)
dataset.upload(table_name='fake_table')
wrong_privacy = 'wrong_privacy'
error_msg = 'Wrong privacy. The privacy: {p} is not valid. You can use: {o1}, {o2}, {o3}'.format(
p=wrong_privacy, o1=Dataset.PRIVACY_PRIVATE, o2=Dataset.PRIVACY_PUBLIC, o3=Dataset.PRIVACY_LINK)
with self.assertRaises(ValueError, msg=error_msg):
dataset.update_dataset_info(privacy=wrong_privacy)
def test_dataset_info_props_are_private(self):
table_name = 'fake_table'
dataset = DatasetMock(table_name, credentials=self.credentials)
dataset_info = dataset.dataset_info
self.assertEqual(dataset_info.privacy, Dataset.PRIVACY_PRIVATE)
privacy = Dataset.PRIVACY_PUBLIC
error_msg = str(setting_value_exception('privacy', privacy))
with self.assertRaises(CartoException, msg=error_msg):
dataset_info.privacy = privacy
self.assertEqual(dataset_info.privacy, Dataset.PRIVACY_PRIVATE)
def test_dataset_info_from_dataframe(self):
df = pd.DataFrame.from_dict({'test': [True, [1, 2]]})
dataset = DatasetMock(df)
        error_msg = ('Your data is not synchronized with CARTO. '
                     'First of all, you should call upload method '
                     'to save your data in CARTO.')
with self.assertRaises(CartoException, msg=error_msg):
self.assertIsNotNone(dataset.dataset_info)
def test_dataset_info_from_dataframe_sync(self):
df = pd.DataFrame.from_dict({'test': [True, [1, 2]]})
dataset = DatasetMock(df)
dataset.upload(table_name='fake_table', credentials=self.credentials)
dataset = DatasetMock('fake_table', credentials=self.credentials)
self.assertEqual(dataset.dataset_info.privacy, Dataset.PRIVACY_PRIVATE)
def test_dataset_info_from_query(self):
query = 'SELECT 1'
dataset = DatasetMock(query, credentials=self.credentials)
error_msg = ('We can not extract Dataset info from a QueryDataset. Use a TableDataset '
'`Dataset(table_name)` to get or modify the info from a CARTO table.')
with self.assertRaises(ValueError, msg=error_msg):
self.assertIsNotNone(dataset.dataset_info)
def test_dataset_info_from_query_update(self):
query = 'SELECT 1'
dataset = DatasetMock(query, credentials=self.credentials)
error_msg = ('We can not extract Dataset info from a QueryDataset. Use a TableDataset '
'`Dataset(table_name)` to get or modify the info from a CARTO table.')
with self.assertRaises(ValueError, msg=error_msg):
dataset.update_dataset_info()
class TestDatasetUnit(unittest.TestCase, _UserUrlLoader):
"""Unit tests for cartoframes.Dataset"""
def setUp(self):
self.username = 'fake_username'
self.api_key = 'fake_api_key'
self.credentials = Credentials(username=self.username, api_key=self.api_key)
self.test_geojson = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Point",
"coordinates": [
-3.1640625,
42.032974332441405
]
}
}
]
}
self._context_mock = ContextMock()
# Mock create_context method
self.original_create_context = context.create_context
context.create_context = lambda c: self._context_mock
def tearDown(self):
StrategiesRegistry.instance = None
context.create_context = self.original_create_context
def assertIsTableDatasetInstance(self, table_name):
ds = DatasetMock(table_name, credentials=self.credentials)
error = "Dataset('{}')._strategy is not an instance of TableDataset".format(table_name)
self.assertTrue(isinstance(ds._strategy, TableDataset), msg=error)
def assertIsQueryDatasetInstance(self, query):
ds = DatasetMock(query, credentials=self.credentials)
error = "Dataset('{}')._strategy is not an instance of QueryDataset".format(query)
self.assertTrue(isinstance(ds._strategy, QueryDataset), msg=error)
def assertIsDataFrameDatasetInstance(self, data):
ds = DatasetMock(data)
error = "Dataset('{}')._strategy is not an instance of DataFrameDataset".format(data)
self.assertTrue(isinstance(ds._strategy, DataFrameDataset), msg=error)
def test_creation_from_valid_table_names(self):
table_names = ['myt', 'my_t', 'tgeojson', 't_geojson', 'geojson', 'json', 'select_t']
for table_name in table_names:
self.assertIsTableDatasetInstance(table_name)
def test_creation_from_valid_queries(self):
queries = ['SELECT * FROM', 'select * from', 'select c', 'with n as', 'WITH n AS', 'select * from json',
'select * from geojson']
for query in queries:
self.assertIsQueryDatasetInstance(query)
def test_creation_from_valid_dataframe(self):
df = pd.DataFrame.from_dict({'test': [True, [1, 2]]})
self.assertIsDataFrameDatasetInstance(df)
def test_creation_from_valid_geodataframe(self):
df = pd.DataFrame.from_dict({'test': [True, [1, 2]]})
gdf = gpd.GeoDataFrame(df)
self.assertIsDataFrameDatasetInstance(gdf)
def test_creation_from_valid_localgeojson(self):
self.assertIsDataFrameDatasetInstance(self.test_geojson)
def test_creation_from_valid_geojson_file_path(self):
paths = [os.path.abspath('tests/e2e/data/dataset/fixtures/valid.geojson'),
os.path.abspath('tests/e2e/data/dataset/fixtures/validgeo.json')]
for path in paths:
self.assertIsDataFrameDatasetInstance(path)
def test_creation_from_wrong_geojson_file_path(self):
geojson_file_path = os.path.abspath('tests/e2e/data/dataset/fixtures/wrong.geojson')
with self.assertRaises(Exception):
self.assertIsDataFrameDatasetInstance(geojson_file_path)
def test_creation_from_unexisting_geojson_file_path(self):
geojson_file_path = os.path.abspath('unexisting.geojson')
with self.assertRaises(ValueError, msg='We can not detect the Dataset type'):
self.assertIsDataFrameDatasetInstance(geojson_file_path)
def test_dataset_from_table(self):
table_name = 'fake_table'
dataset = DatasetMock(table_name, credentials=self.credentials)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.table_name, table_name)
self.assertEqual(dataset.schema, 'public')
self.assertEqual(dataset.credentials, self.credentials)
def test_dataset_from_query(self):
query = 'SELECT * FROM fake_table'
dataset = DatasetMock(query, credentials=self.credentials)
self.assertIsInstance(dataset, Dataset)
self.assertEqual(dataset.query, query)
self.assertEqual(dataset.credentials, self.credentials)
self.assertIsNone(dataset.table_name)
def test_dataset_from_dataframe(self):
df = load_geojson(self.test_geojson)
dataset = Dataset(df)
self.assertIsInstance(dataset, Dataset)
self.assertIsNotNone(dataset.dataframe)
self.assertIsNone(dataset.table_name)
self.assertIsNone(dataset.credentials)
def test_dataset_from_geodataframe(self):
gdf = load_geojson(self.test_geojson)
dataset = Dataset(gdf)
self.assertIsInstance(dataset, Dataset)
self.assertIsNotNone(dataset.dataframe)
self.assertIsNone(dataset.table_name)
self.assertIsNone(dataset.credentials)
def test_dataset_from_geojson(self):
geojson = self.test_geojson
dataset = Dataset(geojson)
self.assertIsInstance(dataset, Dataset)
self.assertIsNotNone(dataset.dataframe)
self.assertIsNone(dataset.table_name)
self.assertIsNone(dataset.credentials)
def test_dataset_from_table_without_credentials(self):
table_name = 'fake_table'
error_msg = ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
with self.assertRaises(AttributeError, msg=error_msg):
Dataset(table_name)
def test_dataset_from_query_without_credentials(self):
query = 'SELECT * FROM fake_table'
error_msg = ('Credentials attribute is required. '
'Please pass a `Credentials` instance '
'or use the `set_default_credentials` function.')
with self.assertRaises(AttributeError, msg=error_msg):
Dataset(query)
def test_dataset_get_table_names_from_table(self):
table_name = 'fake_table'
dataset = DatasetMock(table_name, credentials=self.credentials)
self.assertEqual(dataset.get_table_names(), [table_name])
def test_dataset_get_table_names_from_query(self):
table_name = 'fake_table'
QueryDatasetMock.get_table_names = Mock(return_value=[table_name])
query = 'SELECT * FROM {}'.format(table_name)
dataset = DatasetMock(query, credentials=self.credentials)
self.assertEqual(dataset.get_table_names(), [table_name])
def test_dataset_get_table_names_from_dataframe(self):
df = load_geojson(self.test_geojson)
dataset = Dataset(df)
        error_msg = ('Your data is not synchronized with CARTO. '
                     'First of all, you should call upload method '
                     'to save your data in CARTO.')
with self.assertRaises(CartoException, msg=error_msg):
dataset.get_table_names()
def test_create_table_query(self):
df = pd.DataFrame.from_dict({'cartodb_id': [1], 'the_geom': ['POINT (1 1)']})
dataframe_columns_info = DataframeColumnsInfo(df, None)
table_name = 'fake_table'
expected_result = 'CREATE TABLE {} (cartodb_id bigint, the_geom geometry(Point, 4326))'.format(table_name)
dataset = DataFrameDataset(df)
dataset.table_name = table_name
result = dataset._create_table_query(dataframe_columns_info.columns)
self.assertEqual(result, expected_result)
def test_create_table_query_without_geom(self):
df = pd.DataFrame.from_dict({'cartodb_id': [1]})
dataframe_columns_info = DataframeColumnsInfo(df, None)
table_name = 'fake_table'
expected_result = 'CREATE TABLE {} (cartodb_id bigint)'.format(table_name)
dataset = DataFrameDataset(df)
dataset.table_name = table_name
result = dataset._create_table_query(dataframe_columns_info.columns)
self.assertEqual(result, expected_result)
def test_create_table_query_with_several_geometry_columns_prioritize_the_geom(self):
        df = pd.DataFrame([['POINT (0 0)', 'POINT (1 1)', 'POINT (2 2)']], columns=['geom', 'the_geom', 'geometry'])
# -*- coding: utf-8 -*-
"""
Created on Wed FEB 17 20:30:57 2021
Last modified 13 MAR 2021
<NAME>, M.D.
Naval Biotechnology Group
Naval Medical Center Portsmouth
Portsmouth, VA 23323
in collaboration with:
<NAME>, M.D.
<NAME>, M.D.
<NAME>, M.D.
<NAME>, M.D.
<NAME>, M.D.
"""
from bayes_opt import BayesianOptimization
import xgboost as xgb
from sklearn.calibration import CalibratedClassifierCV
from sklearn.metrics import brier_score_loss
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.utils import shuffle
from sklearn.metrics import confusion_matrix
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from collections import Counter
from numpy import array
from matplotlib import pyplot as plt
from sklearn.calibration import calibration_curve
from sklearn.metrics import roc_auc_score
import shap
def process_ortho_data(cpt_code,Train = True):
#define which NSQIP variables you want to extract so you don't load unnecessary data
cols3 = ['CPT','OTHERCPT1','ELECTSURG','AGE','SEX','FNSTATUS2','WNDINF',
'EMERGNCY','PRSEPIS','DIABETES','DYSPNEA','ASACLAS','STEROID',
'ASCITES','VENTILAT','DISCANCR','HYPERMED','HXCHF','SMOKE',
'HXCOPD','DIALYSIS','RENAFAIL','HEIGHT','WEIGHT','DISCHDEST']
#load all the datasets from disk (need to request NSQIP datasets first)
if Train:
df15 = pd.read_spss(r'C:', usecols=cols3)
df15.rename(columns={"AGE": "Age"},inplace=True)
df16 = pd.read_spss(r'C:', usecols=cols3)
df16.rename(columns={"AGE": "Age"},inplace=True)
df17 = pd.read_spss(r'C:', usecols=cols3)
df17.rename(columns={"AGE": "Age"},inplace=True)
df18 = pd.read_spss(r'C:', usecols=cols3)
df18.rename(columns={"AGE": "Age"},inplace=True)
#combine each year into one large dataframe
data2=pd.concat([df15,df16,df17,df18],axis=0)
else:
df19 = pd.read_spss(r'C:', usecols=cols3)
df19.rename(columns={"AGE": "Age"},inplace=True)
data2 = df19.copy()
data2 = shuffle(data2)
data2 = data2.reset_index(drop=True)
#exclusions
#first load cpt of interest. NSQIP mixes strings, floats, and integers
data2=data2[(data2['CPT']==cpt_code) | (data2['CPT']==float(cpt_code))|
(data2['CPT']==str(cpt_code))|(data2['CPT']==str(float(cpt_code)))]
print('Total cpt {:d} ='.format(cpt_code),data2.shape[0])
#drop any cases that had secondary CPTs
data2=data2[data2['OTHERCPT1']=='NULL']
print('After excluding secondary CPT, Total = {:d}'.format(data2.shape[0]))
#exclude non-elective (fractures)
data2=data2[data2['ELECTSURG']=='Yes']
print('After excluding non-elective cases, Total = {:d}'.format(data2.shape[0]))
#exclude unknown discharge dest
data2 = data2.drop(data2[(data2['DISCHDEST'] == 'NULL')|
(data2['DISCHDEST'] == 'Unknown')|
(data2['DISCHDEST'] == 'Expired')|
(data2['DISCHDEST'] == 'Against Medical Advice (AMA)')].index)
print('After excluding unknown discharge location, Total = {:d}'.format(data2.shape[0]))
#drop ASA 5
data2=data2.drop(data2[data2['ASACLAS']=='5-Moribund'].index)
print('After excluding ASA 5, Total = {:d}'.format(data2.shape[0]))
#drop sepsis or septic shock
data2=data2.drop(data2[(data2['PRSEPIS']=='Sepsis')|
(data2['PRSEPIS']=='Septic Shock')|
(data2['PRSEPIS']=='Septic')].index)
#drop wound infection
data2=data2.drop(data2[(data2['WNDINF']=='Yes')].index)
print('After excluding sepsis or wound infections, Total = {:d}'.format(data2.shape[0]))
#we will drop rows with missing data later after processing the various names
#used for missing data (e.g. 'NUL','NULL','Unknown',etc)
#define targets - assign 0 to Home or Facility which was home, 1 to everything else
dest_pos = ['Rehab','Separate Acute Care','Unskilled Facility Not Home',
'Skilled Care, Not Home','Unskilled Facility Not','Hospice',
'Multi-level Senior Community']
dest_neg = ['Home','Facility Which was Home']
data2['DISCHDEST']=data2['DISCHDEST'].replace(to_replace=dest_neg,value='0')
data2['DISCHDEST']=data2['DISCHDEST'].replace(to_replace=dest_pos,value='1')
data2['DISCHDEST']=data2['DISCHDEST'].astype(int)
targets_data = data2['DISCHDEST']
targets_data=array(targets_data)
targets_data=targets_data.reshape(-1,1)
#now process all the inputs and handle missing data
#process BMI
BMI=[]
weights1=data2['WEIGHT'].to_numpy()
heights1=data2['HEIGHT'].to_numpy()
for i in range(len(data2)):
if (weights1[i]!=-99) and (heights1[i]!=-99):
#convert height and weight to BMI if both are known
BMI.append((703*weights1[i])/((heights1[i])**2))
else:
BMI.append(-99)
for i in range(len(BMI)):
if BMI[i]>=70:
BMI[i]=70
if BMI[i] < 15 and BMI[i]>0:
BMI[i]=15
if (BMI[i]==-99):
BMI[i]=np.nan
BMI=array(BMI).reshape(-1,1)
#process age
    data2['Age'] = data2['Age'].astype(str).replace(r'\.0', '', regex=True)
x00=data2['Age']
x0=x00.copy()
for i in range(len(x00)):
if x00.iloc[i]=='90+':
x0.iloc[i]='90'
elif x00.iloc[i]=='-99':
x0.iloc[i]='nan'
x0=x0.replace({'nan':'10'})
x0=x0.astype(float)
x0=x0.replace({10:np.nan})
x0=x0.to_numpy().reshape(-1,1)
x1=data2['SEX']
x1=x1.replace({'NULL':np.nan,'non-bi':np.nan,'male':0,'female':1})
x1=x1.to_numpy().reshape(-1,1)
x2 = data2['FNSTATUS2']
x2=x2.replace({'Independent':0,'Partially Dependent':1,'Partially D':1,
'Totally Dep':2,'Totally Dependent':2,'Unknown':np.nan})
x2=x2.to_numpy().reshape(-1,1)
x4=data2['ASACLAS']
x4=x4.replace({'NULL':np.nan,'Null':np.nan,'None assigned':np.nan,
'1-No Disturb':1,'2-Mild Disturb':2,'3-Severe Disturb':3,
'4-Life Threat':4})
x4=x4.to_numpy().reshape(-1,1)
x5=data2['STEROID']
x5=x5.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x5=x5.to_numpy().reshape(-1,1)
x6=data2['ASCITES']
x6=x6.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1,'Ye':1})
x6=x6.to_numpy().reshape(-1,1)
x77 = data2['PRSEPIS']
x77=x77.replace({'NULL':np.nan,'None':0,'SIRS':1})
x7=x77.to_numpy().reshape(-1,1)
x8=data2['VENTILAT']
x8=x8.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x8=x8.to_numpy().reshape(-1,1)
x9=data2['DISCANCR']
x9=x9.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x9=x9.to_numpy().reshape(-1,1)
x101 = data2['DIABETES']
x101=x101.replace({'NULL':np.nan,'NO':0,'ORAL':1,
'NON-INSULIN':1,'INSULIN':1})
x10=x101.to_numpy().reshape(-1,1)
x11=data2['HYPERMED']
x11=x11.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x11=x11.to_numpy().reshape(-1,1)
x13=data2['HXCHF']
x13=x13.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1,'Ye':1})
x13=x13.to_numpy().reshape(-1,1)
x14= data2['DYSPNEA']
x14=x14.replace({'NULL':np.nan,'No':0,'MODERATE EXERTION':1,'AT REST':1})
x14=x14.to_numpy().reshape(-1,1)
x15=data2['SMOKE']
x15=x15.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x15=x15.to_numpy().reshape(-1,1)
x16=data2['HXCOPD']
x16=x16.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1})
x16=x16.to_numpy().reshape(-1,1)
x17=data2['DIALYSIS']
x17=x17.replace({'NULL':np.nan,'NUL':np.nan,'No':0,'Yes':1,'Ye':1})
x17=x17.to_numpy().reshape(-1,1)
x18=data2['RENAFAIL']
x18=x18.replace({'NULL':np.nan,'NU':np.nan,'No':0,'Yes':1,'Ye':1})
x18=x18.to_numpy().reshape(-1,1)
x19 = BMI.reshape(-1,1)
#put all inputs together into one array
inputs_aggregate = np.concatenate([x0,x1,x2,x4,x5,x6,x7,x8,x9,x10,x11,x13,
x14,x15,x16,x17,x18,x19],axis=1)
#drop nans
data3 = inputs_aggregate.copy()
data4 = targets_data.copy()
data5 = np.concatenate([data3,data4],axis=1)
data5=data5[~np.isnan(data5).any(axis=1)]
print('final size of data for CPT {:d} ='.format(cpt_code),data5.shape)
inputs_aggregate = data5[:,-19:-1]
targets_data = data5[:,-1].reshape(-1,1)
#make inputs for training (70% of data) inputs_NSQIP and targets_train
#then split up the holdout data for calibration and testing
#inputs_NSQIP, inputs_holdout, targets_train, targets_holdout = train_test_split(inputs_aggregate, targets_data, test_size=0.3, random_state=444,stratify=targets_data)
#inputs_cal, inputs_test, targets_cal, targets_test= train_test_split(inputs_holdout, targets_holdout, test_size=0.67, random_state=444,stratify=targets_holdout)
#print('train data = ',inputs_NSQIP.shape[0], '\ncal data = ',inputs_cal.shape[0], '\ntest data = ',inputs_test.shape[0])
return inputs_aggregate, targets_data
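# Illustrative usage (a sketch, not part of the original pipeline): build the design matrix
# for one procedure and reproduce the commented-out 70/10/20 train/calibration/test split
# above. CPT 27447 (total knee arthroplasty) is an assumed example code.
def example_build_splits(cpt_code=27447):
    inputs_aggregate, targets_data = process_ortho_data(cpt_code, Train=True)
    # 70% train, 30% holdout; the holdout is then split ~1/3 calibration, ~2/3 test
    inputs_train, inputs_holdout, targets_train, targets_holdout = train_test_split(
        inputs_aggregate, targets_data, test_size=0.3, random_state=444, stratify=targets_data)
    inputs_cal, inputs_test, targets_cal, targets_test = train_test_split(
        inputs_holdout, targets_holdout, test_size=0.67, random_state=444, stratify=targets_holdout)
    return inputs_train, inputs_cal, inputs_test, targets_train, targets_cal, targets_test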
#now for the fun part
#feel free to adjust the hyperparameters within the function
#this will return optimal hyperparameters after bayesian optimization
def optimized_data(inputs_NSQIP,targets_train,rand_points=40,search_number=40):
def xgboost_bayesian(max_depth,learning_rate,colsample_bytree, min_child_weight,reg_alpha,gamma):
optimizer = xgb.XGBClassifier(max_depth=int(max_depth),
learning_rate= learning_rate,
n_estimators= 200,
reg_alpha = reg_alpha,
gamma = gamma,
nthread = -1,
colsample_bytree = colsample_bytree,
min_child_weight = min_child_weight,
objective='binary:logistic',
seed = 444,
scale_pos_weight = 1)
roc_auc_holder=[]
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=1,random_state=444)
for train_index, test_index in rskf.split(inputs_NSQIP, targets_train):
x_train, x_test = inputs_NSQIP[train_index],inputs_NSQIP[test_index]
y_train, y_test = targets_train[train_index], targets_train[test_index]
optimizer.fit(x_train,y_train.ravel(),eval_set = [(x_test,y_test.ravel())], eval_metric = 'logloss',early_stopping_rounds = 10)
probs = optimizer.predict_proba(x_test)
probs = probs[:,1]
roc1 = roc_auc_score(y_test,probs)
roc_auc_holder.append(roc1)
return sum(roc_auc_holder)/len(roc_auc_holder)
hyperparameters = {
'max_depth': (3, 12),
'learning_rate': (0.01, 0.3),
'reg_alpha': (0, 0.5),
'gamma': (0, 0.5),
'min_child_weight': (5,30),
'colsample_bytree': (0.1, 1)
}
bayesian_object = BayesianOptimization(f = xgboost_bayesian,
pbounds = hyperparameters,
verbose = 2)
bayesian_object.maximize(init_points=rand_points,n_iter=search_number,
acq='ucb', kappa= 2, alpha = 1e-7)
#now we have optimal parameters
OrthoV1 = xgb.XGBClassifier(max_depth=int(bayesian_object.max['params']['max_depth']),
learning_rate= bayesian_object.max['params']['learning_rate'],
n_estimators= 200,
reg_alpha = bayesian_object.max['params']['reg_alpha'],
gamma = bayesian_object.max['params']['gamma'],
nthread = -1,
colsample_bytree = bayesian_object.max['params']['colsample_bytree'],
min_child_weight = bayesian_object.max['params']['min_child_weight'],
objective='binary:logistic',
seed = 444,
scale_pos_weight = 1)
    #now refit all data and determine optimal n_estimators via cross-val
rskf = RepeatedStratifiedKFold(n_splits=5, n_repeats=1,random_state=444)
best_it_hold = []
for train_index, test_index in rskf.split(inputs_NSQIP, targets_train):
x_train, x_test = inputs_NSQIP[train_index],inputs_NSQIP[test_index]
y_train, y_test = targets_train[train_index], targets_train[test_index]
OrthoV1.fit(x_train,y_train.ravel(), eval_set=[(x_test,y_test.ravel())],eval_metric = 'logloss',early_stopping_rounds=10)
best_it_hold.append(OrthoV1.best_iteration)
best_training_iteration = int(round(sum(best_it_hold)/len(best_it_hold)))
optimized_params = {'max_depthV1':int(round(bayesian_object.max['params']['max_depth'])),
'colsample_bytreeV1':bayesian_object.max['params']['colsample_bytree'],
'gammaV1':bayesian_object.max['params']['gamma'],
'learning_rateV1': bayesian_object.max['params']['learning_rate'],
'min_child_weightV1':bayesian_object.max['params']['min_child_weight'],
'reg_alphaV1':bayesian_object.max['params']['reg_alpha'],
'best_training_iteration':best_training_iteration,
'roc':bayesian_object.max['target']}
return optimized_params
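# Sketch of how the tuning step can be invoked on the training split (assumes the splits from
# example_build_splits above); the reduced search budget is only to keep a dry run short.
def example_tune(inputs_train, targets_train):
    optimized_params = optimized_data(inputs_train, targets_train, rand_points=10, search_number=10)
    print('cross-validated ROC AUC during search: {:.3f}'.format(optimized_params['roc']))
    return optimized_params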
def calibration_split(inputs_test,targets_test):
inputs_cal, inputs_test_final, targets_cal, targets_test_final = train_test_split(inputs_test, targets_test, test_size=0.5, random_state=444,stratify=targets_test)
return inputs_cal, inputs_test_final, targets_cal, targets_test_final
#now we need to fit the final model (above was just estimating performance)
#and then perform calibration
def calibrate_model(inputs_cal, inputs_test_final, targets_cal, targets_test_final,OrthoV1):
cal_iso = CalibratedClassifierCV(OrthoV1, method='isotonic', cv='prefit')
cal_iso.fit(inputs_cal, targets_cal.ravel())
#make some predictions!
probs = OrthoV1.predict_proba(inputs_test_final)
probs = probs[:,1]
probs2 = cal_iso.predict_proba(inputs_test_final)
probs2=probs2[:,1]
#brier for all three
b_loss = brier_score_loss(targets_test_final,probs)
b_loss_iso = brier_score_loss(targets_test_final,probs2)
#calibration curves for all three
fop, mpv = calibration_curve(targets_test_final,probs,n_bins = 10,strategy='uniform')
fop2, mpv2 = calibration_curve(targets_test_final,probs2,n_bins = 10,strategy='uniform')
#Calibration curves
fig, ax = plt.subplots()
fig.set_size_inches(6,4)
ax.plot(mpv,fop,'b',label='No Cal, Brier = {:.3f}'.format(b_loss))
ax.plot(mpv2,fop2,'r:',label='Isotonic, Brier = {:.3f}'.format(b_loss_iso))
ax.plot([0,0.9],[0,0.9],'k--',label='Perfect Calibration')
ax.legend(loc = 'lower right')
ax.plot(mpv,fop,'bs')
ax.plot(mpv2,fop2,'ro')
ax.set_title('Calibration Curves for Validation Data, Hip')
ax.set_xlabel('Mean Predicted Value')
ax.set_ylabel('Fraction of Positives')
plt.show()
return cal_iso
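# Sketch (assumes `optimized_params` from optimized_data above): fit the final booster on the
# training split, then isotonic-calibrate it on the calibration split. The hyperparameter
# wiring mirrors create_model defined below.
def example_fit_and_calibrate(inputs_train, targets_train, inputs_cal, targets_cal,
                              inputs_test, targets_test, optimized_params):
    model = xgb.XGBClassifier(max_depth=optimized_params['max_depthV1'],
                              learning_rate=optimized_params['learning_rateV1'],
                              n_estimators=optimized_params['best_training_iteration'],
                              reg_alpha=optimized_params['reg_alphaV1'],
                              gamma=optimized_params['gammaV1'],
                              colsample_bytree=optimized_params['colsample_bytreeV1'],
                              min_child_weight=optimized_params['min_child_weightV1'],
                              objective='binary:logistic',
                              seed=444)
    model.fit(inputs_train, targets_train.ravel())
    return calibrate_model(inputs_cal, inputs_test, targets_cal, targets_test, model)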
def create_model(train,targets,test,optimized_params):
OrthoV1 = xgb.XGBClassifier(max_depth = optimized_params['max_depthV1'],
learning_rate= optimized_params['learning_rateV1'],
n_estimators= optimized_params['best_training_iteration'],
reg_alpha = optimized_params['reg_alphaV1'],
gamma = optimized_params['gammaV1'],
nthread = -1,
colsample_bytree = optimized_params['colsample_bytreeV1'],
min_child_weight = optimized_params['min_child_weightV1'],
objective='binary:logistic',
seed = 444,
scale_pos_weight = 1)
OrthoV1.fit(train,targets.ravel())
model_probs=OrthoV1.predict_proba(test)
model_probs=model_probs[:,1]
return model_probs
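# Quick sanity check of the refit-and-predict helper above (sketch; assumes the example splits):
def example_check_probs(inputs_train, targets_train, inputs_test, targets_test, optimized_params):
    probs = create_model(inputs_train, targets_train, inputs_test, optimized_params)
    print('test ROC AUC: {:.3f}'.format(roc_auc_score(targets_test.ravel(), probs)))
    return probs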
def bootstrap_internal(inputs_NSQIP,targets_train,optimized_params):
#bootstrap code for internal validity (Fitting model each time)
roc_hold=[]
brier_hold=[]
index_holder=range(0,len(inputs_NSQIP))
j=0
x_train=[]
y_train=[]
for i in range(500):
x_train=[]
y_train=[]
x_test=[]
y_test=[]
boot = np.random.choice(index_holder,size=(len(index_holder)),replace=True)
test_index = [x for x in index_holder if x not in boot]
for k in range(len(boot)):
x_train.append(inputs_NSQIP[boot[k]])
y_train.append(targets_train[boot[k]])
x_train=np.array(x_train)
y_train=np.array(y_train)
#define test data (data not in bootstrap)
for k in range(len(test_index)):
x_test.append(inputs_NSQIP[test_index[k]])
y_test.append(targets_train[test_index[k]])
x_test=np.array(x_test)
y_test=np.array(y_test)
preds=create_model(x_train,y_train,x_test,optimized_params)
auc_roc = roc_auc_score(y_test,preds)
##Brier score
b_loss = brier_score_loss(y_test, preds)
#print('brier score ='+str(b_loss))
print('be patient, iteration',j,' ROC = ',auc_roc)
j=j+1
roc_hold.append(auc_roc)
brier_hold.append(b_loss)
roc_hold=array(sorted(roc_hold))
brier_hold=array(sorted(brier_hold))
av_brier = sum(brier_hold)/len(brier_hold)
av_roc = sum(roc_hold)/len(roc_hold)
print('ROC AUC = ',av_roc,', 95% CI = ',(roc_hold[11]+roc_hold[12])/2,'to ',(roc_hold[486]+roc_hold[487])/2)
print('Brier Score = ',av_brier,', 95% CI = ',(brier_hold[11]+brier_hold[12])/2,'to ',(brier_hold[486]+brier_hold[487])/2)
def bootstrap_test(inputs_NSQIP,targets_train,inputs_test_final,targets_test_final,optimized_params,cal_iso_model):
preds= cal_iso_model.predict_proba(inputs_test_final)
preds=preds[:,1]
roc_hold=[]
brier_hold=[]
index_holder=range(0,len(targets_test_final))
j=0
y_test=[]
y_preds=[]
for i in range(500):
y_test=[]
y_preds=[]
boot = np.random.choice(index_holder,size=(len(index_holder)),replace=True)
for k in range(len(boot)):
y_test.append(targets_test_final[boot[k]])
y_preds.append(preds[boot[k]])
y_test=np.array(y_test)
y_preds=np.array(y_preds)
auc_roc = roc_auc_score(y_test,y_preds)
b_loss = brier_score_loss(y_test, y_preds)
print('be patient, iteration',j)
j=j+1
roc_hold.append(auc_roc)
brier_hold.append(b_loss)
av_brier = sum(brier_hold)/len(brier_hold)
av_roc = sum(roc_hold)/len(roc_hold)
roc_hold=sorted(roc_hold)
brier_hold=sorted(brier_hold)
print('ROC AUC = ',av_roc,', 95% CI = ',(roc_hold[11]+roc_hold[12])/2,'to ',(roc_hold[486]+roc_hold[487])/2)
print('Brier Score = ',av_brier,', 95% CI = ',(brier_hold[11]+brier_hold[12])/2,'to ',(brier_hold[486]+brier_hold[487])/2)
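# Note: with 500 resamples, averaging the sorted values at indices 11/12 and 486/487 in the two
# bootstrap helpers above brackets the empirical 2.5th and 97.5th percentiles, i.e. a simple
# percentile-bootstrap 95% CI.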
def clinical_impact(x_train,y_train,x_test,targets_test,optimized_params,cal_iso_model):
#clinical impact
preds=cal_iso_model.predict_proba(x_test)[:,1].reshape(-1,1)
Thresholds = np.linspace(0.001, 0.6, 100, endpoint=True)
sens_XGB = []
spec_XGB = []
ppv_XGB=[]
num_tp = []
num_fn = []
num_fp = []
dca = []
all_treat = []
no_treat = []
prevalence = (targets_test==1).sum()/targets_test.shape[0]
for j in range(len(Thresholds)):
y_pred_XGB = [1 if i>Thresholds[j] else 0 for i in preds]
CM_XGB = confusion_matrix(targets_test, y_pred_XGB)
#sens and ppv
tp_XGB = CM_XGB[1,1]
fp_XGB = CM_XGB[0,1]
fn_XGB = CM_XGB[1,0]
tn_XGB = CM_XGB[0,0]
        pr_XGB = tp_XGB/(tp_XGB+fp_XGB)
        rec_XGB = tp_XGB/(tp_XGB+fn_XGB)
        spec_XGB_hold = tn_XGB/(tn_XGB+fp_XGB)
sens_XGB.append(rec_XGB)
spec_XGB.append(spec_XGB_hold)
ppv_XGB.append(pr_XGB)
num_tp.append(tp_XGB)
num_fn.append(fn_XGB)
num_fp.append(fp_XGB)
dca.append((tp_XGB/(preds.shape[0]))-(fp_XGB/(preds.shape[0]))*(Thresholds[j]/(1-Thresholds[j])))
no_treat.append(0)
all_treat.append((prevalence)-(1-prevalence)*(Thresholds[j]/(1-Thresholds[j])))
fig, ax = plt.subplots()
fig.set_size_inches(6,4)
ax.plot(Thresholds,no_treat,'k',label='No Treatment')
ax.plot(Thresholds,all_treat,'b-.',label='Treat All')
ax.plot(Thresholds,dca,'r--',label='Model')
ax.legend(loc = 'upper right')
ax.set_title('Decision Curve, Knee Model')
ax.set_xlabel('Decision Threshold (%)')
ax.set_ylabel('Net Clinical Benefit')
plt.xlim([0,0.5])
plt.ylim([-0.005, .1])
plt.show()
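# The decision-curve quantities computed in clinical_impact above follow the standard
# net-benefit definition, net benefit = TP/n - (FP/n) * (p_t / (1 - p_t)) at each threshold p_t,
# plotted against the treat-all and treat-none reference strategies.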
def demographics(inputs_NSQIP, inputs_test):
total_train_inputs = inputs_NSQIP
place_hold=np.nan
#now get all the means, std, and counts
age_train_mean = np.nanmean(total_train_inputs[:,0])
age_train_std = np.nanstd(total_train_inputs[:,0])
age_test_mean = np.nanmean(inputs_test[:,0])
age_test_std = np.nanstd(inputs_test[:,0])
ages=array([age_train_mean,age_train_std,age_test_mean,age_test_std,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
sex_train_male = (total_train_inputs[:,1]==0).sum()
sex_train_female = (total_train_inputs[:,1]==1).sum()
sex_test_male = (inputs_test[:,1]==0).sum()
sex_test_female = (inputs_test[:,1]==1).sum()
sexes = array([sex_train_male,sex_train_female,sex_test_male,sex_test_female,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
fxn_train_ind = (total_train_inputs[:,2]==0).sum()
fxn_train_part = (total_train_inputs[:,2]==1).sum()
fxn_train_tot = (total_train_inputs[:,2]==2).sum()
fxn_test_ind = (inputs_test[:,2]==0).sum()
fxn_test_part = (inputs_test[:,2]==1).sum()
fxn_test_tot = (inputs_test[:,2]==2).sum()
fxns = array([fxn_train_ind,fxn_train_part,fxn_train_tot,fxn_test_ind,fxn_test_part,fxn_test_tot,place_hold,place_hold]).reshape(-1,1)
asa_train_1 = Counter(total_train_inputs[:,3])[1.0]
asa_train_2 = Counter(total_train_inputs[:,3])[2.0]
asa_train_3 = Counter(total_train_inputs[:,3])[3.0]
asa_train_4 = Counter(total_train_inputs[:,3])[4.0]
asa_test_1 = Counter(inputs_test[:,3])[1.0]
asa_test_2 = Counter(inputs_test[:,3])[2.0]
asa_test_3 = Counter(inputs_test[:,3])[3.0]
asa_test_4 = Counter(inputs_test[:,3])[4.0]
asas=array([asa_train_1,asa_train_2,asa_train_3,asa_train_4,asa_test_1,asa_test_2,asa_test_3,asa_test_4]).reshape(-1,1)
steroids_train_yes = Counter(total_train_inputs[:,4])[1.0]
steroids_train_no = Counter(total_train_inputs[:,4])[0.0]
steroids_test_yes = Counter(inputs_test[:,4])[1.0]
steroids_test_no = Counter(inputs_test[:,4])[0.0]
steroids = array([steroids_train_yes,steroids_train_no,steroids_test_yes,steroids_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
ascites_train_yes = Counter(total_train_inputs[:,5])[1.0]
ascites_train_no = Counter(total_train_inputs[:,5])[0.0]
ascites_test_yes = Counter(inputs_test[:,5])[1.0]
ascites_test_no = Counter(inputs_test[:,5])[0.0]
ascites=array([ascites_train_yes,ascites_train_no,ascites_test_yes,ascites_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
sirs_train_yes = Counter(total_train_inputs[:,6])[1.0]
sirs_train_no = Counter(total_train_inputs[:,6])[0.0]
sirs_test_yes = Counter(inputs_test[:,6])[1.0]
sirs_test_no = Counter(inputs_test[:,6])[0.0]
sirs=array([sirs_train_yes,sirs_train_no,sirs_test_yes,sirs_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
vent_train_yes = Counter(total_train_inputs[:,7])[1.0]
vent_train_no = Counter(total_train_inputs[:,7])[0.0]
vent_test_yes = Counter(inputs_test[:,7])[1.0]
vent_test_no = Counter(inputs_test[:,7])[0.0]
vents = array([vent_train_yes,vent_train_no,vent_test_yes,vent_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
cancer_train_yes = Counter(total_train_inputs[:,8])[1.0]
cancer_train_no = Counter(total_train_inputs[:,8])[0.0]
cancer_test_yes = Counter(inputs_test[:,8])[1.0]
cancer_test_no = Counter(inputs_test[:,8])[0.0]
cancers=array([cancer_train_yes,cancer_train_no,cancer_test_yes,cancer_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
diabetes_train_yes = Counter(total_train_inputs[:,9])[1.0]
diabetes_train_no = Counter(total_train_inputs[:,9])[0.0]
diabetes_test_yes = Counter(inputs_test[:,9])[1.0]
diabetes_test_no = Counter(inputs_test[:,9])[0.0]
diabetes=array([diabetes_train_yes,diabetes_train_no,diabetes_test_yes,diabetes_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
htn_train_yes = Counter(total_train_inputs[:,10])[1.0]
htn_train_no = Counter(total_train_inputs[:,10])[0.0]
htn_test_yes = Counter(inputs_test[:,10])[1.0]
htn_test_no = Counter(inputs_test[:,10])[0.0]
htn = array([htn_train_yes,htn_train_no,htn_test_yes,htn_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
chf_train_yes = Counter(total_train_inputs[:,11])[1.0]
chf_train_no = Counter(total_train_inputs[:,11])[0.0]
chf_test_yes = Counter(inputs_test[:,11])[1.0]
chf_test_no = Counter(inputs_test[:,11])[0.0]
chf = array([chf_train_yes,chf_train_no,chf_test_yes,chf_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
dyspnea_train_yes = Counter(total_train_inputs[:,12])[1.0]
dyspnea_train_no = Counter(total_train_inputs[:,12])[0.0]
dyspnea_test_yes = Counter(inputs_test[:,12])[1.0]
dyspnea_test_no = Counter(inputs_test[:,12])[0.0]
dyspnea = array([dyspnea_train_yes,dyspnea_train_no,dyspnea_test_yes,dyspnea_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
smoker_train_yes = Counter(total_train_inputs[:,13])[1.0]
smoker_train_no = Counter(total_train_inputs[:,13])[0.0]
smoker_test_yes = Counter(inputs_test[:,13])[1.0]
smoker_test_no = Counter(inputs_test[:,13])[0.0]
smoker = array([smoker_train_yes,smoker_train_no,smoker_test_yes,smoker_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
COPD_train_yes = Counter(total_train_inputs[:,14])[1.0]
COPD_train_no = Counter(total_train_inputs[:,14])[0.0]
COPD_test_yes = Counter(inputs_test[:,14])[1.0]
COPD_test_no = Counter(inputs_test[:,14])[0.0]
COPD = array([COPD_train_yes,COPD_train_no,COPD_test_yes,COPD_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
dialysis_train_yes = Counter(total_train_inputs[:,15])[1.0]
dialysis_train_no = Counter(total_train_inputs[:,15])[0.0]
dialysis_test_yes = Counter(inputs_test[:,15])[1.0]
dialysis_test_no = Counter(inputs_test[:,15])[0.0]
dialysis = array([dialysis_train_yes,dialysis_train_no,dialysis_test_yes,dialysis_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
renalfail_train_yes = Counter(total_train_inputs[:,16])[1.0]
renalfail_train_no = Counter(total_train_inputs[:,16])[0.0]
renalfail_test_yes = Counter(inputs_test[:,16])[1.0]
renalfail_test_no = Counter(inputs_test[:,16])[0.0]
renalfail=array([renalfail_train_yes,renalfail_train_no,renalfail_test_yes,renalfail_test_no,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
bmi_train_mean = np.nanmean(total_train_inputs[:,17])
bmi_train_std = np.nanstd(total_train_inputs[:,17])
bmi_test_mean = np.nanmean(inputs_test[:,17])
bmi_test_std = np.nanstd(inputs_test[:,17])
bmi=array([bmi_train_mean,bmi_train_std,bmi_test_mean,bmi_test_std,place_hold,place_hold,place_hold,place_hold]).reshape(-1,1)
names_dem = ['Age','Sex','ASA PS','BMI','HTN','Diabetes','COPD','Functional',
'Smoker','Dyspnea','Steroids','CHF','Dialysis','Cancer','SIRS',
'Renal Failure','Vent','Ascites']
dem_data = np.concatenate([ages,sexes,asas,bmi,htn,diabetes,COPD,fxns,smoker,dyspnea,steroids,chf,dialysis,cancers,sirs,renalfail,vents,ascites],axis=1)
dem_pd=pd.DataFrame(dem_data)
dem_pd.columns=names_dem
dem_pd.to_excel(r'C:', index = False)
print(dem_pd)
def shap_plots(inputs_NSQIP, inputs_test_final, targets_train, targets_test_final,optimized_params,OrthoV1):
#summary plot
names2=['Age','Sex','Fxn Status','ASA PS','Steroids','Ascites','SIRS','Ventilator','Cancer','Diabetes','HTN','CHF','Dyspnea','Smoker','COPD','Dialysis','Renal Failure','BMI']
explainer=shap.TreeExplainer(OrthoV1)
shap_values=explainer.shap_values(inputs_test_final)
shap.summary_plot(shap_values,inputs_test_final,feature_names=names2,show=False)
#shap.summary_plot(shap_values,inputs_test_final,feature_names=names2,plot_type='bar',show=False)
#force plots for sensitivity analysis
j=0
k=1
sens_true_pos_hold =[]
sens_false_neg_hold = []
least_conf=[]
probs=create_model(inputs_NSQIP,targets_train,inputs_test_final,optimized_params)
preds_hold = np.concatenate([probs.reshape(-1,1),targets_test_final],axis=1)
preds_hold_sorted=preds_hold.copy()
preds_hold_sorted=preds_hold_sorted[preds_hold_sorted[:,0].argsort()]
#first true positives
while j<5:
c = preds_hold_sorted[-k,0]
if preds_hold_sorted[-k,1]==1:
sens_true_pos_hold.append(np.where(probs==c))
j=j+1
k=k+1
#now false negatives
j=0
k=1
while j<5:
c = preds_hold_sorted[-k,0]
if preds_hold_sorted[-k,1]==0:
sens_false_neg_hold.append(np.where(probs==c))
j=j+1
k=k+1
#now find the least confident predictors
j=0
    k = 5
least_conf=[]
while len(least_conf)<5:
c = preds_hold_sorted[j,0]
hold1=np.where(probs==c)
hold1=array(hold1)
if hold1.shape[1]>1:
for i in range((hold1.shape[1])):
least_conf.append(hold1[0,i])
k=k-hold1.shape[1]
else:
least_conf.append(hold1)
j=j+1
print("j =",j)
print('k=',k)
least_conf=least_conf[0:5]
least_conf=array(least_conf)
least_conf=least_conf.astype(int)
#account for any duplicates
sens_false_neg_hold = np.concatenate(sens_false_neg_hold,axis=1)
sens_false_neg_hold = sens_false_neg_hold[0,0:5]
sens_true_pos_hold = np.concatenate(sens_true_pos_hold,axis=1)
sens_true_pos_hold = sens_true_pos_hold[0,0:5]
sens_true_pos_hold=np.squeeze(sens_true_pos_hold)
sens_false_neg_hold=np.squeeze(sens_false_neg_hold)
least_conf=np.squeeze(least_conf)
##now we have indices of most confident correct and most confident but incorrect
sens_true_pos_hold=array(sens_true_pos_hold)
data_true_pos = inputs_test_final[sens_true_pos_hold]
sens_false_neg_hold=array(sens_false_neg_hold)
data_false_neg = inputs_test_final[sens_false_neg_hold]
#plot all of the force_plots
#true positives
#basic formatting for display purposes only
inputs_test2=inputs_test_final.copy()
for i in range(len(inputs_test2)):
inputs_test2[i,-1]=round(inputs_test2[i,-1],1)
for i in range(0,3):
shap_display=shap.force_plot(explainer.expected_value,shap_values[sens_true_pos_hold[i],:],inputs_test2[sens_true_pos_hold[i],:],matplotlib=True,feature_names=names2,show=False,text_rotation=60)
print('patient: ',sens_true_pos_hold[i],preds_hold[sens_true_pos_hold[i],0],preds_hold[sens_true_pos_hold[i],1])
#false negatives
for i in range(0,3):
shap_display=shap.force_plot(explainer.expected_value,shap_values[sens_false_neg_hold[i],:],inputs_test2[sens_false_neg_hold[i],:],matplotlib=True,feature_names=names2,show=False,text_rotation=60)
print('patient: ',sens_false_neg_hold[i],preds_hold[sens_false_neg_hold[i],0],preds_hold[sens_false_neg_hold[i],1])
#least confident
for i in range(0,3):
shap_display=shap.force_plot(explainer.expected_value,shap_values[least_conf[i],:],inputs_test2[least_conf[i],:],matplotlib=True,feature_names=names2,show=False,text_rotation=60)
print('patient: ',least_conf[i],preds_hold[least_conf[i],0],preds_hold[least_conf[i],1])
#SHAP dependency plots
    df_inputs_test = pd.DataFrame(inputs_test_final, columns=names2)
import os
import pandas as pd
from typing import Union
import numpy as np
from collections import defaultdict
import data_parser as mypars
import seaborn as sns
from matplotlib.colors import rgb2hex, colorConverter
import bioinfo as mybio
clusters = ['Opc', 'In', 'Ex', 'Mic', 'Ast', 'Oli']
cols = ['Healthy vs Early-AD', 'Healthy vs Late-AD', 'Early-AD vs Late-AD (less)', 'Early-AD vs Late-AD (greater)']
ordered_labs = ['Ast_0', 'Ast_1', 'Ast_3', 'Ast_4', 'Ast [cell type]',
'Ex_0', 'Ex_1', 'Ex_2', 'Ex_3', 'Ex_4', 'Ex_5', 'Ex_6', 'Ex_7',
'Ex_8', 'Ex_9', 'Ex_10', 'Ex_11', 'Ex_12', 'Ex [cell type]',
'In_0', 'In_1', 'In_2', 'In_3', 'In_4', 'In_5', 'In_6', 'In_7', 'In [cell type]',
'Mic_0', 'Mic_1', 'Mic_2', 'Mic_3', 'Mic_4', 'Mic [cell type]',
'Oli_0', 'Oli_1', 'Oli_3', 'Oli_4', 'Oli [cell type]',
'Opc_0', 'Opc_1', 'Opc_2', 'Opc_3', 'Opc [cell type]']
ct_lab = ['Ex', 'In', 'Ast', 'Mic', 'Oli', 'Opc'] # cell_types labels
def load_Xwas(dir_xwas: str = None):
"""
load folder with the lists of genes of interest
Returns
-------
"""
dir_was = dir_xwas if dir_xwas is not None else '/Users/a.possenti/OneDrive - University Of Cambridge/' \
'Python/Repositories/Gene_Lists'
nwas = list(map(str.strip, open(f"{dir_was}/Genes_nwas.txt").readlines()))
gwas = list(map(str.strip, open(f"{dir_was}/Genes_gwas.txt").readlines()))
pwas = list(map(str.strip, open(f"{dir_was}/Genes_pwas.txt").readlines()))
gwas = list(set(gwas + ['PSEN1', 'PSEN2', 'ACE', 'ADAMTS1',
'IQCK', 'SLC24A4', 'SPI1', 'TXNDC3', 'WWOX'])) # these were most recents addition
# from published meta-analysis
return nwas, gwas, pwas
def load_results_from_pathifier(path_to_res: Union[str, None], results_file_generic: str = None):
"""
Parameters
----------
path_to_res path to the results folder (assuming the folder is in the same location as the src)
results_file_generic suffix to results file
Returns
-------
"""
input_dirs = path_to_res if path_to_res is not None else os.listdir()
results_generic_file = '_Data_KEGG.xlsx' if results_file_generic is None else results_file_generic
results_dict = dict()
for d in set(input_dirs).intersection(clusters):
        xls = pd.ExcelFile(os.path.join(os.getcwd(), d, ''.join([d, results_generic_file])))
results_dict[d] = dict()
for subc in xls.sheet_names:
results_dict[d][subc] = pd.read_excel(xls, sheet_name=subc, index_col=0)
results_dict[d][subc] = results_dict[d][subc].apply(pd.to_numeric)
return results_dict
def populate_df_for_stats(results_dict: dict):
f"""
Parameters
----------
results_dict: dictionary populated with the function {load_results_from_pathifier.__name__}
Returns
-------
"""
stat_tests = {'Healthy vs Early-AD': 'pval-no_AD-vs-early_AD',
'Healthy vs Late-AD': 'pval-no_AD-vs-late_AD',
'Early-AD vs Late-AD (less)': 'pval-early_AD-vs-late_AD',
'Early-AD vs Late-AD (greater)': 'pval-early_AD-vs-late_AD'}
kegg_p = mybio.get_keggs(path_or_file='Default')
kegg_pathways = [kegg_p[k][0] for k in kegg_p]
significance_dict = dict()
for s in stat_tests:
significance_dict[s] = dict()
for c in clusters:
            significance_dict[s][c] = pd.DataFrame(index=kegg_pathways, columns=results_dict[c].keys(), dtype=float)
for subc in results_dict[c]:
found_paths = results_dict[c][subc].columns
significance_dict[s][c].loc[found_paths, subc] = results_dict[c][subc].loc[stat_tests[s], :].values
return significance_dict
def enumerate_significant_pathways_per_condition(significance_dict: dict, alpha: float = 0.05):
"""
Parameters
----------
significance_dict: dictionary of dataframes (one key per cluster)
alpha: significance level
Returns
-------
pd.DataFrame of significant pathways
"""
complete_df = pd.DataFrame(columns=cols, index=ordered_labs)
for s in significance_dict:
for c in significance_dict[s]:
complete_df.loc[significance_dict[s][c].columns, s] = (significance_dict[s][c] < alpha).sum()
return complete_df
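# Sketch of the intended workflow (assumes the per-cluster Pathifier workbooks sit in
# subfolders named after the clusters, as produced by the Pathifier runs):
def example_summarise_significance(alpha=0.05):
    results = load_results_from_pathifier(None, '_Data_KEGG.xlsx')
    significance = populate_df_for_stats(results)
    return enumerate_significant_pathways_per_condition(significance, alpha=alpha)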
# class for html visualisation of the clusters of pathways
class Clusters(dict):
def __init__(self, **kw):
if 'map_to_id' in kw:
self.map_to_id = kw.get('map_to_id')
else:
self.map_to_id = False
if self.map_to_id:
self.map_dict = kw.get('path_mapping')
def _map_pathname_to_id(self, col):
self.c_ids = []
for x in self[col]:
self.c_ids.append(self.map_dict[x])
def _repr_html_(self):
html = '<table style="border: 0;">'
for c in self:
col, i = c.split('_')
hx = rgb2hex(colorConverter.to_rgb(col))
if self.map_to_id:
self._map_pathname_to_id(c)
else:
self.c_ids = self[c]
html += '<tr style="border: 0;">' \
'<td style="background-color: {0}; ' \
'border: 0;">' \
'<code style="background-color: {0};">'.format(hx)
html += f'Module_{int(i) + 1}' + '</code></td>'
html += '<td style="border: 0"><code>'
html += ', '.join(self.c_ids) + '</code>'
html += '</td></tr>'
html += '</table>'
return html
def get_cluster_classes(den, label='ivl', **kwargs):
"""
**kwargs: map_to_id = True to map the pathway name to its own ID
[the dict of mapping should be provided in this case]
"""
cluster_idxs = defaultdict(list)
# kgg_map = kwargs['path_map']
for c, pi in zip(den['color_list'], den['icoord']):
for leg in pi[1:3]:
i = (leg - 5.0) / 10.0
if abs(i - int(i)) < 1e-5:
cluster_idxs[c].append(int(i))
cluster_classes = Clusters(**kwargs)
for idx, (c, l) in enumerate(cluster_idxs.items()):
i_l = [den[label][i] for i in l]
cluster_classes[f'{c}_{idx}'] = i_l
return cluster_classes
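# Sketch of how the dendrogram input for get_cluster_classes is typically produced (assumes a
# pathway-by-sample score matrix `scores`; the Ward linkage here is illustrative and not
# necessarily the setting used for the published figures):
def example_cluster_view(scores, pathway_names):
    from scipy.cluster import hierarchy
    linkage = hierarchy.linkage(scores, method='ward')
    den = hierarchy.dendrogram(linkage, labels=pathway_names, no_plot=True)
    return get_cluster_classes(den, label='ivl', map_to_id=False)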
def load_mathys_results(mathys_results: str = '../../Supplementary/41586_2019_1195_MOESM4_ESM.xlsx'):
de_mathys_dfs = dict()
de_xls = pd.ExcelFile(mathys_results)
for shname in de_xls.sheet_names:
if shname in ct_lab:
print(f"Processing {shname}")
de_mathys_dfs[shname] = dict()
            tmp = pd.read_excel(de_xls, sheet_name=shname, skiprows=1, true_values=['TRUE'], false_values=['FALSE'])
de_mathys_dfs[shname]['HC-vs-AD'] = tmp.iloc[:, :9].copy(deep=True)
de_mathys_dfs[shname]['HC-vs-AD'].set_index('Unnamed: 0', inplace=True)
de_mathys_dfs[shname]['HC-vs-AD'].apply(pd.to_numeric)
de_mathys_dfs[shname]['HC-vs-AD'].dropna(how='all', axis=0, inplace=True)
del de_mathys_dfs[shname]['HC-vs-AD'].index.name
de_mathys_dfs[shname]['HC-vs-AD'].iloc[:, -2:] = de_mathys_dfs[shname]['HC-vs-AD'].iloc[:, -2:].astype(bool)
last_col = de_mathys_dfs[shname]['HC-vs-AD'].columns[-1]
de_mathys_dfs[shname]['HC-vs-AD'] = de_mathys_dfs[shname]['HC-vs-AD'][
de_mathys_dfs[shname]['HC-vs-AD'][last_col] == True]
de_mathys_dfs[shname]['HC-vs-Early_AD'] = tmp.iloc[:, 11:20].copy(deep=True)
de_mathys_dfs[shname]['HC-vs-Early_AD'].set_index('Unnamed: 11', inplace=True)
de_mathys_dfs[shname]['HC-vs-Early_AD'].apply(pd.to_numeric)
de_mathys_dfs[shname]['HC-vs-Early_AD'].dropna(how='all', axis=0, inplace=True)
del de_mathys_dfs[shname]['HC-vs-Early_AD'].index.name
de_mathys_dfs[shname]['HC-vs-Early_AD'].iloc[:, -2:] = de_mathys_dfs[shname]['HC-vs-Early_AD'].iloc[:, -2:].astype(bool)
last_col = de_mathys_dfs[shname]['HC-vs-Early_AD'].columns[-1]
de_mathys_dfs[shname]['HC-vs-Early_AD'] = de_mathys_dfs[shname]['HC-vs-Early_AD'][
                de_mathys_dfs[shname]['HC-vs-Early_AD'][last_col] == True]
de_mathys_dfs[shname]['Early-vs-Late_AD'] = tmp.iloc[:, 22:32].copy(deep=True)
de_mathys_dfs[shname]['Early-vs-Late_AD'].set_index('Unnamed: 22', inplace=True)
de_mathys_dfs[shname]['Early-vs-Late_AD'].apply(pd.to_numeric)
de_mathys_dfs[shname]['Early-vs-Late_AD'].dropna(how='all', axis=0, inplace=True)
del de_mathys_dfs[shname]['Early-vs-Late_AD'].index.name
de_mathys_dfs[shname]['Early-vs-Late_AD'].iloc[:, -2:] = de_mathys_dfs[shname]['Early-vs-Late_AD'].iloc[:, -2:].astype(bool)
last_col = de_mathys_dfs[shname]['Early-vs-Late_AD'].columns[-1]
de_mathys_dfs[shname]['Early-vs-Late_AD'] = de_mathys_dfs[shname]['Early-vs-Late_AD'][
                de_mathys_dfs[shname]['Early-vs-Late_AD'][last_col] == True]
return de_mathys_dfs
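# Example (sketch): de_tables = load_mathys_results(); de_tables['Ex']['HC-vs-AD'] then holds the
# filtered differential-expression table for excitatory neurons in the no-pathology vs pathology
# comparison (only rows flagged significant in the last column are kept).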
def load_pathifier_loadings(pathways_dict: dict):
clusters = ['Opc', 'In', 'Ex', 'Mic', 'Ast', 'Oli']
loadings_dict = dict()
for d in set(os.listdir()).intersection(clusters):
for fname in os.listdir(os.path.join(os.getcwd(), d)):
if 'Loadings_' in fname:
print(f'Loading file: {fname}')
dh, t_0 = mypars.create_status_bar()
xls = pd.ExcelFile(os.path.join(os.getcwd(), d, fname))
subc = '_'.join(fname.split('_')[1:3])
loadings_dict[subc] = dict()
for i, hsa_path in enumerate(xls.sheet_names):
                    tmp = pd.read_excel(xls, sheet_name=hsa_path, skiprows=6)
__author__ = "<NAME>"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import data_process_functions as dp
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
import datetime
# RiBuild Modules
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
acronym_file = r'U:\RIBuild\2D_1D\4A_36_Acronyms.xlsx'
out_folder = r'C:\Users\ocni\PycharmProjects\delphin_6_automation\data_process\2d_1d\processed_data'
result_folder = r'U:\RIBuild\2D_1D\Results'
graphic_folder = r'U:\RIBuild\2D_1D\Processed Results\4A'
#dp.process_results(acronym_file, result_folder, out_folder)
quantities = ['heat loss', 'temperature', 'relative humidity', 'moisture content', 'moisture integral']
quantity = quantities[0]
hdf_file = out_folder + '/' + quantity + '.h5'
# Open HDF
#total_uninsulated_4a = pd.read_hdf(hdf_file, 'total_4a_36_uninsulated')
#total_insulated_4a = pd.read_hdf(hdf_file, 'total_4a_36_insulated')
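# NOTE: the plotting helpers below expect the two frames above to be loaded; uncomment the
# pd.read_hdf calls (and the corresponding function calls further down) to reproduce the figures.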
def uninsulated(save=False):
dp.abs_diff_boxplot(total_uninsulated_4a, (-2, 2), quantity.title(), '4A 36cm Uninsulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_abs_diff_uninsulated.png')
dp.rel_diff_boxplot(total_uninsulated_4a, (0, 250), quantity.title(), '4A 36cm Uninsulated', log=False)
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rel_diff_uninsulated.png')
dp.plot_linear_relation(total_uninsulated_4a, 'mortar', (-50, 200), quantity.title(), '4A 36cm Uninsulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_linear_relation_mortar_uninsulated.png')
dp.plot_linear_relation(total_uninsulated_4a, 'brick', (-50, 200), quantity.title(), '4A 36cm Uninsulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_linear_relation_brick_uninsulated.png')
#uninsulated(False)
def insulated(save=False):
dp.abs_diff_boxplot(total_insulated_4a, (-5, 5), quantity.title(), '4A 36cm Insulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_abs_diff_insulated.png')
dp.rel_diff_boxplot(total_insulated_4a, (0.1, 120), quantity.title(), '4A 36cm Insulated', log=False)
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rel_diff_insulated.png')
dp.plot_linear_relation(total_insulated_4a, 'mortar', (0, 5), quantity.title(), '4A 36cm Insulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_linear_relation_mortar_insulated.png')
dp.plot_linear_relation(total_insulated_4a, 'brick', (0, 5), quantity.title(), '4A 36cm Insulated')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_linear_relation_brick_insulated.png')
#insulated(True)
def rolling_mean_plots(mean_hours, save=False):
insulation = 'insulated'
acros = [f'dresden_zp_high_cement_{insulation}_36_4a', f'dresden_zd_high_cement_{insulation}_36_4a',
f'potsdam_high_cement_{insulation}_36_4a', f'dresden_zp_low_cement_{insulation}_36_4a',
f'dresden_zd_low_cement_{insulation}_36_4a', f'potsdam_low_cement_{insulation}_36_4a']
time_frame = pd.DataFrame()
for acro in acros:
acro_data_frame = pd.read_hdf(hdf_file, acro)
time_frame = pd.concat([time_frame,
acro_data_frame.loc[:, pd.IndexSlice[:, :, 'out']].rolling(mean_hours).mean()],
ignore_index=True)
time_frame = dp.compute_differences(time_frame)
#time_frame_mortar = dp.remove_outlier(time_frame, 'abs_diff', 'brick')
#time_frame_brick = dp.remove_outlier(time_frame, 'abs_diff', 'mortar')
dp.abs_diff_boxplot(time_frame, (-2.5, 2.5), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nRolling Mean of {mean_hours} Hours')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rolling_mean_absolute_difference_{insulation}.png')
dp.rel_diff_boxplot(time_frame, (0., 40), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nRolling Mean of {mean_hours} Hours', log=False)
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rolling_mean_relative_difference_{insulation}.png')
dp.plot_linear_relation(time_frame, 'brick', (-10, 20), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nRolling Mean of {mean_hours} Hours')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rolling_mean_linear_relation_brick_{insulation}.png')
dp.plot_linear_relation(time_frame, 'mortar', (-10, 20), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nRolling Mean of {mean_hours} Hours')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_rolling_mean_linear_relation_mortar_{insulation}.png')
#rolling_mean_plots(24, True)
def accumulated_plots(save=False):
insulation = 'uninsulated'
acros = [f'dresden_zp_high_cement_{insulation}_36_4a', f'dresden_zd_high_cement_{insulation}_36_4a',
f'potsdam_high_cement_{insulation}_36_4a', f'dresden_zp_low_cement_{insulation}_36_4a',
f'dresden_zd_low_cement_{insulation}_36_4a', f'potsdam_low_cement_{insulation}_36_4a']
time_frame = pd.DataFrame()
for acro in acros:
acro_data_frame = pd.read_hdf(hdf_file, acro)
time_frame = pd.concat([time_frame, acro_data_frame.loc[:, pd.IndexSlice[:, :, 'out']].cumsum()],
ignore_index=True)
time_frame = time_frame.divide(len(time_frame))
time_frame = dp.compute_differences(time_frame)
dp.abs_diff_boxplot(time_frame, (-15000, 5000), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nAccumulated Sum')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_accumulated_sum_absolute_difference_{insulation}.png')
dp.rel_diff_boxplot(time_frame, (0.0, 200), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nAccumulated Sum', log=False)
if save:
plt.savefig(f'{graphic_folder}/{quantity}_accumulated_sum_relative_difference_{insulation}.png')
types = ['mortar', 'brick']
for type_ in types:
dp.plot_linear_relation(time_frame, type_, (-10, 30), quantity.title(),
f'4A 36cm {insulation.capitalize()}\nAccumulated Sum')
if save:
plt.savefig(f'{graphic_folder}/{quantity}_accumulated_sum_linear_relation_{type_}_{insulation}.png')
#accumulated_plots(False)
def time_plots(save=False):
insulation = 'uninsulated'
acros = [f'dresden_zp_high_cement_{insulation}_36_4a', f'potsdam_low_cement_{insulation}_36_4a']
time_frame = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# # Simple tool to analyze data from www.data.gouv.fr
#
# **Note:** This is a Jupyter notebook which is also available as its executable export as a Python 3 script (therefore with automatically generated comments).
# # Libraries
# In[ ]:
import sys,os
addPath= [os.path.abspath("../source"),
os.path.abspath("../venv/lib/python3.9/site-packages/")]
addPath.extend(sys.path)
sys.path = addPath
# In[ ]:
# Sys import
import sys, os, re
# Common imports
import math
import numpy as NP
import numpy.random as RAND
import scipy.stats as STATS
from scipy import sparse
from scipy import linalg
# Better formatting functions
from IPython.display import display, HTML
from IPython import get_ipython
import matplotlib as MPL
import matplotlib.pyplot as PLT
import seaborn as SNS
SNS.set(font_scale=1)
# Python programming
from itertools import cycle
from time import time
import datetime
# Using pandas
import pandas as PAN
import xlrd
# In[ ]:
import warnings
warnings.filterwarnings('ignore')
print("For now, reduce python warnings, I will look into this later")
# ### Import my own modules
# The next cell attempts to give user some information if things improperly setup.
# Intended to work both in Jupyter and when executing the Python file directly.
# In[ ]:
if get_ipython() is not None and os.path.abspath("../source/") not in sys.path:
sys.path.append(os.path.abspath("../source/"))
try:
from lib.utilities import *
from lib.figureHelpers import *
from lib.DataMgrJSON import *
from lib.DataMgr import *
import lib.basicDataCTE as DCTE
except Exception as err:
print("Could not find library 'lib' with contents 'DataGouvFr' ")
if get_ipython() is None:
print("Check the PYTHONPATH environment variable which should point to 'source' wich contains 'lib'")
else:
print("You are supposed to be running in JupySessions, and '../source/lib' should exist")
raise err
# ## Check environment
#
# It is expected that:
# - your working directory is named `JupySessions`,
# - that it has subdirectories
# - `images/*` where generated images may be stored to avoid overcrowding.
# - At the same level as your working dir there should be directories
# - `../data` for storing input data and
# - `../source` for python scripts.
#
# My package library is in `../source/lib`, and users running under Python (not in Jupyter) should
# set their PYTHONPATH to include "../source" ( *or whatever appropriate* ).
# In[ ]:
checkSetup(chap="Chap01")
ImgMgr = ImageMgr(chapdir="Chap01")
# # Load Data
# Vaccination data: https://www.data.gouv.fr/en/datasets/donnees-relatives-aux-personnes-vaccinees-contre-la-covid-19/
# https://www.data.gouv.fr/en/datasets/r/eb672d49-7cc7-4114-a5a1-fa6fd147406b
# https://www.data.gouv.fr/en/datasets/r/eb672d49-7cc7-4114-a5a1-fa6fd147406b
# https://www.data.gouv.fr/en/datasets/r/b234a041-b5ea-4954-889b-67e64a25ce0d
#
# Badges: vaccins covid19 (sans tiret???)
# ## Functions
# ## Load CSV and XLSX data from remote
# The `dataFileVMgr` will manage a cache of data files in `../data`, the data will be downloaded
# from www.data.gouv.fr using a request for datasets with badge '`covid-19`' if a more recent
# version is present on the remote site. The meta information is stored/cached in `../data/.data`
# as the pickle of a json.
#
# We check what is in the cache/data directory; for each file, we identify the latest version,
# and list this below to make sure. The file name will usually contain a time stamp; this has to do with
# the version management/identification technique used when downloading from www.data.gouv.fr.
#
# For the files used in this notebook, the latest version is used/loaded irrespective of the
# timestamp used in the notebook.
# In[ ]:
tagset1 = ({"tag":"covid"}, {"tag":"covid19"})
# In[ ]:
specOpts={ 'cacheFname': '.cache.rqtTest2.json',
"dumpMetaFile" : "rqtTest2.meta.dump",
"dumpMetaInfoFile" : "rqtTest2.metainfo.dump",
'ApiInq' : 'datasets',
'ApiInqQuery' : tagset1,
'InqParmsDir' : {},
}
# In[ ]:
rex = re.compile('(.*sursaud|^donnees-hospitalieres|^covid-hospit-incid|^sp-pos-).*')
def uselFn(urqt):
return rex.match(urqt.fname) or rex.match(urqt.url)
# In[ ]:
dataFileVMgr = manageAndCacheDataFilesFRAPI("../data", maxDirSz= 170*(2**10)**2,
**specOpts)
# In[ ]:
dataFileVMgr.getRemoteInfo()
dataFileVMgr.updatePrepare()
dataFileVMgr.updateSelect(displayCount=10 , URqtSelector = uselFn)
dataFileVMgr.cacheUpdate()
# In[ ]:
print("Most recent versions of files in data directory:")
for f in dataFileVMgr.listMostRecent() :
print(f"\t{f}")
# In[ ]:
last = lambda x: dataFileVMgr.getRecentVersion(x,default=True)
# This ensures we load the most recent version, so that it is not required to update the list
# below. The timestamps shown in the following sequence will be update by the call to `getRecentVersion`.
# In[ ]:
dailyDepCsv = last("sursaud-corona-quot-dep-2021-04-08-21h20.csv")
dailyRegionCsv = last("sursaud-corona-quot-reg-2021-04-03-19h33.csv")
dailyFranceCsv = last("sursaud-covid19-quotidien-2020-04-12-19h00-france.csv")
dailyXlsx = last("sursaud-covid19-quotidien-2020-04-12-19h00.xlsx")
weeklyCsv = last("sursaud-covid19-hebdomadaire-2020-04-08-19h00.csv")
hospAgeCsv = last("donnees-hospitalieres-classe-age-covid19-2020-04-11-19h00.csv")
hospNouveauCsv = last("donnees-hospitalieres-nouveaux-covid19-2020-04-11-19h00.csv")
hospCsv = last("donnees-hospitalieres-covid19-2020-04-11-19h00.csv")
hospEtablCsv = last("donnees-hospitalieres-etablissements-covid19-2020-04-12-19h00.csv")
weeklyLabCsv = last("sp-pos-heb-fra-2021-08-09-19h06.csv")
dailyLabCsv = last("sp-pos-quot-fra-2021-08-09-19h06.csv")
S1 = set (dataFileVMgr.listMostRecent())
S2 =set((dailyDepCsv,dailyRegionCsv,dailyFranceCsv, dailyXlsx, weeklyCsv,
hospAgeCsv, hospNouveauCsv, hospCsv, hospEtablCsv, weeklyLabCsv, dailyLabCsv ))
missing = S1.difference(S2)
if len(missing) > 0:
print (f"Missing comparing with most recent files in ../data:")
for f in missing:
print(f"\t{f}")
metaHebdoCsv = "../data/metadonnee-urgenceshos-sosmedecins-covid19-hebdo.csv"
metaQuotRegCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot-reg.csv"
metaQuotFraCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot-fra.csv"
metaQuotCsv = "../data/metadonnee-urgenceshos-sosmedecin-covid19-quot.csv"
metaHospservices = "../data/metadonnees-services-hospitaliers-covid19.csv"
metaHospAge = "../data/metadonnees-donnees-hospitalieres-covid19-classes-age.csv"
metaHospIncid = "../data/metadonnees-hospit-incid.csv"
metaHosp = "../data/metadonnees-donnees-hospitalieres-covid19.csv"
metaHospEtabl = "../data/donnees-hospitalieres-etablissements-covid19-2020-04-11-19h00.csv"
metaSexeCsv = "../data/metadonnees-sexe.csv"
metaRegionsCsv="../data/regions-france.csv"
metaTranchesAgeCsv="../data/code-tranches-dage.csv"
# In[ ]:
ad = lambda x: "../data/"+x
S1 = set (map(ad, dataFileVMgr.listMostRecent(nonTS=True)))
S2 =set((metaHebdoCsv, metaQuotRegCsv, metaQuotFraCsv, metaQuotCsv,
metaHospservices, metaHospAge, metaHospIncid, metaHosp, metaHospEtabl, metaRegionsCsv, metaTranchesAgeCsv ))
missing = S1.difference(S2)
if len(missing) > 0:
print (f"Missing comparing with non timestamped files in ../data:")
print ("These may eventually be exploited in other notebooks (e.g. COVID-MoreData-FromGouv)")
for f in missing:
print(f"\t{f}")
# Now load the stuff
#
# In[ ]:
ad = lambda x: "../data/"+x
data_dailyRegion = read_csvPandas(ad(dailyRegionCsv), error_bad_lines=False,sep=";" )
data_dailyDep = read_csvPandas(ad(dailyDepCsv), error_bad_lines=False,sep=";")
data_dailyFrance = read_csvPandas(ad(dailyFranceCsv), error_bad_lines=False,sep=",")
data_daily = read_xlsxPandas(ad(dailyXlsx))
data_weekly = read_csvPandas(ad(weeklyCsv), error_bad_lines=False,sep=";")
data_hospNouveau = read_csvPandas(ad(hospNouveauCsv), error_bad_lines=False,sep=";")
data_hosp = read_csvPandas(ad(hospCsv), error_bad_lines=False,sep=";")
data_hospAge = read_csvPandas(ad(hospAgeCsv), error_bad_lines=False,sep=";")
data_hospEtabl = read_csvPandas(ad(hospEtablCsv), error_bad_lines=False,sep=";")
data_weeklyLab = read_csvPandas(ad(weeklyLabCsv), error_bad_lines=False,sep=";")
data_dailyLab = read_csvPandas(ad(dailyLabCsv), error_bad_lines=False,sep=";")
meta_Hebdo = read_csvPandas(metaHebdoCsv, clearNaN=True, error_bad_lines=False,sep=";", header=2)
meta_QuotReg = read_csvPandas(metaQuotRegCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_QuotFra = read_csvPandas(metaQuotFraCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_Quot = read_csvPandas(metaQuotCsv, clearNaN=True, error_bad_lines=False,sep=";", header=1)
meta_HospServices = read_csvPandas(metaHospservices, clearNaN=True, error_bad_lines=False,sep=";")
meta_HospAge = read_csvPandas(metaHospAge, clearNaN=True, error_bad_lines=False,sep=";")
meta_HospIncid = read_csvPandas(metaHospIncid, clearNaN=True, error_bad_lines=False,sep=";")
meta_Hosp = read_csvPandas(metaHosp, clearNaN=True, error_bad_lines=False,sep=";")
meta_Sexe = read_csvPandas(metaSexeCsv, clearNaN=True, error_bad_lines=False,sep=";",header=0)
meta_Regions = read_csvPandas(metaRegionsCsv, clearNaN=True, error_bad_lines=False,sep=",")
meta_Ages = read_csvPandas(metaTranchesAgeCsv, clearNaN=True, error_bad_lines=False,sep=";")
# ## Figure out data characteristics
# In[ ]:
def showBasics(data,dataName):
print(f"{dataName:24}\thas shape {data.shape}")
dataListDescr = ((data_dailyRegion, "data_dailyRegion"),
(data_dailyDep,"data_dailyDep"),
(data_hospAge,"data_hospAge"),
(data_dailyFrance, "data_dailyFrance"),
(data_daily,"data_daily"),
(data_weekly , "data_weekly "),
(data_hospNouveau,"data_hospNouveau"),
(data_hosp,"data_hosp"),
(data_hospAge,"data_hospAge"),
(data_hospEtabl,"data_hospEtabl"),
(data_weeklyLab,"data_weeklyLab"),
(data_dailyLab ,"data_dailyLab"),
(meta_Hebdo,"meta_Hebdo"),
(meta_QuotReg,"meta_QuotReg"),
(meta_QuotFra,"meta_QuotFra"),
(meta_Quot,"meta_Quot"),
(meta_HospServices,"meta_HospServices"),
(meta_HospAge,"meta_HospAge"),
(meta_HospIncid,"meta_HospIncid"),
(meta_Hosp,"meta_Hosp"),
(meta_Sexe,"meta_Sexe"),
(meta_Regions,'meta_Regions'),
(meta_Ages,'meta_Ages'))
for (dat,name) in dataListDescr:
showBasics(dat,name)
# ### Help with meta data
# Of course I encountered some surprises, see `checkRepresentedRegions` issue with unknown codes which
# did occur in some files!
# In[ ]:
def checkRepresentedRegions(df,col='reg',**kwOpts):
"list regions represented in a dataframe, if kwd print=True, will print list of code->string"
regs = set(df[col])
if "print" in kwOpts:
for r in regs:
extract = meta_Regions[ meta_Regions['code_region'] == r]
# print (f"r={r}\t{extract}\t{extract.shape}")
if extract.shape[0] == 0:
lib = f"**Unknown:{r}**"
else:
lib = extract.iloc[0].at['nom_region']
print(f"Region: code={r}\t->{lib}")
return regs
# In[ ]:
for (dat,name) in dataListDescr:
if name[0:5]=="meta_": continue
print(f"\nDescription of data in '{name}'\n")
display(dat.describe().transpose())
# In[ ]:
for (dat,name) in dataListDescr:
if name[0:5]!="meta_": continue
print(f"\nMeta data in '{name}'\n")
display(dat)
# ## Read the meta data characterising resources on the remote site
# This is a demo of the capabilities of class `manageAndCacheDataFile`.
# In[ ]:
dataFileVMgr.pprintDataItem( item=".*org.*/^(name|class)$")
dataFileVMgr.pprintDataItem( item="resource.*/(f.*|title.*)")
# ## Get some demographics data from INSEE
# For the time being, these data are obtained / loaded from Insee web site using a manual process and are placed in a different directory, therefore a distinct FileManager is used, and loading this data is done here; for more details see the notebook `Pop-Data-FromGouv.ipy`
#
# Using the base version which does not try to update the "../dataPop" directory
# In[ ]:
dataFileVMgrInsee = manageDataFileVersions("../dataPop")
inseeDepXLS ="../dataPop/InseeDep.xls"
inseeDep = read_xlsxPandas(inseeDepXLS, sheet_name=1, header=7)
inseeReg = read_xlsxPandas(inseeDepXLS, sheet_name=0, header=7)
# Now we can display our demographics data (summarized)
# In[ ]:
display(inseeDep.iloc[:,4:].sum())
display(inseeReg.iloc[:,4:].sum())
# ## Let's do some graphics!
# ### Données de urgences hospitalières et de SOS médecins
# Df: dailyRegion ( file sursaud-covid19-quotidien)
# #### Structure the data
# Select age category '0', thus getting all ages
# In[ ]:
def select_Ages(df, ageGroup='0'):
return df.loc[df['sursaud_cl_age_corona'] == ageGroup]
def select_AllAges(df):
return select_Ages(df)
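# Illustrative note: select_AllAges(data_dailyRegion) keeps only the rows where
# 'sursaud_cl_age_corona' == '0', i.e. the aggregate over all age classes.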
# In[ ]:
def groupByDate(df):
return df.groupby('date_de_passage')
# First, I work with the dailyRegion data, summing up for all regions.
# In[ ]:
gr_all_age_regions = groupByDate(select_AllAges(data_dailyRegion)).sum()
checkRepresentedRegions(data_dailyRegion, print=True);
# In[ ]:
dfGr = PAN.DataFrame(gr_all_age_regions.copy(), columns=gr_all_age_regions.columns[1:])
painter = figureTSFromFrame(dfGr,figsize=(12,8))
painter.doPlot()
painter.setAttrs(label=f"Days since {painter.dt[0]}",
title="Whole France/Data ER + SOS-medecin\nAll age groups",
legend=True,
xlabel=f"Days since {painter.dt[0]}")
PAN.set_option('display.max_colwidth', None)
display(meta_QuotReg[[ "Colonne","Description_FR" ]])
ImgMgr.save_fig("FIG002")
# Then, I look at the national data, as represented in `data_dailyFrance` and `data_daily`
# In[ ]:
print(f"data_daily: {data_daily.shape}")
print(f"{','.join(data_daily.columns)}")
display(data_daily.describe())
display(data_daily[:5])
print("data_dailyFrance: {data_dailyFrance.shape}")
print(f"{','.join(data_dailyFrance.columns)}")
display(data_dailyFrance.describe())
display(data_dailyFrance[:5])
# ### Hospital data
# DF: hospNouveau File: donnees-hospitalieres-nouveaux-covid19
# In[ ]:
gr_all_data_hospNouveau=data_hospNouveau.groupby('jour').sum()
dfGrHN = PAN.DataFrame(gr_all_data_hospNouveau)
colOpts = {'incid_dc': {"c":"b","marker":"v"},
'incid_rea': {"c":"r","marker":"o", "linestyle":"--"},
'incid_rad': {"marker":"+"},
'incid_hosp': {"marker":"*"}
}
painter = figureTSFromFrame(dfGrHN)
painter.doPlot()
painter.setAttrs(colOpts=colOpts,
xlabel=f"Days since {painter.dt[0]}",
title="Whole France (Hospital)\nDaily variation in patient status",
legend=True )
PAN.set_option('display.max_colwidth', None)
display(meta_HospIncid[[ "Colonne","Description_EN" ]])
# In[ ]:
gr_all_data_hosp=data_hosp.loc[data_hosp["sexe"] == 0 ].groupby('jour').sum()
cols = [ c for c in gr_all_data_hosp.columns if c != 'sexe']
dfGrH = PAN.DataFrame(gr_all_data_hosp[cols])
colOpts = { 'dc': {"c":"b","marker":"v"},
'rea': {"c":"r","marker":"o", "linestyle":"--"},
'rad': {"marker":"+"},
'hosp': {"marker":"*"}
}
painter = figureTSFromFrame(dfGrH)
painter.doPlot()
painter.setAttrs( colOpts=colOpts,
xlabel=f"Days since {painter.dt[0]}",
title="Whole France / Hospital\n:Daily patient status (ICU,Hosp) / Accumulated (discharged, dead)",
legend=True)
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG003")
# ### Now analyze hospital data according to sex
# In[ ]:
data_hosp_DepSex=data_hosp.set_index(["dep","sexe"])
data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)!=0]
d1 = data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)==1]
d2 = data_hosp_DepSex[data_hosp_DepSex.index.get_level_values(1)==2]
d1s=d1.groupby("jour").sum()
d2s=d2.groupby("jour").sum()
dm= PAN.concat([d1s,d2s], axis=1)
cols1 = list(map (lambda x: x+"_M", d1s.columns))
cols2 = list(map (lambda x: x+"_F", d2s.columns))
dm.columns = (*cols1,*cols2)
# In[ ]:
painter = figureTSFromFrame(dm)
colOpts = {'dc_F': {"c":"r", "marker":"v"},
'dc_M': {"c":"b", "marker":"v"},
'rea_F': {"c":"r", "marker":"o", "linestyle":"--"},
'rea_M': {"c":"b", "marker":"o", "linestyle":"--"},
'rad_F': {"c":"k", "marker":"+"},
'rad_M': {"c":"y", "marker":"+"},
'hosp_M':{"c":"b"},
'HospConv_M':{'c':'c'},
'SSR_USLD_M' :{'c':'c',"marker":'p' },
'hosp_F':{'c':'r'},
'HospConv_F':{'c':'m'},
'SSR_USLD_F' :{'c':'m',"marker":'p'},
'autres_M':{'c':'c', "linestyle":":"},
'autres_F':{'c':'m', "linestyle":":"}
}
painter.doPlotBycol()
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title="Whole France\ / Hospital\n Male / Female\n:Daily patient status (ICU,Hosp) / Accumulated (discharged, dead)",
legend=True )
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG004")
# ### Now analyze hospital data according to age
# For now the data available in table `data_hospAge` covers a small number of days.... hopefully this may improve, either by more earlier data becoming available, or just by more data being collected day after day!
# In[ ]:
data_hosp_RegAge=data_hospAge.set_index(["reg","jour",'cl_age90'])
ddd= data_hosp_RegAge[ data_hosp_RegAge.index.get_level_values(2)!=0 ]
# We may have multiple entries for same day, this is an issue in the way
# this table is made up. For now, seems that best strategy is to sum!
# We keep track of previous strategy which was to arbitrarily select a value among duplicate indices,
# therefore the if True
if True:
dhRA = ddd.groupby(by=list(ddd.index.names)).sum().copy()
dhRAg = dhRA.unstack('cl_age90').groupby("jour").sum()
else:
# older strategy, kept for referral, parameter keep has several possible values
# remove duplicate entries, not performing selection between multiple values
duplic = ~ddd.duplicated(keep=False)
print( f"Number of duplicated lines: {duplic.sum()} {duplic.sum()/duplic.size*100:.2f}%")
dhRA = ddd[ duplic ].unstack('cl_age90')
dhRAg = dhRA.groupby("jour").sum()
# In[ ]:
ageClasses = sorted(set(dhRAg.columns.get_level_values(1)))
print(f"age classes = {ageClasses}")
levCat = sorted(set(dhRAg.columns.get_level_values(0)))
levAge = sorted(set(dhRAg.columns.get_level_values(1)))
subnodeSpec=(lambda i,j:{"nrows":i,"ncols":j})(*subPlotShape(len(levAge),maxCol=6))
print(f"nb age classes:{len(levAge)}\tsubnodeSpec:{subnodeSpec}")
if len(levAge) != len(ageClasses):
raise RuntimeError("Inconsistent values for number of age classes")
# In[ ]:
colOpts = {'dc': {"c":"b","marker":"v"},
'rea': {"c":"b","marker":"o", "linestyle":"--"},
'rad': {"c":"r", "marker":"+"},
'hosp':{"c":"k", "linestyle":"-"},
'HospConv':{"c":"c", "linestyle":"-"},
'SSR_USLD' :{"c":"g","linestyle":"-"},
'autres':{'c':'m'}
}
# In[ ]:
painter = figureTSFromFrame(None, subplots=subnodeSpec, figsize=(15,15))
for i in range(len(levAge)):
cat = ageClasses[i]
if cat < 90:
title = f"Age {cat-9}-{cat}"
else:
title = "Age 90+"
dfExtract = dhRAg.loc(axis=1)[:,cat]
# remove the now redundant information labeled 'cl_age90'
dfExtract.columns = dfExtract.columns.levels[0]
painter.doPlotBycol(dfExtract);
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title = title,
legend = True )
painter.advancePlotIndex()
display(meta_Hosp[[ "Colonne","Description_EN" ]])
ImgMgr.save_fig("FIG005")
# ## Testing : Laboratory data
#
# This concerns testing (I have not found the meta data yet, but column labels are clear enough).
# The `data_dailyLab` data is split between age classes and departements.
# In[ ]:
todayStr = datetime.date.today().isoformat() # handle a data error that appeared on 5/5/2020
# Modif. August 2021:
# - we do not have the comfort of cat 0 summing all ages anymore
# - maybe it would be better to divide per population... this would give a different figure
# - should we rename columns with the old labels?
# In[ ]:
msk = data_dailyLab.loc[:, "jour"] <= todayStr  # there is an error in the version of the data distrib 05/05/2020
dl=data_dailyLab.loc[msk,:]
dlGrA = dl.groupby('jour').sum()
#dlGr["cl_age90"]=0 #
dlGr = dlGrA.drop(columns=["cl_age90", "pop"])
# In[ ]:
dlGr.columns
# In[ ]:
def LaboRelabCols(tble):
corresp={
'P_f' : 'Pos_f', 'P_h': 'Pos_h', 'P':'Positive',
'T_f':'Tested_f', 'T_h':'Tested_h', 'T':'Tested'
}
rc = [corresp.get(x) for x in tble.columns]
tble.columns=rc
# In[ ]:
LaboRelabCols(dlGr)
# In[ ]:
painter = figureTSFromFrame(dlGr)
colOpts = {'Tested': {"c":"b", "marker":"*"},
'Positive': {"c":"r", "marker":"+"},
'Tested_h': {"c":"b","marker":"o", "linestyle":"--"},
'Tested_f': {"c":"g","marker":"o", "linestyle":"--"},
'Pos_h': {"c":"b", "marker":"+"},
'Pos_f': {"c":"g", "marker":"+"}
}
painter.doPlotBycol()
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title="Whole France laboratory: tested, positive for male(h) and female(f)",
legend=True )
ImgMgr.save_fig("FIG006")
# Analyze laboratory data according to age
# In[ ]:
data_dailyLab.columns
dataDLab = data_dailyLab.loc[msk,:].copy()
# In[ ]:
dataDLab
# In[ ]:
dhRA
# In[ ]:
dataDLab=dataDLab.set_index(["jour",'cl_age90'])
# In[ ]:
dhRA = dataDLab.drop(columns=["pop"]).unstack('cl_age90')
dhRAg = dhRA.groupby("jour").sum()
# In[ ]:
ageClasses = sorted(set(dhRAg.columns.get_level_values(1)))
print(f"age classes = {ageClasses}")
levCat = sorted(set(dhRA.columns.get_level_values(0)))
levAge = sorted(set(dhRA.columns.get_level_values(1)))
subnodeSpec=(lambda i,j:{"nrows":i,"ncols":j})(*subPlotShape(len(levAge),maxCol=6))
print(f"nb age classes:{len(levAge)}\tsubnodeSpec:{subnodeSpec}")
if len(levAge) != len(ageClasses):
raise RuntimeError("Inconsistent values for number of age classes")
ageLabs=['All']+[f"{x-9}-{x}" for x in ageClasses[1:-1]]+["90+"]
# In[ ]:
colOpts = {'Tested': {"c":"b", "marker":"*"},
'Positive': {"c":"r", "marker":"+"},
'Tested_h': {"c":"b","marker":"o", "linestyle":"--"},
'Tested_f': {"c":"g","marker":"o", "linestyle":"--"},
'Pos_h': {"c":"b", "marker":"+"},
'Pos_f': {"c":"g", "marker":"+"}
}
# In[ ]:
painter = figureTSFromFrame(None, subplots=subnodeSpec, figsize=(15,15))
for i in range(len(ageClasses)):
cat = ageLabs[i]
ageSpec = ageClasses[i]
title = f"Labo Tests\nAge: {cat}"
dfExtract = dhRAg.loc(axis=1)[:,ageSpec]
# remove the not needed information since we selected by ageSpec
dfExtract.columns = [col[0] for col in dfExtract.columns]
LaboRelabCols(dfExtract)
painter.doPlotBycol(dfExtract);
painter.setAttrs(colOpts = colOpts,
xlabel = f"Days since {painter.dt[0]}",
title = title,
legend = True )
painter.advancePlotIndex()
ImgMgr.save_fig("FIG007")
# # Merge COVID and demographics data
# See the `Pop-Data-FromGouv.ipynb` notebook for more details on the demographics data obtained from
# INSEE (https://www.insee.fr/fr/accueil).
# Prepare the data for a database style join/merge, documented on https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html.
# First we need to establish "dep" as an index in hospital data:
# In[ ]:
hndDf = data_hospNouveau.copy()
hndDf.set_index("dep");
# Then we extract the demographic information and set index "dep"
# In[ ]:
depStats = inseeDep.iloc[:,[2,3,7,8]].copy()
cols = depStats.columns.values
cols[0]="dep"
depStats.columns = cols
depStats.set_index("dep");
# Now we perform the merge, and group by date and 'départements':
# In[ ]:
hndMerged = PAN.merge(hndDf, depStats, on="dep")
from functools import partial
import json
import logging
import os
from pkg_resources import resource_filename, Requirement
import pandas as pd
from requests.exceptions import HTTPError
from solarforecastarbiter.io.fetch import arm
from solarforecastarbiter.io.reference_observations import (
common, default_forecasts)
DEFAULT_SITEFILE = resource_filename(
Requirement.parse('solarforecastarbiter'),
'solarforecastarbiter/io/reference_observations/'
'arm_reference_sites.json')
DOE_ARM_SITE_VARIABLES = {
'qcrad': arm.IRRAD_VARIABLES,
'met': arm.MET_VARIABLES,
}
DOE_ARM_VARIABLE_MAP = {
'down_short_hemisp': 'ghi',
'short_direct_normal': 'dni',
'down_short_diffuse_hemisp': 'dhi',
'temp_mean': 'air_temperature',
'rh_mean': 'relative_humidity',
'wspd_arith_mean': 'wind_speed',
}
logger = logging.getLogger('reference_data')
def _determine_stream_vars(datastream):
"""Returns a list of variables available based on datastream name.
Parameters
----------
datastream: str
Datastream name, or the product name. This string is searched for
`met` or `qcrad` and returns a list of expected variables.
Returns
-------
list of str
The variable names that can be found in the file.
"""
available = []
for stream_type, arm_vars in DOE_ARM_SITE_VARIABLES.items():
if stream_type in datastream:
available = available + arm_vars
return available
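# Illustrative example (hypothetical datastream names): 'sgpqcrad1longE11.c1'
# contains 'qcrad' and would return arm.IRRAD_VARIABLES, 'sgpmetE13.b1' contains
# 'met' and would return arm.MET_VARIABLES, and a name containing neither
# substring yields an empty list.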
def initialize_site_observations(api, site):
"""Creates an observation at the site for each variable in
the matched DOE_ARM_VARIABLE_MAP.
Parameters
----------
api : solarforecastarbiter.io.api.APISession
An active Reference user session.
site : datamodel.Site
The site object for which to create Observations.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
logger.error(f'Failed to initialize observations for {site.name} '
'extra parameters could not be loaded.')
return
site_vars = site_variables_from_extra_params(site_extra_params)
for sfa_var in site_vars:
logger.info(f'Creating {sfa_var} at {site.name}')
try:
common.create_observation(
api, site, sfa_var)
except HTTPError as e:
logger.error(f'Could not create Observation for "{sfa_var}" '
f'at DOE ARM site {site.name}')
logger.debug(f'Error: {e.response.text}')
def initialize_site_forecasts(api, site):
"""
Create a forecast for each variable at the site.
Parameters
----------
api : solarforecastarbiter.io.api.APISession
An active Reference user session.
site : datamodel.Site
The site object for which to create Forecasts.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
logger.error('Failed to initialize reference forecasts for '
f'{site.name} extra parameters could not be loaded.')
return
site_vars = site_variables_from_extra_params(site_extra_params)
common.create_forecasts(api, site, site_vars,
default_forecasts.TEMPLATE_FORECASTS)
def fetch(api, site, start, end, *, doe_arm_user_id, doe_arm_api_key):
"""Retrieve observation data for a DOE ARM site between start and end.
Parameters
----------
api : io.APISession
Unused but conforms to common.update_site_observations call
site : datamodel.Site
Site object with the appropriate metadata.
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
doe_arm_user_id : str
User ID to access the DOE ARM api.
doe_arm_api_key : str
API key to access the DOE ARM api.
Returns
-------
data : pandas.DataFrame
All of the requested data concatenated into a single DataFrame.
"""
try:
site_extra_params = common.decode_extra_parameters(site)
except ValueError:
return pd.DataFrame()
available_datastreams = site_extra_params['datastreams']
datastreams = {}
# Build a dict with top-level keys to 'met' and 'qcrad' if meteorological
# or irradiance data exists at the site. This is to later group dataframes
# created from each datastream by the type of data found in the stream.
for ds_type in ['met', 'qcrad']:
if ds_type in available_datastreams:
ds_type_dict = {}
streams = available_datastreams[ds_type]
# When a dict is present each key is a datastream and value is
# a date range for which the datastream contains data. We need to
# determine which streams to use to get all of the requested data.
if isinstance(streams, dict):
ds_type_dict.update(
find_stream_data_availability(streams, start, end))
else:
# If a single string datastream name exists, we assume that all
# available data is contained in the stream. Deferring to the
# data fetch process, which will fail to retrieve data and
# continue gracefully.
ds_type_dict[streams] = (start, end)
datastreams[ds_type] = ds_type_dict
site_dfs = []
for stream_type in datastreams:
# Stitch together all the datastreams with similar data.
stream_type_dfs = []
for datastream, date_range in datastreams[stream_type].items():
stream_df = arm.fetch_arm(
doe_arm_user_id,
doe_arm_api_key,
datastream,
_determine_stream_vars(datastream),
date_range[0].tz_convert(site.timezone),
date_range[1].tz_convert(site.timezone)
)
if stream_df.empty:
logger.warning(f'Datastream {datastream} for site {site.name} '
f'contained no entries from {start} to {end}.')
else:
stream_type_dfs.append(stream_df)
if stream_type_dfs:
# Concatenate all dataframes of similar data
stream_type_df = pd.concat(stream_type_dfs)
site_dfs.append(stream_type_df)
if site_dfs:
# Join dataframes with different variables along the index, this has
# the side effect of introducing missing data if any requests have
# failed.
obs_df = pd.concat(site_dfs, axis=1)
obs_df = obs_df.rename(columns=DOE_ARM_VARIABLE_MAP)
return obs_df
else:
logger.warning(f'Data for site {site.name} contained no entries from '
f'{start} to {end}.')
return pd.DataFrame()
def update_observation_data(api, sites, observations, start, end):
"""Post new observation data to a list of DOE ARM Observations
from start to end.
api : solarforecastarbiter.io.api.APISession
An active Reference user session.
sites: list of solarforecastarbiter.datamodel.Site
List of all reference sites as Objects
observations: list of solarforecastarbiter.datamodel.Observation
List of all reference observations.
start : datetime
The beginning of the period to request data for.
end : datetime
The end of the period to request data for.
"""
doe_arm_api_key = os.getenv('DOE_ARM_API_KEY')
if doe_arm_api_key is None:
raise KeyError('"DOE_ARM_API_KEY" environment variable must be '
'set to update DOE ARM observation data.')
doe_arm_user_id = os.getenv('DOE_ARM_USER_ID')
if doe_arm_user_id is None:
raise KeyError('"DOE_ARM_USER_ID" environment variable must be '
'set to update DOE ARM observation data.')
doe_arm_sites = common.filter_by_networks(sites, 'DOE ARM')
for site in doe_arm_sites:
common.update_site_observations(
api, partial(fetch, doe_arm_user_id=doe_arm_user_id,
doe_arm_api_key=doe_arm_api_key),
site, observations, start, end)
def adjust_site_parameters(site):
"""Updates extra parameters with applicable datastreams from
`arm_reference_sites.json`
Parameters
----------
site: dict
Returns
-------
dict
Copy of input with updated extra parameters.
"""
with open(DEFAULT_SITEFILE) as fp:
sites_metadata = json.load(fp)['sites']
# ARM has multiple 'locations' at each 'site'. For example, the Southern
# Great Plains (SGP) ARM site has many locations throughout Oklahoma, and
# neighboring states. Each location is identified by a code, e.g. Byron
# Oklahoma is location `E11` at the SGP site, and it's data is accessed via
# datastreams with the pattern `sgp<product>e11.<data-level>` where product
# indicates the contents of the datastream (we are interested in `met` and
# `qcrad1long` products) and data-level indicates quality and any
# processing applied to the data. In the Solar Forecast Arbiter we store
# each 'location' as a SFA site. We use the `network_api_id` to indicate
# the ARM site's location code and `network_api_abbreviation` to represent
# ARM "site code (e.g. `sgp`). Both of these keys must be used to identify
# a site in the Solar Forecast Arbiter, because ARM's location ids are only
# unique for a given ARM site.
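# For example (hypothetical datastream name), 'sgpqcrad1longE11.c1' would refer
# to the qcrad1long product at location E11 of the sgp site, at data level c1.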
arm_location_id = site['extra_parameters']['network_api_id']
arm_site_id = site['extra_parameters']['network_api_abbreviation']
for site_metadata in sites_metadata:
site_extra_params = json.loads(site_metadata['extra_parameters'])
if (
site_extra_params['network_api_id'] == arm_location_id
and site_extra_params['network_api_abbreviation'] == arm_site_id
):
site_out = site.copy()
site_out['extra_parameters'] = site_extra_params
return site_out
return site
def find_stream_data_availability(streams, start, end):
"""Determines what date ranges to use for each datastream. Date ranges of
each stream must not overlap.
Parameters
----------
streams: dict
Dict where values are string datastream names and values are iso8601
date ranges `start/end` indicating the period of data available at
that datastream.
start: datetime
The start of the period to request data for.
end: datetime
The end of the period to request data for.
Returns
-------
dict
Dict where keys are datastreams and values are two element lists of
`[start datetime, end datetime]` that when considered together should
span all of the available data between the requested start and end.
Raises
------
ValueError
If streams in the dictionary have overlapping ranges.
"""
stream_range_dict = {}
streams_with_ranges = [(stream, parse_iso_date_range(date_range))
for stream, date_range in streams.items()]
streams_overlap = detect_stream_overlap(streams_with_ranges)
if streams_overlap:
raise ValueError(f'Overlapping datastreams found in {streams.keys()}.')
# Find the overlap between each streams available data, and the requested
# period
for datastream, stream_range in streams_with_ranges:
overlap = get_period_overlap(
start, end, stream_range[0], stream_range[1])
if overlap is None:
# The datastream did not contain any data within the requested
# range, we don't need to use it for this request.
continue
else:
stream_range_dict[datastream] = overlap
return stream_range_dict
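# Illustrative sketch (hypothetical stream names and dates), assuming the two
# ranges are not flagged as overlapping by detect_stream_overlap:
#   streams = {'sgpqcrad1longE11.c1': '2019-01-01/2019-12-31',
#              'sgpqcrad1longE11.c2': '2020-01-01/2021-01-01'}
#   find_stream_data_availability(streams,
#                                 pd.Timestamp('2019-06-01', tz='utc'),
#                                 pd.Timestamp('2020-06-01', tz='utc'))
# would map the first stream to [2019-06-01, 2019-12-31] and the second to
# [2020-01-01, 2020-06-01].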
def get_period_overlap(request_start, request_end, avail_start, avail_end):
"""Finds period of overlap between the requested time range and the
available period.
Parameters
----------
request_start: datetime-like
Start of the period of requested data.
request_end: datetime-like
End of the period of requested data.
avail_start: datetime-like
Start of the available data.
avail_end: datatime-like
End of available data.
Returns
-------
start, end: list of datetime or None
Start and end of overlapping period, or None if no overlap occurred.
"""
if request_start < avail_end and request_end > avail_start:
if request_start < avail_start:
start = avail_start
else:
start = request_start
if request_end > avail_end:
end = avail_end
else:
end = request_end
return [start, end]
else:
return None
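# Illustrative example: a request for 2020-01-05..2020-01-20 against data available
# for 2020-01-01..2020-01-10 yields [2020-01-05, 2020-01-10]; a request that falls
# entirely outside the available window returns None.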
def parse_iso_date_range(date_range_string):
"""Parses a date range string in iso8601 format ("start date/end date")
into a tuple of pandas timestamps `(start, end)`.
"""
start, end = date_range_string.split('/')
return (pd.Timestamp(start, tz='utc'), pd.Timestamp(end, tz='utc'))
from flask import Flask, render_template, request,session,redirect,url_for
from sqlalchemy import create_engine
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base, ConcreteBase
from sqlalchemy.orm import sessionmaker
from sqlalchemy.sql import *
from sqlalchemy import exc
from random import randint
import logging
import requests
import time
import os
import json,requests
import pickle
import json,requests
#imports the pickle file used for the predictions
pickle_in = open("dict.pickle","rb")
regression_models = pickle.load(pickle_in)
import dbinfo
import pandas as pd
import datetime
from datetime import timedelta,datetime
app = Flask(__name__)
# cant pass a variable in flask without a super secret key because lord fobid flask makes it easy, anyway this is just a placeholder, though true
app.secret_key = "Kilkenny is the best county, come at me bro"
DB_NAME = dbinfo.DB_DBIKES_USER
DB_PASS = dbinfo.DB_DBIKES_PASS
DB_HOST = dbinfo.DB_DBIKES
GM_KEY = dbinfo.GMAPS_KEY
Base = declarative_base()
def map_customer_query_data(obj):
"""
Function to return a dictionary mapped from obj.
Used for the weather table.
"""
return {
'FirstName' : obj['firstname'],
'LastName' : obj['lastname'],
'EmailAddress' : obj['emailAddress'],
'Country' : obj['country'],
'Subject' : obj['subject'],
'RecievedAt' : datetime.now()}
class CustomerQueries(Base):
"""
class constructor for the customer_query table.
"""
__tablename__ = "customer_query"
id = Column(Integer, primary_key=True)
FirstName = Column(String(128))
LastName = Column(String(128))
EmailAddress = Column(String(128))
Country = Column(String(128))
Subject = Column(String(128))
RecievedAt = Column(DateTime)
def __repr__(self):
"""
Prints the values instead of the memory pointer.
"""
return "<Node(Id='%s', Number='%s', Status='%s', AvailableBikeStands='%s', BikeStands='%s', LastUpdate='%s')>" \
% (self.Id, self.Number, self.Status, self.AvailableBikeStands, self.BikeStands, self.LastUpdate)
@app.route("/")
def home():
return render_template("index.html")
@app.route("/plan")
def plan():
return render_template("prediction.html")
@app.route("/about")
def about():
return render_template("about.html")
@app.route("/map")
def mapbikes():
return render_template("map.html", apiKey = GM_KEY)
@app.route("/contacts",methods=["GET","POST"])
def contacts():
if request.method == "POST":
engine = create_engine(f"mysql+mysqlconnector://{DB_NAME}:{DB_PASS}@{DB_HOST}/dbikes_main", echo=False)
CustomerQueries.__table__.create(bind=engine, checkfirst=True)
Session = sessionmaker(bind=engine)
session = Session()
req = request.form
session.add(CustomerQueries(**map_customer_query_data(request.form)))
session.commit()
return redirect(request.url)
return render_template("contacts.html")
@app.route("/stations")
def stations():
engine = create_engine(f"mysql+mysqlconnector://{DB_NAME}:{DB_PASS}@{DB_HOST}/dbikes_main", echo=False)
df_static = pd.read_sql_table("static_stations", engine)
df_live = pd.read_sql_table("dynamic_stations_live", engine)
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pandas.compat as compat
###############################################################
# Index / Series common tests which may trigger dtype coercions
###############################################################
class CoercionBase(object):
klasses = ['index', 'series']
dtypes = ['object', 'int64', 'float64', 'complex128', 'bool',
'datetime64', 'datetime64tz', 'timedelta64', 'period']
@property
def method(self):
raise NotImplementedError(self)
def _assert(self, left, right, dtype):
# explicitly check dtype to avoid any unexpected result
if isinstance(left, pd.Series):
tm.assert_series_equal(left, right)
elif isinstance(left, pd.Index):
tm.assert_index_equal(left, right)
else:
raise NotImplementedError
self.assertEqual(left.dtype, dtype)
self.assertEqual(right.dtype, dtype)
def test_has_comprehensive_tests(self):
for klass in self.klasses:
for dtype in self.dtypes:
method_name = 'test_{0}_{1}_{2}'.format(self.method,
klass, dtype)
if not hasattr(self, method_name):
msg = 'test method is not defined: {0}, {1}'
raise AssertionError(msg.format(type(self), method_name))
class TestSetitemCoercion(CoercionBase, tm.TestCase):
method = 'setitem'
def _assert_setitem_series_conversion(self, original_series, loc_value,
expected_series, expected_dtype):
""" test series value's coercion triggered by assignment """
temp = original_series.copy()
temp[1] = loc_value
tm.assert_series_equal(temp, expected_series)
# check dtype explicitly for sure
self.assertEqual(temp.dtype, expected_dtype)
# .loc works different rule, temporary disable
# temp = original_series.copy()
# temp.loc[1] = loc_value
# tm.assert_series_equal(temp, expected_series)
def test_setitem_series_object(self):
obj = pd.Series(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Series(['a', 1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Series(['a', 1.1, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = pd.Series(['a', 1 + 1j, 'c', 'd'])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = pd.Series(['a', True, 'c', 'd'])
self._assert_setitem_series_conversion(obj, True, exp, np.object)
def test_setitem_series_int64(self):
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1, exp, np.int64)
# int + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1, 1.1, 3, 4]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.int64)
# int + complex -> complex
exp = pd.Series([1, 1 + 1j, 3, 4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# int + bool -> int
exp = pd.Series([1, 1, 3, 4])
self._assert_setitem_series_conversion(obj, True, exp, np.int64)
def test_setitem_series_float64(self):
obj = pd.Series([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Series([1.1, 1.1, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.float64)
# float + complex -> complex
exp = pd.Series([1.1, 1 + 1j, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp,
np.complex128)
# float + bool -> float
exp = pd.Series([1.1, 1.0, 3.3, 4.4])
self._assert_setitem_series_conversion(obj, True, exp, np.float64)
def test_setitem_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_setitem_series_conversion(obj, True, exp, np.complex128)
def test_setitem_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
# bool + int -> int
# TODO_GH12747 The result must be int
# tm.assert_series_equal(temp, pd.Series([1, 1, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1, exp, np.bool)
# TODO_GH12747 The result must be int
# assigning int greater than bool
# tm.assert_series_equal(temp, pd.Series([1, 3, 1, 0]))
# self.assertEqual(temp.dtype, np.int64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 3, exp, np.bool)
# bool + float -> float
# TODO_GH12747 The result must be float
# tm.assert_series_equal(temp, pd.Series([1., 1.1, 1., 0.]))
# self.assertEqual(temp.dtype, np.float64)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1.1, exp, np.bool)
# bool + complex -> complex (buggy, results in bool)
# TODO_GH12747 The result must be complex
# tm.assert_series_equal(temp, pd.Series([1, 1 + 1j, 1, 0]))
# self.assertEqual(temp.dtype, np.complex128)
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, 1 + 1j, exp, np.bool)
# bool + bool -> bool
exp = pd.Series([True, True, True, False])
self._assert_setitem_series_conversion(obj, True, exp, np.bool)
def test_setitem_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_setitem_series_conversion(obj, 1, exp, 'datetime64[ns]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2011-01-02', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz -> datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_setitem_series_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64 + int -> object
# ToDo: The result must be object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp(1, tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self._assert_setitem_series_conversion(obj, 1, exp,
'datetime64[ns, US/Eastern]')
# ToDo: add more tests once the above issue has been fixed
def test_setitem_series_timedelta64(self):
pass
def test_setitem_series_period(self):
pass
def _assert_setitem_index_conversion(self, original_series, loc_key,
expected_index, expected_dtype):
""" test index's coercion triggered by assign key """
temp = original_series.copy()
temp[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
temp = original_series.copy()
temp.loc[loc_key] = 5
exp = pd.Series([1, 2, 3, 4, 5], index=expected_index)
tm.assert_series_equal(temp, exp)
# check dtype explicitly for sure
self.assertEqual(temp.index.dtype, expected_dtype)
def test_setitem_index_object(self):
obj = pd.Series([1, 2, 3, 4], index=list('abcd'))
self.assertEqual(obj.index.dtype, np.object)
# object + object -> object
exp_index = pd.Index(list('abcdx'))
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
# object + int -> IndexError, regarded as location
temp = obj.copy()
with tm.assertRaises(IndexError):
temp[5] = 5
# object + float -> object
exp_index = pd.Index(['a', 'b', 'c', 'd', 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.object)
def test_setitem_index_int64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4])
self.assertEqual(obj.index.dtype, np.int64)
# int + int -> int
exp_index = pd.Index([0, 1, 2, 3, 5])
self._assert_setitem_index_conversion(obj, 5, exp_index, np.int64)
# int + float -> float
exp_index = pd.Index([0, 1, 2, 3, 1.1])
self._assert_setitem_index_conversion(obj, 1.1, exp_index, np.float64)
# int + object -> object
exp_index = pd.Index([0, 1, 2, 3, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_float64(self):
# tests setitem with non-existing numeric key
obj = pd.Series([1, 2, 3, 4], index=[1.1, 2.1, 3.1, 4.1])
self.assertEqual(obj.index.dtype, np.float64)
# float + int -> int
temp = obj.copy()
# TODO_GH12747 The result must be float
with tm.assertRaises(IndexError):
temp[5] = 5
# float + float -> float
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 5.1])
self._assert_setitem_index_conversion(obj, 5.1, exp_index, np.float64)
# float + object -> object
exp_index = pd.Index([1.1, 2.1, 3.1, 4.1, 'x'])
self._assert_setitem_index_conversion(obj, 'x', exp_index, np.object)
def test_setitem_index_complex128(self):
pass
def test_setitem_index_bool(self):
pass
def test_setitem_index_datetime64(self):
pass
def test_setitem_index_datetime64tz(self):
pass
def test_setitem_index_timedelta64(self):
pass
def test_setitem_index_period(self):
pass
class TestInsertIndexCoercion(CoercionBase, tm.TestCase):
klasses = ['index']
method = 'insert'
def _assert_insert_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by insert """
target = original.copy()
res = target.insert(1, value)
tm.assert_index_equal(res, expected)
self.assertEqual(res.dtype, expected_dtype)
def test_insert_index_object(self):
obj = pd.Index(list('abcd'))
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = pd.Index(['a', 1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = pd.Index(['a', 1.1, 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 1.1, exp, np.object)
# object + bool -> object
res = obj.insert(1, False)
tm.assert_index_equal(res, pd.Index(['a', False, 'b', 'c', 'd']))
self.assertEqual(res.dtype, np.object)
# object + object -> object
exp = pd.Index(['a', 'x', 'b', 'c', 'd'])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_int64(self):
obj = pd.Int64Index([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
# int + int -> int
exp = pd.Index([1, 1, 2, 3, 4])
self._assert_insert_conversion(obj, 1, exp, np.int64)
# int + float -> float
exp = pd.Index([1, 1.1, 2, 3, 4])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# int + bool -> int
exp = pd.Index([1, 0, 2, 3, 4])
self._assert_insert_conversion(obj, False, exp, np.int64)
# int + object -> object
exp = pd.Index([1, 'x', 2, 3, 4])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_float64(self):
obj = pd.Float64Index([1., 2., 3., 4.])
self.assertEqual(obj.dtype, np.float64)
# float + int -> int
exp = pd.Index([1., 1., 2., 3., 4.])
self._assert_insert_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = pd.Index([1., 1.1, 2., 3., 4.])
self._assert_insert_conversion(obj, 1.1, exp, np.float64)
# float + bool -> float
exp = pd.Index([1., 0., 2., 3., 4.])
self._assert_insert_conversion(obj, False, exp, np.float64)
# float + object -> object
exp = pd.Index([1., 'x', 2., 3., 4.])
self._assert_insert_conversion(obj, 'x', exp, np.object)
def test_insert_index_complex128(self):
pass
def test_insert_index_bool(self):
pass
def test_insert_index_datetime64(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'])
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_datetime64tz(self):
obj = pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03',
'2011-01-04'], tz='US/Eastern')
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64
exp = pd.DatetimeIndex(['2011-01-01', '2012-01-01', '2011-01-02',
'2011-01-03', '2011-01-04'], tz='US/Eastern')
val = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_insert_conversion(obj, val, exp,
'datetime64[ns, US/Eastern]')
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "Passed item and index have different timezone"
with tm.assertRaisesRegexp(ValueError, msg):
obj.insert(1, pd.Timestamp('2012-01-01', tz='Asia/Tokyo'))
# ToDo: must coerce to object
msg = "cannot insert DatetimeIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_timedelta64(self):
obj = pd.TimedeltaIndex(['1 day', '2 day', '3 day', '4 day'])
self.assertEqual(obj.dtype, 'timedelta64[ns]')
# timedelta64 + timedelta64 => timedelta64
exp = pd.TimedeltaIndex(['1 day', '10 day', '2 day', '3 day', '4 day'])
self._assert_insert_conversion(obj, pd.Timedelta('10 day'),
exp, 'timedelta64[ns]')
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, pd.Timestamp('2012-01-01'))
# ToDo: must coerce to object
msg = "cannot insert TimedeltaIndex with incompatible label"
with tm.assertRaisesRegexp(TypeError, msg):
obj.insert(1, 1)
def test_insert_index_period(self):
obj = pd.PeriodIndex(['2011-01', '2011-02', '2011-03', '2011-04'],
freq='M')
self.assertEqual(obj.dtype, 'period[M]')
# period + period => period
exp = pd.PeriodIndex(['2011-01', '2012-01', '2011-02',
'2011-03', '2011-04'], freq='M')
self._assert_insert_conversion(obj, pd.Period('2012-01', freq='M'),
exp, 'period[M]')
# period + datetime64 => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
pd.Timestamp('2012-01-01'),
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, pd.Timestamp('2012-01-01'),
exp, np.object)
# period + int => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
1,
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 1, exp, np.object)
# period + object => object
exp = pd.Index([pd.Period('2011-01', freq='M'),
'x',
pd.Period('2011-02', freq='M'),
pd.Period('2011-03', freq='M'),
pd.Period('2011-04', freq='M')], freq='M')
self._assert_insert_conversion(obj, 'x', exp, np.object)
class TestWhereCoercion(CoercionBase, tm.TestCase):
method = 'where'
def _assert_where_conversion(self, original, cond, values,
expected, expected_dtype):
""" test coercion triggered by where """
target = original.copy()
res = target.where(cond, values)
self._assert(res, expected, expected_dtype)
def _where_object_common(self, klass):
obj = klass(list('abcd'))
self.assertEqual(obj.dtype, np.object)
cond = klass([True, False, True, False])
# object + int -> object
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, 1, exp, np.object)
values = klass([5, 6, 7, 8])
exp = klass(['a', 6, 'c', 8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.object)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass(['a', 6.6, 'c', 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.object)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass(['a', 6 + 6j, 'c', 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.object)
if klass is pd.Series:
exp = klass(['a', 1, 'c', 1])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', 0, 'c', 1])
self._assert_where_conversion(obj, cond, values, exp, np.object)
elif klass is pd.Index:
# object + bool -> object
exp = klass(['a', True, 'c', True])
self._assert_where_conversion(obj, cond, True, exp, np.object)
values = klass([True, False, True, True])
exp = klass(['a', False, 'c', True])
self._assert_where_conversion(obj, cond, values, exp, np.object)
else:
            raise NotImplementedError
def test_where_series_object(self):
self._where_object_common(pd.Series)
def test_where_index_object(self):
self._where_object_common(pd.Index)
def _where_int64_common(self, klass):
obj = klass([1, 2, 3, 4])
self.assertEqual(obj.dtype, np.int64)
cond = klass([True, False, True, False])
# int + int -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = klass([5, 6, 7, 8])
exp = klass([1, 6, 3, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# int + float -> float
exp = klass([1, 1.1, 3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1, 6.6, 3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# int + complex -> complex
if klass is pd.Series:
exp = klass([1, 1 + 1j, 3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1, 6 + 6j, 3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# int + bool -> int
exp = klass([1, 1, 3, 1])
self._assert_where_conversion(obj, cond, True, exp, np.int64)
values = klass([True, False, True, True])
exp = klass([1, 0, 3, 1])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
def test_where_series_int64(self):
self._where_int64_common(pd.Series)
def test_where_index_int64(self):
self._where_int64_common(pd.Index)
def _where_float64_common(self, klass):
obj = klass([1.1, 2.2, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
cond = klass([True, False, True, False])
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, 1, exp, np.float64)
values = klass([5, 6, 7, 8])
exp = klass([1.1, 6.0, 3.3, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = klass([5.5, 6.6, 7.7, 8.8])
exp = klass([1.1, 6.6, 3.3, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# float + complex -> complex
if klass is pd.Series:
exp = klass([1.1, 1 + 1j, 3.3, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp,
np.complex128)
values = klass([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = klass([1.1, 6 + 6j, 3.3, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp,
np.complex128)
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, True, exp, np.float64)
values = klass([True, False, True, True])
exp = klass([1.1, 0.0, 3.3, 1.0])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
def test_where_series_float64(self):
self._where_float64_common(pd.Series)
def test_where_index_float64(self):
self._where_float64_common(pd.Index)
def test_where_series_complex128(self):
obj = pd.Series([1 + 1j, 2 + 2j, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
cond = pd.Series([True, False, True, False])
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.complex128)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1 + 1j, 6.0, 3 + 3j, 8.0])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.complex128)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1 + 1j, 6.6, 3 + 3j, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1 + 1j, 6 + 6j, 3 + 3j, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, True, exp, np.complex128)
values = pd.Series([True, False, True, True])
exp = pd.Series([1 + 1j, 0, 3 + 3j, 1])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
def test_where_index_complex128(self):
pass
def test_where_series_bool(self):
obj = pd.Series([True, False, True, False])
self.assertEqual(obj.dtype, np.bool)
cond = pd.Series([True, False, True, False])
# bool + int -> int
exp = pd.Series([1, 1, 1, 1])
self._assert_where_conversion(obj, cond, 1, exp, np.int64)
values = pd.Series([5, 6, 7, 8])
exp = pd.Series([1, 6, 1, 8])
self._assert_where_conversion(obj, cond, values, exp, np.int64)
# bool + float -> float
exp = pd.Series([1.0, 1.1, 1.0, 1.1])
self._assert_where_conversion(obj, cond, 1.1, exp, np.float64)
values = pd.Series([5.5, 6.6, 7.7, 8.8])
exp = pd.Series([1.0, 6.6, 1.0, 8.8])
self._assert_where_conversion(obj, cond, values, exp, np.float64)
# bool + complex -> complex
exp = pd.Series([1, 1 + 1j, 1, 1 + 1j])
self._assert_where_conversion(obj, cond, 1 + 1j, exp, np.complex128)
values = pd.Series([5 + 5j, 6 + 6j, 7 + 7j, 8 + 8j])
exp = pd.Series([1, 6 + 6j, 1, 8 + 8j])
self._assert_where_conversion(obj, cond, values, exp, np.complex128)
# bool + bool -> bool
exp = pd.Series([True, True, True, True])
self._assert_where_conversion(obj, cond, True, exp, np.bool)
values = pd.Series([True, False, True, True])
exp = pd.Series([True, False, True, True])
self._assert_where_conversion(obj, cond, values, exp, np.bool)
def test_where_index_bool(self):
pass
def test_where_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Series([True, False, True, False])
# datetime64 + datetime64 -> datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-01')])
self._assert_where_conversion(obj, cond, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
values = pd.Series([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not coerce to UTC, must be object
values = pd.Series([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02 05:00'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04 05:00')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_index_datetime64(self):
obj = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2011-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
cond = pd.Index([True, False, True, False])
# datetime64 + datetime64 -> datetime64
# must support scalar
msg = "cannot coerce a Timestamp with a tz on a naive Block"
with tm.assertRaises(TypeError):
obj.where(cond, pd.Timestamp('2012-01-01'))
values = pd.Index([pd.Timestamp('2012-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2012-01-03'),
pd.Timestamp('2012-01-04')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
# ToDo: coerce to object
msg = ("Index\\(\\.\\.\\.\\) must be called with a collection "
"of some kind")
with tm.assertRaisesRegexp(TypeError, msg):
obj.where(cond, pd.Timestamp('2012-01-01', tz='US/Eastern'))
# ToDo: do not ignore timezone, must be object
values = pd.Index([pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2012-01-02', tz='US/Eastern'),
pd.Timestamp('2012-01-03', tz='US/Eastern'),
pd.Timestamp('2012-01-04', tz='US/Eastern')])
exp = pd.Index([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-02'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2012-01-04')])
self._assert_where_conversion(obj, cond, values, exp, 'datetime64[ns]')
def test_where_series_datetime64tz(self):
pass
def test_where_series_timedelta64(self):
pass
def test_where_series_period(self):
pass
def test_where_index_datetime64tz(self):
pass
def test_where_index_timedelta64(self):
pass
def test_where_index_period(self):
pass
class TestFillnaSeriesCoercion(CoercionBase, tm.TestCase):
    # not indexing, but place here for consistency
method = 'fillna'
def _assert_fillna_conversion(self, original, value,
expected, expected_dtype):
""" test coercion triggered by fillna """
target = original.copy()
res = target.fillna(value)
self._assert(res, expected, expected_dtype)
def _fillna_object_common(self, klass):
obj = klass(['a', np.nan, 'c', 'd'])
self.assertEqual(obj.dtype, np.object)
# object + int -> object
exp = klass(['a', 1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1, exp, np.object)
# object + float -> object
exp = klass(['a', 1.1, 'c', 'd'])
self._assert_fillna_conversion(obj, 1.1, exp, np.object)
# object + complex -> object
exp = klass(['a', 1 + 1j, 'c', 'd'])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
# object + bool -> object
exp = klass(['a', True, 'c', 'd'])
self._assert_fillna_conversion(obj, True, exp, np.object)
def test_fillna_series_object(self):
self._fillna_object_common(pd.Series)
def test_fillna_index_object(self):
self._fillna_object_common(pd.Index)
def test_fillna_series_int64(self):
# int can't hold NaN
pass
def test_fillna_index_int64(self):
pass
def _fillna_float64_common(self, klass):
obj = klass([1.1, np.nan, 3.3, 4.4])
self.assertEqual(obj.dtype, np.float64)
# float + int -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1, exp, np.float64)
# float + float -> float
exp = klass([1.1, 1.1, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1.1, exp, np.float64)
if klass is pd.Series:
# float + complex -> complex
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
elif klass is pd.Index:
# float + complex -> object
exp = klass([1.1, 1 + 1j, 3.3, 4.4])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.object)
else:
            raise NotImplementedError
# float + bool -> float
exp = klass([1.1, 1.0, 3.3, 4.4])
self._assert_fillna_conversion(obj, True, exp, np.float64)
def test_fillna_series_float64(self):
self._fillna_float64_common(pd.Series)
def test_fillna_index_float64(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_complex128(self):
obj = pd.Series([1 + 1j, np.nan, 3 + 3j, 4 + 4j])
self.assertEqual(obj.dtype, np.complex128)
# complex + int -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1, exp, np.complex128)
# complex + float -> complex
exp = pd.Series([1 + 1j, 1.1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1.1, exp, np.complex128)
# complex + complex -> complex
exp = pd.Series([1 + 1j, 1 + 1j, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, 1 + 1j, exp, np.complex128)
# complex + bool -> complex
exp = pd.Series([1 + 1j, 1, 3 + 3j, 4 + 4j])
self._assert_fillna_conversion(obj, True, exp, np.complex128)
def test_fillna_index_complex128(self):
self._fillna_float64_common(pd.Index)
def test_fillna_series_bool(self):
# bool can't hold NaN
pass
def test_fillna_index_bool(self):
pass
def test_fillna_series_datetime64(self):
obj = pd.Series([pd.Timestamp('2011-01-01'),
pd.NaT,
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self.assertEqual(obj.dtype, 'datetime64[ns]')
# datetime64 + datetime64 => datetime64
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, pd.Timestamp('2012-01-01'),
exp, 'datetime64[ns]')
# datetime64 + datetime64tz => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp('2012-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
value = pd.Timestamp('2012-01-01', tz='US/Eastern')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64 + int => object
# ToDo: must be coerced to object
exp = pd.Series([pd.Timestamp('2011-01-01'),
pd.Timestamp(1),
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 1, exp, 'datetime64[ns]')
# datetime64 + object => object
exp = pd.Series([pd.Timestamp('2011-01-01'),
'x',
pd.Timestamp('2011-01-03'),
pd.Timestamp('2011-01-04')])
self._assert_fillna_conversion(obj, 'x', exp, np.object)
def test_fillna_series_datetime64tz(self):
tz = 'US/Eastern'
obj = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.NaT,
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
self.assertEqual(obj.dtype, 'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64tz => datetime64tz
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01', tz=tz)
self._assert_fillna_conversion(obj, value, exp,
'datetime64[ns, US/Eastern]')
# datetime64tz + datetime64 => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01'),
pd.Timestamp('2011-01-03', tz=tz),
pd.Timestamp('2011-01-04', tz=tz)])
value = pd.Timestamp('2012-01-01')
self._assert_fillna_conversion(obj, value, exp, np.object)
# datetime64tz + datetime64tz(different tz) => object
exp = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz='Asia/Tokyo'),
pd.Timestamp('2011-01-03', tz=tz),
                         pd.Timestamp('2011-01-04', tz=tz)])
# -*- coding: utf-8 -*-
import warnings
warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
import glob
from src.util.log_util import set_logger
from sklearn.decomposition import TruncatedSVD
import json
from sklearn.feature_extraction.text import TfidfVectorizer
import input
import os
logger = set_logger(__name__)
def parse_tfidf(X_temp, X_text):
n_components = 16
text_features = []
# Generate text features:
for i in X_text.columns:
# Initialize decomposition methods:
print(f'generating features from: {i}')
tfv = TfidfVectorizer(min_df=2, max_features=None,
strip_accents='unicode', analyzer='word', token_pattern=r'(?u)\b\w+\b',
ngram_range=(1, 3), use_idf=1, smooth_idf=1, sublinear_tf=1)
svd_ = TruncatedSVD(
n_components=n_components, random_state=1337)
tfidf_col = tfv.fit_transform(X_text.loc[:, i].values)
svd_col = svd_.fit_transform(tfidf_col)
svd_col = pd.DataFrame(svd_col)
svd_col = svd_col.add_prefix('TFIDF_{}_'.format(i))
text_features.append(svd_col)
text_features = pd.concat(text_features, axis=1)
X_temp = pd.concat([X_temp, text_features], axis=1)
for i in X_text.columns:
X_temp = X_temp.drop(i, axis=1)
return X_temp
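# Illustrative smoke test for parse_tfidf (not part of the original script): the column names
# "Age"/"Description" and the synthetic vocabulary below are assumptions, sized so that
# min_df=2 and the 16-component SVD have enough distinct terms to work with.
def _parse_tfidf_smoke_test():
    rng = np.random.RandomState(0)
    words = ['fluffy', 'playful', 'quiet', 'young', 'old', 'rescued', 'healthy', 'shy',
             'friendly', 'energetic', 'calm', 'vaccinated', 'small', 'large', 'brown',
             'white', 'black', 'spotted', 'adopted', 'trained']
    descs = [' '.join(rng.choice(words, size=8)) for _ in range(40)]
    X_temp = pd.DataFrame({'Age': rng.randint(1, 10, size=40), 'Description': descs})
    X_text = X_temp[['Description']]
    out = parse_tfidf(X_temp.copy(), X_text)
    # The text column is dropped and replaced by TFIDF_Description_0 .. TFIDF_Description_15.
    assert out.filter(like='TFIDF_Description_').shape == (40, 16)
    return out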
class PetFinderParser(object):
def __init__(self, debug=False):
self.debug = debug
self.sentence_sep = ' '
self.extract_sentiment_text = False
def open_json_file(self, filename):
with open(filename, 'r', encoding='utf-8') as f:
json_file = json.load(f)
return json_file
def parse_sentiment_file(self, file):
"""
Parse sentiment file. Output DF with sentiment features.
"""
file_sentiment = file['documentSentiment']
file_entities = [x['name'] for x in file['entities']]
file_entities = self.sentence_sep.join(file_entities)
file_sentences_sentiment = [x['sentiment'] for x in file['sentences']]
file_sentences_sentiment = pd.DataFrame.from_dict(
file_sentences_sentiment, orient='columns')
file_sentences_sentiment_df = pd.DataFrame(
{
'magnitude_sum': file_sentences_sentiment['magnitude'].sum(axis=0),
'score_sum': file_sentences_sentiment['score'].sum(axis=0),
'magnitude_mean': file_sentences_sentiment['magnitude'].mean(axis=0),
'score_mean': file_sentences_sentiment['score'].mean(axis=0),
'magnitude_var': file_sentences_sentiment['magnitude'].var(axis=0),
'score_var': file_sentences_sentiment['score'].var(axis=0),
}, index=[0]
)
df_sentiment = pd.DataFrame.from_dict(file_sentiment, orient='index').T
df_sentiment = pd.concat([df_sentiment, file_sentences_sentiment_df], axis=1)
df_sentiment['entities'] = file_entities
df_sentiment = df_sentiment.add_prefix('sentiment_')
return df_sentiment
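    # Minimal sketch of the sentiment JSON this parser expects (field names taken from the
    # accesses above; the values are illustrative only):
    #   {"documentSentiment": {"magnitude": 0.8, "score": 0.4},
    #    "entities": [{"name": "dog"}],
    #    "sentences": [{"sentiment": {"magnitude": 0.8, "score": 0.4}}]}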
def parse_metadata_file(self, file):
"""
Parse metadata file. Output DF with metadata features.
"""
file_keys = list(file.keys())
if 'labelAnnotations' in file_keys:
file_annots = file['labelAnnotations']
file_top_score = np.asarray([x['score'] for x in file_annots]).mean()
file_top_desc = [x['description'] for x in file_annots]
else:
file_top_score = np.nan
file_top_desc = ['']
file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']
file_crops = file['cropHintsAnnotation']['cropHints']
file_color_score = np.asarray([x['score'] for x in file_colors]).mean()
file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()
file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()
if 'importanceFraction' in file_crops[0].keys():
file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()
else:
file_crop_importance = np.nan
df_metadata = {
'annots_score': file_top_score,
'color_score': file_color_score,
'color_pixelfrac': file_color_pixelfrac,
'crop_conf': file_crop_conf,
'crop_importance': file_crop_importance,
'annots_top_desc': self.sentence_sep.join(file_top_desc)
}
df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T
df_metadata = df_metadata.add_prefix('metadata_')
return df_metadata
def extract_additional_features(pet_id, mode='train'):
pet_parser = PetFinderParser()
sentiment_filename = os.path.join(input.__path__[0],
f'petfinder-adoption-prediction/{mode}_sentiment/{pet_id}.json')
try:
sentiment_file = pet_parser.open_json_file(sentiment_filename)
df_sentiment = pet_parser.parse_sentiment_file(sentiment_file)
df_sentiment['PetID'] = pet_id
except FileNotFoundError:
df_sentiment = []
dfs_metadata = []
metadata_filenames = sorted(glob.glob(os.path.join(input.__path__[0],
f'petfinder-adoption-prediction/{mode}_metadata/{pet_id}*.json')))
if len(metadata_filenames) > 0:
for f in metadata_filenames:
metadata_file = pet_parser.open_json_file(f)
df_metadata = pet_parser.parse_metadata_file(metadata_file)
df_metadata['PetID'] = pet_id
dfs_metadata.append(df_metadata)
        dfs_metadata = pd.concat(dfs_metadata, ignore_index=True, sort=False)
import warnings
from typing import Iterable
import numpy as np
import pandas as pd
from .common import default_session, _set_verbose
from .frame import DataFrame
from .indexes import Index, IndexOpsMixin
from .internal import _ConstantSP, _InternalFrame
from .operator import ArithExpression, BooleanExpression
from .series import Series
from .utils import (ORCA_INDEX_NAME_FORMAT, _infer_dtype, _to_data_column,
_unsupport_columns_axis, to_dolphindb_literal,
to_dolphindb_type_string)
def connect(host, port, user="admin", passwd="<PASSWORD>", session=default_session()):
session.connect(host, port, user, passwd)
def set_verbose(verbose=False):
_set_verbose(verbose)
def read_pickle(path, compression="infer", session=default_session(), *args, **kwargs):
pdf = pd.read_pickle(path=path, compression=compression, *args, **kwargs)
return DataFrame(pdf, session=session)
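# Hedged usage sketch (not part of the original module): the host, port and credentials below
# are placeholders, and a reachable DolphinDB server is required.
#
#   connect("localhost", 8848, "admin", "123456")
#   odf = read_pickle("data.pkl")   # pandas parses the pickle, orca wraps it as a DataFrame
#   print(odf.head())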
def read_fwf(filepath_or_buffer, colspecs='infer', widths=None, infer_nrows=100,
session=default_session(), *args, **kwargs):
pdf = pd.read_fwf(filepath_or_buffer=filepath_or_buffer,colspecs=colspecs,
widths=widths, infer_nrows=infer_nrows, *args, **kwargs)
return DataFrame(pdf, session=session)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, session=default_session(),
*args, **kwargs):
pdf = pd.read_msgpack(path_or_buf=path_or_buf, encoding=encoding, iterator=iterator,
*args, **kwargs)
return DataFrame(pdf, session=session)
def read_clipboard(sep=r'\s+', session=default_session(), **kwargs):
pdf = pd.read_clipboard(sep=sep, **kwargs)
return DataFrame(pdf, session=session)
def read_excel(io, sheet_name=0, header=0, names=None, index_col=None, usecols=None,
squeeze=False, dtype=None, engine=None, converters=None, true_values=None, false_values=None,
skiprows=None, nrows=None, na_values=None, keep_default_na=True, verbose=False, parse_dates=False,
date_parser=None, thousands=None, comment=None, skip_footer=0, skipfooter=0, convert_float=True,
mangle_dupe_cols=True, session=default_session(), *args, **kwargs):
pdf = pd.read_excel(io, sheet_name=sheet_name, header=header, names=names, index_col=index_col, usecols=None,
squeeze=squeeze, dtype=dtype, engine=engine, converters=converters, true_values=true_values,
false_values=false_values, skiprows=skiprows, nrows=nrows, na_values=na_values, keep_default_na=keep_default_na,
verbose=verbose, parse_dates=parse_dates,
date_parser=date_parser, thousands=thousands, comment=comment, skip_footer=skip_footer,
skipfooter=skipfooter, convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols, *args, **kwargs)
return DataFrame(pdf, session=session)
def ExcelWriter(path, engine=None, date_format=None, datetime_format=None, mode='w', *args, **kwargs):
return pd.ExcelWriter(path=path, engine=engine, date_format=date_format, datetime_format=datetime_format,
mode=mode, *args, **kwargs)
def read_json(path_or_buf=None, orient=None, typ='frame', dtype=None, convert_axes=None,
convert_dates=True, keep_default_dates=True, numpy=False,
precise_float=False, date_unit=None, encoding=None, lines=False,
chunksize=None, compression='infer', session=default_session(), *args, **kwargs):
pdf = pd.read_json(path_or_buf=path_or_buf, orient=orient, typ=typ, dtype=dtype,
convert_axes=convert_axes, convert_dates=convert_dates,
keep_default_dates=keep_default_dates, numpy=numpy,
precise_float=precise_float, date_unit=date_unit,
encoding=encoding, lines=lines, chunksize=chunksize,
compression=compression, *args, **kwargs)
return DataFrame(pdf, session=session)
def json_normalize(data, record_path = None, meta = None, meta_prefix = None,
record_prefix = None, errors='raise', sep='.', max_level=None,
session=default_session(), *args, **kwargs):
from pandas.io.json import json_normalize as pdjson_normalize
    pdf = pdjson_normalize(data=data, record_path=record_path, meta=meta, meta_prefix=meta_prefix, record_prefix=record_prefix,
errors=errors, sep=sep, max_level=max_level, *args, **kwargs)
return DataFrame(pdf, session=session)
def build_table_schema(data, index=True, primary_key=None, version=True,
session=default_session(), *args, **kwargs):
from pandas.io.json import build_table_schema as pdbuild_table_schema
    pdf = pdbuild_table_schema(data=data, index=index, primary_key=primary_key,
version=version, *args, **kwargs)
return DataFrame(pdf, session=session)
def read_html(io, match=".+", flavor=None, header=None, index_col=None, skiprows=None,
attrs=None, parse_dates=False, thousands=",", encoding=None, decimal=".",
converters=None, na_values=None, keep_default_na=True, displayed_only=True,
session=default_session(), *args, **kwargs):
pdf = pd.read_html(io, match=match, flavor=flavor, header=header, index_col=index_col,
skiprows=skiprows, attrs=attrs, parse_dates=parse_dates, thousands=thousands,
encoding=encoding, decimal=decimal, converters=converters, na_values=na_values,
keep_default_na=keep_default_na, displayed_only=displayed_only, *args, **kwargs)
return DataFrame(pdf, session=session)
def read_hdf(path_or_buf, key=None, mode='r', session=default_session(), *args, **kwargs):
    pdf = pd.read_hdf(path_or_buf, key=key, mode=mode, *args, **kwargs)
    return DataFrame(pdf, session=session)
from Bio import Entrez
import pandas as pd
import json
# Helper functions
def chunks(l, n):
for i in range(0, len(l), n):
yield l[i : i + n]
def extract(d, keys):
return dict((k, d[k]) for k in keys if k in d)
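# Quick illustration of the helpers above (not part of the original wrapper):
#   list(chunks([1, 2, 3, 4, 5], 2))       -> [[1, 2], [3, 4], [5]]
#   extract({"a": 1, "b": 2}, ["a", "c"])  -> {"a": 1}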
COLUMNS = [
"Id",
"Accession",
"GDS",
"title",
"summary",
"GPL",
"GSE",
"taxon",
"entryType",
"gdsType",
"ptechType",
"valType",
"SSInfo",
"subsetInfo",
"PDAT",
"suppFile",
"Samples",
"n_samples",
"SeriesTitle",
"PlatformTitle",
"PlatformTaxa",
"SamplesTaxa",
"PubMedIds",
"Projects",
"FTPLink",
]
# Parse parameters
Entrez.email = snakemake.params.get("email", None)
if Entrez.email is None:
raise ValueError("An email must be provided")
api_key = snakemake.params.get("api_key", None)
if api_key is None:
print(
"Personal API key from NCBI. If not set, only 3 queries per second are allowed. 10 queries per seconds otherwise with a valid API key."
)
else:
Entrez.api_key = api_key
query = snakemake.params.get("query", None)
if query is None:
raise ValueError("A query string must be provided")
db = snakemake.params.get("db", "gds")
retmax = snakemake.params.get("retmax", 10)
batch_size = snakemake.params.get("batch_size", 1)
columns = snakemake.params.get("columns", COLUMNS)
# Run query
search = Entrez.esearch(db=db, term=query, retmax=retmax)
ids = Entrez.read(search)
chunked_ids = chunks(ids["IdList"], batch_size)
# Fetch and parse summaries
with open(snakemake.output[0], "a") as f:
for chunk in chunked_ids:
summary = Entrez.esummary(
db=db, id=",".join(chunk), retmode="xml", retmax=batch_size
)
records = Entrez.parse(summary)
        docsums = pd.DataFrame(columns=columns)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# This file is part of CbM (https://github.com/ec-jrc/cbm).
# Author : <NAME>
# Credits : GTCAP Team
# Copyright : 2021 European Commission, Joint Research Centre
# License : 3-Clause BSD
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from os.path import join, normpath, isfile
from datetime import timedelta
from cbm.utils import config
from cbm.get import parcel_info, time_series
def ndvi(aoi, pid):
path = normpath(join(config.get_value(['paths', 'temp']), aoi, str(pid)))
file_info = normpath(join(path, 'info.json'))
if not isfile(file_info):
parcel_info.by_pid(aoi, pid)
with open(file_info, 'r') as f:
info_data = json.loads(f.read())
crop_name = info_data['cropname'][0]
area = info_data['area'][0]
file_ts = normpath(join(path, 'time_series_s2.csv'))
if not isfile(file_ts):
time_series.by_pid(aoi, pid, 's2')
df = pd.read_csv(file_ts, index_col=0)
    df['date'] = pd.to_datetime(df['date_part'], unit='s')
import os
import pandas as pd
import numpy as np
import argparse
def generateOracleData(args):
"""
Takes the annotated dataset and constructs the perfect output at different stages of the pipeline, except when an occlusion occurs.
Input:
        gtCSV: path to the ground truth annotations csv file
outputDir: path to where the output files are saved
"""
tracks = args["gtCSV"]
outputDir = args["outputDir"]
if not os.path.isdir(outputDir):
os.makedirs(outputDir)
csv_df = pd.read_csv(tracks, sep=";")
frames = csv_df["frame"].unique()
ids = csv_df["id"].unique()
out_dfs = {"Det-cam1": pd.DataFrame(),
"Track-cam1": pd.DataFrame(),
"Det-cam2": pd.DataFrame(),
"Track-cam2": pd.DataFrame(),
"Track-3D": pd.DataFrame(),
"Track-Full": pd.DataFrame()}
id_counters = {"cam1": 1,
"cam2": 1,
"3D": 1}
id_state = {"cam1": False,
"cam2": False,
"3D": False}
for fid in ids:
fish_df = csv_df[csv_df["id"] == fid]
print(fid)
for frame in range(frames[0],frames[-1]+1):
frame_df = fish_df[fish_df["frame"] == frame]
if len(frame_df) == 0:
id_counters["cam1"] += 1
id_counters["cam2"] += 1
id_counters["3D"] += 1
for cam in ["cam1", "cam2"]:
if np.isnan(frame_df["{}_x".format(cam)].values[0]):
if id_state[cam]:
id_counters[cam] += 1
id_state[cam] = False
continue
if frame_df["{}_occlusion".format(cam)].values[0] == 1.0:
if id_state[cam]:
id_counters[cam] += 1
id_state[cam] = False
continue
id_state[cam] = True
values = {"frame" : int(frame_df["{}_frame".format(cam)].values[0]),
"id": float(id_counters[cam]),
"cam": int(cam[-1]),
"x": frame_df["{}_x".format(cam)].values[0],
"y": frame_df["{}_y".format(cam)].values[0],
"tl_x": -1,
"tl_y":-1,
"c_x": frame_df["{}_x".format(cam)].values[0],
"c_y": frame_df["{}_y".format(cam)].values[0],
"w": -1,
"h": -1,
"theta": -1,
"l_x": frame_df["{}_x".format(cam)].values[0],
"l_y": frame_df["{}_y".format(cam)].values[0],
"r_x": frame_df["{}_x".format(cam)].values[0],
"r_y": frame_df["{}_y".format(cam)].values[0],
"aa_tl_x": frame_df["{}_tl_x".format(cam)].values[0],
"aa_tl_y": frame_df["{}_tl_y".format(cam)].values[0],
"aa_w": (frame_df["{}_br_x".format(cam)].values[0]-frame_df["{}_tl_x".format(cam)].values[0]),
"aa_h": (frame_df["{}_br_y".format(cam)].values[0]-frame_df["{}_tl_y".format(cam)].values[0])}
values = {k: [v] for k,v in values.items()}
out_dfs["Det-{}".format(cam)] = pd.concat([out_dfs["Det-{}".format(cam)], | pd.DataFrame.from_dict(values) | pandas.DataFrame.from_dict |
#-- -- -- -- Introduction to Importing Data in Python
# Used for Data Scientist Training Path
# FYI: it's a compilation of how to work
# with different commands.
### --------------------------------------------------------
# # ------>>>>> Exploring your working directory
# In order to import data into Python, you should first
# have an idea of what files are in your working directory.
# IPython, which is running on DataCamp's servers,
# has a bunch of cool commands, including its magic
# commands. For example, starting a line with ! gives
# you complete system shell access. This means that the
# IPython magic command ! ls will display the contents of
# your current directory. Your task is to use the IPython
# magic command ! ls to check out the contents of your
# current directory and answer the following question:
# which of the following files is in your working directory?
# R/ moby_dick.txt
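# For example, in the IPython shell (a sketch; the output depends on your own directory):
# ! ls                                      # -> moby_dick.txt  titanic.csv  ...
# Or natively in Python:
# import os; print(os.listdir(os.getcwd()))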
### --------------------------------------------------------
# # ------>>>>> Importing entire text files
# Open a file: file
file = open('moby_dick.txt', mode='r')
# Print it
print(file.read())
# Check whether file is closed
print(file.closed)
# Close file
file.close()
# Check whether file is closed
print(file.closed)
### --------------------------------------------------------
# # ------>>>>> Importing text files line by line
# Read & print the first 3 lines
with open('moby_dick.txt') as file:
print(file.readline())
print(file.readline())
print(file.readline())
### --------------------------------------------------------
# # ------>>>>> Pop quiz: examples of flat files
# You're now well-versed in importing text files and
# you're about to become a wiz at importing flat files.
# But can you remember exactly what a flat file is? Test
# your knowledge by answering the following question:
# which of these file types below is NOT an example of a flat file?
# R/ A relational database (e.g. PostgreSQL).
### --------------------------------------------------------
# # ------>>>>>Pop quiz: what exactly are flat files?
# Which of the following statements about flat files is incorrect?
# Flat files consist of rows and each row is called a record.
# Flat files consist of multiple tables with structured
# relationships between the tables.
# A record in a flat file is composed of fields or
# attributes, each of which contains at most one item of information.
# Flat files are pervasive in data science.
# R/ Flat files consist of multiple tables with structured relationships between the tables.
### --------------------------------------------------------
# # ------>>>>>Why we like flat files and the Zen of Python
# In PythonLand, there are currently hundreds of Python
# Enhancement Proposals, commonly referred to as PEPs. PEP8, for example,
# is a standard style guide for Python, written by our sensei <NAME>
# Rossum himself. It is the basis for how we here at DataCamp ask our
# instructors to style their code. Another one of my favorites is PEP20,
# commonly called the Zen of Python. Its abstract is as follows:
# Long time Pythoneer <NAME> succinctly channels the BDFL's guiding
# principles for Python's design into 20 aphorisms, only 19 of which have
# been written down.
# If you don't know what the acronym BDFL stands for, I suggest that you
# look here. You can print the Zen of Python in your shell by typing import
# this into it! You're going to do this now and the 5th aphorism (line)
# will say something of particular interest.
# The question you need to answer is: what is the 5th aphorism of the Zen of Python?
# R/ -- > command: import this
# Flat is better than nested.
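# For example, running the following in the shell prints all 19 aphorisms,
# the 5th line of which is the answer above:
# import this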
### --------------------------------------------------------
# # ------>>>>> Using NumPy to import flat files
# Import package
import numpy as np
import matplotlib.pyplot as plt
# Assign filename to variable: file
file = 'digits.csv'
# Load file as array: digits
digits = np.loadtxt(file, delimiter=',')
# Print datatype of digits
print(type(digits))
# Select and reshape a row
im = digits[21, 1:]
im_sq = np.reshape(im, (28, 28))
# Plot reshaped data (matplotlib.pyplot already loaded as plt)
plt.imshow(im_sq, cmap='Greys', interpolation='nearest')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Customizing your NumPy import
# Import numpy
import numpy as np
# Assign the filename: file
file = 'digits_header.txt'
# Load the data: data
data = np.loadtxt(file, delimiter='\t', skiprows=1, usecols=[0, 2])
# Print data
print(data)
### --------------------------------------------------------
# # ------>>>>> Importing different datatypes
import numpy as np
import matplotlib.pyplot as plt
# Assign filename: file
file = 'seaslug.txt'
# Import file: data
data = np.loadtxt(file, delimiter='\t', dtype=str)
# Print the first element of data
print(data[0])
# Import data as floats and skip the first row: data_float
data_float = np.loadtxt(file, delimiter='\t', dtype=float, skiprows=1)
# Print the 10th element of data_float
print(data_float[9])
# Plot a scatterplot of the data
plt.scatter(data_float[:, 0], data_float[:, 1])
plt.xlabel('time (min.)')
plt.ylabel('percentage of larvae')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Working with mixed datatypes (1)
# Much of the time you will need to import datasets which have
# different datatypes in different columns; one column may contain
# strings and another floats, for example. The function np.loadtxt()
# will freak at this. There is another function, np.genfromtxt(),
# which can handle such structures. If we pass dtype=None to it, it
# will figure out what types each column should be.
# Import 'titanic.csv' using the function np.genfromtxt() as follows:
# data = np.genfromtxt('titanic.csv', delimiter=',', names=True, dtype=None)
# Here, the first argument is the filename, the second specifies the delimiter,
# and the third argument names tells us there is a header. Because the data are
# of different types, data is an object called a structured array. Because numpy
# arrays have to contain elements that are all the same type, the structured array
# solves this by being a 1D array, where each element of the array is a row of the
# flat file imported. You can test this by checking out the array's shape in the
# shell by executing np.shape(data).
# Accessing rows and columns of structured arrays is super-intuitive: to get the
# ith row, merely execute data[i] and to get the column with name 'Fare', execute data['Fare'].
# After importing the Titanic data as a structured array (as per the instructions above),
# print the entire column with the name Survived to the shell. What are the last
# 4 values of this column?
# R/ 1,0,1,0
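# Hedged sketch of the np.genfromtxt() call described above (assumes 'titanic.csv' with a
# 'Survived' column is in the working directory, as in the exercise):
data = np.genfromtxt('titanic.csv', delimiter=',', names=True, dtype=None)
print(np.shape(data))          # 1D structured array: one element per row of the file
print(data['Survived'][-4:])   # last four values -> 1, 0, 1, 0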
### --------------------------------------------------------
# # ------>>>>> Working with mixed datatypes (2)
# Assign the filename: file
file = 'titanic.csv'
# Import file using np.recfromcsv: d
d = np.recfromcsv(file)
# Print out first three entries of d
print(d[:3])
### --------------------------------------------------------
# # ------>>>>> Using pandas to import flat files as DataFrames (1)
# Import pandas as pd
import pandas as pd
# Assign the filename: file
file = 'titanic.csv'
# Read the file into a DataFrame: df
df = pd.read_csv(file)
# View the head of the DataFrame
print(df.head())
### --------------------------------------------------------
# # ------>>>>> Using pandas to import flat files as DataFrames (2)
# Assign the filename: file
file = 'digits.csv'
# Read the first 5 rows of the file into a DataFrame: data
data = pd.read_csv(file, nrows=5, header=None)
# Build a numpy array from the DataFrame: data_array
data_array = data.values
# Print the datatype of data_array to the shell
print(type(data_array))
### --------------------------------------------------------
# # ------>>>>> Customizing your pandas import
# Import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
# Assign filename: file
file = 'titanic_corrupt.txt'
# Import file: data
data = pd.read_csv(file, sep='\t', comment='#', na_values='Nothing')
# Print the head of the DataFrame
print(data.head())
# Plot 'Age' variable in a histogram
pd.DataFrame.hist(data[['Age']])
plt.xlabel('Age (years)')
plt.ylabel('count')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Not so flat any more
# In Chapter 1, you learned how to use the IPython magic command !
# ls to explore your current working directory. You can
# also do this natively in Python using the library os, which
# consists of miscellaneous operating system interfaces.
# The first line of the following code imports the library os,
# the second line stores the name of the current directory in a
# string called wd and the third outputs the contents of the directory in a list to the shell.
# import os
# wd = os.getcwd()
# os.listdir(wd)
# Run this code in the IPython shell and answer the
# following questions. Ignore the files that begin with .
# Check out the contents of your current directory and answer the following
# questions:
# (1) which file is in your directory and NOT an example of a flat file;
# (2) why is it not a flat file?
# R/ battledeath.xlsx is not a flat because it is a spreadsheet consisting of many sheets, not a single table.
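# The snippet from the description, runnable as-is (output depends on your directory):
import os
wd = os.getcwd()
print(os.listdir(wd))   # battledeath.xlsx appears here alongside the flat files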
### --------------------------------------------------------
# # ------>>>>> Loading a pickled file
# Import pickle package
import pickle
# Open pickle file and load data: d
with open('data.pkl', 'rb') as file:
d = pickle.load(file)
# Print d
print(d)
# Print datatype of d
print(type(d))
### --------------------------------------------------------
# # ------>>>>> Listing sheets in Excel files
# Import pandas
import pandas as pd
# Assign spreadsheet filename: file
file = 'battledeath.xlsx'
# Load spreadsheet: xls
xls = pd.ExcelFile(file)
# Print sheet names
print(xls.sheet_names)
### --------------------------------------------------------
# # ------>>>>> Importing sheets from Excel files
# Load a sheet into a DataFrame by name: df1
df1 = xls.parse('2004')
# Print the head of the DataFrame df1
print(df1.head())
# Load a sheet into a DataFrame by index: df2
df2 = xls.parse(0)
# Print the head of the DataFrame df2
print(df2.head())
### --------------------------------------------------------
# # ------>>>>> Customizing your spreadsheet import
# Import pandas
import pandas as pd
# Assign spreadsheet filename: file
file = 'battledeath.xlsx'
# Load spreadsheet: xl
xls = pd.ExcelFile(file)
# Parse the first sheet and rename the columns: df1
df1 = xls.parse(0, skiprows=[0], names=['Country', 'AAM due to War (2002)'])
# Print the head of the DataFrame df1
print(df1.head())
# Parse the first column of the second sheet and rename the column: df2
df2 = xls.parse(1, usecols=[0], skiprows=[0], names=['Country'])
# # Print the head of the DataFrame df2
print(df2.head())
### --------------------------------------------------------
# # ------>>>>> How to import SAS7BDAT
# How do you correctly import the function SAS7BDAT() from the package sas7bdat?
# R/ from sas7bdat import SAS7BDAT
### --------------------------------------------------------
# # ------>>>>> Importing SAS files
# Import sas7bdat package
from sas7bdat import SAS7BDAT
# Save file to a DataFrame: df_sas
with SAS7BDAT('sales.sas7bdat') as file:
df_sas = file.to_data_frame()
# Print head of DataFrame
print(df_sas.head())
# Plot histogram of DataFrame features (pandas and pyplot already imported)
pd.DataFrame.hist(df_sas[['P']])
plt.ylabel('count')
plt.show()
### --------------------------------------------------------
# # ------>>>>> Using read_stata to import Stata files
# The pandas package has been imported in the environment as pd and
# the file disarea.dta is in your working directory.
# The data consist of disease extents for several diseases in various
# countries (more information can be found here).
# What is the correct way of using the read_stata() function to import
# disarea.dta into the object df?
# R/ df = pd.read_stata('disarea.dta')
### --------------------------------------------------------
# # ------>>>>> Importing Stata files
# Import pandas
import pandas as pd
# Load Stata file into a pandas DataFrame: df
df = pd.read_stata('disarea.dta')
# Print the head of the DataFrame df
print(df.head())
# Plot histogram of one column of the DataFrame
pd.DataFrame.hist(df[['disa10']])
import pandas as __pd
import datetime as __dt
from multiprocessing import Pool as __Pool
import multiprocessing as __mp
from functools import reduce as __red
import logging as __logging
from seffaflik.__ortak.__araclar import make_requests as __make_requests
from seffaflik.__ortak import __dogrulama as __dogrulama
from seffaflik.elektrik.uretim import organizasyonlar as __organizasyonlar
__first_part_url = "market/"
def ptf(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d")):
"""
    Returns the hourly day-ahead market (GÖP) market clearing prices (PTF) for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    Returns
    -------
    Hourly PTF (₺/MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
try:
particular_url = \
__first_part_url + "day-ahead-mcp" + "?startDate=" + baslangic_tarihi + "&endDate=" + bitis_tarihi
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dayAheadMCPList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str, columns={"price": "PTF"}, inplace=True)
df = df[["Tarih", "Saat", "PTF"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
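# Hedged usage sketch (not part of the original module; requires access to the EPİAŞ
# transparency API and valid dates):
#
#   df = ptf("2021-01-01", "2021-01-07")
#   df.head()    # -> columns: Tarih, Saat, PTF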
def hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), organizasyon_eic=""):
"""
    Returns the hourly day-ahead market (GÖP) volume information for the given date range.
    Notes:
    1) If "organizasyon_eic" is provided, the hourly matched supply/demand quantities of that organization are
    returned.
    2) If "organizasyon_eic" is not provided, the hourly matched supply/demand quantities, price-independent bid
    quantities, block bid matched quantities, and maximum bid quantities are returned.
    Parameters
    ----------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    organizasyon_eic : organization EIC code as a string (default: "")
    Returns
    -------
    Hourly supply/demand DAM volume (MWh)
"""
if __dogrulama.__baslangic_bitis_tarih_eic_dogrulama(baslangic_tarihi, bitis_tarihi, organizasyon_eic):
try:
particular_url = \
__first_part_url + "day-ahead-market-volume" + "?startDate=" + baslangic_tarihi + "&endDate=" + \
bitis_tarihi + "&eic=" + organizasyon_eic
json = __make_requests(particular_url)
df = __pd.DataFrame(json["body"]["dayAheadMarketVolumeList"])
df["Saat"] = df["date"].apply(lambda h: int(h[11:13]))
df["Tarih"] = __pd.to_datetime(df["date"].apply(lambda d: d[:10]))
df.rename(index=str,
columns={"matchedBids": "Talep Eşleşme Miktarı", "matchedOffers": "Arz Eşleşme Miktarı",
"volume": "Eşleşme Miktarı", "blockBid": "Arz Blok Teklif Eşleşme Miktarı",
"blockOffer": "Talep Blok Teklif Eşleşme Miktarı",
"priceIndependentBid": "Fiyattan Bağımsız Talep Miktarı",
"priceIndependentOffer": "Fiyattan Bağımsız Arz Miktarı",
"quantityOfAsk": "Maksimum Talep Miktarı",
"quantityOfBid": "Maksimum Arz Miktarı"},
inplace=True)
if organizasyon_eic == "":
df = df[["Tarih", "Saat", "Talep Eşleşme Miktarı", "Eşleşme Miktarı", "Arz Eşleşme Miktarı",
"Fiyattan Bağımsız Talep Miktarı", "Fiyattan Bağımsız Arz Miktarı",
"Maksimum Talep Miktarı", "Maksimum Arz Miktarı"]]
else:
df = df[["Tarih", "Saat", "Talep Eşleşme Miktarı", "Arz Eşleşme Miktarı"]]
except (KeyError, TypeError):
return __pd.DataFrame()
else:
return df
def tum_organizasyonlar_hacim(baslangic_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"),
bitis_tarihi=__dt.datetime.today().strftime("%Y-%m-%d"), hacim_tipi="NET"):
"""
    Returns the hourly net volume information of all organizations for the given date range.
    Parameters
    ----------
    baslangic_tarihi : start date in %YYYY-%MM-%DD format (default: today)
    bitis_tarihi     : end date in %YYYY-%MM-%DD format (default: today)
    hacim_tipi       : volume type as a string ("NET", "ARZ", or "TALEP") (default: "NET")
    Returns
    -------
    Hourly DAM volume of all organizations (Tarih, Saat, Hacim)
"""
if __dogrulama.__baslangic_bitis_tarih_dogrulama(baslangic_tarihi, bitis_tarihi):
org = __organizasyonlar()
list_org = org[["EIC Kodu", "Kısa Adı"]].to_dict("records")
org_len = len(list_org)
list_date_org_eic = list(zip([baslangic_tarihi] * org_len, [bitis_tarihi] * org_len, list_org))
list_date_org_eic = list(map(list, list_date_org_eic))
with __Pool(__mp.cpu_count()) as p:
if hacim_tipi.lower() == "net":
list_df_unit = p.starmap(__organizasyonel_net_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "arz":
list_df_unit = p.starmap(__organizasyonel_arz_hacim, list_date_org_eic, chunksize=1)
elif hacim_tipi.lower() == "talep":
list_df_unit = p.starmap(__organizasyonel_talep_hacim, list_date_org_eic, chunksize=1)
else:
__logging.error("Lütfen geçerli bir hacim tipi giriniz: Net, Arz, Talep", exc_info=False)
list_df_unit = list(filter(lambda x: len(x) > 0, list_df_unit))
        df_unit = __red(lambda left, right: __pd.merge(left, right, how="outer", on=["Tarih", "Saat"], sort=True), list_df_unit)
from __future__ import division
from builtins import str
from builtins import range
from builtins import object
__copyright__ = "Copyright 2015-2016 Contributing Entities"
__license__ = """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import os
import sys
import numpy as np
import pandas as pd
from .Error import NotImplementedError, UnexpectedError
from .Logger import FastTripsLogger
from .Passenger import Passenger
from .Route import Route
from .TAZ import TAZ
from .Trip import Trip
from .Util import Util
#: Default user class: just one class called "all"
def generic_user_class(row_series):
return "all"
class PathSet(object):
"""
Represents a path set for a passenger from an origin :py:class:`TAZ` to a destination :py:class:`TAZ`
through a set of stops.
"""
#: Paths output file
PATHS_OUTPUT_FILE = 'ft_output_passengerPaths.txt'
#: Path times output file
PATH_TIMES_OUTPUT_FILE = 'ft_output_passengerTimes.txt'
#: Configured functions, indexed by name
CONFIGURED_FUNCTIONS = { 'generic_user_class':generic_user_class }
#: Path configuration: Name of the function that defines user class
USER_CLASS_FUNCTION = None
#: File with weights file. Space delimited table.
WEIGHTS_FILE = 'pathweight_ft.txt'
#: Path weights
WEIGHTS_DF = None
#: Read weights file as fixed-width format. If false, standard CSV format is read.
WEIGHTS_FIXED_WIDTH = False
#: Configuration: Minimum transfer penalty. Safeguard against having no transfer penalty
#: which can result in terrible paths with excessive transfers.
MIN_TRANSFER_PENALTY = None
#: Configuration: Overlap scale parameter.
OVERLAP_SCALE_PARAMETER = None
#: Configuration: Overlap variable. Can be "None", "count", "distance", "time".
OVERLAP_VARIABLE = None
#: Overlap variable option: None. Don't use overlap pathsize correction.
OVERLAP_NONE = "None"
#: Overlap variable option: count. Use leg count overlap pathsize correction.
OVERLAP_COUNT = "count"
#: Overlap variable option: distance. Use leg distance overlap pathsize correction.
OVERLAP_DISTANCE = "distance"
#: Overlap variable option: time. Use leg time overlap pathsize correction.
OVERLAP_TIME = "time"
#: Valid values for OVERLAP_VARAIBLE
OVERLAP_VARIABLE_OPTIONS = [OVERLAP_NONE,
OVERLAP_COUNT,
OVERLAP_DISTANCE,
OVERLAP_TIME]
#: Overlap chunk size. How many person's trips to process at a time in overlap calculations
#: in python simulation
OVERLAP_CHUNK_SIZE = None
#: Overlap option: Split transit leg into component parts? e.g. split A-E
#: into A-B-C-D-E for overlap calculations?
OVERLAP_SPLIT_TRANSIT = None
LEARN_ROUTES = False
LEARN_ROUTES_RATE = 0.05
SUCCESS_FLAG_COLUMN = 'success_flag'
BUMP_FLAG_COLUMN = 'bump_flag'
#: Allow departures and arrivals before / after preferred time
ARRIVE_LATE_ALLOWED_MIN = datetime.timedelta(minutes = 0)
DEPART_EARLY_ALLOWED_MIN = datetime.timedelta(minutes = 0)
CONSTANT_GROWTH_MODEL = 'constant'
EXP_GROWTH_MODEL = 'exponential'
LOGARITHMIC_GROWTH_MODEL = 'logarithmic'
LOGISTIC_GROWTH_MODEL = 'logistic'
PENALTY_GROWTH_MODELS = [
CONSTANT_GROWTH_MODEL,
EXP_GROWTH_MODEL,
LOGARITHMIC_GROWTH_MODEL,
LOGISTIC_GROWTH_MODEL,
]
#: Weights column: User Class
WEIGHTS_COLUMN_USER_CLASS = "user_class"
#: Weights column: Purpose
WEIGHTS_COLUMN_PURPOSE = "purpose"
#: Weights column: Demand Mode Type
WEIGHTS_COLUMN_DEMAND_MODE_TYPE = "demand_mode_type"
#: Weights column: Demand Mode Type
WEIGHTS_COLUMN_DEMAND_MODE = "demand_mode"
#: Weights column: Supply Mode
WEIGHTS_COLUMN_SUPPLY_MODE = "supply_mode"
#: Weights column: Weight Name
WEIGHTS_COLUMN_WEIGHT_NAME = "weight_name"
#: Weights column: Weight Value
WEIGHTS_COLUMN_WEIGHT_VALUE = "weight_value"
#: Weights column: Growth Type
WEIGHTS_GROWTH_TYPE = "growth_type"
#: Weights column: Log Base for logarithmic growth function
WEIGHTS_GROWTH_LOG_BASE = "log_base"
#: Weights column: Max value for logistic growth function
WEIGHTS_GROWTH_LOGISTIC_MAX = "logistic_max"
#: Weights column: Midpoint value for logistic growth function
WEIGHTS_GROWTH_LOGISTIC_MID = "logistic_mid"
WEIGHT_NAME_DEPART_EARLY_MIN = "depart_early_min"
WEIGHT_NAME_ARRIVE_LATE_MIN = "arrive_late_min"
WEIGHT_NAME_DEPART_LATE_MIN = 'depart_late_min'
WEIGHT_NAME_ARRIVE_EARLY_MIN = 'arrive_early_min'
WEIGHT_NAME_VALID_NAMES = [
WEIGHT_NAME_DEPART_EARLY_MIN,
WEIGHT_NAME_DEPART_LATE_MIN,
WEIGHT_NAME_ARRIVE_EARLY_MIN,
WEIGHT_NAME_ARRIVE_LATE_MIN,
]
# ========== Added by fasttrips =======================================================
#: Weights column: Supply Mode number
WEIGHTS_COLUMN_SUPPLY_MODE_NUM = "supply_mode_num"
#: File with weights for c++
OUTPUT_WEIGHTS_FILE = "ft_intermediate_weights.txt"
DIR_OUTBOUND = 1 #: Trips outbound from home have preferred arrival times
DIR_INBOUND = 2 #: Trips inbound to home have preferred departure times
PATH_KEY_COST = "pf_cost" #: path cost according to pathfinder
PATH_KEY_FARE = "pf_fare" #: path fare according to pathfinder
PATH_KEY_PROBABILITY = "pf_probability" #: path probability according to pathfinder
PATH_KEY_INIT_COST = "pf_initcost" #: initial cost (in pathfinding, before path was finalized)
PATH_KEY_INIT_FARE = "pf_initfare" #: initial fare (in pathfinding, before path was finalized)
PATH_KEY_STATES = "states"
STATE_IDX_LABEL = 0 #: :py:class:`datetime.timedelta` instance
STATE_IDX_DEPARR = 1 #: :py:class:`datetime.datetime` instance. Departure if outbound/backwards, arrival if inbound/forwards.
STATE_IDX_DEPARRMODE = 2 #: mode id
STATE_IDX_TRIP = 3 #: trip id
STATE_IDX_SUCCPRED = 4 #: stop identifier or TAZ identifier
STATE_IDX_SEQ = 5 #: sequence (for trip)
STATE_IDX_SEQ_SUCCPRED = 6 #: sequence for successor/predecessor
STATE_IDX_LINKTIME = 7 #: :py:class:`datetime.timedelta` instance
STATE_IDX_LINKFARE = 8 #: fare cost, float
STATE_IDX_LINKCOST = 9 #: link generalized cost, float for hyperpath/stochastic,
STATE_IDX_LINKDIST = 10 #: link distance, float
STATE_IDX_COST = 11 #: cost float, for hyperpath/stochastic assignment
STATE_IDX_ARRDEP = 12 #: :py:class:`datetime.datetime` instance. Arrival if outbound/backwards, departure if inbound/forwards.
# these are also the demand_mode_type values
STATE_MODE_ACCESS = "access"
STATE_MODE_EGRESS = "egress"
STATE_MODE_TRANSFER = "transfer"
# new
STATE_MODE_TRIP = "transit" # onboard
BUMP_EXPERIENCED_COST = 999999
HUGE_COST = 9999
def __init__(self, trip_list_dict):
"""
Constructor from dictionary mapping attribute to value.
"""
self.__dict__.update(trip_list_dict)
#: Direction is one of :py:attr:`PathSet.DIR_OUTBOUND` or :py:attr:`PathSet.DIR_INBOUND`
#: Preferred time is a datetime.time object
if trip_list_dict[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == "arrival":
self.direction = PathSet.DIR_OUTBOUND
self.pref_time = trip_list_dict[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME]
self.pref_time_min = trip_list_dict[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME_MIN]
elif trip_list_dict[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == "departure":
self.direction = PathSet.DIR_INBOUND
self.pref_time = trip_list_dict[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME]
self.pref_time_min = trip_list_dict[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME_MIN]
else:
raise Exception("Don't understand trip_list %s: %s" % (Passenger.TRIP_LIST_COLUMN_TIME_TARGET, str(trip_list_dict)))
#: Dict of path-num -> { cost:, probability:, states: [List of (stop_id, stop_state)]}
self.pathdict = {}
def goes_somewhere(self):
"""
Does this path go somewhere? Does the destination differ from the origin?
"""
return (self.__dict__[Passenger.TRIP_LIST_COLUMN_ORIGIN_TAZ_ID] != self.__dict__[Passenger.TRIP_LIST_COLUMN_DESTINATION_TAZ_ID])
def path_found(self):
"""
        Was a transit path found from the origin to the destination with the constraints?
"""
return len(self.pathdict) > 0
def num_paths(self):
"""
Number of paths in the PathSet
"""
return len(self.pathdict)
def reset(self):
"""
        Delete my states; something went wrong and it won't work out.
        """
        self.pathdict = {}
@staticmethod
def set_user_class(trip_list_df, new_colname):
"""
Adds a column called user_class by applying the configured user class function.
"""
trip_list_df[new_colname] = trip_list_df.apply(PathSet.CONFIGURED_FUNCTIONS[PathSet.USER_CLASS_FUNCTION], axis=1)
@staticmethod
def verify_weight_config(modes_df, output_dir, routes, capacity_constraint, trip_list_df):
"""
Verify that we have complete weight configurations for the user classes and modes in the given DataFrame.
Trips with invalid weight configurations will be dropped from the trip list and warned about.
        The parameter *modes_df* is a dataframe with the user_class, purpose, demand_mode_type and demand_mode
        combinations found in the demand file.
If *capacity_constraint* is true, make sure there's an at_capacity weight on the transit supply mode links
to enforce it.
Returns updated trip_list_df.
"""
(verify, error_str) = PathSet.verify_weights(PathSet.WEIGHTS_DF)
# Join - make sure that all demand combinations (user class, purpose, demand mode type and demand mode) are configured
weight_check = pd.merge(left=modes_df,
right=PathSet.WEIGHTS_DF,
on=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE],
how='left')
FastTripsLogger.debug("demand_modes x weights: \n%s" % weight_check.to_string())
FastTripsLogger.debug("trip_list_df head=\n%s" % str(trip_list_df.head()))
# If something is missing, warn and remove those trips
null_supply_mode_weights = weight_check.loc[pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_SUPPLY_MODE])]
if len(null_supply_mode_weights) > 0:
# warn
FastTripsLogger.warn("The following user_class, demand_mode_type, demand_mode combinations exist in the demand file but are missing from the weight configuration:")
FastTripsLogger.warn("\n%s" % null_supply_mode_weights.to_string())
# remove those trips -- need to do it one demand mode type at a time
null_supply_mode_weights = null_supply_mode_weights[[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE]]
null_supply_mode_weights["to_remove"] = 1
for demand_mode_type in [PathSet.STATE_MODE_ACCESS, PathSet.STATE_MODE_EGRESS, PathSet.STATE_MODE_TRIP]:
remove_trips = null_supply_mode_weights.loc[null_supply_mode_weights[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]==demand_mode_type].copy()
if len(remove_trips) == 0: continue
remove_trips.rename(columns={PathSet.WEIGHTS_COLUMN_DEMAND_MODE:"%s_mode" % demand_mode_type}, inplace=True)
remove_trips.drop([PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE], axis=1, inplace=True)
FastTripsLogger.debug("Removing for \n%s" % remove_trips)
trip_list_df = pd.merge(left = trip_list_df,
right = remove_trips,
how = "left")
FastTripsLogger.debug("Removing\n%s" % trip_list_df.loc[pd.notnull(trip_list_df["to_remove"])])
# keep only those not flagged to_remove
trip_list_df = trip_list_df.loc[pd.isnull(trip_list_df["to_remove"])]
trip_list_df.drop(["to_remove"], axis=1, inplace=True)
# demand_mode_type and demand_modes implicit to all travel : xfer walk, xfer wait, initial wait
user_classes = modes_df[[PathSet.WEIGHTS_COLUMN_USER_CLASS, PathSet.WEIGHTS_COLUMN_PURPOSE]].drop_duplicates().reset_index()
implicit_df = pd.DataFrame({ PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE:[ 'transfer'],
PathSet.WEIGHTS_COLUMN_DEMAND_MODE :[ 'transfer'],
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE :[ 'transfer'] })
user_classes['key'] = 1
implicit_df['key'] = 1
implicit_df = pd.merge(left=user_classes, right=implicit_df, on='key')
implicit_df.drop(['index','key'], axis=1, inplace=True)
# FastTripsLogger.debug("implicit_df: \n%s" % implicit_df)
weight_check = pd.merge(left=implicit_df, right=PathSet.WEIGHTS_DF,
on=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE],
how='left')
FastTripsLogger.debug("implicit demand_modes x weights: \n%s" % weight_check.to_string())
if pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]).sum() > 0:
error_str += "\nThe following user_class, purpose, demand_mode_type, demand_mode, supply_mode combinations exist in the demand file but are missing from the weight configuration:\n"
error_str += weight_check.loc[pd.isnull(weight_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME])].to_string()
error_str += "\n\n"
# transfer penalty check
tp_index = pd.DataFrame({ PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE:['transfer'],
PathSet.WEIGHTS_COLUMN_DEMAND_MODE :['transfer'],
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE :['transfer'],
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME :['transfer_penalty']})
uc_purp_index = PathSet.WEIGHTS_DF[[PathSet.WEIGHTS_COLUMN_USER_CLASS, PathSet.WEIGHTS_COLUMN_PURPOSE]].drop_duplicates()
FastTripsLogger.debug("uc_purp_index: \n%s" % uc_purp_index)
# these are all the transfer penalties we have
        transfer_penalties = pd.merge(left=tp_index, right=PathSet.WEIGHTS_DF, how='left')
        FastTripsLogger.debug("transfer_penalties: \n%s" % transfer_penalties)
        transfer_penalty_check = pd.merge(left=uc_purp_index, right=transfer_penalties, how='left')
FastTripsLogger.debug("transfer_penalty_check: \n%s" % transfer_penalty_check)
# missing transfer penalty
if pd.isnull(transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]).sum() > 0:
error_str += "\nThe following user class x purpose are missing a transfer penalty:\n"
error_str += transfer_penalty_check.loc[pd.isnull(transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME])].to_string()
error_str += "\n\n"
bad_pen = transfer_penalty_check.loc[transfer_penalty_check[PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE] < PathSet.MIN_TRANSFER_PENALTY]
if len(bad_pen) > 0:
error_str += "\nThe following user class x purpose path weights have invalid (too small) transfer penalties. MIN=(%f)\n" % PathSet.MIN_TRANSFER_PENALTY
error_str += bad_pen.to_string()
error_str += "\nConfigure smaller min_transfer_penalty AT YOUR OWN RISK since this will make path generation slow/unreliable.\n\n"
# If *capacity_constraint* is true, make sure there's an at_capacity weight on the transit supply mode links
# to enforce it.
if capacity_constraint:
# see if it's here already -- we don't know how to handle that...
at_capacity = PathSet.WEIGHTS_DF.loc[ PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "at_capacity" ]
if len(at_capacity) > 0:
error_str += "\nFound at_capacity path weights explicitly set when about to set these for hard capacity constraints.\n"
error_str += at_capacity.to_string()
error_str += "\n\n"
else:
# set it for all user_class x transit x demand_mode x supply_mode
transit_weights_df = PathSet.WEIGHTS_DF.loc[PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE] == PathSet.STATE_MODE_TRIP,
[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE]].copy()
transit_weights_df.drop_duplicates(inplace=True)
transit_weights_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ] = "at_capacity"
transit_weights_df[PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE] = PathSet.HUGE_COST
transit_weights_df[PathSet.WEIGHTS_GROWTH_TYPE] = PathSet.CONSTANT_GROWTH_MODEL
FastTripsLogger.debug("Adding capacity-constraint weights:\n%s" % transit_weights_df.to_string())
PathSet.WEIGHTS_DF = pd.concat([PathSet.WEIGHTS_DF, transit_weights_df], axis=0)
PathSet.WEIGHTS_DF.sort_values(by=[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME], inplace=True)
if len(error_str) > 0:
FastTripsLogger.fatal(error_str)
sys.exit(2)
# add mode numbers to weights DF for relevant rows
PathSet.WEIGHTS_DF = routes.add_numeric_mode_id(PathSet.WEIGHTS_DF,
id_colname=PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
numeric_newcolname=PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
warn=True) # don't fail if some supply modes are configured but not used, they may be for future runs
FastTripsLogger.debug("PathSet weights: \n%s" % PathSet.WEIGHTS_DF)
export_columns = [PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME,
PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE,
PathSet.WEIGHTS_GROWTH_TYPE,
PathSet.WEIGHTS_GROWTH_LOG_BASE,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID]
PathSet.WEIGHTS_DF.reindex(columns=export_columns).to_csv(os.path.join(output_dir,PathSet.OUTPUT_WEIGHTS_FILE),
columns=export_columns,
sep=" ", index=False)
# add placeholder weights (ivt weight) for fares - one for each user_class, purpose, transit demand mode
# these will be updated based on the person's value of time in calculate_cost()
fare_weights = PathSet.WEIGHTS_DF.loc[ (PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE]==PathSet.STATE_MODE_TRIP) &
(PathSet.WEIGHTS_DF[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ]== "in_vehicle_time_min")]
fare_weights = fare_weights[[PathSet.WEIGHTS_COLUMN_USER_CLASS,
PathSet.WEIGHTS_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
PathSet.WEIGHTS_COLUMN_WEIGHT_NAME,
PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE]].copy().drop_duplicates()
fare_weights[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME ] = "fare" # SIM_COL_PAX_FARE
        PathSet.WEIGHTS_DF = pd.concat([PathSet.WEIGHTS_DF, fare_weights], axis=0)
FastTripsLogger.debug("PathSet.WEIGHTS_DF with fare weights: \n%s" % PathSet.WEIGHTS_DF)
return trip_list_df
@staticmethod
def verify_weights(weights):
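        """
        Check that the given weights DataFrame has all required columns and that each growth_type row
        carries exactly the modifiers it needs: log_base for logarithmic, logistic_max and logistic_mid
        for logistic, and none of these for constant/exponential.
        Returns a tuple (valid, error_str): (True, "") when the configuration is valid, otherwise
        (False, <description of the problems>).
        """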
# First, verify required columns are found
error_str = ""
weight_cols = list(weights.columns.values)
FastTripsLogger.debug("verify_weight_config:\n%s" % weights.to_string())
if (PathSet.WEIGHTS_COLUMN_USER_CLASS not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_USER_CLASS)
if (PathSet.WEIGHTS_COLUMN_PURPOSE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_PURPOSE)
if (PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE)
if (PathSet.WEIGHTS_COLUMN_DEMAND_MODE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_DEMAND_MODE)
if (PathSet.WEIGHTS_COLUMN_SUPPLY_MODE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_SUPPLY_MODE)
if (PathSet.WEIGHTS_COLUMN_WEIGHT_NAME not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_WEIGHT_NAME)
if (PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_COLUMN_WEIGHT_VALUE)
if (PathSet.WEIGHTS_GROWTH_TYPE not in weight_cols):
error_str+='{} not in weight_cols\n'.format(PathSet.WEIGHTS_GROWTH_TYPE)
constant_exp_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE].isin(
[PathSet.CONSTANT_GROWTH_MODEL, PathSet.EXP_GROWTH_MODEL]),
]
logarithmic_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE] == PathSet.LOGARITHMIC_GROWTH_MODEL,
]
logistic_slice = weights.loc[
weights[PathSet.WEIGHTS_GROWTH_TYPE] == PathSet.LOGISTIC_GROWTH_MODEL,
]
# Verify that no extraneous values are set for constant and exponential functions
if not pd.isnull(constant_exp_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
            error_str += 'Constant or Exponential qualifier includes unnecessary modifier(s)\n'
if not pd.isnull(logarithmic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
error_str += 'Logarithmic qualifier includes unnecessary modifier(s)\n'
if not pd.isnull(logistic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
], axis='columns')).values.all():
error_str += 'Logistic qualifier includes log_base modifier\n'
if not pd.notnull(logarithmic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOG_BASE,
],
axis='columns')).values.all():
error_str += 'Logarithmic qualifier missing necessary log_base modifier\n'
if not pd.notnull(logistic_slice.reindex([
PathSet.WEIGHTS_GROWTH_LOGISTIC_MAX,
PathSet.WEIGHTS_GROWTH_LOGISTIC_MID,
], axis='columns')).values.all():
error_str += 'Logistic qualifier missing necessary modifiers\n'
if error_str:
error_str = '\n-------Errors: pathweight_ft.txt---------------\n' + error_str
return (not error_str), error_str
def __str__(self):
"""
Readable string version of the path.
Note: If inbound trip, then the states are in reverse order (egress to access)
"""
ret_str = "Dict vars:\n"
for k,v in self.__dict__.items():
ret_str += "%30s => %-30s %s\n" % (str(k), str(v), str(type(v)))
# ret_str += PathSet.states_to_str(self.states, self.direction)
return ret_str
@staticmethod
def write_paths(passengers_df, output_dir):
"""
Write the assigned paths to the given output file.
:param passengers_df: Passenger paths assignment results
:type passengers_df: :py:class:`pandas.DataFrame` instance
:param output_dir: Output directory
:type output_dir: string
"""
# get trip information -- board stops, board trips and alight stops
passenger_trips = passengers_df.loc[passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP].copy()
ptrip_group = passenger_trips.groupby([Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# these are Series
board_stops_str = ptrip_group.A_id.apply(lambda x:','.join(x))
board_trips_str = ptrip_group.trip_id.apply(lambda x:','.join(x))
alight_stops_str= ptrip_group.B_id.apply(lambda x:','.join(x))
board_stops_str.name = 'board_stop_str'
board_trips_str.name = 'board_trip_str'
alight_stops_str.name = 'alight_stop_str'
# get walking times
walk_links = passengers_df.loc[(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_ACCESS )| \
(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRANSFER)| \
(passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_EGRESS )].copy()
walk_links['linktime_str'] = walk_links.pf_linktime.apply(lambda x: "%.2f" % (x/np.timedelta64(1,'m')))
walklink_group = walk_links[['person_id','trip_list_id_num','linktime_str']].groupby(['person_id','trip_list_id_num'])
walktimes_str = walklink_group.linktime_str.apply(lambda x:','.join(x))
# aggregate to one line per person_id, trip_list_id
print_passengers_df = passengers_df[['person_id','trip_list_id_num','pathmode','A_id','B_id',Passenger.PF_COL_PAX_A_TIME]].groupby(['person_id','trip_list_id_num']).agg(
{'pathmode' :'first', # path mode
'A_id' :'first', # origin
'B_id' :'last', # destination
Passenger.PF_COL_PAX_A_TIME :'first' # start time
})
# put them all together
print_passengers_df = pd.concat([print_passengers_df,
board_stops_str,
board_trips_str,
alight_stops_str,
walktimes_str], axis=1)
print_passengers_df.reset_index(inplace=True)
print_passengers_df.sort_values(by=['trip_list_id_num'], inplace=True)
print_passengers_df.rename(columns=
{'pathmode' :'mode',
'A_id' :'originTaz',
'B_id' :'destinationTaz',
Passenger.PF_COL_PAX_A_TIME :'startTime_time',
'board_stop_str' :'boardingStops',
'board_trip_str' :'boardingTrips',
'alight_stop_str' :'alightingStops',
'linktime_str' :'walkingTimes'}, inplace=True)
print_passengers_df['startTime'] = print_passengers_df['startTime_time'].apply(Util.datetime64_formatter)
print_passengers_df = print_passengers_df[['trip_list_id_num','person_id','mode','originTaz','destinationTaz','startTime',
'boardingStops','boardingTrips','alightingStops','walkingTimes']]
print_passengers_df.to_csv(os.path.join(output_dir, PathSet.PATHS_OUTPUT_FILE), sep="\t", index=False)
# passengerId mode originTaz destinationTaz startTime boardingStops boardingTrips alightingStops walkingTimes
@staticmethod
def write_path_times(passengers_df, output_dir):
"""
Write the assigned path times to the given output file.
:param passengers_df: Passenger path links
:type passengers_df: :py:class:`pandas.DataFrame` instance
:param output_dir: Output directory
:type output_dir: string
"""
passenger_trips = passengers_df.loc[passengers_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP].copy()
###### TODO: this is really catering to output format; an alternative might be more appropriate
from .Assignment import Assignment
passenger_trips.loc[:, 'board_time_str'] = passenger_trips[Assignment.SIM_COL_PAX_BOARD_TIME ].apply(Util.datetime64_formatter)
passenger_trips.loc[:,'arrival_time_str'] = passenger_trips[Passenger.PF_COL_PAX_A_TIME].apply(Util.datetime64_formatter)
passenger_trips.loc[:, 'alight_time_str'] = passenger_trips[Assignment.SIM_COL_PAX_ALIGHT_TIME].apply(Util.datetime64_formatter)
# Aggregate (by joining) across each passenger + path
ptrip_group = passenger_trips.groupby([Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# these are Series
board_time_str = ptrip_group['board_time_str' ].apply(lambda x:','.join(x))
arrival_time_str = ptrip_group['arrival_time_str'].apply(lambda x:','.join(x))
alight_time_str = ptrip_group['alight_time_str' ].apply(lambda x:','.join(x))
# Aggregate other fields across each passenger + path
pax_exp_df = passengers_df.groupby([Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID]).agg(
{# 'pathmode' :'first', # path mode
'A_id' :'first', # origin
'B_id' :'last', # destination
Passenger.PF_COL_PAX_A_TIME :'first', # start time
Passenger.PF_COL_PAX_B_TIME :'last', # end time
# TODO: cost needs to be updated for updated dwell & travel time
# 'cost' :'first', # total travel cost is calculated for the whole path
})
# Put them together and return
assert(len(pax_exp_df) == len(board_time_str))
pax_exp_df = pd.concat([pax_exp_df,
board_time_str,
arrival_time_str,
alight_time_str], axis=1)
# print pax_exp_df.to_string(formatters={'A_time':Assignment.datetime64_min_formatter,
# 'B_time':Assignment.datetime64_min_formatter}
# reset columns
print_pax_exp_df = pax_exp_df.reset_index()
print_pax_exp_df.sort_values(by=[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID], inplace=True)
print_pax_exp_df['A_time_str'] = print_pax_exp_df[Passenger.PF_COL_PAX_A_TIME].apply(Util.datetime64_formatter)
print_pax_exp_df['B_time_str'] = print_pax_exp_df[Passenger.PF_COL_PAX_B_TIME].apply(Util.datetime64_formatter)
# rename columns
print_pax_exp_df.rename(columns=
{#'pathmode' :'mode',
'A_id' :'originTaz',
'B_id' :'destinationTaz',
'A_time_str' :'startTime',
'B_time_str' :'endTime',
'arrival_time_str' :'arrivalTimes',
'board_time_str' :'boardingTimes',
'alight_time_str' :'alightingTimes',
# TODO: cost needs to be updated for updated dwell & travel time
# 'cost' :'travelCost',
}, inplace=True)
# reorder
print_pax_exp_df = print_pax_exp_df[[
Passenger.TRIP_LIST_COLUMN_PERSON_ID,
Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
#'mode',
'originTaz',
'destinationTaz',
'startTime',
'endTime',
'arrivalTimes',
'boardingTimes',
'alightingTimes',
# 'travelCost',
]]
        with open(os.path.join(output_dir, PathSet.PATH_TIMES_OUTPUT_FILE), 'w') as times_out:
            print_pax_exp_df.to_csv(times_out,
                                    sep="\t", float_format="%.2f", index=False)
@staticmethod
def split_transit_links(pathset_links_df, veh_trips_df, stops):
"""
Splits the transit links to their component links and returns.
So if a transit trip goes from stop A to D but passes stop B and C in between, the
row A->D will now be replaced by rows A->B, B->C, and C->D.
Adds "split_first" bool - True on the first veh link only
Note that this does *not* renumber the linknum field.
"""
from .Assignment import Assignment
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("split_transit_links: pathset_links_df (%d) trace\n%s" % (len(pathset_links_df),
pathset_links_df.loc[pathset_links_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string()))
FastTripsLogger.debug("split_transit_links: pathset_links_df columns\n%s" % str(pathset_links_df.dtypes))
veh_links_df = Trip.linkify_vehicle_trips(veh_trips_df, stops)
veh_links_df["linkmode"] = "transit"
FastTripsLogger.debug("split_transit_links: veh_links_df\n%s" % veh_links_df.head(20).to_string())
# join the pathset links with the vehicle links
drop_cols = []
merge_cols = [Passenger.PF_COL_LINK_MODE,
Route.ROUTES_COLUMN_MODE,
Trip.TRIPS_COLUMN_ROUTE_ID,
Trip.TRIPS_COLUMN_TRIP_ID]
if Trip.TRIPS_COLUMN_TRIP_ID_NUM in pathset_links_df.columns.values:
merge_cols.append(Trip.TRIPS_COLUMN_TRIP_ID_NUM)
if Route.ROUTES_COLUMN_MODE_NUM in pathset_links_df.columns.values:
merge_cols.append(Route.ROUTES_COLUMN_MODE_NUM)
path2 = pd.merge(left =pathset_links_df,
right =veh_links_df,
on =merge_cols,
how ="left",
suffixes=["","_veh"])
path2["split_first"] = False
# delete anything irrelevant -- so keep non-transit links, and transit links WITH valid sequences
path2 = path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]!=Route.MODE_TYPE_TRANSIT) |
( (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT) &
(path2["A_seq_veh"]>=path2["A_seq"]) &
(path2["B_seq_veh"]<=path2["B_seq"]) ) ]
# These are the new columns -- incorporate them
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_seq_veh"]==path2["A_seq"]), "split_first"] = True
# A_arrival_time datetime64[ns] => A time for intermediate links
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_A_TIME ] = path2["A_arrival_time"]
# no waittime, boardtime, missed_xfer except on first link
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_WAIT_TIME ] = None
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_BOARD_TIME ] = None
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["A_id"]!=path2["A_id_veh"]), Assignment.SIM_COL_PAX_MISSED_XFER] = 0
# no alighttime except on last link
path2.loc[ (path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT)&(path2["B_id"]!=path2["B_id_veh"]), Assignment.SIM_COL_PAX_ALIGHT_TIME] = None
# route_id_num float64 => ignore
# A_id_veh object => A_id
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_id" ] = path2["A_id_veh"]
# A_id_num_veh float64 => A_id_num
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_id_num" ] = path2["A_id_num_veh"]
# A_seq_veh float64 => A_seq
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_seq" ] = path2["A_seq_veh"]
if "A_lat_veh" in path2.columns.values:
# A_lat_veh float64 => A_lat
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_lat" ] = path2["A_lat_veh"]
# A_lon_veh float64 => A_lon
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "A_lon" ] = path2["A_lon_veh"]
# drop these later
drop_cols.extend(["A_lat_veh","A_lon_veh"])
# B_id_veh object => B_id
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_id" ] = path2["B_id_veh"]
# B_id_num_veh float64 => B_id_num
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_id_num" ] = path2["B_id_num_veh"]
# B_seq_veh float64 => B_seq
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_seq" ] = path2["B_seq_veh"]
# B_arrival_time datetime64[ns] => new_B_time
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "new_B_time" ] = path2["B_arrival_time"]
# B_departure_time datetime64[ns] => ignore
if "B_lat_veh" in path2.columns.values:
# B_lat_veh float64 => B_lat
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_lat" ] = path2["B_lat_veh"]
# B_lon_veh float64 => B_lon
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT, "B_lon" ] = path2["B_lon_veh"]
# drop these later
drop_cols.extend(["B_lat_veh","B_lon_veh"])
# update the link time
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT,Assignment.SIM_COL_PAX_LINK_TIME] = path2[Assignment.SIM_COL_PAX_B_TIME] - path2[Assignment.SIM_COL_PAX_A_TIME]
# update transit distance
Util.calculate_distance_miles(path2, "A_lat","A_lon","B_lat","B_lon", "transit_distance")
path2.loc[path2[Passenger.PF_COL_LINK_MODE]==Route.MODE_TYPE_TRANSIT,Assignment.SIM_COL_PAX_DISTANCE ] = path2["transit_distance"]
# revert these back to ints
path2[["A_id_num","B_id_num","A_seq","B_seq"]] = path2[["A_id_num","B_id_num","A_seq","B_seq"]].astype(int)
# we're done with the fields - drop them
drop_cols.extend(["transit_distance", "route_id_num",
"A_id_veh","A_id_num_veh","A_seq_veh","A_arrival_time","A_departure_time",
"B_id_veh","B_id_num_veh","B_seq_veh","B_arrival_time","B_departure_time"])
path2.drop(drop_cols, axis=1, inplace=True)
# renumber linknum? Let's not bother
# trace
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("split_transit_links: path2 (%d) trace\n%s" % (len(path2),
path2.loc[path2[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string()))
FastTripsLogger.debug("split_transit_links: path2 columns\n%s" % str(path2.dtypes))
return path2
@staticmethod
def calculate_cost(STOCH_DISPERSION, pathset_paths_df, pathset_links_df, veh_trips_df,
trip_list_df, routes, tazs, transfers, stops=None, reset_bump_iter=False, is_skimming=False):
"""
This is equivalent to the C++ Path::calculateCost() method. Would it be faster to do it in C++?
It would require us to package up the networks and paths and send back and forth. :p
I think if we can do it using vectorized pandas operations, it should be fast, but we can compare/test.
It's also messier to have this in two places. Maybe we should delete it from the C++; the overlap calcs are only in here right now.
Returns pathset_paths_df with additional columns, Assignment.SIM_COL_PAX_FARE, Assignment.SIM_COL_PAX_COST, Assignment.SIM_COL_PAX_PROBABILITY, Assignment.SIM_COL_PAX_LOGSUM
And pathset_links_df with additional columns, Assignment.SIM_COL_PAX_FARE, Assignment.SIM_COL_PAX_FARE_PERIOD, Assignment.SIM_COL_PAX_COST and Assignment.SIM_COL_PAX_DISTANCE
"""
from .Assignment import Assignment
# if these are here already, remove them since we'll recalculate them
if Assignment.SIM_COL_PAX_COST in list(pathset_paths_df.columns.values):
pathset_paths_df.drop([Assignment.SIM_COL_PAX_COST,
Assignment.SIM_COL_PAX_LNPS,
Assignment.SIM_COL_PAX_PROBABILITY,
Assignment.SIM_COL_PAX_LOGSUM], axis=1, inplace=True)
pathset_links_df.drop([Assignment.SIM_COL_PAX_COST,
Assignment.SIM_COL_PAX_DISTANCE], axis=1, inplace=True)
# leaving this in for writing to CSV for debugging but I could take it out
pathset_paths_df.drop(["logsum_component"], axis=1, inplace=True)
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("calculate_cost: pathset_links_df trace\n%s" % str(pathset_links_df.loc[pathset_links_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
FastTripsLogger.debug("calculate_cost: trip_list_df trace\n%s" % str(trip_list_df.loc[trip_list_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
# Add fares -- need stop zones first if they're not there.
# We only need to do this once per pathset.
# todo -- could remove non-transit links for this?
FastTripsLogger.debug("calculate_cost columns:\n%s" % str(list(pathset_links_df.columns.values)))
if "A_zone_id" not in list(pathset_links_df.columns.values):
assert(stops is not None)
pathset_links_df = stops.add_stop_zone_id(pathset_links_df, "A_id", "A_zone_id")
pathset_links_df = stops.add_stop_zone_id(pathset_links_df, "B_id", "B_zone_id")
# This needs to be done fresh each time since simulation might change the board times and therefore the fare periods
pathset_links_df = routes.add_fares(pathset_links_df, is_skimming)
# base this on pathfinding distance
pathset_links_df[Assignment.SIM_COL_PAX_DISTANCE] = pathset_links_df[Passenger.PF_COL_LINK_DIST]
pathset_links_to_use = pathset_links_df
if PathSet.OVERLAP_SPLIT_TRANSIT:
pathset_links_to_use = PathSet.split_transit_links(pathset_links_df, veh_trips_df, stops)
else:
pathset_links_to_use["split_first"] = True # all transit links are first
# First, we need user class, purpose, demand modes, and value of time
pathset_links_cost_df = pd.merge(left =pathset_links_to_use,
right=trip_list_df[
#Passenger.TRIP_LIST_COLUMN_PERSON_ID,
#Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.get_id_columns(is_skimming) +
[
Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.TRIP_LIST_COLUMN_VOT,
Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE,
]],
how ="left",
on =Passenger.get_id_columns(is_skimming))
#[Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# linkmode = demand_mode_type. Set demand_mode for the links
pathset_links_cost_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = None
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_ACCESS , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_ACCESS_MODE ]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_EGRESS , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_EGRESS_MODE ]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_TRIP , PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE]
pathset_links_cost_df.loc[ pathset_links_cost_df[Passenger.PF_COL_LINK_MODE]== PathSet.STATE_MODE_TRANSFER, PathSet.WEIGHTS_COLUMN_DEMAND_MODE] = "transfer"
# Verify that it's set for every link
missing_demand_mode = pd.isnull(pathset_links_cost_df[PathSet.WEIGHTS_COLUMN_DEMAND_MODE]).sum()
assert(missing_demand_mode == 0)
# drop the individual mode columns, we have what we need
pathset_links_cost_df.drop([Passenger.TRIP_LIST_COLUMN_ACCESS_MODE,
Passenger.TRIP_LIST_COLUMN_EGRESS_MODE,
Passenger.TRIP_LIST_COLUMN_TRANSIT_MODE], axis=1, inplace=True)
# if bump_iter doesn't exist or if it needs to be reset
if reset_bump_iter or Assignment.SIM_COL_PAX_BUMP_ITER not in pathset_links_cost_df:
pathset_links_cost_df[Assignment.SIM_COL_PAX_BUMP_ITER] = -1
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("calculate_cost: pathset_links_cost_df trace\n%s" % str(pathset_links_cost_df.loc[pathset_links_cost_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]))
# Inner join with the weights - now each weight has a row
cost_df = pd.merge(left =pathset_links_cost_df,
right =PathSet.WEIGHTS_DF,
left_on =[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
Passenger.PF_COL_LINK_MODE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
Passenger.TRIP_LIST_COLUMN_MODE],
right_on=[Passenger.TRIP_LIST_COLUMN_USER_CLASS,
Passenger.TRIP_LIST_COLUMN_PURPOSE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE_TYPE,
PathSet.WEIGHTS_COLUMN_DEMAND_MODE,
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE],
how ="inner")
# update the fare weight placeholder (ivt pathweight - utils per min)) based on value of time (currency per hour)
# since generalized cost is in utils, (ivt utils/min)x(60 min/1 hour)x(hour/vot currency) is the weight (utils/currency)
cost_df.loc[ cost_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME]==Assignment.SIM_COL_PAX_FARE, "weight_value" ] *= (60.0/cost_df[Passenger.TRIP_LIST_COLUMN_VOT])
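        # Worked example with hypothetical numbers: an in-vehicle weight of 1.0 utils/min and a value of
        # time of 15 currency units/hour gives a fare weight of 1.0*(60/15) = 4.0 utils per currency unit,
        # so a 2.00 fare would add 8.0 utils of generalized cost.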
if (len(Assignment.TRACE_IDS) > 0) and not is_skimming:
FastTripsLogger.debug("calculate_cost: cost_df\n%s" % str(cost_df.loc[cost_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].sort_values([
Passenger.TRIP_LIST_COLUMN_TRIP_LIST_ID_NUM,
Passenger.PF_COL_PATH_NUM,Passenger.PF_COL_LINK_NUM]).head(20)))
# NOW we split it into 3 lists -- access/egress, transit, and transfer
# This is because they will each be joined to tables specific to those kinds of mode categories, and so we don't want all the transit nulls on the other tables, etc.
cost_columns = list(cost_df.columns.values)
cost_df["var_value"] = np.nan # This means unset
cost_accegr_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_ACCESS )|(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_EGRESS)]
cost_trip_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRIP )]
cost_transfer_df = cost_df.loc[(cost_df[Passenger.PF_COL_LINK_MODE]==PathSet.STATE_MODE_TRANSFER)]
del cost_df
##################### First, handle Access/Egress link costs
for accegr_type in ["walk","bike","drive"]:
# make copies; we don't want to mess with originals
if accegr_type == "walk":
link_df = tazs.walk_df.copy()
mode_list = TAZ.WALK_MODE_NUMS
elif accegr_type == "bike":
mode_list = TAZ.BIKE_MODE_NUMS
# not supported yet
continue
else:
link_df = tazs.drive_df.copy()
mode_list = TAZ.DRIVE_MODE_NUMS
FastTripsLogger.debug("Access/egress link_df %s\n%s" % (accegr_type, link_df.head().to_string()))
if len(link_df) == 0:
continue
# format these with A & B instead of TAZ and Stop
link_df.reset_index(inplace=True)
link_df["A_id_num"] = -1
link_df["B_id_num"] = -1
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.ACCESS_MODE_NUMS), "A_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM ]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.ACCESS_MODE_NUMS), "B_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_STOP_NUM]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "A_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_STOP_NUM]
link_df.loc[link_df[TAZ.WALK_ACCESS_COLUMN_SUPPLY_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "B_id_num"] = link_df[TAZ.WALK_ACCESS_COLUMN_TAZ_NUM ]
link_df.drop([TAZ.WALK_ACCESS_COLUMN_TAZ_NUM, TAZ.WALK_ACCESS_COLUMN_STOP_NUM], axis=1, inplace=True)
assert(len(link_df.loc[link_df["A_id_num"] < 0]) == 0)
FastTripsLogger.debug("%s link_df =\n%s" % (accegr_type, link_df.head().to_string()))
# Merge access/egress with walk|bike|drive access/egress information
cost_accegr_df = pd.merge(left = cost_accegr_df,
right = link_df,
on = ["A_id_num",
PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM,
"B_id_num"],
how = "left")
# rename new columns so it's clear it's for walk|bike|drive
for colname in list(link_df.select_dtypes(include=['float64','int64']).columns.values):
# don't worry about join columns
if colname in ["A_id_num", PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM, "B_id_num"]: continue
# rename the rest
new_colname = "%s %s" % (colname, accegr_type)
cost_accegr_df.rename(columns={colname:new_colname}, inplace=True)
# use it, if relevant
cost_accegr_df.loc[ (cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == colname)&
(cost_accegr_df[PathSet.WEIGHTS_COLUMN_SUPPLY_MODE_NUM].isin(mode_list)), "var_value"] = cost_accegr_df[new_colname]
# Access/egress needs passenger trip departure, arrival and time_target
cost_accegr_df = pd.merge(left =cost_accegr_df,
right=trip_list_df[
#[Passenger.TRIP_LIST_COLUMN_PERSON_ID,
#Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID,
Passenger.get_id_columns(is_skimming) + [
Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME,
Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME,
Passenger.TRIP_LIST_COLUMN_TIME_TARGET,
]],
how ="left",
on =Passenger.get_id_columns(is_skimming))
#[Passenger.PERSONS_COLUMN_PERSON_ID, Passenger.TRIP_LIST_COLUMN_PERSON_TRIP_ID])
# drop links that are irrelevant based on departure time for access links, or arrival time for egress links
cost_accegr_df["check_time"] = cost_accegr_df[Assignment.SIM_COL_PAX_A_TIME] # departure time for access
cost_accegr_df.loc[ cost_accegr_df[TAZ.MODE_COLUMN_MODE_NUM].isin(TAZ.EGRESS_MODE_NUMS), "check_time" ] = cost_accegr_df[Assignment.SIM_COL_PAX_B_TIME] # arrival time for egress
cost_accegr_df["check_time"] = (cost_accegr_df["check_time"] - Assignment.NETWORK_BUILD_DATE_START_TIME)/np.timedelta64(1,'m')
# it's only drive links we need to check
cost_accegr_df["to_drop"] = False
if "%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN, "drive") in cost_accegr_df.columns.values:
cost_accegr_df.loc[ cost_accegr_df[TAZ.MODE_COLUMN_MODE_NUM].isin(TAZ.DRIVE_MODE_NUMS)&
((cost_accegr_df["check_time"] < cost_accegr_df["%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_START_TIME_MIN, "drive")])|
(cost_accegr_df["check_time"] >= cost_accegr_df["%s %s" % (TAZ.DRIVE_ACCESS_COLUMN_END_TIME_MIN, "drive")])), "to_drop"] = True
# if len(Assignment.TRACE_IDS) > 0:
# FastTripsLogger.debug("cost_accegr_df=\n%s\ndtypes=\n%s" % (cost_accegr_df.loc[cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True]].to_string(), str(cost_accegr_df.dtypes)))
FastTripsLogger.debug("Dropping %d rows from cost_accegr_df" % cost_accegr_df["to_drop"].sum())
cost_accegr_df = cost_accegr_df.loc[ cost_accegr_df["to_drop"]==False ]
cost_accegr_df.drop(["check_time","to_drop"], axis=1, inplace=True)
# penalty for arriving before preferred arrival time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN )&
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS) &
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'departure'), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS)& \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'arrival'), "var_value"] = (cost_accegr_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME] - cost_accegr_df[Passenger.PF_COL_PAX_B_TIME])/np.timedelta64(1,'m')
# arrive early is not negative - that would be arriving late
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_EARLY_MIN)&(cost_accegr_df["var_value"] < 0), "var_value"] = 0.0
# penalty for departing after preferred departure time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN )&
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS)&
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'arrival'), "var_value"] = 0.0
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN) &
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS) &
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == 'departure'), "var_value"] = (cost_accegr_df[Passenger.PF_COL_PAX_A_TIME] - cost_accegr_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME])/ np.timedelta64(1, 'm')
# depart late is not negative - that would be departing early
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_LATE_MIN)&(cost_accegr_df["var_value"] < 0), "var_value"] = 0.0
# constant growth = exponential growth with 0 percent growth rate
# depart before preferred or arrive after preferred means the passenger just missed something important
# Arrive late only impacts the egress link, so set the var_value equal to zero for the access link
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN ) & \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS), "var_value"] = 0.0
# Arrive late only impacts those that have a preferred arrival time. If preferred departure time,
# set arrive late equal to zero. --This could have been done with previous line, but it would
# look ugly mixing and matching 'and' and 'or'.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_DEPARTURE), "var_value"] = 0.0
# Calculate how late the person arrives after preferred time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS)& \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL), "var_value"] = \
(cost_accegr_df[Passenger.PF_COL_PAX_B_TIME] - cost_accegr_df[Passenger.TRIP_LIST_COLUMN_ARRIVAL_TIME])/np.timedelta64(1,'m')
# If arrived before preferred time, set the arrive late field to zero. You don't get a
# discount for arriving early.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN) & \
(cost_accegr_df['var_value'] < 0), "var_value"] = 0
# preferred delay_min - departure means want to depart after that time
# Depart early only impacts the access link, so set the var_value equal to zero for the egress link
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN )& \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_EGRESS), "var_value"] = 0.0
# Depart early only impacts those that have a preferred departure time. If preferred arrive time,
# set depart early equal to zero. --This could have been done with previous line, but it would
# look ugly mixing and matching 'and' and 'or'.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_ARRIVAL), "var_value"] = 0.0
# Calculate how early the person departs before the preferred time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df[Passenger.PF_COL_LINK_MODE] == PathSet.STATE_MODE_ACCESS) & \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TIME_TARGET] == Passenger.TIME_TARGET_DEPARTURE), "var_value"] = \
(cost_accegr_df[Passenger.TRIP_LIST_COLUMN_DEPARTURE_TIME] - cost_accegr_df[Passenger.PF_COL_PAX_A_TIME])/ np.timedelta64(1, 'm')
# If departing after preferred time, set the depart early field to zero. You don't get a
# discount for taking your time.
cost_accegr_df.loc[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == PathSet.WEIGHT_NAME_DEPART_EARLY_MIN) & \
(cost_accegr_df['var_value'] < 0), "var_value"] = 0
assert 0 == cost_accegr_df[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME].isin([PathSet.WEIGHT_NAME_DEPART_EARLY_MIN, PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN])) & \
(cost_accegr_df['var_value'].isnull())].shape[0]
assert 0 == cost_accegr_df[(cost_accegr_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME].isin([PathSet.WEIGHT_NAME_DEPART_EARLY_MIN, PathSet.WEIGHT_NAME_ARRIVE_LATE_MIN])) & \
(cost_accegr_df['var_value']<0)].shape[0]
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("cost_accegr_df trace\n%s\ndtypes=\n%s" % (cost_accegr_df.loc[cost_accegr_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string(), str(cost_accegr_df.dtypes)))
missing_accegr_costs = cost_accegr_df.loc[ pd.isnull(cost_accegr_df["var_value"]) ]
error_accegr_msg = "Missing %d out of %d access/egress var_value values" % (len(missing_accegr_costs), len(cost_accegr_df))
FastTripsLogger.debug(error_accegr_msg)
if len(missing_accegr_costs) > 0:
error_accegr_msg += "\n%s" % missing_accegr_costs.head(10).to_string()
FastTripsLogger.fatal(error_accegr_msg)
##################### Next, handle Transit Trip link costs
# set the fare var_values for split_first only
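        # (only the first component of a split transit link carries the fare, so the fare is not
        #  double-counted across the A->B, B->C, ... pieces created by split_transit_links)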
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "fare")&(cost_trip_df["split_first"]==True), "var_value"] = cost_trip_df[Assignment.SIM_COL_PAX_FARE]
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "fare")&(cost_trip_df["split_first"]==False), "var_value"] = 0
if len(Assignment.TRACE_IDS) > 0:
FastTripsLogger.debug("cost_trip_df trace\n%s\ndtypes=\n%s" % (cost_trip_df.loc[cost_trip_df[Passenger.TRIP_LIST_COLUMN_TRACE]==True].to_string(), str(cost_trip_df.dtypes)))
# if there's a board time, in_vehicle_time = new_B_time - board_time
# otherwise, in_vehicle_time = B time - A time (for when we split)
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")&pd.notnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_B_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME])/np.timedelta64(1,'m')
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")& pd.isnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_B_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_A_TIME])/np.timedelta64(1,'m')
        # if in-vehicle time is negative, the trip crossed midnight (off-by-one-day error), so add 24 hours
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "in_vehicle_time_min")&(cost_trip_df["var_value"]<0), "var_value"] = cost_trip_df["var_value"] + (24*60)
# if there's a board time, wait time = board_time - A time
# otherwise, wait time = 0 (for when we split transit links)
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "wait_time_min")&pd.notnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]), "var_value"] = \
(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME] - cost_trip_df[Assignment.SIM_COL_PAX_A_TIME])/np.timedelta64(1,'m')
cost_trip_df.loc[(cost_trip_df[PathSet.WEIGHTS_COLUMN_WEIGHT_NAME] == "wait_time_min")& | pd.isnull(cost_trip_df[Assignment.SIM_COL_PAX_BOARD_TIME]) | pandas.isnull |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 13:33:12 2020
@author: tamiryuv
"""
import os.path as pth
import yaml
path = pth.dirname(pth.abspath(__file__))[:-3] + '/'
with open(path + 'config.yaml', 'r') as fp:
config = yaml.load(fp, yaml.FullLoader)
import torch
import xgboost as xgb
import pandas as pd
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import roc_auc_score,roc_curve
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
class LinearNet(torch.nn.Module):
def __init__(self):
super(LinearNet,self).__init__()
self.linear1 = torch.nn.Linear(51, 500)
self.linear2 = torch.nn.Linear(500, 150)
self.linear3 = torch.nn.Linear(150,20)
self.linear4 = torch.nn.Linear(20,2)
self.relu = torch.nn.ReLU()
self.Drop = torch.nn.Dropout(p = 0.2)
self.batchnorm1 = torch.nn.BatchNorm1d(500)
self.batchnorm2 = torch.nn.BatchNorm1d(150)
self.batchnorm3 = torch.nn.BatchNorm1d(20)
def forward(self,x):
x = self.relu(self.linear1(x))
x = self.batchnorm1(x)
x = self.Drop(self.relu(self.linear2(x)))
x = self.batchnorm2(x)
x = self.relu(self.linear3(x))
x = self.batchnorm3(x)
x = self.linear4(x)
return x
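# Note: LinearNet expects float input of shape (batch, 51) and returns two raw class logits per row;
# no softmax is applied inside the network.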
class simple_CNN(nn.Module):
def __init__(self,in_channels,out_channels,num_classes):
super(simple_CNN,self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,out_channels=out_channels,kernel_size=(1,3))
self.conv2 = nn.Conv2d(in_channels=8,out_channels=16,kernel_size=(1,3), padding = 1)
self.fc1 = nn.Linear(2352,150)
self.fc2 = nn.Linear(150,num_classes)
self.bnorm = nn.BatchNorm1d(150)
def forward(self,x):
out = F.relu(self.conv1(x))
out = F.relu(self.conv2(out))
out = out.flatten(start_dim = 1)
out = F.relu(self.fc1(out))
out = self.bnorm(out)
pred = self.fc2(out)
return pred
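# Shape note: one input size consistent with fc1's 2352 inputs is (batch, 1, 1, 51) -- conv1 yields
# (8, 1, 49), the padded conv2 yields (16, 3, 49), and 16*3*49 = 2352 -- which matches the 51 features
# LinearNet consumes. Other input shapes would require changing fc1.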
def main():
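    """
    Load the three trained models referenced in config.yaml (fully-connected net, CNN and XGBoost
    booster) together with the held-out test data used to evaluate them.
    """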
################################################################
# FC_NN Load
#
################################################################
checkpoint3 = torch.load(path + config['FCNN'], map_location='cpu')
fc_nn = checkpoint3['model']
fc_nn.load_state_dict(checkpoint3['state_dict'])
fc_nn.eval()
################################################################
# CNN Load
#
################################################################
in_channels = 1
out_channels = 8
num_classes = 2
checkpoint2 = torch.load(path + config['CNN'],map_location='cpu')
cnn = simple_CNN(in_channels,out_channels,num_classes)
cnn.load_state_dict(checkpoint2['state_dict'])
cnn.eval()
################################################################
# XGB Load
#
################################################################
xgb_model = xgb.Booster()
xgb_model.load_model(path + config['XGB'])
# Test set :
    positives = pd.read_csv(path + config['testing_data_positives'])
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# ---
import os
import random
import sys
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import rdkit.Chem as Chem
import torch
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
# --- JT-VAE
from jtnn import * # not cool, but this is how they do it ...
from jtnn.datautils import ToxPropDataset
# --- disable rdkit warnings
from rdkit import RDLogger
from torch.utils import data
from toxsquad.data import *
from toxsquad.losses import *
from toxsquad.modelling import *
from toxsquad.visualizations import Visualizations
# --- toxsquad
lg = RDLogger.logger()
lg.setLevel(RDLogger.CRITICAL)
import pickle
# ------------ PRE-PROCESSING ROUTINES ------------
from mol_tree import *
def save_object(obj, filename):
with open(filename, "wb") as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
def open_object(filename):
with open(filename, "rb") as input:
reopened = pickle.load(input)
return reopened
def get_vocab(assay_dir, assay_id, toxdata):
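    """
    Return the junction-tree cluster vocabulary for this assay: re-open the cached
    <assay_id>-vocab.pkl if it exists, otherwise derive it from every SMILES in toxdata
    (one entry per MolTree node) and pickle it for reuse.
    """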
filename = assay_dir + "/jtvae/" + str(assay_id) + "-vocab.pkl"
if os.path.isfile(filename):
print("Re-opening vocabulary file")
vocab = open_object(filename)
else:
print("Deriving vocabulary")
vocab = set()
for (
smiles
) in toxdata.smiles: # I guess here we should only use the training data??
mol = MolTree(smiles)
for c in mol.nodes:
vocab.add(c.smiles)
vocab = Vocab(list(vocab))
save_object(vocab, filename)
return vocab
# ------------ MODEL OPTIMIZATION ROUTINES ------------
def derive_inference_model(
toxdata,
vocab,
infer_dir,
model_params,
vis,
device,
model_name,
base_lr=0.003,
beta=0.005,
num_threads = 24,
weight_decay = 0.000
):
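    """
    Train a single JT-VAE property model on the full toxdata set for later inference: first
    pre-train the autoencoder (KL term off), then continue training with a smaller learning rate
    and KL weight beta=0.005. Checkpoints and loss logs are written to infer_dir.
    """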
from jtnn.jtprop_vae import JTPropVAE
smiles = toxdata.smiles
props = toxdata.val
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# --- pre-train AE
total_step_count = 0
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
infer_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# --- fine tune AE
# optimizer = optim.Adam(model.parameters(), lr=0.0003)
# scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
# scheduler.step()
# total_step_count = train_jtvae(model, optimizer, scheduler, dataloader, device, infer_dir, vis, total_step_count, 0.005, model_name, MAX_EPOCH=36, PRINT_ITER=5)
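# Hypothetical usage sketch (the objects below are illustrative, not defined in this module):
#   vocab = get_vocab(assay_dir, assay_id, toxdata)
#   derive_inference_model(toxdata, vocab, infer_dir, model_params, vis, device, model_name="jtvae-infer")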
def cross_validate_jtvae(
toxdata,
partitions,
xval_dir,
vocab,
model_params,
device,
model_name,
base_lr=0.003,
vis_host=None,
vis_port=8097,
assay_name="",
num_threads = 24,
weight_decay = 0.0000
):
"""
:todo ensure same training parameters are used for inference and cross-val models
"""
MAX_EPOCH = 36
PRINT_ITER = 5
run = 0
scores = []
for partition in partitions:
# I/O
save_dir = xval_dir + "/run-" + str(run)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# vis
if vis_host is not None:
vis = Visualizations(
env_name="jtvae-xval-" + str(assay_name) + "-run-" + str(run), server=vis_host, port=vis_port
)
else:
vis = None
# data
smiles = toxdata.smiles.loc[partition["train"]]
props = toxdata.val.loc[partition["train"]]
dataset = ToxPropDataset(smiles, props)
batch_size = 8
dataloader = data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_threads,
collate_fn=lambda x: x,
drop_last=True,
)
# model
from jtnn.jtprop_vae import JTPropVAE
model = JTPropVAE(vocab, **model_params).to(device)
optimizer = optim.Adam(model.parameters(), lr=base_lr, weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
# pretrain
print("[DEBUG] PRETRAINING")
total_step_count = pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
0,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# train (set a smaller initial LR, beta to 0.005)
optimizer = optim.Adam(model.parameters(), lr=0.0003,weight_decay=weight_decay)
scheduler = lr_scheduler.ExponentialLR(optimizer, 0.9)
scheduler.step()
print("[DEBUG] TRAINING")
total_step_count = train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
save_dir,
vis,
total_step_count,
beta=0.005,
model_name=model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
)
# evaluate (only property prediction accuracy for now)
scores.append(
evaluate_predictions_model(
model,
toxdata.smiles.loc[partition["test"]],
toxdata.val.loc[partition["test"]],
vis,
)
)
# memory management
del model
del optimizer
torch.cuda.empty_cache()
run = run + 1
return scores
def pre_train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
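    """
    Autoencoder pre-training pass (called with beta=0, so the KL term is off): runs MAX_EPOCH epochs
    over the dataloader, logs word/topo/assm/steo accuracy and property MSE every PRINT_ITER batches
    to loss-pre.txt (and to vis if provided), and saves a model-pre.iter-<epoch> checkpoint each epoch.
    Returns the updated total_step_count.
    """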
my_log = open(model_dir + "/loss-pre.txt", "w")
for epoch in range(MAX_EPOCH):
print("pre epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta=0)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc = prop_acc / PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
del loss
del kl_div
total_step_count = total_step_count + 1
torch.cuda.empty_cache()
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-pre.iter-" + str(epoch)
)
torch.cuda.empty_cache()
model = model.to(device)
my_log.close()
return total_step_count
def train_jtvae(
model,
optimizer,
scheduler,
dataloader,
device,
model_dir,
vis,
total_step_count,
beta,
model_name,
MAX_EPOCH=36,
PRINT_ITER=5,
):
my_log = open(model_dir + "/loss-ref.txt", "w")
for epoch in range(MAX_EPOCH):
print("epoch: " + str(epoch))
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
for it, batch in enumerate(dataloader):
for mol_tree, _ in batch:
for node in mol_tree.nodes:
if node.label not in node.cands:
node.cands.append(node.label)
node.cand_mols.append(node.label_mol)
model.zero_grad()
torch.cuda.empty_cache()
loss, kl_div, wacc, tacc, sacc, dacc, pacc = model(batch, beta)
loss.backward()
optimizer.step()
word_acc += wacc
topo_acc += tacc
assm_acc += sacc
steo_acc += dacc
prop_acc += pacc
if (it + 1) % PRINT_ITER == 0:
word_acc = word_acc / PRINT_ITER * 100
topo_acc = topo_acc / PRINT_ITER * 100
assm_acc = assm_acc / PRINT_ITER * 100
steo_acc = steo_acc / PRINT_ITER * 100
prop_acc /= PRINT_ITER
if vis is not None:
vis.plot_loss(word_acc, total_step_count, 1, model_name, "word-acc")
vis.plot_loss(prop_acc, total_step_count, 1, model_name, "mse")
print(
"Epoch: %d, Step: %d, KL: %.1f, Word: %.2f, Topo: %.2f, Assm: %.2f, Steo: %.2f, Prop: %.4f"
% (
epoch,
it + 1,
kl_div,
word_acc,
topo_acc,
assm_acc,
steo_acc,
prop_acc,
),
file=my_log,
flush=True,
)
word_acc, topo_acc, assm_acc, steo_acc, prop_acc = 0, 0, 0, 0, 0
# if (it + 1) % 1500 == 0: # Fast annealing
# # does this make sense? With the smaller datasets
# # we don't get to 1500? Why is this happening?
# # I don't quite trust it
# # But here, since we call model.cpu()
# # we need to move the model to the device again
            # # else we ran into that weird issue!
# scheduler.step()
# print("learning rate: %.6f" % scheduler.get_lr()[0])
# #torch.save(
# # model.cpu().state_dict(),
# # model_dir + "/model-ref.iter-%d-%d" % (epoch, it + 1),
# #)
# model.to(device)
del loss
del kl_div
total_step_count = total_step_count + 1
scheduler.step()
print("learning rate: %.6f" % scheduler.get_lr()[0])
torch.save(
model.cpu().state_dict(), model_dir + "/model-ref.iter-" + str(epoch)
) # is this the expensive part?
model = model.to(device)
my_log.close()
return total_step_count
# ------------ MODEL EVALUATION ROUTINES ------------
def evaluate_predictions_model(model, smiles, props, vis):
"""
Return evaluation objects for JT-VAE model.
    This function will return a list of [mse, corr] for the smiles passed in,
    and also return a 2-column matrix for plotting predicted vs. actual values.
vis object allows us to use Visdom to directly update
a live performance plot view.
:param model: JT-VAE model
:param smiles: Pandas series with SMILES as entries
We usually pass toxdata.smiles
:param props: Pandas series with molecular activity or property to predict
:param vis: Visualization object from toxsquad.visualizations
:returns: Scores, coords
- Scores is a list of mean squared error and correlation coefficient
(for entire smiles batch). This is of length 2.
- coords are x, y coordinates for the "performance plot"
(where x=actual and y=predicted).
"""
predictions = dict()
n_molecules = len(smiles)
coords = np.zeros((n_molecules, 2))
# k = 0;
model = model.eval()
for k, idx in enumerate(smiles.index):
print_status(k, n_molecules)
sml = smiles.loc[idx]
prop = props.loc[idx]
# model.predict(sml) returns a torch tensor
# on which we need to call .item()
# to get the actual floating point value out.
predictions[idx] = model.predict(sml).item()
coords[k, 0] = prop.item()
coords[k, 1] = predictions[idx]
# k = k + 1;
model = model.train()
mse = np.mean((coords[:, 1] - coords[:, 0]) ** 2)
corr = np.corrcoef(coords[:, 1], coords[:, 0])[0, 1]
print("MSE: " + str(mse))
print("Corr: " + str(corr))
scores = []
scores.append(mse)
scores.append(corr)
# TODO do reconstruction test
if vis is not None:
vis.plot_scatter_gt_predictions(
coords, f"{mse:.2f}" + "-r: " + f"{corr:.2f}", ""
)
return scores, coords
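# Minimal usage sketch (not part of the original cross-validation loop above):
# evaluate a trained JT-VAE property model on a held-out partition. `toxdata`
# and `partition` are assumed to have the same shape as used above
# (a dataframe with `smiles`/`val` columns and a dict with a "test" index list).
def example_evaluate_holdout(model, toxdata, partition, vis=None):
    scores, coords = evaluate_predictions_model(
        model,
        toxdata.smiles.loc[partition["test"]],
        toxdata.val.loc[partition["test"]],
        vis,
    )
    mse, corr = scores
    return mse, corr, coords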
# ------------ LATENT SPACE ROUTINES ------------
from numpy.random import choice
from rdkit import DataStructs
from rdkit.Chem import AllChem
def get_neighbor_along_direction_tree(sample_latent, direction, step_size):
"""
Direction should be normalized
Direction is in tree space
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
new_tree_vec = tree_vec + (direction * step_size)
new_sample = torch.cat([new_tree_vec, mol_vec], dim=1)
return new_sample
def get_neighbor_along_direction_graph(sample_latent, direction, step_size):
"""
Direction should be normalized
"""
tree_vec, mol_vec = torch.chunk(sample_latent, 2, dim=1)
# update graph
new_mol_vec = mol_vec + (
direction * step_size
) # maybe the step size will have to be different?
new_sample = torch.cat([tree_vec, new_mol_vec], dim=1)
return new_sample
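# Illustrative sketch (assumption: `model.embed(smiles)` returns a
# 1 x latent_size tensor, as used elsewhere in this file): take one step along
# a unit direction in tree space and, separately, in graph space.
def example_perturb_latent(model, smiles, step_size=0.5):
    sample_latent = model.embed(smiles)
    half_dim = sample_latent.shape[1] // 2  # first half is tree space, second half is graph space
    direction = torch.zeros(1, half_dim, device=sample_latent.device)
    direction[0, 0] = 1.0  # unit vector along the first latent coordinate
    tree_neighbor = get_neighbor_along_direction_tree(sample_latent, direction, step_size)
    graph_neighbor = get_neighbor_along_direction_graph(sample_latent, direction, step_size)
    return tree_neighbor, graph_neighbor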
def get_neighbors_along_directions_tree_then_graph(
model,
smiles,
directions,
scale_factors,
direction_graph,
scale_factor_graph,
n_neighbors=10,
val_to_beat=-2,
max_cosine_distance=1.6,
direction_graph_plus=None,
convert_to_pac50=False,
):
sample_latent = model.embed(smiles)
n_directions = len(directions)
new_samples = []
int_step_sizes = np.arange(-n_neighbors, n_neighbors + 1, 1)
idx = int_step_sizes == 0
int_step_sizes = np.delete(int_step_sizes, np.where(idx)[0][0])
actual_n_neighbors = len(int_step_sizes)
    # dynamic range (this adds a lot of additional samples ... just takes longer)
step_sizes_graph = np.arange(-n_neighbors, n_neighbors + 1, 1)
step_sizes_graph = step_sizes_graph * scale_factor_graph
# fixed range (original implementation)
step_sizes_graph_original = np.arange(-1, 2, 1)
step_sizes_graph_original = (
step_sizes_graph_original * 0.5
) # so here the step size is also fixed!
step_sizes_graph = np.concatenate(
(step_sizes_graph, step_sizes_graph_original), axis=None
)
actual_n_neighbors_graph = len(step_sizes_graph)
    # this is pretty quick, as it's just arithmetic operations in latent space
# todo: since cosine similarity in latent space correlates to an extent with
# chemical similarity, we could further reduce the number of evaluations based on that
cos = nn.CosineSimilarity(dim=1)
for k in range(n_directions): # iterate over axes
step_sizes = int_step_sizes * scale_factors[k]
for i in range(actual_n_neighbors): # iterate over steps along axis
sample = get_neighbor_along_direction_tree(
sample_latent, directions[k], step_sizes[i]
) # tree sample
for j in range(actual_n_neighbors_graph): # iterate along graph axis
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# additional direction
if direction_graph_plus is not None:
graph_sample = get_neighbor_along_direction_graph(
sample, direction_graph_plus, step_sizes_graph[j]
)
# check cosine
cdistance = 1 - cos(sample_latent, graph_sample)
if cdistance.item() < max_cosine_distance:
new_samples.append(graph_sample)
# predict activity and decode samples (probably should be another function, also because this happens ALL the time)
new_smiles, new_activities, new_samples = predict_and_decode_strict(
model, new_samples, val_to_beat, convert_to_pac50
)
return (
new_samples,
new_smiles,
new_activities,
sample_latent.squeeze().cpu().detach().numpy(),
)
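# Stand-alone sketch of the cosine filter applied above: a candidate latent
# point is kept only when 1 - cos(original, candidate) stays below
# `max_cosine_distance`. Shapes follow the 1 x latent_size convention used in
# this file.
def example_within_cosine_budget(sample_latent, candidate, max_cosine_distance=1.6):
    cos = nn.CosineSimilarity(dim=1)
    cdistance = 1 - cos(sample_latent, candidate)
    return cdistance.item() < max_cosine_distance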
# I guess the min val should be informed also relative to the MSE of the model
#
def predict_and_decode_strict(model, new_samples, min_val, convert_to_pac50=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
filtered_samples = []
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
if convert_to_pac50:
prediction = (prediction - 6) * -1
# HIGHER IS BETTER
prediction_condition = prediction > min_val
if prediction_condition:
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
filtered_samples.append(new_samples[i])
return new_smiles, new_activities, filtered_samples
def predict_and_decode(model, new_samples, show_st=False):
n_samples = len(new_samples)
new_smiles = []
new_activities = []
my_bar = None
if show_st:
try:
import streamlit as st
st.write("Decoding progress")
my_bar = st.progress(0)
except ImportError:
pass
for i in range(n_samples):
if my_bar is not None:
my_bar.progress((i + 1) / n_samples)
print_status(i, n_samples)
prediction = (
model.propNN(new_samples[i]).squeeze().cpu().detach().numpy()
) # compute the activity predictions
new_activities.append(prediction)
tree_vec, mol_vec = torch.chunk(new_samples[i], 2, dim=1)
more_smiles = model.decode(tree_vec, mol_vec, prob_decode=False)
new_smiles.append(more_smiles)
new_samples[i] = new_samples[i].squeeze().cpu().detach().numpy()
return new_smiles, new_activities
def sample_gaussian(mean, sigma, n_samples):
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
return samples
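# Hedged example (the isotropic covariance here is an assumption for
# illustration, not the original workflow): draw a few latent samples around a
# single embedded molecule.
def example_sample_around_molecule(model, smiles, n_samples=5, scale=0.1):
    mean = model.embed(smiles).squeeze().cpu().detach()
    sigma = scale * torch.eye(mean.shape[0])
    return sample_gaussian(mean, sigma, n_samples)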
def sample_gaussian_and_predict(model, n_samples, mean, sigma):
dim = int(model.latent_size)
center = mean
covariance = sigma
m = torch.distributions.MultivariateNormal(center, covariance)
samples = []
for i in range(n_samples):
samples.append(m.sample())
samples = torch.stack(samples)
cur_vec = create_var(samples.data, False)
predictions = model.propNN(cur_vec).squeeze()
vectors = cur_vec.cpu().detach().numpy()
predictions = predictions.cpu().detach().numpy()
return vectors, predictions
def get_embeddings(model, toxdata):
k = 0
n_molecules = len(toxdata)
vectors = {}
for idx in toxdata.smiles.index:
print_status(k, n_molecules)
sml = toxdata.smiles.loc[idx]
vectors[idx] = model.embed(sml).cpu().detach().numpy().ravel()
k = k + 1
return vectors
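# Sketch: collect the per-molecule embeddings into a dataframe so the
# latent-space sampling helpers below, which expect a dataframe, can fit a
# Gaussian to the embedding cloud.
def example_embeddings_frame(model, toxdata):
    vectors = get_embeddings(model, toxdata)
    return pd.DataFrame.from_dict(vectors, orient="index")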
from rdkit import DataStructs
from rdkit.Chem import AllChem
from sklearn.metrics.pairwise import cosine_similarity
def sample_latent_space(model, latent, n_samples=2000, decode=False):
mu = torch.from_numpy(np.mean(latent).values).float()
sigma = torch.from_numpy(np.cov(latent.values.transpose())).float()
return sample_latent_space_pass_normal(model, mu, sigma, n_samples, decode)
def sample_latent_space_pass_normal(model, mu, sigma, n_samples=2000, decode=False):
samples, samples_predictions = model.sample_gaussian_and_predict(
n_samples, mu, sigma
) # this is fast
samples = samples.astype("float64")
samples_predictions = samples_predictions.astype("float64")
# dim = int(model_params["latent_size"] / 2)
dim = int(model.latent_size / 2)
tree_vec = create_var(torch.from_numpy(samples[:, 0:dim]).float())
mol_vec = create_var(torch.from_numpy(samples[:, dim : dim * 2]).float())
samples_decoded = []
if decode:
for i in range(n_samples):
print_status(i, n_samples)
samples_decoded.append(
model.decode(
tree_vec[i, :].reshape(1, -1),
mol_vec[i, :].reshape(1, -1),
prob_decode=False,
)
) # this is slow
samples_decoded_df = pd.DataFrame(data=samples_decoded)
samples_decoded_df.columns = ["smiles"]
else:
samples_decoded_df = None
return samples, samples_predictions, samples_decoded_df
# ------------ MISC ROUTINES ------------
def print_status(i, maxSteps):
    percentage = (float(i) / float(maxSteps)) * 100
divisor = 5
if i % divisor == 0:
sys.stdout.write("Progress: %d%% \r" % (percentage))
sys.stdout.flush()
# ------------ DISTANCES ROUTINES ------------
def normalize_morgans(morgans):
morgans_normalized = {}
for key in morgans.keys():
fp = morgans[key]
fp_array = np.zeros((0,), dtype=np.int8)
DataStructs.ConvertToNumpyArray(fp, fp_array)
morgans_normalized[key] = normalize_to_unity(fp_array)
return morgans_normalized
def normalize_to_unity(fp):
if np.sum(fp) == 0:
print("invalid fp")
return fp
else:
return fp / np.sum(fp)
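# Illustrative helper (assumes `toxdata.mols` holds RDKit Mol objects, as in
# compute_properties below): build the fingerprint dict that normalize_morgans
# expects from 2048-bit Morgan fingerprints of radius 2.
def example_build_normalized_morgans(toxdata, radius=2, n_bits=2048):
    morgans = {}
    for idx in toxdata.index:
        mol = toxdata.loc[idx].mols
        morgans[idx] = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=n_bits)
    return normalize_morgans(morgans)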
import cadd.sascorer as sascorer
import networkx as nx
# ------------ CHEMISTRY ROUTINES ------------
from rdkit.Chem import Descriptors, rdmolops
from rdkit.Chem.Descriptors import ExactMolWt
def get_cycle_score(mol):
cycle_list = nx.cycle_basis(nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
if len(cycle_list) == 0:
cycle_length = 0
else:
cycle_length = max([len(j) for j in cycle_list])
if cycle_length <= 6:
cycle_length = 0
else:
cycle_length = cycle_length - 6
current_cycle_score = cycle_length
return current_cycle_score
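# Small worked example: cyclohexane's largest ring has 6 atoms, so its cycle
# score is 0; a single 8-membered ring scores 8 - 6 = 2. The molecules here are
# only for illustration.
def example_cycle_scores():
    from rdkit import Chem  # local import; rdkit is already a dependency of this file
    cyclohexane = Chem.MolFromSmiles("C1CCCCC1")
    cyclooctane = Chem.MolFromSmiles("C1CCCCCCC1")
    return get_cycle_score(cyclohexane), get_cycle_score(cyclooctane)  # -> (0, 2)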
import cadd.sascorer as sascorer
# toxdata should include a mols value
from rdkit.Chem import rdMolDescriptors
from rdkit.Chem.Descriptors import ExactMolWt
NumHDonors = lambda x: rdMolDescriptors.CalcNumHBD(x)
NumHAcceptors = lambda x: rdMolDescriptors.CalcNumHBA(x)
from rdkit.Chem import Descriptors
TPSA = lambda x: Descriptors.TPSA(x)
def compute_properties(toxdata):
n_molecules = len(toxdata)
k = 0
mw = {}
na = {}
log_p = {}
sas = {}
cycle_scores = {}
# more properties
    nhdon = {}
nhacc = {}
tpsa = {}
for idx in toxdata.index:
print_status(k, n_molecules)
mol = toxdata.loc[idx].mols
try:
mw[idx] = ExactMolWt(mol)
log_p[idx] = Descriptors.MolLogP(mol)
sas[idx] = sascorer.calculateScore(mol)
cycle_scores[idx] = get_cycle_score(mol)
na[idx] = mol.GetNumAtoms()
nhdon[idx] = NumHDonors(mol)
nhacc[idx] = NumHAcceptors(mol)
tpsa[idx] = TPSA(mol)
        except Exception:
print("[DEBUG] Error computing properties")
mw[idx] = np.nan
log_p[idx] = np.nan
sas[idx] = np.nan
cycle_scores[idx] = np.nan
na[idx] = np.nan
nhdon[idx] = np.nan
nhacc[idx] = np.nan
tpsa[idx] = np.nan
continue
k = k + 1
props = [
pd.DataFrame.from_dict(mw, orient="index"),
pd.DataFrame.from_dict(log_p, orient="index"),
pd.DataFrame.from_dict(sas, orient="index"),
pd.DataFrame.from_dict(cycle_scores, orient="index"),
pd.DataFrame.from_dict(na, orient="index"),
pd.DataFrame.from_dict(nhdon, orient="index"),
pd.DataFrame.from_dict(nhacc, orient="index"),
| pd.DataFrame.from_dict(tpsa, orient="index") | pandas.DataFrame.from_dict |
# IMPORTS
import seaborn as sns
import pickle
import torch
import numpy as np
import itertools
from tqdm import tqdm
from distil_funcs import CustomDataset
from torch.utils.data import Dataset, DataLoader
import pandas as pd
from matplotlib import pyplot as plt
# EVAL FUNCTIONS - INFERENCE, COMPRESSION, PERFORMANCE
# 1. Inference
def evaluate_inference(model, encoded_eval_data, cpu_or_cuda="cpu",
num_samples=300, batch_sizes=[1], num_workers=[4]):
"""
    Evaluates model inference time on the given device, timing up to num_samples
    batches drawn from eval_dataset.
    Returns the mean and standard deviation of the batch inference time (in ms)
    for each batch size / num_worker combination, as nested lists.
"""
# make sure model is in correct mode
device = torch.device(cpu_or_cuda)
model.to(device)
# setup timer
starter, ender = (torch.cuda.Event(enable_timing=True),
torch.cuda.Event(enable_timing=True))
    # timing buffers are allocated per batch-size / num-worker combination inside the loop below
# create small eval_dataset
eval_dataset = CustomDataset(encoded_eval_data['input_ids'][:10000],
encoded_eval_data['token_type_ids'][:10000],
encoded_eval_data['attention_mask'][:10000])
# GPU warmup
for i in range(len(eval_dataset[:100])):
warmup_input_ids = eval_dataset[i:i+2]['input_ids'].to(device)
warmup_attention_mask = eval_dataset[i:i+2]['attention_mask'].to(device)
with torch.no_grad():
_ = model(input_ids=warmup_input_ids, attention_mask=warmup_attention_mask)
means = []
std_devs = []
# try each batch / worker combination
for batch_size in tqdm(batch_sizes):
worker_means = []
worker_std_devs = []
for worker in num_workers:
# create dataloader
dataloader = DataLoader(dataset=eval_dataset, batch_size=batch_size,
num_workers=worker)
timings = np.zeros((num_samples, 1))
# measure inference
with torch.no_grad():
k=0
for batch in dataloader:
# move data to device
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
# do actual inference recording
starter.record()
_ = model(input_ids=input_ids, attention_mask=attention_mask)
ender.record()
# wait for GPU sync
torch.cuda.synchronize()
curr_time = starter.elapsed_time(ender)
timings[k] = curr_time
k=k+1
if k==(num_samples-1):
break
mean_syn = np.mean(timings)
std_syn = np.std(timings)
worker_means.append(mean_syn)
worker_std_devs.append(std_syn)
means.append(worker_means)
std_devs.append(worker_std_devs)
return means, std_devs
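# Hedged usage sketch: time a model for a few batch sizes. The CUDA events used
# above require a GPU, so "cuda" is passed explicitly here; `encoded_eval_data`
# is assumed to be a dict of tensors with 'input_ids', 'token_type_ids' and
# 'attention_mask' keys, as implied by CustomDataset.
def example_time_model(model, encoded_eval_data):
    means, std_devs = evaluate_inference(
        model,
        encoded_eval_data,
        cpu_or_cuda="cuda",
        num_samples=100,
        batch_sizes=[1, 8, 32],
        num_workers=[4],
    )
    return means, std_devs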
def vis_comparatively(batch_sizes, teacher_means, teacher_std_devs, student_means, student_std_devs, title, student_name):
""" Visualises the inference speed of teacher and student models comparatively. Inputs are outputs of evaluate_inference function."""
dct = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*teacher_means)), 'std_devs':list(itertools.chain(*teacher_std_devs))}
dct2 = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*student_means)), 'std_devs':list(itertools.chain(*student_std_devs))}
teacher_df = pd.DataFrame(data=dct)
teacher_df['model']='LaBSE'
student_df = pd.DataFrame(data=dct2)
student_df['model']=student_name
data=teacher_df.append(student_df)
dfCopy = data.copy()
duplicates = 100 # increase this number to increase precision
for index, row in data.iterrows():
for times in range(duplicates):
new_row = row.copy()
new_row['means'] = np.random.normal(row['means'],row['std_devs'])
dfCopy = dfCopy.append(new_row, ignore_index=True)
# Now Seaborn does the rest
sns.set_style("whitegrid")
fig = sns.barplot(x='batch_sizes',
y='means',
hue='model',
ci='sd',
data=dfCopy)
plt.legend(loc='upper left')
sns.set(rc={'figure.figsize':(8,5)})
plt.ylabel('Inference Time (ms)')
plt.xlabel('Batch Sizes')
plt.title(title)
plt.show()
# 2. Compression
def count_parameters(model):
return sum(p.numel() for p in model.parameters())
def evaluate_compression(teacher_model, student_model):
teacher_parm = count_parameters(teacher_model)
student_parm = count_parameters(student_model)
compression = np.round(teacher_parm/student_parm, decimals=2)
return teacher_parm, student_parm, compression
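# Example usage (the model variables are placeholders): report how much smaller
# the distilled student is than the LaBSE teacher.
def example_report_compression(teacher_model, student_model):
    teacher_parm, student_parm, compression = evaluate_compression(teacher_model, student_model)
    print(f"teacher: {teacher_parm:,} params | student: {student_parm:,} params | {compression}x smaller")
    return compression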
# 3. Performance
def evaluate_performance(teacher_model, student_model,encoded_eval_data, metric, batch_size=256, num_workers=4):
"""
Evaluates the performance of the teacher and student models on the provided metric. Cosine Similarity is advised.
"""
# create small eval_dataset
eval_dataset = CustomDataset(encoded_eval_data['input_ids'][:batch_size*2000],
encoded_eval_data['token_type_ids'][:batch_size*2000],
encoded_eval_data['attention_mask'][:batch_size*2000])
# create dataloader
dataloader = DataLoader(dataset=eval_dataset, batch_size=batch_size,
num_workers=num_workers)
# make sure model is in correct mode
device = torch.device('cuda')
student_model.to(device)
teacher_model.to(device)
performance = []
with torch.no_grad():
for batch in tqdm(dataloader):
# move data to device
input_ids = batch['input_ids'].to(device)
attention_mask = batch['attention_mask'].to(device)
output_t = teacher_model(input_ids=input_ids, attention_mask=attention_mask)
output_s = student_model(input_ids=input_ids, attention_mask=attention_mask)
cpu_output_t = output_t['pooler_output'].detach().cpu()
cpu_output_s = output_s['pooler_output'].detach().cpu()
batch_scores = metric(cpu_output_t, cpu_output_s)
batch_similarities = np.diag(batch_scores)
#print(batch_similarities)
performance.append(batch_similarities)
return np.mean(performance)
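# Hedged sketch: the docstring above suggests cosine similarity as the metric.
# sklearn's pairwise cosine_similarity has the (n, d) x (n, d) -> (n, n) shape
# that evaluate_performance expects, and the diagonal taken inside it compares
# teacher and student embeddings of the same sentence.
def example_cosine_performance(teacher_model, student_model, encoded_eval_data):
    from sklearn.metrics.pairwise import cosine_similarity
    return evaluate_performance(
        teacher_model,
        student_model,
        encoded_eval_data,
        metric=cosine_similarity,
        batch_size=256,
        num_workers=4,
    )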
def lst_of_lst(lst):
return list(map(lambda el:[el], lst))
def convert_to_per_sentence(means):
    # note: relies on a module-level `batch_sizes` list matching the one used for timing
    return lst_of_lst(list(np.array(means) / np.array(batch_sizes)))
from matplotlib import pyplot as plt
import pandas as pd
def vis_comparatively_per_sentence(batch_sizes, teacher_means, teacher_std_devs, student_means, student_std_devs, student_means2, student_std_devs2, student_means3, student_std_devs3, title, student_names):
""" Visualises the inference speed of teacher and student models comparatively. Inputs are outputs of evaluate_inference function."""
dct = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*teacher_means)), 'std_devs':list(itertools.chain(*teacher_std_devs))}
dct2 = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*student_means)), 'std_devs':list(itertools.chain(*student_std_devs))}
dct3 = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*student_means2)), 'std_devs':list(itertools.chain(*student_std_devs2))}
dct4 = {'batch_sizes':batch_sizes, 'means':list(itertools.chain(*student_means3)), 'std_devs':list(itertools.chain(*student_std_devs3))}
teacher_df = pd.DataFrame(data=dct)
teacher_df['model']='LaBSE'
student_df = pd.DataFrame(data=dct2)
student_df['model']=student_names[0]
student_df2 = pd.DataFrame(data=dct3)
student_df2['model']=student_names[1]
student_df3 = | pd.DataFrame(data=dct4) | pandas.DataFrame |
import pandas as pd
import pytest
from synthesized_insight.metrics import EarthMoversDistance, HellingerDistance
from synthesized_insight.metrics.confidence_interval import compute_binomial_interval, compute_bootstrap_interval
@pytest.mark.parametrize(
'metric, data', [
(EarthMoversDistance(), ( | pd.Series(['a', 'b', 'c']) | pandas.Series |
import math
import queue
from datetime import datetime, timedelta, timezone
import pandas as pd
from storey import build_flow, SyncEmitSource, Reduce, Table, AggregateByKey, FieldAggregator, NoopDriver, \
DataframeSource
from storey.dtypes import SlidingWindows, FixedWindows, EmitAfterMaxEvent, EmitEveryEvent
test_base_time = datetime.fromisoformat("2020-07-21T21:40:00+00:00")
def append_return(lst, x):
lst.append(x)
return lst
def test_sliding_window_simple_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg", "min", "max"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 0, 'number_of_stuff_max_2h': 0,
'number_of_stuff_max_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 1, 'number_of_stuff_max_2h': 1,
'number_of_stuff_max_24h': 1, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3, 'number_of_stuff_sum_24h': 3, 'number_of_stuff_min_1h': 0,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 2, 'number_of_stuff_max_2h': 2,
'number_of_stuff_max_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_stuff_min_1h': 1,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 3, 'number_of_stuff_max_2h': 3,
'number_of_stuff_max_24h': 3, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 1.5, 'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 10, 'number_of_stuff_sum_24h': 10, 'number_of_stuff_min_1h': 2,
'number_of_stuff_min_2h': 0, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 4, 'number_of_stuff_max_2h': 4,
'number_of_stuff_max_24h': 4, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 15, 'number_of_stuff_sum_24h': 15, 'number_of_stuff_min_1h': 3,
'number_of_stuff_min_2h': 1, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 5, 'number_of_stuff_max_2h': 5,
'number_of_stuff_max_24h': 5, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 21, 'number_of_stuff_min_1h': 4,
'number_of_stuff_min_2h': 2, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 6, 'number_of_stuff_max_2h': 6,
'number_of_stuff_max_24h': 6, 'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 18, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 28, 'number_of_stuff_min_1h': 5,
'number_of_stuff_min_2h': 3, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 7, 'number_of_stuff_max_2h': 7,
'number_of_stuff_max_24h': 7, 'number_of_stuff_avg_1h': 6.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'number_of_stuff_sum_1h': 21, 'number_of_stuff_sum_2h': 30, 'number_of_stuff_sum_24h': 36, 'number_of_stuff_min_1h': 6,
'number_of_stuff_min_2h': 4, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 8, 'number_of_stuff_max_2h': 8,
'number_of_stuff_max_24h': 8, 'number_of_stuff_avg_1h': 7.0, 'number_of_stuff_avg_2h': 6.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 24, 'number_of_stuff_sum_2h': 35, 'number_of_stuff_sum_24h': 45, 'number_of_stuff_min_1h': 7,
'number_of_stuff_min_2h': 5, 'number_of_stuff_min_24h': 0, 'number_of_stuff_max_1h': 9, 'number_of_stuff_max_2h': 9,
'number_of_stuff_max_24h': 9, 'number_of_stuff_avg_1h': 8.0, 'number_of_stuff_avg_2h': 7.0, 'number_of_stuff_avg_24h': 4.5}
]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
controller.emit({'col1': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col1': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.5, 'number_of_stuff1_avg_24h': 0.5, 'number_of_stuff1_avg_2h': 0.5,
'number_of_stuff1_max_1h': 1, 'number_of_stuff1_max_24h': 1, 'number_of_stuff1_max_2h': 1,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 1, 'number_of_stuff1_sum_24h': 1, 'number_of_stuff1_sum_2h': 1,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col1': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 1.0, 'number_of_stuff1_avg_24h': 1.0, 'number_of_stuff1_avg_2h': 1.0,
'number_of_stuff1_max_1h': 2, 'number_of_stuff1_max_24h': 2, 'number_of_stuff1_max_2h': 2,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 3, 'number_of_stuff1_sum_24h': 3, 'number_of_stuff1_sum_2h': 3,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col1': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 2.0, 'number_of_stuff1_avg_24h': 1.5, 'number_of_stuff1_avg_2h': 1.5,
'number_of_stuff1_max_1h': 3, 'number_of_stuff1_max_24h': 3, 'number_of_stuff1_max_2h': 3,
'number_of_stuff1_min_1h': 1, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 6, 'number_of_stuff1_sum_24h': 6, 'number_of_stuff1_sum_2h': 6,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col1': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 3.0, 'number_of_stuff1_avg_24h': 2.0, 'number_of_stuff1_avg_2h': 2.0,
'number_of_stuff1_max_1h': 4, 'number_of_stuff1_max_24h': 4, 'number_of_stuff1_max_2h': 4,
'number_of_stuff1_min_1h': 2, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 9, 'number_of_stuff1_sum_24h': 10, 'number_of_stuff1_sum_2h': 10,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col1': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 4.0, 'number_of_stuff1_avg_24h': 2.5, 'number_of_stuff1_avg_2h': 3.0,
'number_of_stuff1_max_1h': 5, 'number_of_stuff1_max_24h': 5, 'number_of_stuff1_max_2h': 5,
'number_of_stuff1_min_1h': 3, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 1,
'number_of_stuff1_sum_1h': 12, 'number_of_stuff1_sum_24h': 15, 'number_of_stuff1_sum_2h': 15,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col1': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 5.0, 'number_of_stuff1_avg_24h': 3.0, 'number_of_stuff1_avg_2h': 4.0,
'number_of_stuff1_max_1h': 6, 'number_of_stuff1_max_24h': 6, 'number_of_stuff1_max_2h': 6,
'number_of_stuff1_min_1h': 4, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 2,
'number_of_stuff1_sum_1h': 15, 'number_of_stuff1_sum_24h': 21, 'number_of_stuff1_sum_2h': 20,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col1': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 6.0, 'number_of_stuff1_avg_24h': 3.5, 'number_of_stuff1_avg_2h': 5.0,
'number_of_stuff1_max_1h': 7, 'number_of_stuff1_max_24h': 7, 'number_of_stuff1_max_2h': 7,
'number_of_stuff1_min_1h': 5, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 3,
'number_of_stuff1_sum_1h': 18, 'number_of_stuff1_sum_24h': 28, 'number_of_stuff1_sum_2h': 25,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col1': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 7.0, 'number_of_stuff1_avg_24h': 4.0, 'number_of_stuff1_avg_2h': 6.0,
'number_of_stuff1_max_1h': 8, 'number_of_stuff1_max_24h': 8, 'number_of_stuff1_max_2h': 8,
'number_of_stuff1_min_1h': 6, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 4,
'number_of_stuff1_sum_1h': 21, 'number_of_stuff1_sum_24h': 36, 'number_of_stuff1_sum_2h': 30,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col1': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 8.0, 'number_of_stuff1_avg_24h': 4.5, 'number_of_stuff1_avg_2h': 7.0,
'number_of_stuff1_max_1h': 9, 'number_of_stuff1_max_24h': 9, 'number_of_stuff1_max_2h': 9,
'number_of_stuff1_min_1h': 7, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 5,
'number_of_stuff1_sum_1h': 24, 'number_of_stuff1_sum_24h': 45, 'number_of_stuff1_sum_2h': 35,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_sparse_data_uneven_feature_occurrence():
controller = build_flow([
SyncEmitSource(),
AggregateByKey(
[FieldAggregator("number_of_stuff1", "col1", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_stuff2", "col2", ["sum", "avg", "min", "max"], SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
controller.emit({'col1': 0}, 'tal', test_base_time)
for i in range(10):
controller.emit({'col2': i}, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': math.nan, 'number_of_stuff2_avg_24h': math.nan, 'number_of_stuff2_avg_2h': math.nan,
'number_of_stuff2_max_1h': math.nan, 'number_of_stuff2_max_24h': math.nan, 'number_of_stuff2_max_2h': math.nan,
'number_of_stuff2_min_1h': math.nan, 'number_of_stuff2_min_24h': math.nan, 'number_of_stuff2_min_2h': math.nan,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 0, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.0, 'number_of_stuff2_avg_24h': 0.0, 'number_of_stuff2_avg_2h': 0.0,
'number_of_stuff2_max_1h': 0, 'number_of_stuff2_max_24h': 0, 'number_of_stuff2_max_2h': 0,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 0, 'number_of_stuff2_sum_24h': 0, 'number_of_stuff2_sum_2h': 0},
{'col2': 1, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 0.5, 'number_of_stuff2_avg_24h': 0.5, 'number_of_stuff2_avg_2h': 0.5,
'number_of_stuff2_max_1h': 1, 'number_of_stuff2_max_24h': 1, 'number_of_stuff2_max_2h': 1,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 1, 'number_of_stuff2_sum_24h': 1, 'number_of_stuff2_sum_2h': 1},
{'col2': 2, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 1.0, 'number_of_stuff2_avg_24h': 1.0, 'number_of_stuff2_avg_2h': 1.0,
'number_of_stuff2_max_1h': 2, 'number_of_stuff2_max_24h': 2, 'number_of_stuff2_max_2h': 2,
'number_of_stuff2_min_1h': 0, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 3, 'number_of_stuff2_sum_24h': 3, 'number_of_stuff2_sum_2h': 3},
{'col2': 3, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 2.0, 'number_of_stuff2_avg_24h': 1.5, 'number_of_stuff2_avg_2h': 1.5,
'number_of_stuff2_max_1h': 3, 'number_of_stuff2_max_24h': 3, 'number_of_stuff2_max_2h': 3,
'number_of_stuff2_min_1h': 1, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 6, 'number_of_stuff2_sum_24h': 6, 'number_of_stuff2_sum_2h': 6},
{'col2': 4, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 3.0, 'number_of_stuff2_avg_24h': 2.0, 'number_of_stuff2_avg_2h': 2.0,
'number_of_stuff2_max_1h': 4, 'number_of_stuff2_max_24h': 4, 'number_of_stuff2_max_2h': 4,
'number_of_stuff2_min_1h': 2, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 0,
'number_of_stuff2_sum_1h': 9, 'number_of_stuff2_sum_24h': 10, 'number_of_stuff2_sum_2h': 10},
{'col2': 5, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 4.0, 'number_of_stuff2_avg_24h': 2.5, 'number_of_stuff2_avg_2h': 3.0,
'number_of_stuff2_max_1h': 5, 'number_of_stuff2_max_24h': 5, 'number_of_stuff2_max_2h': 5,
'number_of_stuff2_min_1h': 3, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 1,
'number_of_stuff2_sum_1h': 12, 'number_of_stuff2_sum_24h': 15, 'number_of_stuff2_sum_2h': 15},
{'col2': 6, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 5.0, 'number_of_stuff2_avg_24h': 3.0, 'number_of_stuff2_avg_2h': 4.0,
'number_of_stuff2_max_1h': 6, 'number_of_stuff2_max_24h': 6, 'number_of_stuff2_max_2h': 6,
'number_of_stuff2_min_1h': 4, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 2,
'number_of_stuff2_sum_1h': 15, 'number_of_stuff2_sum_24h': 21, 'number_of_stuff2_sum_2h': 20},
{'col2': 7, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 6.0, 'number_of_stuff2_avg_24h': 3.5, 'number_of_stuff2_avg_2h': 5.0,
'number_of_stuff2_max_1h': 7, 'number_of_stuff2_max_24h': 7, 'number_of_stuff2_max_2h': 7,
'number_of_stuff2_min_1h': 5, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 3,
'number_of_stuff2_sum_1h': 18, 'number_of_stuff2_sum_24h': 28, 'number_of_stuff2_sum_2h': 25},
{'col2': 8, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 7.0, 'number_of_stuff2_avg_24h': 4.0, 'number_of_stuff2_avg_2h': 6.0,
'number_of_stuff2_max_1h': 8, 'number_of_stuff2_max_24h': 8, 'number_of_stuff2_max_2h': 8,
'number_of_stuff2_min_1h': 6, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 4,
'number_of_stuff2_sum_1h': 21, 'number_of_stuff2_sum_24h': 36, 'number_of_stuff2_sum_2h': 30},
{'col2': 9, 'number_of_stuff1_avg_1h': 0.0, 'number_of_stuff1_avg_24h': 0.0, 'number_of_stuff1_avg_2h': 0.0,
'number_of_stuff1_max_1h': 0, 'number_of_stuff1_max_24h': 0, 'number_of_stuff1_max_2h': 0,
'number_of_stuff1_min_1h': 0, 'number_of_stuff1_min_24h': 0, 'number_of_stuff1_min_2h': 0,
'number_of_stuff1_sum_1h': 0, 'number_of_stuff1_sum_24h': 0, 'number_of_stuff1_sum_2h': 0,
'number_of_stuff2_avg_1h': 8.0, 'number_of_stuff2_avg_24h': 4.5, 'number_of_stuff2_avg_2h': 7.0,
'number_of_stuff2_max_1h': 9, 'number_of_stuff2_max_24h': 9, 'number_of_stuff2_max_2h': 9,
'number_of_stuff2_min_1h': 7, 'number_of_stuff2_min_24h': 0, 'number_of_stuff2_min_2h': 5,
'number_of_stuff2_sum_1h': 24, 'number_of_stuff2_sum_24h': 45, 'number_of_stuff2_sum_2h': 35}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_multiple_keys_aggregation_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, f'{i % 2}', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [
{'col1': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0, 'number_of_stuff_sum_24h': 0,
'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1, 'number_of_stuff_sum_24h': 1,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 2, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2, 'number_of_stuff_sum_24h': 2,
'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'number_of_stuff_sum_1h': 4, 'number_of_stuff_sum_2h': 4, 'number_of_stuff_sum_24h': 4,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 4, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6,
'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'number_of_stuff_sum_1h': 9, 'number_of_stuff_sum_2h': 9, 'number_of_stuff_sum_24h': 9,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 6, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12, 'number_of_stuff_sum_24h': 12,
'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'number_of_stuff_sum_1h': 16, 'number_of_stuff_sum_2h': 16, 'number_of_stuff_sum_24h': 16,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 8, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20, 'number_of_stuff_sum_24h': 20,
'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'number_of_stuff_sum_1h': 25, 'number_of_stuff_sum_2h': 25, 'number_of_stuff_sum_24h': 25,
'number_of_stuff_avg_1h': 5.0, 'number_of_stuff_avg_2h': 5.0, 'number_of_stuff_avg_24h': 5.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_filters_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m'),
aggr_filter=lambda element: element['is_valid'] == 0)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'is_valid': i % 2}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'is_valid': 0, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'is_valid': 1, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0,
'number_of_stuff_avg_24h': 0.0},
{'col1': 2, 'is_valid': 0, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'is_valid': 1, 'number_of_stuff_sum_1h': 2, 'number_of_stuff_sum_2h': 2,
'number_of_stuff_sum_24h': 2, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0,
'number_of_stuff_avg_24h': 1.0},
{'col1': 4, 'is_valid': 0, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'is_valid': 1, 'number_of_stuff_sum_1h': 6, 'number_of_stuff_sum_2h': 6,
'number_of_stuff_sum_24h': 6, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0,
'number_of_stuff_avg_24h': 2.0},
{'col1': 6, 'is_valid': 0, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'is_valid': 1, 'number_of_stuff_sum_1h': 12, 'number_of_stuff_sum_2h': 12,
'number_of_stuff_sum_24h': 12, 'number_of_stuff_avg_1h': 3.0, 'number_of_stuff_avg_2h': 3.0,
'number_of_stuff_avg_24h': 3.0},
{'col1': 8, 'is_valid': 0, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'is_valid': 1, 'number_of_stuff_sum_1h': 20, 'number_of_stuff_sum_2h': 20,
'number_of_stuff_sum_24h': 20, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0,
'number_of_stuff_avg_24h': 4.0}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_aggregations_with_max_values_flow():
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("num_hours_with_stuff_in_the_last_24h", "col1", ["count"],
SlidingWindows(['24h'], '1h'),
max_value=5)],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=10 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'num_hours_with_stuff_in_the_last_24h_count_24h': 1},
{'col1': 1, 'num_hours_with_stuff_in_the_last_24h_count_24h': 2},
{'col1': 2, 'num_hours_with_stuff_in_the_last_24h_count_24h': 3},
{'col1': 3, 'num_hours_with_stuff_in_the_last_24h_count_24h': 4},
{'col1': 4, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 5, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 6, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 7, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 8, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5},
{'col1': 9, 'num_hours_with_stuff_in_the_last_24h_count_24h': 5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_sliding_window_simple_aggregation_flow_multiple_fields():
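    # Registers three aggregators over different columns and window sets on the same key;
    # every emitted event should carry all of the aggregation features at once.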
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["sum", "avg"],
SlidingWindows(['1h', '2h', '24h'], '10m')),
FieldAggregator("number_of_things", "col2", ["count"],
SlidingWindows(['1h', '2h'], '15m')),
FieldAggregator("abc", "col3", ["sum"],
SlidingWindows(['24h'], '10m'))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i, 'col2': i * 1.2, 'col3': i * 2 + 4}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'col2': 0.0, 'col3': 4, 'number_of_stuff_sum_1h': 0, 'number_of_stuff_sum_2h': 0,
'number_of_stuff_sum_24h': 0, 'number_of_things_count_1h': 1, 'number_of_things_count_2h': 1,
'abc_sum_24h': 4, 'number_of_stuff_avg_1h': 0.0, 'number_of_stuff_avg_2h': 0.0, 'number_of_stuff_avg_24h': 0.0},
{'col1': 1, 'col2': 1.2, 'col3': 6, 'number_of_stuff_sum_1h': 1, 'number_of_stuff_sum_2h': 1,
'number_of_stuff_sum_24h': 1, 'number_of_things_count_1h': 2, 'number_of_things_count_2h': 2,
'abc_sum_24h': 10, 'number_of_stuff_avg_1h': 0.5, 'number_of_stuff_avg_2h': 0.5, 'number_of_stuff_avg_24h': 0.5},
{'col1': 2, 'col2': 2.4, 'col3': 8, 'number_of_stuff_sum_1h': 3, 'number_of_stuff_sum_2h': 3,
'number_of_stuff_sum_24h': 3, 'number_of_things_count_1h': 3, 'number_of_things_count_2h': 3,
'abc_sum_24h': 18, 'number_of_stuff_avg_1h': 1.0, 'number_of_stuff_avg_2h': 1.0, 'number_of_stuff_avg_24h': 1.0},
{'col1': 3, 'col2': 3.5999999999999996, 'col3': 10, 'number_of_stuff_sum_1h': 6,
'number_of_stuff_sum_2h': 6, 'number_of_stuff_sum_24h': 6, 'number_of_things_count_1h': 4,
'number_of_things_count_2h': 4, 'abc_sum_24h': 28, 'number_of_stuff_avg_1h': 1.5, 'number_of_stuff_avg_2h': 1.5,
'number_of_stuff_avg_24h': 1.5},
{'col1': 4, 'col2': 4.8, 'col3': 12, 'number_of_stuff_sum_1h': 10, 'number_of_stuff_sum_2h': 10,
'number_of_stuff_sum_24h': 10, 'number_of_things_count_1h': 5, 'number_of_things_count_2h': 5,
'abc_sum_24h': 40, 'number_of_stuff_avg_1h': 2.0, 'number_of_stuff_avg_2h': 2.0, 'number_of_stuff_avg_24h': 2.0},
{'col1': 5, 'col2': 6.0, 'col3': 14, 'number_of_stuff_sum_1h': 15, 'number_of_stuff_sum_2h': 15,
'number_of_stuff_sum_24h': 15, 'number_of_things_count_1h': 6, 'number_of_things_count_2h': 6,
'abc_sum_24h': 54, 'number_of_stuff_avg_1h': 2.5, 'number_of_stuff_avg_2h': 2.5, 'number_of_stuff_avg_24h': 2.5},
{'col1': 6, 'col2': 7.199999999999999, 'col3': 16, 'number_of_stuff_sum_1h': 21,
'number_of_stuff_sum_2h': 21, 'number_of_stuff_sum_24h': 21, 'number_of_things_count_1h': 7,
'number_of_things_count_2h': 7, 'abc_sum_24h': 70, 'number_of_stuff_avg_1h': 3.0,
'number_of_stuff_avg_2h': 3.0, 'number_of_stuff_avg_24h': 3.0},
{'col1': 7, 'col2': 8.4, 'col3': 18, 'number_of_stuff_sum_1h': 28, 'number_of_stuff_sum_2h': 28,
'number_of_stuff_sum_24h': 28, 'number_of_things_count_1h': 8, 'number_of_things_count_2h': 8,
'abc_sum_24h': 88, 'number_of_stuff_avg_1h': 3.5, 'number_of_stuff_avg_2h': 3.5, 'number_of_stuff_avg_24h': 3.5},
{'col1': 8, 'col2': 9.6, 'col3': 20, 'number_of_stuff_sum_1h': 36, 'number_of_stuff_sum_2h': 36,
'number_of_stuff_sum_24h': 36, 'number_of_things_count_1h': 9, 'number_of_things_count_2h': 9,
'abc_sum_24h': 108, 'number_of_stuff_avg_1h': 4.0, 'number_of_stuff_avg_2h': 4.0, 'number_of_stuff_avg_24h': 4.0},
{'col1': 9, 'col2': 10.799999999999999, 'col3': 22, 'number_of_stuff_sum_1h': 45,
'number_of_stuff_sum_2h': 45, 'number_of_stuff_sum_24h': 45,
'number_of_things_count_1h': 10, 'number_of_things_count_2h': 10, 'abc_sum_24h': 130,
'number_of_stuff_avg_1h': 4.5, 'number_of_stuff_avg_2h': 4.5, 'number_of_stuff_avg_24h': 4.5}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_simple_aggregation_flow():
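    # Unlike sliding windows, fixed windows reset at each period boundary, so the
    # 1h/2h/3h counts drop back down when an event lands in a new period.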
controller = build_flow([
SyncEmitSource(),
AggregateByKey([FieldAggregator("number_of_stuff", "col1", ["count"],
FixedWindows(['1h', '2h', '3h', '24h']))],
Table("test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
for i in range(10):
data = {'col1': i}
controller.emit(data, 'tal', test_base_time + timedelta(minutes=25 * i))
controller.terminate()
actual = controller.await_termination()
expected_results = [{'col1': 0, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 1, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 2, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 3, 'number_of_stuff_count_1h': 3, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4},
{'col1': 4, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 5,
'number_of_stuff_count_24h': 5},
{'col1': 5, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 5, 'number_of_stuff_count_3h': 6,
'number_of_stuff_count_24h': 6},
{'col1': 6, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 1, 'number_of_stuff_count_3h': 1,
'number_of_stuff_count_24h': 1},
{'col1': 7, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 2, 'number_of_stuff_count_3h': 2,
'number_of_stuff_count_24h': 2},
{'col1': 8, 'number_of_stuff_count_1h': 1, 'number_of_stuff_count_2h': 3, 'number_of_stuff_count_3h': 3,
'number_of_stuff_count_24h': 3},
{'col1': 9, 'number_of_stuff_count_1h': 2, 'number_of_stuff_count_2h': 4, 'number_of_stuff_count_3h': 4,
'number_of_stuff_count_24h': 4}]
assert actual == expected_results, \
f'actual did not match expected. \n actual: {actual} \n expected: {expected_results}'
def test_fixed_window_aggregation_with_uncommon_windows_flow():
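    # Feeds timezone-aware timestamps through DataframeSource and counts samples over
    # non-standard fixed windows (15m/25m/45m/1h).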
time_format = '%Y-%m-%d %H:%M:%S.%f'
columns = ['sample_time', 'signal', 'isotope']
data = [[datetime.strptime('2021-05-30 16:42:15.797000', time_format).replace(tzinfo=timezone.utc), 790.235, 'U235'],
[datetime.strptime('2021-05-30 16:45:15.798000', time_format).replace(tzinfo=timezone.utc), 498.491, 'U235'],
[datetime.strptime('2021-05-30 16:48:15.799000', time_format).replace(tzinfo=timezone.utc), 34650.00343, 'U235'],
[datetime.strptime('2021-05-30 16:51:15.800000', time_format).replace(tzinfo=timezone.utc), 189.823, 'U235'],
[datetime.strptime('2021-05-30 16:54:15.801000', time_format).replace(tzinfo=timezone.utc), 379.524, 'U235'],
[datetime.strptime('2021-05-30 16:57:15.802000', time_format).replace(tzinfo=timezone.utc), 2225.4952, 'U235'],
[datetime.strptime('2021-05-30 17:00:15.803000', time_format).replace(tzinfo=timezone.utc), 1049.0903, 'U235'],
[datetime.strptime('2021-05-30 17:03:15.804000', time_format).replace(tzinfo=timezone.utc), 41905.63447, 'U235'],
[datetime.strptime('2021-05-30 17:06:15.805000', time_format).replace(tzinfo=timezone.utc), 4987.6764, 'U235'],
[datetime.strptime('2021-05-30 17:09:15.806000', time_format).replace(tzinfo=timezone.utc), 67657.11975, 'U235'],
[datetime.strptime('2021-05-30 17:12:15.807000', time_format).replace(tzinfo=timezone.utc), 56173.06327, 'U235'],
[datetime.strptime('2021-05-30 17:15:15.808000', time_format).replace(tzinfo=timezone.utc), 14249.67394, 'U235'],
[datetime.strptime('2021-05-30 17:18:15.809000', time_format).replace(tzinfo=timezone.utc), 656.831, 'U235'],
[datetime.strptime('2021-05-30 17:21:15.810000', time_format).replace(tzinfo=timezone.utc), 5768.4822, 'U235'],
[datetime.strptime('2021-05-30 17:24:15.811000', time_format).replace(tzinfo=timezone.utc), 929.028, 'U235'],
[datetime.strptime('2021-05-30 17:27:15.812000', time_format).replace(tzinfo=timezone.utc), 2585.9646, 'U235'],
[datetime.strptime('2021-05-30 17:30:15.813000', time_format).replace(tzinfo=timezone.utc), 358.918, 'U235']]
df = pd.DataFrame(data, columns=columns)
controller = build_flow([
DataframeSource(df, time_field="sample_time", key_field="isotope"),
AggregateByKey([FieldAggregator("samples", "signal", ["count"],
FixedWindows(['15m', '25m', '45m', '1h']))], Table("U235_test", NoopDriver())),
Reduce([], lambda acc, x: append_return(acc, x)),
]).run()
termination_result = controller.await_termination()
expected = [{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 1.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 16:42:15.797000+0000', tz='UTC'), 'signal': 790.235,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 2.0, 'samples_count_45m': 2.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 16:45:15.798000+0000', tz='UTC'), 'signal': 498.491,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 3.0, 'samples_count_45m': 3.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 16:48:15.799000+0000', tz='UTC'), 'signal': 34650.00343,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 4.0, 'samples_count_45m': 4.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 16:51:15.800000+0000', tz='UTC'), 'signal': 189.823,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 5.0, 'samples_count_45m': 5.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 16:54:15.801000+0000', tz='UTC'), 'signal': 379.524,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 6.0, 'samples_count_45m': 6.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 16:57:15.802000+0000', tz='UTC'), 'signal': 2225.4952,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 1.0, 'samples_count_45m': 7.0, 'samples_count_1h': 1.0,
'sample_time': pd.Timestamp('2021-05-30 17:00:15.803000+0000', tz='UTC'), 'signal': 1049.0903,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 2.0, 'samples_count_45m': 8.0, 'samples_count_1h': 2.0,
'sample_time': pd.Timestamp('2021-05-30 17:03:15.804000+0000', tz='UTC'), 'signal': 41905.63447,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 3.0, 'samples_count_45m': 9.0, 'samples_count_1h': 3.0,
'sample_time': pd.Timestamp('2021-05-30 17:06:15.805000+0000', tz='UTC'), 'signal': 4987.6764,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 4.0, 'samples_count_45m': 10.0, 'samples_count_1h': 4.0,
'sample_time': pd.Timestamp('2021-05-30 17:09:15.806000+0000', tz='UTC'), 'signal': 67657.11975,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 5.0, 'samples_count_45m': 11.0, 'samples_count_1h': 5.0,
'sample_time': pd.Timestamp('2021-05-30 17:12:15.807000+0000', tz='UTC'), 'signal': 56173.06327,
'isotope': 'U235'},
{'samples_count_15m': 1.0, 'samples_count_25m': 6.0, 'samples_count_45m': 1.0, 'samples_count_1h': 6.0,
'sample_time': pd.Timestamp('2021-05-30 17:15:15.808000+0000', tz='UTC'), 'signal': 14249.67394,
'isotope': 'U235'},
{'samples_count_15m': 2.0, 'samples_count_25m': 7.0, 'samples_count_45m': 2.0, 'samples_count_1h': 7.0,
'sample_time': pd.Timestamp('2021-05-30 17:18:15.809000+0000', tz='UTC'), 'signal': 656.831,
'isotope': 'U235'},
{'samples_count_15m': 3.0, 'samples_count_25m': 8.0, 'samples_count_45m': 3.0, 'samples_count_1h': 8.0,
'sample_time': pd.Timestamp('2021-05-30 17:21:15.810000+0000', tz='UTC'), 'signal': 5768.4822,
'isotope': 'U235'},
{'samples_count_15m': 4.0, 'samples_count_25m': 9.0, 'samples_count_45m': 4.0, 'samples_count_1h': 9.0,
'sample_time': pd.Timestamp('2021-05-30 17:24:15.811000+0000', tz='UTC'), 'signal': 929.028,
'isotope': 'U235'},
{'samples_count_15m': 5.0, 'samples_count_25m': 1.0, 'samples_count_45m': 5.0, 'samples_count_1h': 10.0,
'sample_time': | pd.Timestamp('2021-05-30 17:27:15.812000+0000', tz='UTC') | pandas.Timestamp |
import datetime as dt
import pandas as pd
import numpy as np
import re
# Begin User Input Data
report_date = dt.datetime(2020, 8, 31)
wscf_market_value = 194719540.46
aqr_market_value = 182239774.63
delaware_market_value = 151551731.17
wellington_market_value = 149215529.22
qic_cash_market_value = 677011299.30
input_directory = 'U:/'
output_directory = 'U:/'
jpm_filepath = input_directory + 'CIO/#Data/input/jpm/holdings/2020/08/Priced Positions - All.csv'
wscf_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wscf_holdings.xlsx'
aqr_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/aqr_holdings.xls'
delaware_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/delaware_holdings.xlsx'
wellington_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/08/wellington_holdings.xlsx'
qic_cash_filepath = input_directory + 'CIO/#Data/input/lgs/holdings/unitprices/2020/07/qic_cash_holdings.xlsx'
tickers_filepath = input_directory + 'CIO/#Holdings/Data/input/tickers/tickers_201909.xlsx'
asx_filepath = input_directory + 'CIO/#Data/input/asx/ASX300/20200501-asx300.csv'
aeq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_AEQ_Manager Version.xlsx'
ieq_filepath = input_directory + 'CIO/#Holdings/Data/input/exclusions/LGS Exclusions List_December 2018_IEQ_Manager Version.xlsx'
aeq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/aeq_exclusions_' + str(report_date.date()) + '.csv'
ieq_exclusions_filepath = input_directory + 'CIO/#Holdings/Data/output/exclusions/ieq_exclusions_' + str(report_date.date()) + '.csv'
# End User Input Data
# Account Name to LGS Name dictionary
australian_equity_managers_dict = {
'LGS AUSTRALIAN EQUITIES - BLACKROCK': 'BlackRock',
'LGS AUSTRALIAN EQUITIES - ECP': 'ECP',
'LGS AUSTRALIAN EQUITIES DNR CAPITAL': 'DNR',
'LGS AUSTRALIAN EQUITIES - PENDAL': 'Pendal',
'LGS AUSTRALIAN EQUITIES - SSGA': 'SSGA',
'LGS AUSTRALIAN EQUITIES - UBIQUE': 'Ubique',
'LGS AUSTRALIAN EQUITIES - WSCF': 'First Sentier',
'LGS AUSTRALIAN EQUITIES REBALANCE': 'Rebalance',
'LGS AUST EQUITIES - ALPHINITY': 'Alphinity'
}
international_equity_managers_dict = {
'LGS INTERNATIONAL EQUITIES - WCM': 'WCM',
'LGS INTERNATIONAL EQUITIES - AQR': 'AQR',
'LGS INTERNATIONAL EQUITIES - HERMES': 'Hermes',
'LGS INTERNATIONAL EQUITIES - IMPAX': 'Impax',
'LGS INTERNATIONAL EQUITIES - LONGVI EW': 'Longview',
'LGS INTERNATIONAL EQUITIES - LSV': 'LSV',
'LGS INTERNATIONAL EQUITIES - MFS': 'MFS',
'LGS INTERNATIONAL EQUITIES - MACQUARIE': 'Macquarie',
'LGS INTERNATIONAL EQUITIES - WELLINGTON': 'Wellington',
'LGS GLOBAL LISTED PROPERTY - RESOLUTION': 'Resolution',
}
# Imports JPM Mandates holdings data
df_jpm = pd.read_csv(
jpm_filepath,
skiprows=[0, 1, 2, 3],
header=0,
usecols=[
'Account Number',
'Account Name',
'Security ID',
'ISIN',
'Security Name',
'Asset Type Description',
'Price Date',
'Market Price',
'Total Units',
'Total Market Value (Local)',
'Total Market Value (Base)',
'Local Currency'
],
parse_dates=['Price Date'],
infer_datetime_format=True
)
# Renames the columns into LGS column names
df_jpm = df_jpm.rename(
columns={
'Security ID': 'SEDOL',
'Asset Type Description': 'Asset Type',
'Price Date': 'Date',
'Market Price': 'Purchase Price Local',
'Total Units': 'Quantity',
'Total Market Value (Local)': 'Market Value Local',
'Total Market Value (Base)': 'Market Value AUD',
'Local Currency': 'Currency'
}
)
df_jpm['Purchase Price AUD'] = df_jpm['Market Value AUD'] / df_jpm['Quantity']
# Imports WSCF holdings data
df_wscf = pd.read_excel(
pd.ExcelFile(wscf_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 8],
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Name',
'Unit Holdings',
'Market Value (Local Currency)',
'Market Value (Base Currency)',
'Security Currency'
]
)
# Renames the columns into LGS column names
df_wscf = df_wscf.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Unit Holdings': 'Quantity',
'Market Value (Local Currency)': 'Market Value Local',
'Market Value (Base Currency)': 'Market Value AUD',
'Security Currency': 'Currency'
}
)
# Scales holdings by market value
wscf_scaling_factor = wscf_market_value/df_wscf['Market Value AUD'].sum()
df_wscf['Market Value Local'] = wscf_scaling_factor * df_wscf['Market Value Local']
df_wscf['Market Value AUD'] = wscf_scaling_factor * df_wscf['Market Value AUD']
df_wscf['Quantity'] = wscf_scaling_factor * df_wscf['Quantity']
df_wscf['Purchase Price Local'] = df_wscf['Market Value Local'] / df_wscf['Quantity']
df_wscf['Purchase Price AUD'] = df_wscf['Market Value AUD'] / df_wscf['Quantity']
df_wscf['Account Number'] = 'WSCF'
df_wscf['Account Name'] = 'LGS AUSTRALIAN EQUITIES - WSCF'
df_wscf['Date'] = report_date
df_wscf['Asset Type'] = np.nan
# Imports AQR holdings data
df_aqr = pd.read_excel(
pd.ExcelFile(aqr_filepath),
sheet_name='Holdings',
skiprows=[0, 1, 2, 3, 4, 5, 6, 7],
header=0,
usecols=[
'Sedol',
'Isin',
'Investment Description',
'Asset Type',
'Price Local',
'Base Price',
'Quantity',
'MV Local',
'MV Base',
'Ccy'
]
)
# Renames the columns into LGS column names
df_aqr = df_aqr.rename(
columns={
'Sedol': 'SEDOL',
'Isin': 'ISIN',
'Investment Description': 'Security Name',
'Price Local': 'Purchase Price Local',
'Base Price': 'Purchase Price AUD',
'MV Local': 'Market Value Local',
'MV Base': 'Market Value AUD',
'Ccy': 'Currency'
}
)
# Scales holdings by market value
aqr_scaling_factor = aqr_market_value/df_aqr['Market Value AUD'].sum()
df_aqr['Market Value Local'] = aqr_scaling_factor * df_aqr['Market Value Local']
df_aqr['Market Value AUD'] = aqr_scaling_factor * df_aqr['Market Value AUD']
df_aqr['Quantity'] = aqr_scaling_factor * df_aqr['Quantity']
df_aqr['Account Number'] = 'AQR'
df_aqr['Account Name'] = 'LGS INTERNATIONAL EQUITIES - AQR'
df_aqr['Date'] = report_date
# Imports Delaware holdings data
df_delaware = pd.read_excel(
pd.ExcelFile(delaware_filepath),
sheet_name='EM SICAV holdings 7-31-2020',
header=0,
usecols=[
'Security SEDOL',
'Security ISIN',
'Security Description (Short)',
'Position Date',
'Shares/Par',
'Trading Currency',
'Traded Market Value (Local)',
'Traded Market Value (AUD)'
]
)
# Renames the columns into LGS column names
df_delaware = df_delaware.rename(
columns={
'Security SEDOL': 'SEDOL',
'Security ISIN': 'ISIN',
'Security Description (Short)': 'Security Name',
'Position Date': 'Date',
'Shares/Par': 'Quantity',
'Trading Currency': 'Currency',
'Traded Market Value (Local)': 'Market Value Local',
'Traded Market Value (AUD)': 'Market Value AUD'
}
)
# Scales holdings by market value
delaware_scaling_factor = delaware_market_value/df_delaware['Market Value AUD'].sum()
df_delaware['Market Value Local'] = delaware_scaling_factor * df_delaware['Market Value Local']
df_delaware['Market Value AUD'] = delaware_scaling_factor * df_delaware['Market Value AUD']
df_delaware['Quantity'] = delaware_scaling_factor * df_delaware['Quantity']
df_delaware['Purchase Price Local'] = df_delaware['Market Value Local'] / df_delaware['Quantity']
df_delaware['Purchase Price AUD'] = df_delaware['Market Value AUD'] / df_delaware['Quantity']
df_delaware['Account Number'] = 'MACQUARIE'
df_delaware['Account Name'] = 'LGS INTERNATIONAL EQUITIES - MACQUARIE'
df_delaware['Date'] = report_date
# Imports Wellington holdings data
df_wellington = pd.read_excel(
pd.ExcelFile(wellington_filepath),
sheet_name='wellington_holdings',
header=0,
usecols=[
'SEDOL',
'ISIN',
'Security',
'Shares or Par Value',
'ISO Code',
'Market Value (Local)',
'Market Value (Report Currency)'
]
)
# Renames the columns into LGS column names
df_wellington = df_wellington.rename(
columns={
'Security': 'Security Name',
'Shares or Par Value': 'Quantity',
'ISO Code': 'Currency',
'Market Value (Local)': 'Market Value Local',
'Market Value (Report Currency)': 'Market Value AUD'
}
)
# Scales holdings by market value
wellington_scaling_factor = wellington_market_value/df_wellington['Market Value AUD'].sum()
df_wellington['Market Value Local'] = wellington_scaling_factor * df_wellington['Market Value Local']
df_wellington['Market Value AUD'] = wellington_scaling_factor * df_wellington['Market Value AUD']
df_wellington['Quantity'] = wellington_scaling_factor * df_wellington['Quantity']
df_wellington['Purchase Price Local'] = df_wellington['Market Value Local'] / df_wellington['Quantity']
df_wellington['Purchase Price AUD'] = df_wellington['Market Value AUD'] / df_wellington['Quantity']
df_wellington['Account Number'] = 'WELLINGTON'
df_wellington['Account Name'] = 'LGS INTERNATIONAL EQUITIES - WELLINGTON'
df_wellington['Date'] = report_date
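# Imports QIC cash holdings data (percentage exposures, converted to AUD market values below)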
df_qic_cash = pd.read_excel(
pd.ExcelFile(qic_cash_filepath),
sheet_name='Risk and Exposure',
header=4,
usecols=[
'ISIN',
'Security Description',
'Security Type',
'Currency',
'Market Value %'
]
)
df_qic_cash = df_qic_cash.rename(
columns={
'Security Description': 'Security Name',
'Security Type': 'Asset Type'
}
)
df_qic_cash['Market Value Local'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Market Value AUD'] = df_qic_cash['Market Value %'] * qic_cash_market_value
df_qic_cash['Quantity'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Purchase Price Local'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Purchase Price AUD'] = [np.nan for i in range(0,len(df_qic_cash))]
df_qic_cash['Account Number'] = 'QIC Cash'
df_qic_cash['Account Name'] = 'LGS CASH - QIC CASH'
df_qic_cash['Date'] = report_date
df_qic_cash = df_qic_cash.drop(columns=['Market Value %'], axis=1)
df_qic_cash = df_qic_cash[~df_qic_cash['Security Name'].isin([np.nan])].reset_index(drop=True)
# Joins all the dataframes
df_main = pd.concat([df_jpm, df_wscf, df_aqr, df_delaware, df_wellington], axis=0, sort=True).reset_index(drop=True)
# Outputs all of the holdings
df_main_all = df_main.copy()
df_main_all = df_main_all.drop(['Date'], axis=1)
df_main_all.to_csv(output_directory + 'CIO/#Data/output/holdings/all_holdings.csv', index=False)
# <NAME> Spreadsheet
df_cp = df_main_all[['Account Name', 'Security Name', 'Market Value AUD']]
df_cp.to_csv(output_directory + 'CIO/#Data/output/holdings/craigpete.csv', index=False)
# Selects Australian Equity and International Equity managers only (for the JANA report)
df_main_all_aeq = df_main_all[df_main_all['Account Name'].isin(australian_equity_managers_dict)].reset_index(drop=True)
df_main_all_ieq = df_main_all[df_main_all['Account Name'].isin(international_equity_managers_dict)].reset_index(drop=True)
# Writes to excel file for JANA
writer = pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/jana/aeq_holdings.xlsx', engine='xlsxwriter')
account_to_dataframe_dict = dict(list(df_main_all_aeq.groupby('Account Name')))
for account, dataframe in account_to_dataframe_dict.items():
dataframe.to_excel(writer, sheet_name=australian_equity_managers_dict[account], index=False)
writer.save()
writer = pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/jana/ieq_holdings.xlsx', engine='xlsxwriter')
account_to_dataframe_dict = dict(list(df_main_all_ieq.groupby('Account Name')))
for account, dataframe in account_to_dataframe_dict.items():
dataframe.to_excel(writer, sheet_name=international_equity_managers_dict[account], index=False)
writer.save()
# Starts top holdings section
# Removes SEDOLS with np.nan value
df_main_nan = df_main[df_main['SEDOL'].isin([np.nan])]
df_main = df_main[~df_main['SEDOL'].isin([np.nan])].reset_index(drop=True)
df_main = df_main[~df_main['ISIN'].isin([np.nan])].reset_index(drop=True)
# Cleans the SEDOL and ISIN strings
df_main['SEDOL'] = [str(df_main['SEDOL'][i]).replace(" ", "").upper() for i in range(0, len(df_main))]
df_main['ISIN'] = [str(df_main['ISIN'][i]).replace(" ", "").upper() for i in range(0, len(df_main))]
# Selects Australian Equity and International Equity managers only
df_main_aeq = df_main[df_main['Account Name'].isin(australian_equity_managers_dict)].reset_index(drop=True)
df_main_ieq = df_main[df_main['Account Name'].isin(international_equity_managers_dict)].reset_index(drop=True)
# Calculates % of portfolio within each asset class
df_main_aeq['(%) of Portfolio'] = (df_main_aeq['Market Value AUD'] / df_main_aeq['Market Value AUD'].sum()) * 100
df_main_ieq['(%) of Portfolio'] = (df_main_ieq['Market Value AUD'] / df_main_ieq['Market Value AUD'].sum()) * 100
# Sums all the security market values by their SEDOL
df_main_aeq = df_main_aeq.groupby(['SEDOL']).sum().sort_values(['Market Value AUD'], ascending=[False])[['Market Value AUD', '(%) of Portfolio']]
df_main_ieq = df_main_ieq.groupby(['SEDOL']).sum().sort_values(['Market Value AUD'], ascending=[False])[['Market Value AUD', '(%) of Portfolio']]
# Selects SEDOLS and Security names
df_security_names = df_main[['SEDOL', 'Security Name']].drop_duplicates(subset=['SEDOL'], keep='first').reset_index(drop=True)
# Merges security names back onto df_main_aeq
df_main_aeq = pd.merge(
left=df_main_aeq,
right=df_security_names,
left_on=['SEDOL'],
right_on=['SEDOL'],
how='outer',
indicator=True
)
df_main_aeq = df_main_aeq[df_main_aeq['_merge'].isin(['left_only', 'both'])].drop(columns=['_merge'], axis=1)
# Merges security names back onto df_main_ieq
df_main_ieq = pd.merge(
left=df_main_ieq,
right=df_security_names,
left_on=['SEDOL'],
right_on=['SEDOL'],
how='outer',
indicator=True
)
df_main_ieq = df_main_ieq[df_main_ieq['_merge'].isin(['left_only', 'both'])].drop(columns=['_merge'], axis=1)
# Remove AUD
df_main_ieq = df_main_ieq[~df_main_ieq['SEDOL'].isin(['--'])].reset_index(drop=True)
# Creates SEDOL to LGS friendly names dictionary for the top 10 holdings table for AE and IE.
sedol_to_common_name_dict = {
'6215035': 'CBA',
'6144690': 'BHP',
'6185495': 'CSL',
'6624608': 'NAB',
'6076146': 'Westpac',
'B28YTC2': 'Macquarie',
'6065586': 'ANZ',
'6087289': 'Telstra',
'6948836': 'Westfarmers',
'6220103': '<NAME>',
'6981239': 'Woolworths',
'BTN1Y11': 'Medtronic',
'B2PZN04': 'Visa',
'2661568': 'Oracle',
'2886907': '<NAME>',
'2842040': 'State Street',
'B4BNMY3': 'Accenture',
'2044545': 'Comcast',
'2270726': '<NAME>',
'BD6K457': 'Compass',
'2210959': 'Canadian Rail',
'7123870': 'Nestle',
'2588173': 'Microsoft',
'B4MGBG6': 'HCA',
'BMMV2K8': 'Tencent',
'2046251': 'Apple',
'6066608': 'Amcor',
'B44WZD7': 'Prologis',
'2000019': 'Amazon',
'--': 'AUD'
}
# Selects top 10 holdings for AE and IE
df_main_aeq_top10 = df_main_aeq.head(10)[['SEDOL', 'Market Value AUD', '(%) of Portfolio']]
df_main_ieq_top10 = df_main_ieq.head(10)[['SEDOL', 'Market Value AUD', '(%) of Portfolio']]
# Applies SEDOL to company name dictionary
df_main_aeq_top10['Company'] = [sedol_to_common_name_dict[df_main_aeq_top10['SEDOL'][i]] for i in range(0, len(df_main_aeq_top10))]
df_main_ieq_top10['Company'] = [sedol_to_common_name_dict[df_main_ieq_top10['SEDOL'][i]] for i in range(0, len(df_main_ieq_top10))]
# Divides market value by a million
df_main_aeq_top10['Market Value'] = df_main_aeq_top10['Market Value AUD'] / 1000000
df_main_ieq_top10['Market Value'] = df_main_ieq_top10['Market Value AUD'] / 1000000
# Selects columns for output into latex
df_main_aeq_top10 = df_main_aeq_top10[['Company', 'Market Value', '(%) of Portfolio']].round(2)
df_main_ieq_top10 = df_main_ieq_top10[['Company', 'Market Value', '(%) of Portfolio']].round(2)
# Outputs the tables into latex
with open(output_directory + 'CIO/#Data/output/investment/holdings/top10_local.tex', 'w') as tf:
tf.write(df_main_aeq_top10.to_latex(index=False))
with open(output_directory + 'CIO/#Data/output/investment/holdings/top10_foreign.tex', 'w') as tf:
tf.write(df_main_ieq_top10.to_latex(index=False))
# Writes to excel
writer = pd.ExcelWriter(output_directory + 'CIO/#Data/output/holdings/top_holdings.xlsx', engine='xlsxwriter')
df_main_aeq.to_excel(writer, sheet_name='local', index=False)
df_main_ieq.to_excel(writer, sheet_name='foreign', index=False)
writer.save()
# EXCLUSIONS SECTION
df_aeq_exclusions = pd.read_excel(
pd.ExcelFile(aeq_filepath),
sheet_name='AEQ',
skiprows=[0, 1],
header=0,
usecols=[
'ISSUER_ISIN',
'ISSUER_\nSEDOL',
'SCREEN'
]
)
df_aeq_exclusions = df_aeq_exclusions.rename(columns={'ISSUER_ISIN': 'ISIN', 'ISSUER_\nSEDOL': 'SEDOL'})
df_aeq_exclusions['SEDOL'] = [str(df_aeq_exclusions['SEDOL'][i]).replace(" ", "").upper() for i in range(0, len(df_aeq_exclusions))]
df_aeq_exclusions['ISIN'] = [str(df_aeq_exclusions['ISIN'][i]).replace(" ", "").upper() for i in range(0, len(df_aeq_exclusions))]
df_ieq_exclusions = pd.read_excel(
| pd.ExcelFile(ieq_filepath) | pandas.ExcelFile |
import pandas as pd
import numpy as np
from statsmodels.formula.api import ols
from swstats import *
from scipy.stats import ttest_ind
import xlsxwriter
from statsmodels.stats.multitest import multipletests
from statsmodels.stats.proportion import proportions_ztest
debugging = False
def pToSign(pval):
if pval < .001:
return "***"
elif pval < .01:
return "**"
elif pval < .05:
return "*"
elif pval < .1:
return "+"
else:
return ""
def analyzeExperiment_ContinuousVar(dta, varName):
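    # Compares each treatment arm to the control arm with independent-samples t-tests,
    # reports mean (SD) per arm, flags Holm-corrected significance with '#', and adds
    # arm 2 vs arm 4 and arm 3 vs arm 4 contrasts.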
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1mean = np.mean(order_value_control_group)
arm1sd = np.std(order_value_control_group)
arm1text = "" + "{:.2f}".format(arm1mean) + " (" + "{:.2f}".format(arm1sd) + ")"
# Effect of Arm 2
arm2mean = np.mean(order_value_arm2_group)
arm2sd = np.std(order_value_arm2_group)
tscore, pval2 = ttest_ind(order_value_control_group, order_value_arm2_group)
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2mean) + " (" + "{:.2f}".format(arm2sd) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3mean = np.mean(order_value_arm3_group)
arm3sd = np.std(order_value_arm3_group)
tscore, pval3 = ttest_ind(order_value_control_group, order_value_arm3_group)
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3mean) + " (" + "{:.2f}".format(arm3sd) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4mean = np.mean(order_value_arm4_group)
arm4sd = np.std(order_value_arm4_group)
tscore, pval4 = ttest_ind(order_value_control_group, order_value_arm4_group)
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4mean) + " (" + "{:.2f}".format(arm4sd) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
    # Pairwise contrasts between treatment arms (arm 2 vs arm 4, arm 3 vs arm 4)
tscore, pval2to4 = ttest_ind(order_value_arm2_group, order_value_arm4_group)
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4mean - arm2mean) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
tscore, pval3to4 = ttest_ind(order_value_arm3_group, order_value_arm4_group)
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4mean - arm3mean) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeExperiment_BinaryVar(dta, varName):
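    # Same arm-by-arm comparison as the continuous version, but on success proportions
    # using two-sided proportions z-tests instead of t-tests on means.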
order_value_control_group = dta.loc[dta.surveyArm == "arm1_control", varName]
order_value_arm2_group = dta.loc[dta.surveyArm == "arm2_written_techniques", varName]
order_value_arm3_group = dta.loc[dta.surveyArm == "arm3_existingssa", varName]
order_value_arm4_group = dta.loc[dta.surveyArm == "arm4_interactive_training", varName]
# Arm 1
arm1Successes = sum(order_value_control_group.isin([True, 1]))
arm1Count = sum(order_value_control_group.isin([True, False, 1, 0]))
arm1PercentSuccess = arm1Successes/arm1Count
arm1text = "" + "{:.2f}".format(arm1PercentSuccess) + " (" + "{:.0f}".format(arm1Successes) + ")"
# Effect of Arm 2
arm2Successes = sum(order_value_arm2_group.isin([True, 1]))
arm2Count = sum(order_value_arm2_group.isin([True, False, 1, 0]))
arm2PercentSuccess = arm2Successes/arm2Count
zstat, pval2 = proportions_ztest(count=[arm1Successes,arm2Successes], nobs=[arm1Count,arm2Count], alternative='two-sided')
arm2sign = pToSign(pval2)
arm2text = "" + "{:.2f}".format(arm2PercentSuccess) + " (" + "{:.0f}".format(arm2Successes) + ")" + arm2sign + " p:" + "{:.3f}".format(pval2)
# Effect of Arm 3
arm3Successes = sum(order_value_arm3_group.isin([True, 1]))
arm3Count = sum(order_value_arm3_group.isin([True, False, 1, 0]))
arm3PercentSuccess = arm3Successes/arm3Count
zstat, pval3 = proportions_ztest(count=[arm1Successes,arm3Successes], nobs=[arm1Count,arm3Count], alternative='two-sided')
arm3sign = pToSign(pval3)
arm3text = "" + "{:.2f}".format(arm3PercentSuccess) + " (" + "{:.0f}".format(arm3Successes) + ")" + arm3sign + " p:" + "{:.3f}".format(pval3)
# Effect of Arm 4
arm4Successes = sum(order_value_arm4_group.isin([True, 1]))
arm4Count = sum(order_value_arm4_group.isin([True, False, 1, 0]))
arm4PercentSuccess = arm4Successes/arm4Count
zstat, pval4 = proportions_ztest(count=[arm1Successes,arm4Successes], nobs=[arm1Count,arm4Count], alternative='two-sided')
arm4sign = pToSign(pval4)
arm4text = "" + "{:.2f}".format(arm4PercentSuccess) + " (" + "{:.0f}".format(arm4Successes) + ")" + arm4sign + " p:" + "{:.3f}".format(pval4)
# Correct P-values
y = multipletests(pvals=[pval2, pval3, pval4], alpha=0.05, method="holm")
# print(len(y[1][np.where(y[1] < 0.05)])) # y[1] returns corrected P-vals (array)
sigWithCorrection = y[1] < 0.05
if sigWithCorrection[0]:
arm2text = arm2text + ",#"
if sigWithCorrection[1]:
arm3text = arm3text + ",#"
if sigWithCorrection[2]:
arm4text = arm4text + ",#"
    # Pairwise contrasts between treatment arms (arm 2 vs arm 4, arm 3 vs arm 4)
zstat, pval2to4 = proportions_ztest(count=[arm2Successes,arm4Successes], nobs=[arm2Count,arm4Count], alternative='two-sided')
arm2to4sign = pToSign(pval2to4)
arm2to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm2PercentSuccess) + " " + arm2to4sign + " p:" + "{:.3f}".format(pval2to4)
zstat, pval3to4 = proportions_ztest(count=[arm3Successes,arm4Successes], nobs=[arm3Count,arm4Count], alternative='two-sided')
arm3to4sign = pToSign(pval3to4)
arm3to4text = "" + "{:.2f}".format(arm4PercentSuccess - arm3PercentSuccess) + " " + arm3to4sign + " p:" + "{:.3f}".format(pval3to4)
results = {"Outcome": varName,
"Arm1": arm1text,
"Arm2": arm2text,
"Arm3": arm3text,
"Arm4": arm4text,
"Arm2To4": arm2to4text,
"Arm3To4": arm3to4text,
}
return results
def analyzeResults(dta, outputFileName, scoringVars, surveyVersion, primaryOnly=True):
if primaryOnly:
dta = dta[dta.IsPrimaryWave].copy()
dataDir = "C:/Dev/src/ssascams/data/"
''' Analyze the answers'''
writer = | pd.ExcelWriter(dataDir + 'RESULTS_' + outputFileName + '.xlsx', engine='xlsxwriter') | pandas.ExcelWriter |
import re
import json
from datetime import datetime
import pandas as pd
import numpy as np
from src import db_handler as db
from src.settings import LANGUAGE, PATTERN, LINK_LOCATION
from src.emoji import EMOJI, DEMOJI, CLEANER
emoji_pattern = re.compile('|'.join(sorted([re.escape(emo) for emo in EMOJI], key=len, reverse=True)))
demoji_pattern = re.compile('|'.join(DEMOJI))
cleaner_pattern = re.compile('|'.join([re.escape(c) for c in CLEANER]))
def get_pattern(key, lang='en'):
"""Provide regex pattern for emoji, link, mention, media, location, contact, deleted and events."""
re_patterns = PATTERN[key]
if isinstance(re_patterns, list):
re_patterns = [re_pattern.format(**LANGUAGE[lang]) for re_pattern in re_patterns]
elif key != 'mention':
re_patterns = re_patterns.format(**LANGUAGE[lang])
return re_patterns
def translate_event_type(event_type, lang):
"""Translate non english event type to standard english event type."""
event_index = {v: k for k, v in LANGUAGE[lang].items()}[event_type]
return LANGUAGE['en'][event_index]
def decode_emoji(text):
"""Convert unicode character of emoji into string representation."""
replaced = emoji_pattern.sub(lambda x: '<Emoji_' + str(EMOJI.get(x.group(0))) + '>', text)
cleaned = cleaner_pattern.sub('', replaced)
if cleaned[-1:] == '\n':
cleaned = cleaned[:-1]
return cleaned
def encode_emoji(text):
"""Convert emoji string representation to unicode character."""
return demoji_pattern.sub(lambda x: DEMOJI.get(x.group(0)), text)
def convert_to_re_pattern(pattern):
"""Convert python date-format pattern into regular expression pattern."""
symb_conv = lambda x: '\d{1,' + str(len(datetime.today().strftime(x))) + '}'
re_pattern = ''
i = 0
while i < len(pattern):
if pattern[i] == '%':
re_pattern += symb_conv(pattern[i:i+2])
i += 1
elif pattern[i] in '/()[].$^*?':
re_pattern += '\\' + pattern[i]
else:
re_pattern += pattern[i]
i += 1
return re_pattern
def detect_language(chat):
"""Detect android system language when exporting chat history, affect date format, event type, and non text object declaration."""
for lang in LANGUAGE:
if re.search(get_pattern('events', lang)[0], chat):
return lang, 'groupchat'
elif re.search(get_pattern('events', lang)[1], chat):
return lang, 'personalchat'
return 'not_supported', ''
def clean_message(x):
"""Remove newline, emoji, link, mention, and other non text object from message."""
category, message = x
if category == 'Text':
message = message.replace('\n', ' ')
message = re.sub(get_pattern('emoji'), '', message)
message = re.sub(get_pattern('link'), '', message)
message = re.sub(get_pattern('mention'), '', message)
else:
message = ''
return message
def find_link(x):
"""Find links from message."""
category, message = x
list_link = []
if category == 'Text':
for link in re.findall(get_pattern('link'), message):
temp = link[:-1] if link[-1] in ['.', ','] else link
temp = temp[2:] if temp[:2] in ['m.'] else temp
if temp not in LINK_LOCATION:
list_link.append(temp)
return list_link
def find_word(text):
"""Find words from message."""
words = re.findall('\w+', text)
return [word.lower() for word in words]
def get_category(x, lang):
"""Define category (Event, Media, Location, Contact, Deleted, and Text) for each message."""
contact, message = x
if pd.isna(contact):
return 'Event'
elif re.match(get_pattern('media', lang), message):
return 'Media'
elif re.match(get_pattern('location', lang), message):
return 'Location'
elif re.match(get_pattern('contact', lang), message):
return 'Contact'
elif re.match(get_pattern('deleted', lang), message):
return 'Deleted'
else:
return 'Text'
def extract_event(text, lang):
"""Define subject, type, and target for every event."""
for event in get_pattern('events', lang):
match = re.match(event, text)
if match:
matchs = match.groups()
if len(matchs) == 3:
contact, event_type, target = matchs
if target == LANGUAGE[lang]['you'].lower():
target = LANGUAGE[lang]['you']
elif len(matchs) == 2:
contact, event_type, target = matchs[0], matchs[1], np.nan
else:
contact, event_type, target = np.nan, matchs[0], np.nan
if lang != 'en':
event_type = translate_event_type(event_type, lang)
if isinstance(contact, str):
contact = contact.replace('\\u200e', '')
return contact, event_type, target
return np.nan, text, np.nan
def enrich(df, lang):
"""Adding some column for analysis purpose."""
df['category'] = pd.Categorical(df[['contact', 'message']].apply(lambda x: get_category(x, lang), axis=1))
df['clean_message'] = df[['category', 'message']].apply(clean_message, axis=1)
df['date'] = df.datetime.dt.date
df['year'] = df.date + pd.offsets.YearEnd(0)
df['month'] = df.date + pd.offsets.MonthEnd(0)
df['week'] = df.date + pd.offsets.Week(weekday=6)
df['day'] = pd.Categorical(df.datetime.dt.strftime('%A'))
df['hour'] = pd.Categorical(df.datetime.apply(lambda x: x.strftime('%H:00')))
df['list_emoji'] = df.message.apply(lambda x: re.findall(get_pattern('emoji'), x))
df['list_link'] = df[['category', 'message']].apply(find_link, axis=1)
df['list_mention'] = df.message.apply(lambda x: re.findall(get_pattern('mention'), x))
df['list_words'] = df.clean_message.apply(find_word)
df['count_emoji'] = df.list_emoji.apply(len)
df['count_link'] = df.list_link.apply(len)
df['count_mention'] = df.list_mention.apply(len)
df['count_words'] = df.list_words.apply(len)
df['count_character'] = df.clean_message.apply(len)
df['count_newline'] = df.message.str.count('\n')
df['event_type'] = np.nan
df['event_target'] = np.nan
    (df.loc[df.category == 'Event', 'contact'],
     df.loc[df.category == 'Event', 'event_type'],
     df.loc[df.category == 'Event', 'event_target']) = zip(
        *df[df.category == 'Event'].message.apply(lambda x: extract_event(x, lang)))
return df
def parse(chat):
"""Parse exported chat and define date, contact, message for each message."""
chat = chat.decode('utf-8')
lang, chat_type = detect_language(chat)
if lang == 'not_supported':
df = pd.DataFrame()
else:
pattern = LANGUAGE[lang]['date'] + ' - '
re_pattern = convert_to_re_pattern(pattern)
dates = re.findall(re_pattern, chat)
msgs = re.split(re_pattern, chat)
msgs.pop(0)
data = []
for date, msg in zip(dates, msgs):
date = datetime.strptime(date, pattern)
msg_splitted = msg.split(': ', 1)
if len(msg_splitted) > 1:
contact, msg = msg_splitted
else:
contact, msg = np.nan, msg_splitted[0]
msg = decode_emoji(msg)
data.append({
'datetime': date,
'contact': contact,
'message': msg.encode('unicode_escape').decode()})
df = pd.DataFrame(data)
return df, chat_type, lang
def load_parsed_data(input_string, input_type, save=True):
"""Grab chat data, parse, enrich, and store information to client side."""
if input_type == 'upload':
df, chat_type, lang = parse(input_string)
url = db.generate_url(10, unique=save)
if save and not df.empty:
url = db.add_chat(df, lang, chat_type, url)
elif input_type == 'url':
url = input_string
df, chat_type, lang = db.get_chat(url)
if lang in ['not_supported', 'not_found']:
return lang, {'data': ''}
df = enrich(df, lang)
users = sorted(filter(lambda x: | pd.notna(x) | pandas.notna |
import datetime
import json
import pandas as pd
from pandas.io.json import json_normalize
def convert_data(infile_path, outfile_path):
today = datetime.date.today()
today_string = today.strftime('%Y-%m-%d')
start_date_string = (today + datetime.timedelta(days=-180)).strftime('%Y-%m-%d')
data = None
with open(infile_path) as inf:
data = json.load(inf)
for k, v in data.items():
df = | json_normalize(v) | pandas.io.json.json_normalize |
from pathlib import Path
import pandas as pd
from tools import selection_methods
import time
import numpy as np
from pathlib import Path
from collections import OrderedDict
from tools import eval_methods, selection_methods, resolution_methods
from tqdm.auto import tqdm
from dateutil import parser
import pickle
import re
tqdm.pandas()
setting = "allquicks" # dev or test
num_candidates = 1
# Load Wikipedia inlinks:
with open("../resources/wikipedia/overall_entity_freq.pickle", 'rb') as fp:
wikipedia_entity_overall_dict = pickle.load(fp)
# DeezyMatch parameters:
dm_model = "wikidata_gb"
inputfile = "input_dfm"
candrank_metric = "faiss" # 'faiss', 'cosine', 'conf'
candrank_thr = 3
# The following is a default value. The threshold if we use one of the other
# two metrics should not be higher than 1.
if candrank_metric in ['cosine', 'conf']:
candrank_thr = 1
# -------------------------------
# Candidate selection:
# Run candidate selection on all Quicks
# -------------------------------
if not Path("../processed/resolution/candranking_" + setting + ".pkl").is_file():
df = pd.read_pickle("../resources/quicks/quicks_parsed.pkl")
alts_df = pd.read_csv("../resources/quicks/quicks_altname_" + setting + ".tsv", sep="\t")
wkdt_df_places = pd.read_csv("../processed/wikidata/altname_gb_gazetteer.tsv", sep="\t")
wkdt_df_stations = pd.read_csv("../processed/wikidata/altname_gb_stations_gazetteer.tsv", sep="\t")
# ---------------
# DeezyMatch
candidates = "gb_stations"
queries = "quicks_stations"
query_column = "SubStFormatted"
ranked_candidates = selection_methods.find_deezymatch_candidates(wkdt_df_stations, df, query_column, dm_model, inputfile, candidates, queries, candrank_metric, candrank_thr, num_candidates)
df = pd.merge(left=df, right=ranked_candidates, how="left", left_on=query_column, right_on="query")
df = df.rename(columns={"wkcands":"cr_deezy_match_stations"})
df = df.drop(columns = ["query"])
print("Deezy match done!")
candidates = "gb"
queries = "quicks_places"
query_column = "MainStation"
ranked_candidates = selection_methods.find_deezymatch_candidates(wkdt_df_places, df, query_column, dm_model, inputfile, candidates, queries, candrank_metric, candrank_thr, num_candidates)
df = pd.merge(left=df, right=ranked_candidates, how="left", left_on=query_column, right_on="query")
df = df.rename(columns={"wkcands":"cr_deezy_match_places"})
df = df.drop(columns = ["query"])
print("Deezy match done!")
candidates = "gb_stations"
queries = "quicks_altns"
query_column = "Altname"
ranked_candidates = selection_methods.find_deezymatch_candidates(wkdt_df_stations, alts_df, query_column, dm_model, inputfile, candidates, queries, candrank_metric, candrank_thr, num_candidates)
alts_df = pd.merge(left=alts_df, right=ranked_candidates, how="left", left_on=query_column, right_on="query")
alts_df = alts_df.rename(columns={"wkcands":"cr_deezy_match_alts"})
alts_df = alts_df.drop(columns = ["query"])
print("Deezy match done!")
    # Merge the DeezyMatch altname candidates back onto the main dataframe, keyed by SubId:
dAlts = dict()
altn_candidates = []
for i, row in alts_df.iterrows():
if row["SubId"] in dAlts:
dAlts[row["SubId"]].update(row["cr_deezy_match_alts"])
else:
dAlts[row["SubId"]] = row["cr_deezy_match_alts"]
for i, row in df.iterrows():
if row["SubId"] in dAlts:
altn_candidates.append(dict(OrderedDict(dAlts[row["SubId"]])))
else:
altn_candidates.append(dict())
df["cr_deezy_match_alts"] = altn_candidates
# ---------------
# Store candidate selection
df.to_pickle("../processed/resolution/candranking_" + setting + ".pkl")
gazetteer_df = pd.read_csv("../processed/wikidata/gb_gazetteer.csv", header=0, index_col=0, low_memory=False)
# -------------------------------
# Feature selection:
# Extract features for all Quicks
# -------------------------------
setting = "allquicks"
candrank = "deezy_match"
features_file = "../processed/resolution/features_" + setting + "_" + candrank + ".tsv"
if not Path(features_file).is_file():
df = pd.read_pickle("../processed/resolution/candranking_" + setting + ".pkl")
exp_df = resolution_methods.feature_selection(candrank, df, gazetteer_df, wikipedia_entity_overall_dict, False)
exp_df.drop_duplicates(subset=['SubId','Candidate'], inplace=True)
exp_df.to_csv(features_file, sep="\t")
print(candrank + " " + setting + " done!")
features_all = pd.read_csv("../processed/resolution/features_allquicks_" + candrank + ".tsv",sep='\t', index_col=0)
# -------------------------------
# Place linking:
# Our method comb: Combine stations and places classifiers
# -------------------------------
use_cols_all = ["f_0", "f_1", "f_2", "f_3", "f_4", "f_5", "f_6", "f_7", "f_8"]
features_dev_df = pd.read_csv("../processed/resolution/features_deezy_match_dev1.tsv",sep='\t', index_col=0)
# Train railway stations classifier (exact setting):
dev_df = features_dev_df # development set feature vectors
df_exact = dev_df[dev_df["Exact"] == 1]
use_cols_stations = use_cols_all
# Train the classifier:
clf_stations = resolution_methods.train_classifier(df_exact, use_cols_all)
# Train places classifier (not exact setting):
dev_df = features_dev_df # development set feature vectors
df_inexact = dev_df[dev_df["Exact"] == 0]
use_cols_places = use_cols_all
# Train the classifier:
clf_places = resolution_methods.train_classifier(df_inexact, use_cols_all)
# Find optimal threshold for stations/places:
optimal_threshold = 0.0
keep_acc = 0.0
for th in np.arange(0, 1, 0.05):
th = round(th, 2)
results_dev_df = pd.read_csv("../resources/quicks/quicks_dev.tsv", sep="\t")
results_dev_df = resolution_methods.our_method_comb(features_dev_df, clf_stations, use_cols_stations, clf_places, use_cols_places, gazetteer_df, th, results_dev_df)
acc = eval_methods.topres_exactmetrics(results_dev_df, "our_method_comb", False)
if acc >= keep_acc:
optimal_threshold = th
keep_acc = acc
print(optimal_threshold, keep_acc)
features_test_df = | pd.read_csv("../processed/resolution/features_allquicks_deezy_match.tsv",sep='\t', index_col=0) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
## Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
# In[2]:
## Change to the directory containing the data files
import os
os.chdir("/content/drive/My Drive/Colab Notebooks")
# In[3]:
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import torchvision
import torch.optim as optim
from torchvision import transforms
from tqdm import *
import matplotlib.pyplot as plt
import copy
from torch.autograd.gradcheck import zero_gradients
import pandas as pd
import seaborn as sns
import re
import torch.utils.data as data
# In[4]:
## Read the train and test files
test = pd.read_csv("./titanic/test.csv")
train = pd.read_csv("./titanic/train.csv")
# In[5]:
train['Embarked'].value_counts()
# In[6]:
## Fill missing Embarked values in the test file with the mode
test['Embarked'].fillna(
test.Embarked.mode().values[0], inplace=True)
# In[7]:
## Fill missing Embarked values in the training file with the mode
train['Embarked'].fillna(
    train.Embarked.mode().values[0], inplace=True)
# In[8]:
train['Age'].describe()
# In[9]:
## Fill missing Age values in the test and training files
test['Age'].fillna(29.699118, inplace=True)
train['Age'].fillna(29.699118, inplace=True)
# In[10]:
## Fill missing Fare values in the test file
test['Fare'].fillna(32.204208, inplace=True)
# In[11]:
test.info()
# In[12]:
## One-hot encode the categorical columns in the training data
dummy_fields=['Pclass','Sex','Embarked']
for each in dummy_fields:
dummies= pd.get_dummies(train[each], prefix= each, drop_first=False)
train = | pd.concat([train, dummies], axis=1) | pandas.concat |
import sys
sys.path.insert(0, "../")
import xalpha as xa
from xalpha.exceptions import FundTypeError
import pandas as pd
import pytest
ioconf = {"save": True, "fetch": True, "path": "pytest", "form": "csv"}
ca = xa.cashinfo(interest=0.0002, start="2015-01-01")
zzhb = xa.indexinfo("0000827", **ioconf)
hs300 = xa.fundinfo("000311")
zogqb = xa.mfundinfo("001211", **ioconf)
def test_fundreport():
# somehow fragile, to be checked
r = xa.FundReport("000827")
assert r.get_report()[0][:2] == "广发"
assert r.analyse_report(1)["bank"][:2] == "兴业"
assert r.show_report_list(type_=0)[0]["FUNDCODE"] == "000827"
assert r.get_report(id_="AN202003171376532533")[0][:2] == "广发"
def test_cash():
assert (
round(ca.price[ca.price["date"] == "2018-01-02"].iloc[0].netvalue, 4) == 1.2453
)
assert ca.code == "mf"
date, value, share = ca.shuhui(
300, "2018-01-01", [[pd.Timestamp("2017-01-03"), 200]]
)
assert date == pd.Timestamp("2018-01-02")
assert value == 249.06
assert share == -200
ca.bcmkset(ca)
assert ca.alpha() == 0
assert round(ca.total_annualized_returns("2018-01-01"), 4) == 0.0757
def test_index():
assert (
round(zzhb.price[zzhb.price["date"] == "2012-02-01"].iloc[0].totvalue, 3)
== 961.406
)
assert (
round(zzhb.price[zzhb.price["date"] == "2015-02-02"].iloc[0].netvalue, 2)
== 1.62
)
assert zzhb.name == "中证环保"
assert zzhb.shengou(100, "2018-01-02")[2] == 55.24
assert zzhb.shuhui(100, "2016-01-01", [[pd.Timestamp("2017-01-03"), 200]])[2] == 0
zzhb.info()
zzhb.ma(window=10)
zzhb.md()
zzhb.ema(col="totvalue")
zzhb.macd()
zzhb.mtm()
zzhb.roc()
zzhb.boll()
zzhb.bias()
zzhb.rsi()
zzhb.kdj()
zzhb.wnr()
zzhb.dma(col="totvalue")
zzhb.bbi()
zzhb.trix(col="totvalue")
zzhb.psy()
row = zzhb.price[zzhb.price["date"] == "2018-08-01"].iloc[0]
assert round(row["MD5"], 3) == 0.012
assert round(row["MA10"], 3) == 1.361
assert round(row["MACD_OSC_12_26"], 4) == 0.0076
assert round(row["EMA5"], 1) == 1318.8
assert round(row["MTM10"], 4) == 0.0078
assert round(row["ROC10"], 4) == 0.0058
assert round(row["BOLL_UPPER"], 3) == 1.398
assert round(row["BIAS10"], 3) == -0.012
assert round(row["RSI14"], 3) == 0.411
assert round(row["KDJ_J"], 4) == 0.0456
assert round(row["WNR14"], 2) == 0.27
assert round(row["AMA"], 2) == -87.71
assert round(row["BBI"], 3) == 1.356
assert round(row["TRIX10"], 4) == 0.0005
assert round(row["PSYMA12"], 2) == 0.47
zzhb.v_techindex(col=["TRIX10"])
def test_fund():
assert hs300.round_label == 1
assert hs300.name == "景顺长城沪深300指数增强" ## "景顺长城沪深300增强", 蜜汁改名。。。
assert hs300.fenhongdate[1] == pd.Timestamp("2017-08-15")
assert hs300.get_holdings(2019, 4).iloc[0]["name"] == "中国平安"
assert (
float(hs300.special[hs300.special["date"] == "2017-08-04"]["comment"]) == 0.19
)
hs300.rate = 0.12
hs300.segment = [[0, 7], [7, 365], [365, 730], [730]]
with pytest.raises(Exception) as excinfo:
hs300.shuhui(
100,
"2014-01-04",
[[pd.Timestamp("2014-01-03"), 200], [pd.Timestamp("2017-01-03"), 200]],
)
assert str(excinfo.value) == "One cannot move share before the lastest operation"
assert (
hs300.shuhui(
320,
"2018-01-01",
[[pd.Timestamp("2011-01-03"), 200], [pd.Timestamp("2017-12-29"), 200]],
)[1]
== 685.72
)
assert hs300.shengou(200, "2018-07-20")[2] == 105.24
with pytest.raises(FundTypeError) as excinfo:
xa.mfundinfo("000311")
assert str(excinfo.value) == "This code seems to be a fund, use fundinfo instead"
hs300.info()
dax = xa.fundinfo("510030") # test empty shuhuifei and shengoufei case
assert dax.feeinfo == ["小于7天", "1.50%", "大于等于7天", "0.00%"]
def test_mfundinfo():
zogqb.bcmkset(xa.cashinfo())
assert round(zogqb.total_annualized_returns("2018-08-01"), 3) == 0.036
with pytest.raises(FundTypeError) as excinfo:
xa.fundinfo("001211")
assert str(excinfo.value) == "This code seems to be a mfund, use mfundinfo instead"
def test_evaluate():
comp = xa.evaluate(ca, zzhb, hs300)
comp.v_netvalue(end="2018-08-01")
comp.v_correlation()
comp2 = xa.evaluate(ca, zzhb, start="2018-01-01")
assert round(comp2.correlation_table("2018-08-01").iloc[0, 1], 3) == 0.064
def delete_csvlines(path, lines=5):
df = | pd.read_csv(path) | pandas.read_csv |
"""
Experimental manager based on storing a collection of 1D arrays
"""
from __future__ import annotations
from typing import (
TYPE_CHECKING,
Any,
Callable,
TypeVar,
)
import numpy as np
from pandas._libs import (
NaT,
lib,
)
from pandas._typing import (
ArrayLike,
Hashable,
)
from pandas.util._validators import validate_bool_kwarg
from pandas.core.dtypes.cast import (
astype_array_safe,
infer_dtype_from_scalar,
soft_convert_objects,
)
from pandas.core.dtypes.common import (
ensure_int64,
is_datetime64_ns_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_numeric_dtype,
is_object_dtype,
is_timedelta64_ns_dtype,
)
from pandas.core.dtypes.dtypes import (
ExtensionDtype,
PandasDtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCPandasArray,
ABCSeries,
)
from pandas.core.dtypes.inference import is_inferred_bool_dtype
from pandas.core.dtypes.missing import (
array_equals,
isna,
)
import pandas.core.algorithms as algos
from pandas.core.array_algos.quantile import quantile_compat
from pandas.core.array_algos.take import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
PandasArray,
TimedeltaArray,
)
from pandas.core.arrays.sparse import SparseDtype
from pandas.core.construction import (
ensure_wrapped_if_datetimelike,
extract_array,
sanitize_array,
)
from pandas.core.indexers import (
maybe_convert_indices,
validate_indices,
)
from pandas.core.indexes.api import (
Index,
ensure_index,
)
from pandas.core.internals.base import (
DataManager,
SingleDataManager,
interleaved_dtype,
)
from pandas.core.internals.blocks import (
ensure_block_shape,
external_values,
new_block,
to_native_types,
)
if TYPE_CHECKING:
from pandas import Float64Index
T = TypeVar("T", bound="ArrayManager")
class ArrayManager(DataManager):
"""
Core internal data structure to implement DataFrame and Series.
Alternative to the BlockManager, storing a list of 1D arrays instead of
Blocks.
This is *not* a public API class
Parameters
----------
arrays : Sequence of arrays
axes : Sequence of Index
verify_integrity : bool, default True
"""
__slots__ = [
"_axes", # private attribute, because 'axes' has different order, see below
"arrays",
]
arrays: list[np.ndarray | ExtensionArray]
_axes: list[Index]
def __init__(
self,
arrays: list[np.ndarray | ExtensionArray],
axes: list[Index],
verify_integrity: bool = True,
):
        # Note: we are storing the axes in "_axes" in the (row, columns) order,
        # which is the reverse of how they are stored in the BlockManager
self._axes = axes
self.arrays = arrays
if verify_integrity:
self._axes = [ensure_index(ax) for ax in axes]
self.arrays = [ | ensure_wrapped_if_datetimelike(arr) | pandas.core.construction.ensure_wrapped_if_datetimelike |
# -*- coding: utf-8 -*-
import numpy as np
from numpy.random import randint
from textwrap import dedent
import pytest
import pandas as pd
from pandas import DataFrame
from pandas import read_clipboard
from pandas import get_option
from pandas.util import testing as tm
from pandas.util.testing import makeCustomDataframe as mkdf
from pandas.io.clipboard.exceptions import PyperclipException
from pandas.io.clipboard import clipboard_set
try:
DataFrame({'A': [1, 2]}).to_clipboard()
_DEPS_INSTALLED = 1
except (PyperclipException, RuntimeError):
_DEPS_INSTALLED = 0
@pytest.mark.single
@pytest.mark.skipif(not _DEPS_INSTALLED,
reason="clipboard primitives not installed")
class TestClipboard(object):
@classmethod
def setup_class(cls):
cls.data = {}
cls.data['string'] = mkdf(5, 3, c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['int'] = mkdf(5, 3, data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['float'] = mkdf(5, 3,
data_gen_f=lambda r, c: float(r) + 0.01,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
cls.data['mixed'] = DataFrame({'a': np.arange(1.0, 6.0) + 0.01,
'b': np.arange(1, 6),
'c': list('abcde')})
# Test columns exceeding "max_colwidth" (GH8305)
_cw = get_option('display.max_colwidth') + 1
cls.data['colwidth'] = mkdf(5, 3, data_gen_f=lambda *args: 'x' * _cw,
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test GH-5346
max_rows = get_option('display.max_rows')
cls.data['longdf'] = mkdf(max_rows + 1, 3,
data_gen_f=lambda *args: randint(2),
c_idx_type='s', r_idx_type='i',
c_idx_names=[None], r_idx_names=[None])
# Test for non-ascii text: GH9263
cls.data['nonascii'] = pd.DataFrame({'en': 'in English'.split(),
'es': 'en español'.split()})
# unicode round trip test for GH 13747, GH 12529
cls.data['utf8'] = pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
'b': ['øπ∆˚¬', 'œ∑´®']})
cls.data_types = list(cls.data.keys())
@classmethod
def teardown_class(cls):
del cls.data_types, cls.data
def check_round_trip_frame(self, data_type, excel=None, sep=None,
encoding=None):
data = self.data[data_type]
data.to_clipboard(excel=excel, sep=sep, encoding=encoding)
if sep is not None:
result = read_clipboard(sep=sep, index_col=0, encoding=encoding)
else:
result = read_clipboard(encoding=encoding)
tm.assert_frame_equal(data, result, check_dtype=False)
def test_round_trip_frame_sep(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, sep=',')
self.check_round_trip_frame(dt, sep=r'\s+')
self.check_round_trip_frame(dt, sep='|')
def test_round_trip_frame_string(self):
for dt in self.data_types:
self.check_round_trip_frame(dt, excel=False)
def test_round_trip_frame(self):
for dt in self.data_types:
self.check_round_trip_frame(dt)
def test_read_clipboard_infer_excel(self):
# gh-19010: avoid warnings
clip_kwargs = dict(engine="python")
text = dedent("""
John James	Charlie Mingus
1	2
4	Harry Carney
""".strip())
clipboard_set(text)
df = pd.read_clipboard(**clip_kwargs)
# excel data is parsed correctly
assert df.iloc[1][1] == 'Harry Carney'
# having diff tab counts doesn't trigger it
text = dedent("""
a\t b
1 2
3 4
""".strip())
| clipboard_set(text) | pandas.io.clipboard.clipboard_set |
import time
import numpy as np
import pandas as pd
from pandarallel import pandarallel
pandarallel.initialize()
import modin.pandas as mpd
def func(x):
return x**3
data=np.random.rand(100000,1000)
df= | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
from datetime import datetime
from IPython.display import IFrame,clear_output
# for PDF reading
import textract
import re
import sys
import docx
from difflib import SequenceMatcher
#######################################################################################
def similar(a, b):
return SequenceMatcher(None, a, b).ratio()
#######################################################################################
def dms_to_dd(x,as_string=True):
d,m,s = x.split()
result = abs(float(d)) + float(m)/60. + float(s)/3600.
if float(d) < 0:
result = -result
return result
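# e.g. dms_to_dd("-71 30 0") -> -71.5  (assumes one whitespace-separated
# "degrees minutes seconds" string; the sign of the degrees field sets the sign)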
#######################################################################################
def convert_state(state):
return {'New Hampshire':'NH','Maine':'ME',
'Massachusetts':'MA','New Hampshire/Maine':'NH'}[state]
#######################################################################################
def doy_to_date(x, year=2008, jan1=1):
# jan1 is Day 1, usually
#if np.isnan(x):
# return np.nan
#print(x)
result = ( pd.Period(year = year-1, month=12, day=31, freq='D') +
pd.to_timedelta(x+(1-jan1), unit='days') )
return result.strftime('%Y-%m-%d')
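# e.g. doy_to_date(45, year=2008) -> '2008-02-14'  (with the default jan1=1,
# day 1 is 1 January of `year`)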
#######################################################################################
def date_conversion(x, year=None, dateformat='%d-%m-%y'):
# year is Fall Year for date
# default interpretations:
# aaaa-bb-cc : Year/Month/Day
# PROBLEMATIC:
# aa-bb-cc : Month/Day/Year - or Day/Month/Year if aa>12
# Returns string
# Unknown / missing
if np.any([True for i in ['(earliest/latest)', '-999','no data','no response',
'unknown', 'missing', 'unknown', 'unkown','none',
# the following are added for postcard data
# 2021-02-07
'died', 'no res','skip','omit','card not received',
'card not returned', 'moved','nursing home','delete']
if i in str(x).lower()]):
return '-999'
elif (str(x).strip()=='') | (str(x).strip()=='?') | (str(x).strip()=='-?-'):
return '-999'
elif x in ['0',0]:
return '0'
xx = str(x)
if ('+1' in xx) | ('+2' in xx) | ('+3' in xx):
xx = xx.split('+')[0].strip()
outofbounds = False
if ((year < 1678) | (year > 2262)) & (year is not None):
outofbounds = True
if ((len(xx)==8) | ((len(xx)==10))) & ('-' not in xx) & ('/' not in xx):
#print xx, year
if (xx[-2]=='.') | ((len(xx)==8) & (xx.isdigit())):
xx = '{}-{}-{}'.format(xx[:4],xx[4:6],xx[6:8]) # year, month, day
#print xx, year
try:
if (len(xx)==8 ) & ('-' in xx):
xdt = pd.to_datetime(xx, format=dateformat)
else:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
if (len(xx)==8) & ('-' in xx):
# mostly a problem if 00-02-28 (i.e., thinking 00 is a month)
if (xx[2]=='-') & (xx[5]=='-'):
xx = '19'+xx
else:
xx = xx+', {}'.format(year)
elif (len(xx)==10)& ('-' in xx) & outofbounds:
if len(xx.split('-')[0]) >2:
y,m, d = (int(i) for i in xx.split('-'))
else:
d,m,y = (int(i) for i in xx.split('-'))
# latest thaw in August; earliest freeze in August
if ((m<=8) & (y== year+1)) | ((m>=8) & (y==year)):
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
else:
print ('+++++PROBLEM+++++')
print(xx)
xx = xx+', {}'.format(year)
else:
xx = xx+', {}'.format(year)
try:
xdt = pd.to_datetime(xx)
d, m, y = xdt.day, xdt.month, xdt.year
except ValueError as e:
print ('**************')
print (e)
print (' {} can not be converted to YYYY/MM/DD'.format(str(x)))
print ('**************\n')
return '-999'
if year is not None:
# print type(y), type(year)
# latest thaw in September!,
# latest thaw in August; earliest freeze in August
if ((m < 8) & (y != (year+1))) | ((m>9) & (y!=year)) | (
((m==8) | (m==9)) & (y!=year) & (y!=(year+1) ) ):
if m<=8:
yearnew = year+1
else:
yearnew = year+0
print ('==================')
print ('Wrong Year in table')
print ('\tData from table: {} (start_year is {})'.format(xx, year))
print ('\t\tYMD: {}-{:02d}-{:02d}'.format(y,m,d))
print (' Recorded (or added) ice date year {} should be {}\n'.format(y, yearnew))
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
# OTHERWISE TRY FIXING IT BY INVERTING DATE
elif (len(xx)==8) & ('-' in xx):
#print xx
xx = '-'.join(xx.split('-')[::-1])
#print xx
# assuming default as before but switching backwards
xdt = pd.to_datetime(xx,format=dateformat)
d, m, y = xdt.day, xdt.month, xdt.year
if ((m <= 8) & (y != year+1)) | ((m>8) & (y!=year)):
if m<=8:
yearnew = year+1
else:
yearnew = year
if (np.abs(int(y) - int(yearnew)) % 100) == 0:
print ('\tFORCING YEAR TO NEW VALUE (wrong century)')
y = yearnew
else:
print (x, xx)
print ('\tSTILL A PROBLEM. Recorded year {} should be {}'.format(y, yearnew))
else:
print ('Problem fixed')
else:
print ('\tFORCING ICE YEAR TO NEW VALUE (assuming typo)')
y = yearnew
print (' {}-{}, new corrected ice date {:}-{:02d}-{:02d}'.format(year, year+1,y,m,d))
try:
##return '{:02d}-{:02d}-{:04d}'.format(m,d,y)
return '{:04d}-{:02d}-{:02d}'.format(y,m,d)
except ValueError as e:
print ('*****FINAL*****')
print (e)
print ('**************')
print ('{} can not be converted to YYYY/MM/DD'.format(str(x)))
return '-999'
#######################################################################################
######## READ IN FILES ################################################################
#######################################################################################
def read_all_files(filename_dict, readin_dict , verbose=False,logfile=None, record_contributor=True):
"""
    INPUT: filename_dict is a dictionary of file names, keyed by file type
           readin_dict is a dictionary of corrections, column renames, etc., keyed by filename
    OUTPUT: All files merged into a Pandas DataFrame
"""
default_ext = {
'txt':{'delimiter':'\t'},
'tab':{'delimiter':'\t'}
}
dfresult = pd.DataFrame()
# run through the files
for file_ext in filename_dict.keys():
for f in filename_dict[file_ext]:
default_values = {'header':0, 'delimiter':None, 'sheetname':False,
'lakename':None, 'city':None, 'state':None,'contributor':None, 'reorient':False,
'column_rename':None,'ncolumns':None, 'split':False,
'multi':False, 'index_col':None}
if file_ext in default_ext:
for key, value in default_ext[file_ext].items():
default_values[key] = value
if logfile is not None:
logfile.write('===========\nReading in {}\n'.format(f))
if (np.array([i in f for i in readin_dict.keys()])).any():
lakeid = [i for i in readin_dict.keys() if i in f]
if len(lakeid) > 1:
print ('WARNING. There are too many similarly named readin_dict items. Could be a problem.')
if logfile is not None:
logfile.write('\nWARNING. There are too many similarly named readin_dict items.\n')
break
foo = readin_dict[lakeid[0]]
for key,value in foo.items():
default_values[key] = value
#if 'Updated Data 2019.5' in f:
# print(f)
df = read_ts(f,delimiter=default_values['delimiter'],
sheetname=default_values['sheetname'],
header=default_values['header'],
ncolumns=default_values['ncolumns'],
index_col=default_values['index_col'],
logfile = logfile,record_contributor=record_contributor)
if verbose:
if len(df)>0:
sys.stdout.write('\r[ {:150s} ]\r'.format(f))
#sys.stdout.flush()
else:
sys.stdout.write('Skipping {}\n'.format(f))
#sys.stdout.flush()
# specific case for Maine lakes
if default_values['reorient']:
if logfile is not None:
logfile.write('\tReorienting table.\n')
contributor = df.Contributor.values[0]
#df = df.set_index(df.columns[0])
#print('Maine drop')
#display(df.head())
#print(df.columns)
df = df.drop('Contributor',axis=1,level=0).unstack().reset_index()
#print('END Maine drop')
df['Contributor'] = contributor
if default_values['column_rename'] is not None:
if logfile is not None:
logfile.write('\tRenaming columns.\n')
df = df.rename(default_values['column_rename'],axis=1)
if default_values['lakename'] is not None:
if logfile is not None:
logfile.write('\tSetting lakename to {}\n'.format(default_values['lakename']))
df['lake'] = default_values['lakename']
if default_values['city'] is not None:
if logfile is not None:
logfile.write('\tSetting city to {}\n'.format(default_values['city']))
df['city'] = default_values['city']
if default_values['state'] is not None:
if logfile is not None:
logfile.write('\tSetting state to {}\n'.format(default_values['state']))
df['state'] = default_values['state']
if default_values['split']:
# rearrange years/seasons
if logfile is not None:
logfile.write('\tRearranging years/seasons\n')
df = sort_by_season(df)
if default_values['multi']:
if logfile is not None:
logfile.write('\tSorting by events.\n')
df = sort_by_events(df)
#if default_values['lakename'] is not None:
# df['lake'] = default_values['lakename']
if default_values['contributor'] is not None:
if logfile is not None:
logfile.write('\tAssigning contributor: {}\n'.format(default_values['contributor']))
df['Contributor'] = default_values['contributor']
if 'Updated Data' in f:
updated_year = f.split('Updated Data')[1].split('/')[0].strip()
if updated_year == '2018':
updated_year = 2018.5
elif updated_year == '':
updated_year = 2018.0
else:
updated_year = float(updated_year)
df['Updated Year'] = updated_year
"""
if 'Updated Data 2020.5' in f:
df['Updated Year'] = 2020.5
elif 'Updated Data 2020' in f:
df['Updated Year'] = 2020.0
elif 'Updated Data 2019.5' in f:
df['Updated Year'] = 2019.5
elif 'Updated Data 2018' in f:
df['Updated Year'] = 2018.5
elif 'Updated Data 2019' in f:
df['Updated Year'] = 2019.0
elif 'Updated Data' in f:
df['Updated Year'] = 2018.0
"""
df['FileName'] = f
try:
dfresult = dfresult.append(df,ignore_index=True, sort=False)
except:
display(df)
print(kasdf)
return dfresult
#######################################################################################
def sort_by_events(df):
# Move multi-freeze thaw years into separate rows
iceon1col = [c for c in ['Freeze date 1',] if c in df.columns][0]
iceon2col = [c for c in ['Freeze date 2',] if c in df.columns][0]
iceoff1col = [c for c in ['Thaw date 1',] if c in df.columns][0]
iceoff2col = [c for c in ['Thaw date 2',] if c in df.columns][0]
ind = ((~df[iceon1col].isnull() | ~df[iceoff1col].isnull()) &
(~df[iceon2col].isnull() | ~df[iceoff2col].isnull()))
# .copy
dfoo = df[ind].copy()
dfoo[iceon1col] = dfoo[iceon2col]
dfoo[iceoff1col] = dfoo[iceoff2col]
#print('sort by events Drop')
df = df.append(dfoo,ignore_index=True,sort=False).drop([iceoff2col,iceon2col],axis=1)
#print('END sort by events Drop')
# display(df)
return df
#######################################################################################
def sort_by_season(df):
#print (df.columns)
#display(df)
yearcolumn = [c for c in ['Year','year'] if c in df.columns][0]
iceoncolumn = [c for c in ['datefirstice','IceOnDOY','Ice On','Ice-On','Ice on'] if c in df.columns][0]
iceoffcolumn = [c for c in ['datelastice','IceOffDOY','Ice Off','Ice-Off','Ice off'] if c in df.columns][0]
# print df.columns
lakecolumn = [c for c in ['lakeid','lake'] if c in df.columns][0]
dropcolumns = [iceoncolumn, iceoffcolumn]
dfresult = pd.DataFrame()
for name, group in df.groupby(lakecolumn):
iceoff = group[iceoffcolumn].tolist() + [np.nan]
iceon = [np.nan] + group[iceoncolumn].tolist()
try:
years = [float(group[yearcolumn].astype(str).min()) - 1] + group[yearcolumn].tolist()
except:
print(yearcolumn)
display(group[yearcolumn])
display(df)
#print (kmtpasdf)
dfoo = pd.DataFrame({lakecolumn:name,
'Fall Year': years,
iceoncolumn:iceon,
iceoffcolumn:iceoff})
dfresult = dfresult.append(dfoo, ignore_index=True,sort=False)
#print('sort by season Drop')
dfresult = dfresult.merge(df.drop(dropcolumns,axis=1), left_on=[lakecolumn,'Fall Year'],
right_on=[lakecolumn,yearcolumn], how='left')
#print('END sort by season Drop')
for c in dfresult.columns:
## if c not in [lakecolumn, yearcolumn,'Fall Year']+dropcolumns:
if c in ['Contributor','Clerk']:
## print 'backfilling', c
dfresult[c] = dfresult[c].fillna(method='bfill')
## clean up, remove no result years OK
# print dfresult.shape
ind = dfresult[iceoncolumn].isnull() & dfresult[iceoffcolumn].isnull()
## display(dfresult[ind])
#.copy
dfresult = dfresult[~ind].copy()
#print dfresult.shape
# remove duplicates
#display(dfresult[dfresult.duplicated(subset=[lakecolumn,yearcolumn,
# iceoncolumn,iceoffcolumn],keep=False)])
dfresult = dfresult.drop_duplicates(subset=[lakecolumn,yearcolumn,
iceoncolumn,iceoffcolumn])
#print dfresult.shape
if 'Duration' in dfresult.columns:
#display(dfresult.tail(6))
#display(df.tail(6))
dfresult.loc[dfresult.index[:-1],'Duration'] = df.loc[df.index[:],'Duration'].values
# last duration should be removed
dfresult.loc[dfresult.index[-1],'Duration'] = np.nan
if dfresult.lake.values[0]!='Mirror Lake':
print(dfresult.columns)
display(dfresult.head())
print(brokend)
return dfresult
#######################################################################################
#######################################################################################
#######################################################################################
def read_ts(filename, header=0, sheetname=False, index_col=None, logfile=None,delimiter=None,ncolumns=None,
record_contributor=True):
""" ncolumns : number of columns to keep, starting with first
"""
filetype = filename.split('.')[-1].lower()
if filetype == 'pdf':
tsdf = read_pdf(filename,logfile=logfile)
#elif filetype == 'jpg':
# tsdf = read_jpg(filename)
elif filetype in ['csv','txt','tab']:
tsdf = read_csv(filename, delimiter=delimiter, header=header,record_contributor=record_contributor)
#elif filetype in ['txt']:
# tsdf = read_csv(filename, delimiter=delimiter, header=header)
elif filetype in ['xls','xlsx']:
tsdf = read_excel(filename, sheetname=sheetname, logfile=logfile, index_col=index_col,header=header,ncolumns=ncolumns,
record_contributor=record_contributor)
elif filetype in ['doc','docx']:
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
if logfile is not None:
for p in doc.paragraphs:
logfile.write('\t{}\n'.format(p.text))
tsdf = pd.DataFrame()
"""
if 'Updated Data 2019.5' in filename:
doc = docx.Document(filename)
print ('=====================')
print (filename)
print ('=====================')
for p in doc.paragraphs:
print (p.text)
"""
elif filetype in ['jpg']:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
else:
if logfile is not None:
logfile.write('\tSKIPPING\n')
tsdf = pd.DataFrame()
return tsdf
#######################################################################################
def read_csv(filename, delimiter=None, encoding='utf-8', header=0, record_contributor=True):
try:
df = pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='python',header=header)
if df.shape[1]==1:
print('{}\n\tToo few columns. Trying a different method.'.format(filename))
df = pd.read_csv(filename, delimiter=delimiter, encoding='utf-8',engine='c',header=header)
print('New shape:',df.shape)
except UnicodeDecodeError as e:
df = | pd.read_csv(filename, delimiter=delimiter, encoding='latin1',engine='python',header=header) | pandas.read_csv |
"""
Core implementation of :mod:`sklearndf.transformation.wrapper`
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Generic, List, Optional, TypeVar, Union
import numpy as np
import pandas as pd
from sklearn.base import TransformerMixin
from sklearn.compose import ColumnTransformer
from sklearn.impute import MissingIndicator, SimpleImputer
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.manifold import Isomap
from sklearn.preprocessing import KBinsDiscretizer, OneHotEncoder, PolynomialFeatures
from pytools.api import AllTracker
from ... import TransformerDF
from ...wrapper import TransformerWrapperDF
log = logging.getLogger(__name__)
__all__ = [
"BaseDimensionalityReductionWrapperDF",
"BaseMultipleInputsPerOutputTransformerWrapperDF",
"ColumnPreservingTransformerWrapperDF",
"ColumnSubsetTransformerWrapperDF",
"ComponentsDimensionalityReductionWrapperDF",
"FeatureSelectionWrapperDF",
"NComponentsDimensionalityReductionWrapperDF",
"NumpyTransformerWrapperDF",
"ColumnTransformerWrapperDF",
"IsomapWrapperDF",
"ImputerWrapperDF",
"MissingIndicatorWrapperDF",
"AdditiveChi2SamplerWrapperDF",
"KBinsDiscretizerWrapperDF",
"PolynomialFeaturesWrapperDF",
"OneHotEncoderWrapperDF",
]
#
# type variables
#
T_Transformer = TypeVar("T_Transformer", bound=TransformerMixin)
# T_Imputer is needed because sklearn's _BaseImputer only exists from v0.22 onwards.
# Once we drop support for sklearn 0.21, _BaseImputer can be used instead.
# The following TypeVar helps to annotate availability of "add_indicator" and
# "missing_values" attributes on an imputer instance for ImputerWrapperDF below
# noinspection PyProtectedMember
from sklearn.impute._iterative import IterativeImputer
T_Imputer = TypeVar("T_Imputer", SimpleImputer, IterativeImputer)
#
# Ensure all symbols introduced below are included in __all__
#
__tracker = AllTracker(globals())
#
# wrapper classes for transformers
#
class NumpyTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that only accept numpy arrays.
Converts data frames to numpy arrays before handing off to the native transformer.
Implementations must define :meth:`_get_features_original`.
"""
# noinspection PyPep8Naming
def _adjust_X_type_for_delegate(
self, X: pd.DataFrame, *, to_numpy: Optional[bool] = None
) -> np.ndarray:
assert to_numpy is not False, "X must be converted to a numpy array"
return super()._adjust_X_type_for_delegate(X, to_numpy=True)
def _adjust_y_type_for_delegate(
self,
y: Optional[Union[pd.Series, pd.DataFrame]],
*,
to_numpy: Optional[bool] = None,
) -> Optional[np.ndarray]:
assert to_numpy is not False, "y must be converted to a numpy array"
return super()._adjust_y_type_for_delegate(y, to_numpy=True)
class ColumnSubsetTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer], metaclass=ABCMeta
):
"""
Abstract base class of DF wrappers for transformers that do not change column names,
but that may remove one or more columns.
Implementations must define :meth:`_get_features_out`.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# return column labels for arrays returned by the fitted transformer.
pass
def _get_features_original(self) -> pd.Series:
# return the series with output columns in index and output columns as values
features_out = self._get_features_out()
return pd.Series(index=features_out, data=features_out.values)
class ColumnPreservingTransformerWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
):
"""
DF wrapper for transformers whose output columns match the input columns.
The native transformer must not add, remove, reorder, or rename any of the input
columns.
"""
def _get_features_out(self) -> pd.Index:
return self.feature_names_in_
class BaseMultipleInputsPerOutputTransformerWrapperDF(
TransformerWrapperDF[T_Transformer], Generic[T_Transformer]
):
"""
DF wrapper for transformers mapping multiple input columns to individual output
columns.
"""
@abstractmethod
def _get_features_out(self) -> pd.Index:
# make this method abstract to ensure subclasses override the default
# behaviour, which usually relies on method ``_get_features_original``
pass
def _get_features_original(self) -> pd.Series:
raise NotImplementedError(
f"{type(self.native_estimator).__name__} transformers map multiple "
"inputs to individual output columns; current sklearndf implementation "
"only supports many-to-1 mappings from output columns to input columns"
)
class BaseDimensionalityReductionWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers.
The native transformer is considered to map all input columns to each output column.
"""
@property
@abstractmethod
def _n_components_(self) -> int:
pass
def _get_features_out(self) -> pd.Index:
return pd.Index([f"x_{i}" for i in range(self._n_components_)])
class NComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
:attr:`n_components` attribute.
Subclasses must implement :meth:`_get_features_original`.
"""
_ATTR_N_COMPONENTS = "n_components"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_N_COMPONENTS)
@property
def _n_components_(self) -> int:
return getattr(self.native_estimator, self._ATTR_N_COMPONENTS)
class ComponentsDimensionalityReductionWrapperDF(
BaseDimensionalityReductionWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
Base class of DF wrappers for dimensionality-reducing transformers supporting the
``components_`` attribute.
The native transformer must provide a ``components_`` attribute once fitted,
as an array of shape (n_components, n_features).
"""
_ATTR_COMPONENTS = "components_"
# noinspection PyPep8Naming
def _post_fit(
self, X: pd.DataFrame, y: Optional[pd.Series] = None, **fit_params
) -> None:
# noinspection PyProtectedMember
super()._post_fit(X, y, **fit_params)
self._validate_delegate_attribute(attribute_name=self._ATTR_COMPONENTS)
@property
def _n_components_(self) -> int:
return len(getattr(self.native_estimator, self._ATTR_COMPONENTS))
class FeatureSelectionWrapperDF(
ColumnSubsetTransformerWrapperDF[T_Transformer],
Generic[T_Transformer],
metaclass=ABCMeta,
):
"""
DF wrapper for feature selection transformers.
The native transformer must implement a ``get_support`` method, providing the
indices of the selected input columns
"""
_ATTR_GET_SUPPORT = "get_support"
def _validate_delegate_estimator(self) -> None:
self._validate_delegate_attribute(attribute_name=self._ATTR_GET_SUPPORT)
def _get_features_out(self) -> pd.Index:
get_support = getattr(self.native_estimator, self._ATTR_GET_SUPPORT)
return self.feature_names_in_[get_support()]
class ColumnTransformerWrapperDF(
TransformerWrapperDF[ColumnTransformer], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.compose.ColumnTransformer`.
Requires all transformers passed as the ``transformers`` parameter to implement
:class:`.TransformerDF`.
"""
__DROP = "drop"
__PASSTHROUGH = "passthrough"
__SPECIAL_TRANSFORMERS = (__DROP, __PASSTHROUGH)
def _validate_delegate_estimator(self) -> None:
column_transformer: ColumnTransformer = self.native_estimator
if (
column_transformer.remainder
not in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
):
raise ValueError(
f"unsupported value for arg remainder: ({column_transformer.remainder})"
)
non_compliant_transformers: List[str] = [
type(transformer).__name__
for _, transformer, _ in column_transformer.transformers
if not (
isinstance(transformer, TransformerDF)
or transformer in ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS
)
]
if non_compliant_transformers:
from .. import ColumnTransformerDF
raise ValueError(
f"{ColumnTransformerDF.__name__} only accepts instances of "
f"{TransformerDF.__name__} or special values "
f'"{" and ".join(ColumnTransformerWrapperDF.__SPECIAL_TRANSFORMERS)}" '
"as valid transformers, but "
f'also got: {", ".join(non_compliant_transformers)}'
)
def _get_features_original(self) -> pd.Series:
"""
Return the series mapping output column names to original columns names.
:return: the series with index the column names of the output dataframe and
values the corresponding input column names.
"""
def _features_original(df_transformer: TransformerDF, columns: List[Any]):
if df_transformer == ColumnTransformerWrapperDF.__PASSTHROUGH:
                # we may get positional indices for columns selected by the
                # 'passthrough' transformer; in that case we need to look up
                # the associated column names
if all(isinstance(column, int) for column in columns):
column_names = self._get_features_in()[columns]
else:
column_names = columns
return pd.Series(index=column_names, data=column_names)
else:
return df_transformer.feature_names_original_
return pd.concat(
[
_features_original(df_transformer, columns)
for _, df_transformer, columns in self.native_estimator.transformers_
if (
len(columns) > 0
and df_transformer != ColumnTransformerWrapperDF.__DROP
)
]
)
class ImputerWrapperDF(TransformerWrapperDF[T_Imputer], metaclass=ABCMeta):
"""
DF wrapper for imputation transformers, e.g., :class:`sklearn.impute.SimpleImputer`.
"""
def _get_features_original(self) -> pd.Series:
# get the columns that were dropped during imputation
delegate_estimator = self.native_estimator
nan_mask = []
def _nan_mask_from_statistics(stats: np.array):
if issubclass(stats.dtype.type, float):
na_mask = np.isnan(stats)
else:
na_mask = [
x is None or (isinstance(x, float) and np.isnan(x)) for x in stats
]
return na_mask
        # implementation for e.g. SimpleImputer
if hasattr(delegate_estimator, "statistics_"):
nan_mask = _nan_mask_from_statistics(stats=delegate_estimator.statistics_)
# implementation for IterativeImputer
elif hasattr(delegate_estimator, "initial_imputer_"):
initial_imputer: SimpleImputer = delegate_estimator.initial_imputer_
nan_mask = _nan_mask_from_statistics(stats=initial_imputer.statistics_)
        # implementation for e.g. KNNImputer
elif hasattr(delegate_estimator, "_mask_fit_X"):
# noinspection PyProtectedMember
nan_mask = np.all(delegate_estimator._mask_fit_X, axis=0)
# the imputed columns are all ingoing columns, except the ones that were dropped
imputed_columns = self.feature_names_in_.delete(np.argwhere(nan_mask).tolist())
features_original = pd.Series(
index=imputed_columns, data=imputed_columns.values
)
# if the add_indicator flag is set, we will get additional "missing" columns
if delegate_estimator.add_indicator:
from .. import MissingIndicatorDF
missing_indicator = MissingIndicatorDF.from_fitted(
estimator=delegate_estimator.indicator_,
features_in=self.feature_names_in_,
n_outputs=self.n_outputs_,
)
return features_original.append(missing_indicator.feature_names_original_)
else:
return features_original
class MissingIndicatorWrapperDF(
TransformerWrapperDF[MissingIndicator], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.impute.MissingIndicator`.
"""
def _get_features_original(self) -> pd.Series:
features_original: np.ndarray = self.feature_names_in_[
self.native_estimator.features_
].values
features_out = pd.Index([f"{name}__missing" for name in features_original])
return pd.Series(index=features_out, data=features_original)
class IsomapWrapperDF(BaseDimensionalityReductionWrapperDF[Isomap], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.manifold.Isomap`.
"""
@property
def _n_components_(self) -> int:
return self.native_estimator.embedding_.shape[1]
class AdditiveChi2SamplerWrapperDF(
BaseDimensionalityReductionWrapperDF[AdditiveChi2Sampler], metaclass=ABCMeta
):
"""
DF wrapper for :class:`sklearn.kernel_approximation.AdditiveChi2Sampler`.
"""
@property
def _n_components_(self) -> int:
return len(self._features_in) * (2 * self.native_estimator.sample_steps + 1)
class PolynomialFeaturesWrapperDF(
BaseMultipleInputsPerOutputTransformerWrapperDF[PolynomialFeatures],
metaclass=ABCMeta,
):
"""
DF wrapper for :class:`sklearn.preprocessing.PolynomialFeatures`.
"""
def _get_features_out(self) -> pd.Index:
return pd.Index(
data=self.native_estimator.get_feature_names(
input_features=self.feature_names_in_.astype(str)
)
)
class OneHotEncoderWrapperDF(TransformerWrapperDF[OneHotEncoder], metaclass=ABCMeta):
"""
DF wrapper for :class:`sklearn.preprocessing.OneHotEncoder`.
"""
def _validate_delegate_estimator(self) -> None:
if self.native_estimator.sparse:
raise NotImplementedError("sparse matrices not supported; use sparse=False")
def _get_features_original(self) -> pd.Series:
# Return the series mapping output column names to original column names.
#
# Remove 1st category column if argument drop == 'first'
# Remove 1st category column only of binary features if arg drop == 'if_binary'
feature_names_out = pd.Index(
self.native_estimator.get_feature_names(self.feature_names_in_)
)
if self.drop == "first":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in range(len(category) - 1)
]
elif self.drop == "if_binary":
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in (range(1) if len(category) == 2 else category)
]
else:
feature_names_in = [
column_original
for column_original, category in zip(
self.feature_names_in_, self.native_estimator.categories_
)
for _ in category
]
return | pd.Series(index=feature_names_out, data=feature_names_in) | pandas.Series |
# !pip3 install streamlit
from io import BytesIO
import base64
import datetime
import math  # needed by dropna() below (math.exp)
import streamlit as st
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import yfinance as yf # https://pypi.org/project/yfinance/
##############################
# Technical Analysis Classes #
##############################
# https://github.com/bukosabino/ta/blob/master/ta/utils.py
class IndicatorMixin:
"""Util mixin indicator class"""
_fillna = False
def _check_fillna(self, series: pd.Series, value: int = 0) -> pd.Series:
"""Check if fillna flag is True.
Args:
series(pandas.Series): dataset 'Close' column.
            value(int): value used to fill gaps that remain after a forward fill.
Returns:
pandas.Series: New feature generated.
"""
if self._fillna:
series_output = series.copy(deep=False)
series_output = series_output.replace([np.inf, -np.inf], np.nan)
if isinstance(value, int) and value == -1:
series = series_output.fillna(method="ffill").fillna(value=-1)
else:
series = series_output.fillna(method="ffill").fillna(value)
return series
@staticmethod
def _true_range(
high: pd.Series, low: pd.Series, prev_close: pd.Series
) -> pd.Series:
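        # true range = max(high - low, |high - prev_close|, |low - prev_close|)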
tr1 = high - low
tr2 = (high - prev_close).abs()
tr3 = (low - prev_close).abs()
true_range = pd.DataFrame(
data={"tr1": tr1, "tr2": tr2, "tr3": tr3}).max(axis=1)
return true_range
def dropna(df: pd.DataFrame) -> pd.DataFrame:
"""Drop rows with "Nans" values"""
df = df.copy()
number_cols = df.select_dtypes("number").columns.to_list()
df[number_cols] = df[number_cols][df[number_cols]
< math.exp(709)] # big number
df[number_cols] = df[number_cols][df[number_cols] != 0.0]
df = df.dropna()
return df
def _sma(series, periods: int, fillna: bool = False):
min_periods = 0 if fillna else periods
return series.rolling(window=periods, min_periods=min_periods).mean()
def _ema(series, periods, fillna=False):
min_periods = 0 if fillna else periods
return series.ewm(span=periods, min_periods=min_periods, adjust=False).mean()
def _get_min_max(series1: pd.Series, series2: pd.Series, function: str = "min"):
"""Find min or max value between two lists for each index"""
series1 = np.array(series1)
series2 = np.array(series2)
if function == "min":
output = np.amin([series1, series2], axis=0)
elif function == "max":
output = np.amax([series1, series2], axis=0)
else:
raise ValueError('"f" variable value should be "min" or "max"')
return pd.Series(output)
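# e.g. _get_min_max(pd.Series([1, 5]), pd.Series([3, 2]), "min") -> pd.Series([1, 2])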
# https://github.com/bukosabino/ta/blob/master/ta/volatility.py
class BollingerBands(IndicatorMixin):
"""Bollinger Bands
https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_bands
Args:
close(pandas.Series): dataset 'Close' column.
window(int): n period.
window_dev(int): n factor standard deviation
fillna(bool): if True, fill nan values.
"""
def __init__(
self,
close: pd.Series,
window: int = 20,
window_dev: int = 2,
fillna: bool = False,
):
self._close = close
self._window = window
self._window_dev = window_dev
self._fillna = fillna
self._run()
def _run(self):
min_periods = 0 if self._fillna else self._window
self._mavg = self._close.rolling(
self._window, min_periods=min_periods).mean()
self._mstd = self._close.rolling(self._window, min_periods=min_periods).std(
ddof=0
)
self._hband = self._mavg + self._window_dev * self._mstd
self._lband = self._mavg - self._window_dev * self._mstd
def bollinger_mavg(self) -> pd.Series:
"""Bollinger Channel Middle Band
Returns:
pandas.Series: New feature generated.
"""
mavg = self._check_fillna(self._mavg, value=-1)
return pd.Series(mavg, name="mavg")
def bollinger_hband(self) -> pd.Series:
"""Bollinger Channel High Band
Returns:
pandas.Series: New feature generated.
"""
hband = self._check_fillna(self._hband, value=-1)
return pd.Series(hband, name="hband")
def bollinger_lband(self) -> pd.Series:
"""Bollinger Channel Low Band
Returns:
pandas.Series: New feature generated.
"""
lband = self._check_fillna(self._lband, value=-1)
return pd.Series(lband, name="lband")
def bollinger_wband(self) -> pd.Series:
"""Bollinger Channel Band Width
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_width
Returns:
pandas.Series: New feature generated.
"""
wband = ((self._hband - self._lband) / self._mavg) * 100
wband = self._check_fillna(wband, value=0)
return pd.Series(wband, name="bbiwband")
def bollinger_pband(self) -> pd.Series:
"""Bollinger Channel Percentage Band
From: https://school.stockcharts.com/doku.php?id=technical_indicators:bollinger_band_perce
Returns:
pandas.Series: New feature generated.
"""
pband = (self._close - self._lband) / (self._hband - self._lband)
pband = self._check_fillna(pband, value=0)
return pd.Series(pband, name="bbipband")
def bollinger_hband_indicator(self) -> pd.Series:
"""Bollinger Channel Indicator Crossing High Band (binary).
It returns 1, if close is higher than bollinger_hband. Else, it returns 0.
Returns:
pandas.Series: New feature generated.
"""
hband = pd.Series(
np.where(self._close > self._hband, 1.0, 0.0), index=self._close.index
)
hband = self._check_fillna(hband, value=0)
return | pd.Series(hband, index=self._close.index, name="bbihband") | pandas.Series |
import pandas as pd
from bokeh.plotting import figure
from bokeh.palettes import Spectral11 as pallette
from bokeh.models import Range1d
from bokeh.embed import components
from django.template.defaulttags import register
import numpy as np
import time
class Compare(object):
df = None
select_dict = None
name_dict = None
col_dict = None
def __init__(self, model, request, **kwargs):
assert hasattr(model, 'Name'), "Model must have a Name attribute."
assert hasattr(model, 'df'), "Model must have a df associated with it. This should return a Pandas DataFrame."
self.model = model
self.get_comp_df(request, kwargs)
self.get_comp_dicts(request, kwargs['pk'], kwargs['slug'])
@staticmethod
def name_col(schedule_name, column_name):
return "{schedule_name} - {column_name}".format(schedule_name=schedule_name, column_name=column_name)
def process_request(self, request, kwargs):
self.get_comp_dicts(request, kwargs['pk'], kwargs['slug'])
self.make_data_chart()
def get_comp_df(self, request, kwargs):
df = pd.DataFrame()
pk_list = [kwargs['pk']] + kwargs['slug'].split('-')
for pk in pk_list:
other_df = self.model.objects.get(pk=pk).df
schedule_name = self.model.objects.get(pk=pk).Name
if not other_df.columns.empty:
# if has columns, else supply None and name column empty
if str(pk) in request.GET.keys():
# if this schedule in get request, use col specifics, else use first col
# TODO: create error handling
assert (request.GET.get(str(pk)).isnumeric()), 'Columns must be specified by a number'
col_name = other_df.columns[int(request.GET.get(str(pk)))]
else:
col_name = other_df.columns[0]
df[self.name_col(schedule_name, col_name)] = other_df[col_name]
else:
df[self.name_col(schedule_name, 'Empty')] = None
self.df = df
def get_comp_dicts(self, request, main_pk, slug):
pk_list = [main_pk] + slug.split('-')
name_dict = {}
select_dict = {}
for pk in pk_list:
matrix = self.model.objects.get(pk=pk)
name_dict[pk] = matrix.Name
select_dict[pk] = 0
if request.GET.get(str(pk)) is not None:
select_dict[pk] = request.GET.get(str(pk))
else:
select_dict[pk] = 0
self.select_dict = select_dict
self.name_dict = name_dict
self.make_col_dict()
def make_col_dict(self):
col_dict = {}
for pk in self.select_dict.keys():
matrix = self.model.objects.get(pk=pk)
col_dict[pk] = dict(zip(range(len(matrix.df.columns)), matrix.df.columns))
self.col_dict = col_dict
def make_col_name(self, col_name, columns):
if col_name in columns:
# TODO: use .format
if col_name[-1:] == ')':
return self.make_col_name(col_name[:-2] + str(int(col_name[-2])+1) + ')', columns)
else:
return self.make_col_name(col_name + " (0)", columns)
else:
return col_name
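    # e.g. with existing columns ["Speed - A"]:
    #   self.make_col_name("Speed - A", ["Speed - A"])         -> "Speed - A (0)"
    #   self.make_col_name("Speed - A (0)", ["Speed - A (0)"]) -> "Speed - A (1)"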
def make_data_chart(self):
df = pd.DataFrame()
col_dict = {}
cols = []
t0 = time.time()
for pk in self.select_dict.keys():
matrix = self.model.objects.get(pk=pk)
lane = matrix.df[matrix.df.columns[self.select_dict[pk]]]
col_name = self.make_col_name(str(lane.name) + ' - ' + self.name_dict[pk], df.columns)
cols.append(col_name)
lane = lane.rename(col_name)
lane.index = pd.to_numeric(lane.index)
df = pd.concat([df, lane], axis=1, sort=True)
col_dict[pk] = dict(zip(range(len(matrix.df.columns)), matrix.df.columns))
self.df = df[cols].sort_index()
self.col_dict = col_dict
print("Made chart in "+str(time.time()-t0)+" seconds")
def lane_comp_plotter(self):
# TODO: Handle non-unique column headings
p = figure(width=800, height=480, title='Lane Comparison Plotter')
p.left[0].formatter.use_scientific = False
numlines = len(self.df.columns)
mypalette = pallette[0:numlines]
running_max = 0
count = 0
for name in self.df:
y = self.df[name].astype('float')
y = y.loc[~pd.isnull(y)]
running_max = max(running_max, y.max())
p.line(y.index, y.values, line_color=mypalette[count], line_width=5, legend=name)
count += 1
p.y_range = Range1d(0, running_max * 1.1) # Not sure why turn into string...
p.legend.location = "bottom_right"
script, div = components(p)
return script, div
@staticmethod
def highlight_cols():
color = '#007FFF'
return 'background-color: %s' % color
def format_df(self):
d = {}
# df = self.df.replace(to_replace='None', value=np.nan).astype('float')
for col in self.df.columns:
            d[col] = lambda x: '' if pd.isnull(x) else x  # 'else x' is an assumed fallback (pass value through)
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over so nothing defined values should remain
# unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
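        # a scalar count repeats every element; a list-like repeats element-wise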
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
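        # str.match tests whether each string matches the regex from its start
        # and returns a boolean Series (NaN for missing values)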
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
        tm.assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
        tm.assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
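        # with expand=False a single capture group yields a Series/Index,
        # while multiple capture groups yield a DataFrame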
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
        exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])  # api: pandas.DataFrame
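        # assumed continuation: the dataset row is truncated at this point; the
        # original test presumably asserts the extracted frame, e.g.:
        tm.assert_frame_equal(result, exp)
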
# coding: utf-8
import os
import pandas as pd
from tqdm import tqdm
from czsc.objects import RawBar, Freq
from czsc.utils.kline_generator import KlineGenerator, KlineGeneratorD, freq_end_time
from test.test_analyze import read_1min
cur_path = os.path.split(os.path.realpath(__file__))[0]
kline = read_1min()
def test_freq_end_time():
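    # freq_end_time maps a timestamp to the closing time of the bar
    # (of the given Freq) that contains it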
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F1) == pd.to_datetime("2021-11-11 09:43")
assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F5) == pd.to_datetime("2021-11-11 09:45")
    # the source row is truncated mid-call below; the Freq.F15 argument and the
    # expected 09:45 end time are assumptions that follow the pattern above
    assert freq_end_time(pd.to_datetime("2021-11-11 09:43"), Freq.F15) == pd.to_datetime("2021-11-11 09:45")  # api: pandas.to_datetime
# -*- coding:utf-8 -*-
__author__ = 'Randolph'
import random
import pickle
import pandas as pd
from tqdm import tqdm
from sklearn.model_selection import train_test_split
def split_data(input_file):
"""
Split_data.
Args:
input_file:
Returns:
Train Valid Test
0-11 个时间戳的 items 用于训练 12 时刻的 items 用于测试
每个时刻的 items 称为一个 basket
示例:
userID baskets num_baskets
1 [[2, 5],[3, 7]] 2
"""
    data = pd.read_csv(input_file, names=['userID', 'itemID', 'timestamp'])  # api: pandas.read_csv
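    # --- hedged sketch, not in the original source: the function body is
    # truncated at this point in the dump.  Based on the docstring, one
    # plausible continuation groups each user's items by timestamp into
    # baskets and then splits the users; the variable names and the 80/10/10
    # split below are assumptions.
    data = data.sort_values(['userID', 'timestamp'])
    baskets = (data.groupby(['userID', 'timestamp'])['itemID']
                   .apply(list)              # one basket per (user, timestamp)
                   .groupby(level='userID')
                   .apply(list)              # list of baskets per user
                   .reset_index(name='baskets'))
    baskets['num_baskets'] = baskets['baskets'].apply(len)
    train, rest = train_test_split(baskets, test_size=0.2, random_state=42)
    valid, test = train_test_split(rest, test_size=0.5, random_state=42)
    return train, valid, test
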
import time
import threading
import argparse
import tushare as ts
import numpy as np
import pandas as pd
from pandas import datetime as dt
from tqdm import tqdm
from utils import *
with open('../../tushare_token.txt', 'r') as f:
token = f.readline()
ts.set_token(token)
tushare_api = ts.pro_api()
# Stock list
df_list = []
for list_status in ['L', 'D', 'P']:
df_i = tushare_api.stock_basic(
exchange='',
list_status=list_status,
fields='ts_code')
df_list.append(df_i)
df_all = pd.concat(df_list)
# Income statement
df = pd.DataFrame()  # api: pandas.DataFrame
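# --- hedged sketch, not in the original source: the script is truncated at
# this point in the dump.  A plausible continuation, mirroring the stock-list
# section above, loops over every ts_code and downloads its income statements
# through the pro API; the output file name and the simple back-off on errors
# are assumptions.
for ts_code in tqdm(df_all['ts_code'].values):
    try:
        df_i = tushare_api.income(ts_code=ts_code)
        df = pd.concat([df, df_i], ignore_index=True)
    except Exception:
        time.sleep(1)  # back off briefly (e.g. on rate limits) and skip this code
df.to_csv('income_statements.csv', index=False)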