#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
from itertools import product
import unittest
import pandas as pd
import numpy as np
import pyspark
from databricks import koalas as ks
from databricks.koalas.config import set_option, reset_option
from databricks.koalas.frame import DataFrame
from databricks.koalas.testing.utils import ReusedSQLTestCase, SQLTestUtils
from databricks.koalas.typedef.typehints import (
extension_dtypes,
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
)
class OpsOnDiffFramesEnabledTest(ReusedSQLTestCase, SQLTestUtils):
@classmethod
def setUpClass(cls):
super().setUpClass()
set_option("compute.ops_on_diff_frames", True)
@classmethod
def tearDownClass(cls):
reset_option("compute.ops_on_diff_frames")
super().tearDownClass()
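    # Added illustrative sketch (not part of the original suite): with
    # "compute.ops_on_diff_frames" enabled, Koalas lets you combine Series and
    # DataFrames that originate from different DataFrames by aligning them on
    # their indexes. Assumes a running Spark session, as the tests below do.
    #
    #   kdf_a = ks.DataFrame({"x": [1, 2, 3]})
    #   kdf_b = ks.DataFrame({"x": [10, 20, 30]})
    #   (kdf_a.x + kdf_b.x).sort_index()  # index-aligned sums: 11, 22, 33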
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9], "b": [4, 5, 6, 3, 2, 1, 0, 0, 0]},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
)
@property
def pdf2(self):
return pd.DataFrame(
{"a": [9, 8, 7, 6, 5, 4, 3, 2, 1], "b": [0, 0, 0, 4, 5, 6, 1, 2, 3]},
index=list(range(9)),
)
@property
def pdf3(self):
return pd.DataFrame(
{"b": [1, 1, 1, 1, 1, 1, 1, 1, 1], "c": [1, 1, 1, 1, 1, 1, 1, 1, 1]},
index=list(range(9)),
)
@property
def pdf4(self):
return pd.DataFrame(
{"e": [2, 2, 2, 2, 2, 2, 2, 2, 2], "f": [2, 2, 2, 2, 2, 2, 2, 2, 2]},
index=list(range(9)),
)
@property
def pdf5(self):
return pd.DataFrame(
{
"a": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"b": [4, 5, 6, 3, 2, 1, 0, 0, 0],
"c": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=[0, 1, 3, 5, 6, 8, 9, 10, 11],
).set_index(["a", "b"])
@property
def pdf6(self):
return pd.DataFrame(
{
"a": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"b": [0, 0, 0, 4, 5, 6, 1, 2, 3],
"c": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"e": [4, 5, 6, 3, 2, 1, 0, 0, 0],
},
index=list(range(9)),
).set_index(["a", "b"])
@property
def pser1(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx)
@property
def pser2(self):
midx = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
return pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx)
@property
def pser3(self):
midx = pd.MultiIndex(
[["koalas", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [1, 1, 2, 0, 0, 2, 2, 2, 1]],
)
return pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], index=midx)
@property
def kdf1(self):
return ks.from_pandas(self.pdf1)
@property
def kdf2(self):
return ks.from_pandas(self.pdf2)
@property
def kdf3(self):
return ks.from_pandas(self.pdf3)
@property
def kdf4(self):
return ks.from_pandas(self.pdf4)
@property
def kdf5(self):
return ks.from_pandas(self.pdf5)
@property
def kdf6(self):
return ks.from_pandas(self.pdf6)
@property
def kser1(self):
return ks.from_pandas(self.pser1)
@property
def kser2(self):
return ks.from_pandas(self.pser2)
@property
def kser3(self):
return ks.from_pandas(self.pser3)
def test_ranges(self):
self.assert_eq(
(ks.range(10) + ks.range(10)).sort_index(),
(
ks.DataFrame({"id": list(range(10))}) + ks.DataFrame({"id": list(range(10))})
).sort_index(),
)
def test_no_matched_index(self):
with self.assertRaisesRegex(ValueError, "Index names must be exactly matched"):
ks.DataFrame({"a": [1, 2, 3]}).set_index("a") + ks.DataFrame(
{"b": [1, 2, 3]}
).set_index("b")
def test_arithmetic(self):
self._test_arithmetic_frame(self.pdf1, self.pdf2, check_extension=False)
self._test_arithmetic_series(self.pser1, self.pser2, check_extension=False)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_extension_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Int64"), self.pdf2.astype("Int64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_extension_float_dtypes(self):
self._test_arithmetic_frame(
self.pdf1.astype("Float64"), self.pdf2.astype("Float64"), check_extension=True
)
self._test_arithmetic_series(
self.pser1.astype("Float64"), self.pser2.astype("Float64"), check_extension=True
)
def _test_arithmetic_frame(self, pdf1, pdf2, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for dtype in actual.dtypes:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b).sort_index(), (pdf1.a - pdf2.b).sort_index())
assert_eq((kdf1.a * kdf2.a).sort_index(), (pdf1.a * pdf2.a).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index()
)
else:
assert_eq((kdf1["a"] / kdf2["a"]).sort_index(), (pdf1["a"] / pdf2["a"]).sort_index())
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] - kdf2["x"]["b"]).sort_index(),
(pdf1[("x", "a")] - pdf2["x"]["b"]).sort_index(),
)
assert_eq(
(kdf1["x"]["a"] - kdf2[("x", "b")]).sort_index(),
(pdf1["x"]["a"] - pdf2[("x", "b")]).sort_index(),
)
# DataFrame
assert_eq((kdf1 + kdf2).sort_index(), (pdf1 + pdf2).sort_index())
def _test_arithmetic_series(self, pser1, pser2, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2).sort_index(), (pser1 + pser2).sort_index())
assert_eq((kser1 - kser2).sort_index(), (pser1 - pser2).sort_index())
assert_eq((kser1 * kser2).sort_index(), (pser1 * pser2).sort_index())
if check_extension and not extension_float_dtypes_available:
self.assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
else:
assert_eq((kser1 / kser2).sort_index(), (pser1 / pser2).sort_index())
def test_arithmetic_chain(self):
self._test_arithmetic_chain_frame(self.pdf1, self.pdf2, self.pdf3, check_extension=False)
self._test_arithmetic_chain_series(
self.pser1, self.pser2, self.pser3, check_extension=False
)
@unittest.skipIf(not extension_dtypes_available, "pandas extension dtypes are not available")
def test_arithmetic_chain_extension_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Int64"),
self.pdf2.astype("Int64"),
self.pdf3.astype("Int64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype(int).astype("Int64"),
self.pser2.astype(int).astype("Int64"),
self.pser3.astype(int).astype("Int64"),
check_extension=True,
)
@unittest.skipIf(
not extension_float_dtypes_available, "pandas extension float dtypes are not available"
)
def test_arithmetic_chain_extension_float_dtypes(self):
self._test_arithmetic_chain_frame(
self.pdf1.astype("Float64"),
self.pdf2.astype("Float64"),
self.pdf3.astype("Float64"),
check_extension=True,
)
self._test_arithmetic_chain_series(
self.pser1.astype("Float64"),
self.pser2.astype("Float64"),
self.pser3.astype("Float64"),
check_extension=True,
)
def _test_arithmetic_chain_frame(self, pdf1, pdf2, pdf3, *, check_extension):
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = ks.from_pandas(pdf3)
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
if isinstance(actual, DataFrame):
for column, dtype in zip(actual.columns, actual.dtypes):
if column in common_columns:
self.assertTrue(isinstance(dtype, extension_dtypes))
else:
self.assertFalse(isinstance(dtype, extension_dtypes))
else:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# Series
assert_eq((kdf1.a - kdf2.b - kdf3.c).sort_index(), (pdf1.a - pdf2.b - pdf3.c).sort_index())
assert_eq(
(kdf1.a * (kdf2.a * kdf3.c)).sort_index(), (pdf1.a * (pdf2.a * pdf3.c)).sort_index()
)
if check_extension and not extension_float_dtypes_available:
self.assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
else:
assert_eq(
(kdf1["a"] / kdf2["a"] / kdf3["c"]).sort_index(),
(pdf1["a"] / pdf2["a"] / pdf3["c"]).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
kdf2.columns = columns
pdf1.columns = columns
pdf2.columns = columns
columns = pd.MultiIndex.from_tuples([("x", "b"), ("y", "c")])
kdf3.columns = columns
pdf3.columns = columns
common_columns = set(kdf1.columns).intersection(kdf2.columns).intersection(kdf3.columns)
# Series
assert_eq(
(kdf1[("x", "a")] - kdf2[("x", "b")] - kdf3[("y", "c")]).sort_index(),
(pdf1[("x", "a")] - pdf2[("x", "b")] - pdf3[("y", "c")]).sort_index(),
)
assert_eq(
(kdf1[("x", "a")] * (kdf2[("x", "b")] * kdf3[("y", "c")])).sort_index(),
(pdf1[("x", "a")] * (pdf2[("x", "b")] * pdf3[("y", "c")])).sort_index(),
)
# DataFrame
if check_extension and (
LooseVersion("1.0") <= LooseVersion(pd.__version__) < LooseVersion("1.1")
):
self.assert_eq(
(kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index(), almost=True
)
else:
assert_eq((kdf1 + kdf2 - kdf3).sort_index(), (pdf1 + pdf2 - pdf3).sort_index())
def _test_arithmetic_chain_series(self, pser1, pser2, pser3, *, check_extension):
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
kser3 = ks.from_pandas(pser3)
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=not check_extension)
if check_extension:
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
# MultiIndex Series
assert_eq((kser1 + kser2 - kser3).sort_index(), (pser1 + pser2 - pser3).sort_index())
assert_eq((kser1 * kser2 * kser3).sort_index(), (pser1 * pser2 * pser3).sort_index())
if check_extension and not extension_float_dtypes_available:
if LooseVersion(pd.__version__) >= LooseVersion("1.0"):
self.assert_eq(
(kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index()
)
else:
expected = pd.Series(
[249.0, np.nan, 0.0, 0.88, np.nan, np.nan, np.nan, np.nan, np.nan, -np.inf]
+ [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
index=pd.MultiIndex(
[
["cow", "falcon", "koala", "koalas", "lama"],
["length", "power", "speed", "weight"],
],
[
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4],
[0, 1, 2, 2, 3, 0, 0, 1, 2, 3, 0, 0, 3, 3, 0, 2, 3],
],
),
)
self.assert_eq((kser1 - kser2 / kser3).sort_index(), expected)
else:
assert_eq((kser1 - kser2 / kser3).sort_index(), (pser1 - pser2 / pser3).sort_index())
assert_eq((kser1 + kser2 * kser3).sort_index(), (pser1 + pser2 * pser3).sort_index())
def test_mod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
        self.assert_eq(kser.mod(kser_other).sort_index(), pser.mod(pser_other))
def test_rmod(self):
pser = pd.Series([100, None, -300, None, 500, -700])
pser_other = pd.Series([-150] * 6)
kser = ks.from_pandas(pser)
kser_other = ks.from_pandas(pser_other)
        self.assert_eq(kser.rmod(kser_other).sort_index(), pser.rmod(pser_other))
def test_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[0, 30, 10, 20, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1[pdf2.A > -3].sort_index(), kdf1[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A[pdf2.A > -3].sort_index(), kdf1.A[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1)[pdf2.A > -3].sort_index(), (kdf1.A + 1)[kdf2.A > -3].sort_index()
)
def test_loc_getitem_boolean_series(self):
pdf1 = pd.DataFrame(
{"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]}, index=[20, 10, 30, 0, 50]
)
pdf2 = pd.DataFrame(
{"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]},
index=[20, 10, 30, 0, 50],
)
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.loc[pdf2.A > -3].sort_index(), kdf1.loc[kdf2.A > -3].sort_index())
self.assert_eq(pdf1.A.loc[pdf2.A > -3].sort_index(), kdf1.A.loc[kdf2.A > -3].sort_index())
self.assert_eq(
(pdf1.A + 1).loc[pdf2.A > -3].sort_index(), (kdf1.A + 1).loc[kdf2.A > -3].sort_index()
)
def test_bitwise(self):
pser1 = pd.Series([True, False, True, False, np.nan, np.nan, True, False, np.nan])
pser2 = pd.Series([True, False, False, True, True, False, np.nan, np.nan, np.nan])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
pser1 = pd.Series([True, False, np.nan], index=list("ABC"))
pser2 = pd.Series([False, True, np.nan], index=list("DEF"))
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(pser1 | pser2, (kser1 | kser2).sort_index())
self.assert_eq(pser1 & pser2, (kser1 & kser2).sort_index())
@unittest.skipIf(
not extension_object_dtypes_available, "pandas extension object dtypes are not available"
)
def test_bitwise_extension_dtype(self):
def assert_eq(actual, expected):
if LooseVersion("1.1") <= LooseVersion(pd.__version__) < LooseVersion("1.2.2"):
self.assert_eq(actual, expected, check_exact=False)
self.assertTrue(isinstance(actual.dtype, extension_dtypes))
else:
self.assert_eq(actual, expected)
pser1 = pd.Series(
[True, False, True, False, np.nan, np.nan, True, False, np.nan], dtype="boolean"
)
pser2 = pd.Series(
[True, False, False, True, True, False, np.nan, np.nan, np.nan], dtype="boolean"
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
pser1 = pd.Series([True, False, np.nan], index=list("ABC"), dtype="boolean")
pser2 = pd.Series([False, True, np.nan], index=list("DEF"), dtype="boolean")
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
# a pandas bug?
# assert_eq((kser1 | kser2).sort_index(), pser1 | pser2)
# assert_eq((kser1 & kser2).sort_index(), pser1 & pser2)
assert_eq(
(kser1 | kser2).sort_index(),
pd.Series([True, None, None, None, True, None], index=list("ABCDEF"), dtype="boolean"),
)
assert_eq(
(kser1 & kser2).sort_index(),
pd.Series(
[None, False, None, False, None, None], index=list("ABCDEF"), dtype="boolean"
),
)
def test_concat_column_axis(self):
pdf1 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf1.columns.names = ["AB"]
pdf2 = pd.DataFrame({"C": [1, 2, 3], "D": [4, 5, 6]}, index=[1, 3, 5])
pdf2.columns.names = ["CD"]
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
kdf3 = kdf1.copy()
kdf4 = kdf2.copy()
pdf3 = pdf1.copy()
pdf4 = pdf2.copy()
columns = pd.MultiIndex.from_tuples([("X", "A"), ("X", "B")], names=["X", "AB"])
pdf3.columns = columns
kdf3.columns = columns
columns = pd.MultiIndex.from_tuples([("X", "C"), ("X", "D")], names=["Y", "CD"])
pdf4.columns = columns
kdf4.columns = columns
pdf5 = pd.DataFrame({"A": [0, 2, 4], "B": [1, 3, 5]}, index=[1, 2, 3])
pdf6 = pd.DataFrame({"C": [1, 2, 3]}, index=[1, 3, 5])
kdf5 = ks.from_pandas(pdf5)
kdf6 = ks.from_pandas(pdf6)
ignore_indexes = [True, False]
joins = ["inner", "outer"]
objs = [
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
# TODO: ([kdf1, kdf2.C], [pdf1, pdf2.C]),
([kdf1.A, kdf2], [pdf1.A, pdf2]),
([kdf1.A, kdf2.C], [pdf1.A, pdf2.C]),
([kdf3[("X", "A")], kdf4[("X", "C")]], [pdf3[("X", "A")], pdf4[("X", "C")]]),
([kdf3, kdf4[("X", "C")]], [pdf3, pdf4[("X", "C")]]),
([kdf3[("X", "A")], kdf4], [pdf3[("X", "A")], pdf4]),
([kdf3, kdf4], [pdf3, pdf4]),
([kdf5, kdf6], [pdf5, pdf6]),
([kdf6, kdf5], [pdf6, pdf5]),
]
for ignore_index, join in product(ignore_indexes, joins):
for i, (kdfs, pdfs) in enumerate(objs):
with self.subTest(ignore_index=ignore_index, join=join, pdfs=pdfs, pair=i):
actual = ks.concat(kdfs, axis=1, ignore_index=ignore_index, join=join)
expected = pd.concat(pdfs, axis=1, ignore_index=ignore_index, join=join)
self.assert_eq(
repr(actual.sort_values(list(actual.columns)).reset_index(drop=True)),
repr(expected.sort_values(list(expected.columns)).reset_index(drop=True)),
)
def test_combine_first(self):
pser1 = pd.Series({"falcon": 330.0, "eagle": 160.0})
pser2 = pd.Series({"falcon": 345.0, "eagle": 200.0, "duck": 30.0})
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
with self.assertRaisesRegex(
ValueError, "`combine_first` only allows `Series` for parameter `other`"
):
kser1.combine_first(50)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# MultiIndex
midx1 = pd.MultiIndex(
[["lama", "cow", "falcon", "koala"], ["speed", "weight", "length", "power"]],
[[0, 3, 1, 1, 1, 2, 2, 2], [0, 2, 0, 3, 2, 0, 1, 3]],
)
midx2 = pd.MultiIndex(
[["lama", "cow", "falcon"], ["speed", "weight", "length"]],
[[0, 0, 0, 1, 1, 1, 2, 2, 2], [0, 1, 2, 0, 1, 2, 0, 1, 2]],
)
pser1 = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1], index=midx1)
pser2 = pd.Series([-45, 200, -1.2, 30, -250, 1.5, 320, 1, -0.3], index=midx2)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
# Series come from same DataFrame
pdf = pd.DataFrame(
{
"A": {"falcon": 330.0, "eagle": 160.0},
"B": {"falcon": 345.0, "eagle": 200.0, "duck": 30.0},
}
)
pser1 = pdf.A
pser2 = pdf.B
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
kser1.name = ("X", "A")
kser2.name = ("Y", "B")
pser1.name = ("X", "A")
pser2.name = ("Y", "B")
self.assert_eq(
kser1.combine_first(kser2).sort_index(), pser1.combine_first(pser2).sort_index()
)
def test_insert(self):
#
# Basic DataFrame
#
pdf = pd.DataFrame([1, 2, 3])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Index different from inserting Series'
#
pdf = pd.DataFrame([1, 2, 3], index=[10, 20, 30])
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
kdf.insert(1, "y", kser)
pdf.insert(1, "y", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
#
# DataFrame with Multi-index columns
#
pdf = pd.DataFrame({("x", "a"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
pser = pd.Series([4, 5, 6])
kser = ks.from_pandas(pser)
pdf = pd.DataFrame({("x", "a", "b"): [1, 2, 3]})
kdf = ks.from_pandas(pdf)
kdf.insert(0, "a", kser)
pdf.insert(0, "a", pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf.insert(0, ("b", "c", ""), kser)
pdf.insert(0, ("b", "c", ""), pser)
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_compare(self):
if LooseVersion(pd.__version__) >= LooseVersion("1.1"):
pser1 = pd.Series(["b", "c", np.nan, "g", np.nan])
pser2 = pd.Series(["a", "c", np.nan, np.nan, "h"])
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
pser1.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
pser2.index = pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
)
kser1 = ks.from_pandas(pser1)
kser2 = ks.from_pandas(pser2)
self.assert_eq(
pser1.compare(pser2).sort_index(), kser1.compare(kser2).sort_index(),
)
# `keep_shape=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True).sort_index(),
kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True` with MultiIndex
self.assert_eq(
pser1.compare(pser2, keep_shape=True, keep_equal=True).sort_index(),
kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
else:
kser1 = ks.Series(["b", "c", np.nan, "g", np.nan])
kser2 = ks.Series(["a", "c", np.nan, np.nan, "h"])
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["g", None], [None, "h"]], index=[0, 3, 4], columns=["self", "other"]
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], ["g", None], [None, "h"]],
index=[0, 1, 2, 3, 4],
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# MultiIndex
kser1 = ks.Series(
["b", "c", np.nan, "g", np.nan],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
["a", "c", np.nan, np.nan, "h"],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(expected, kser1.compare(kser2).sort_index())
# `keep_shape=True`
expected = ks.DataFrame(
[["b", "a"], [None, None], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True).sort_index(),
)
# `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples([("a", "x"), ("q", "l"), ("x", "k")]),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_equal=True).sort_index(),
)
# `keep_shape=True` and `keep_equal=True`
expected = ks.DataFrame(
[["b", "a"], ["c", "c"], [None, None], [None, "h"], ["g", None]],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("q", "l"), ("x", "k")]
),
columns=["self", "other"],
)
self.assert_eq(
expected, kser1.compare(kser2, keep_shape=True, keep_equal=True).sort_index(),
)
# Different Index
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series([1, 2, 3, 4, 5], index=pd.Index([1, 2, 3, 4, 5]),)
kser2 = ks.Series([2, 2, 3, 4, 1], index=pd.Index([5, 4, 3, 2, 1]),)
kser1.compare(kser2)
# Different MultiIndex
with self.assertRaisesRegex(
ValueError, "Can only compare identically-labeled Series objects"
):
kser1 = ks.Series(
[1, 2, 3, 4, 5],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "z"), ("x", "k"), ("q", "l")]
),
)
kser2 = ks.Series(
[2, 2, 3, 4, 1],
index=pd.MultiIndex.from_tuples(
[("a", "x"), ("b", "y"), ("c", "a"), ("x", "k"), ("q", "l")]
),
)
kser1.compare(kser2)
def test_different_columns(self):
kdf1 = self.kdf1
kdf4 = self.kdf4
pdf1 = self.pdf1
pdf4 = self.pdf4
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf1.columns = columns
pdf1.columns = columns
columns = pd.MultiIndex.from_tuples([("z", "e"), ("z", "f")])
kdf4.columns = columns
pdf4.columns = columns
self.assert_eq((kdf1 + kdf4).sort_index(), (pdf1 + pdf4).sort_index(), almost=True)
def test_assignment_series(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.a
pdf["a"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["c"] = self.kdf2.a
pdf["c"] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf.columns = columns
pdf.columns = columns
kdf[("y", "c")] = self.kdf2.a
pdf[("y", "c")] = self.pdf2.a
self.assert_eq(kdf.sort_index(), pdf.sort_index())
pdf = pd.DataFrame({"a": [1, 2, 3], "Koalas": [0, 1, 2]}).set_index("Koalas", drop=False)
kdf = ks.from_pandas(pdf)
kdf.index.name = None
kdf["NEW"] = ks.Series([100, 200, 300])
pdf.index.name = None
pdf["NEW"] = pd.Series([100, 200, 300])
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' does not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kser = kdf.a
pser = pdf.a
kdf[["b", "c"]] = self.kdf1
pdf[["b", "c"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
self.assert_eq(kser, pser)
# 'c' and 'd' do not exist in `kdf`.
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["c", "d"]] = self.kdf1
pdf[["c", "d"]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
# Multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b")])
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf.columns = columns
pdf.columns = columns
kdf[[("y", "c"), ("z", "d")]] = self.kdf1
pdf[[("y", "c"), ("z", "d")]] = self.pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf1 = ks.from_pandas(self.pdf1)
pdf1 = self.pdf1
kdf1.columns = columns
pdf1.columns = columns
kdf[["c", "d"]] = kdf1
pdf[["c", "d"]] = pdf1
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_series_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf["a"] = self.kdf1.a
pdf["a"] = self.pdf1.a
kdf["a"] = self.kdf2.b
pdf["a"] = self.pdf2.b
kdf["d"] = self.kdf3.c
pdf["d"] = self.pdf3.c
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_assignment_frame_chain(self):
kdf = ks.from_pandas(self.pdf1)
pdf = self.pdf1
kdf[["a", "b"]] = self.kdf1
pdf[["a", "b"]] = self.pdf1
kdf[["e", "f"]] = self.kdf3
pdf[["e", "f"]] = self.pdf3
kdf[["b", "c"]] = self.kdf2
pdf[["b", "c"]] = self.pdf2
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_arithmetic(self):
kdf5 = self.kdf5
kdf6 = self.kdf6
pdf5 = self.pdf5
pdf6 = self.pdf6
# Series
self.assert_eq((kdf5.c - kdf6.e).sort_index(), (pdf5.c - pdf6.e).sort_index())
self.assert_eq((kdf5["c"] / kdf6["e"]).sort_index(), (pdf5["c"] / pdf6["e"]).sort_index())
# DataFrame
self.assert_eq((kdf5 + kdf6).sort_index(), (pdf5 + pdf6).sort_index(), almost=True)
def test_multi_index_assignment_series(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["x"] = self.kdf6.e
pdf["x"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["e"] = self.kdf6.e
pdf["e"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf["c"] = self.kdf6.e
pdf["c"] = self.pdf6.e
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_multi_index_assignment_frame(self):
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["c"]] = self.kdf5
pdf[["c"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf5)
pdf = self.pdf5
kdf[["x"]] = self.kdf5
pdf[["x"]] = self.pdf5
self.assert_eq(kdf.sort_index(), pdf.sort_index())
kdf = ks.from_pandas(self.pdf6)
pdf = self.pdf6
kdf[["x", "y"]] = self.kdf6
pdf[["x", "y"]] = self.pdf6
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_frame_loc_setitem(self):
pdf_orig = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf_orig = ks.DataFrame(pdf_orig)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
another_kdf = ks.DataFrame(pdf_orig)
kdf.loc[["viper", "sidewinder"], ["shield"]] = -another_kdf.max_speed
pdf.loc[["viper", "sidewinder"], ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
pdf = pdf_orig.copy()
kdf = kdf_orig.copy()
pser1 = pdf.max_speed
pser2 = pdf.shield
kser1 = kdf.max_speed
kser2 = kdf.shield
kdf.loc[another_kdf.max_speed < 5, ["shield"]] = -another_kdf.max_speed
pdf.loc[pdf.max_speed < 5, ["shield"]] = -pdf.max_speed
self.assert_eq(kdf, pdf)
self.assert_eq(kser1, pser1)
self.assert_eq(kser2, pser2)
def test_frame_iloc_setitem(self):
pdf = pd.DataFrame(
[[1, 2], [4, 5], [7, 8]],
index=["cobra", "viper", "sidewinder"],
columns=["max_speed", "shield"],
)
kdf = ks.DataFrame(pdf)
another_kdf = ks.DataFrame(pdf)
kdf.iloc[[0, 1, 2], 1] = -another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = -pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (2,1)",
# ):
# kdf.iloc[[1, 2], [1]] = -another_kdf.max_speed
kdf.iloc[[0, 1, 2], 1] = 10 * another_kdf.max_speed
pdf.iloc[[0, 1, 2], 1] = 10 * pdf.max_speed
self.assert_eq(kdf, pdf)
# TODO: matching the behavior with pandas 1.2 and uncomment below test
# with self.assertRaisesRegex(
# ValueError,
# "shape mismatch: value array of shape (3,) could not be broadcast to indexing "
# "result of shape (1,)",
# ):
# kdf.iloc[[0], 1] = 10 * another_kdf.max_speed
def test_series_loc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.loc[kser % 2 == 1] = -kser_another
pser.loc[pser % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser
pser.loc[pser_another % 2 == 1] = -pser
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = -kser_another
pser.loc[pser_another % 2 == 1] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[["viper", "sidewinder"]] = -kser_another
pser.loc[["viper", "sidewinder"]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
kser.loc[kser_another % 2 == 1] = 10
pser.loc[pser_another % 2 == 1] = 10
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
def test_series_iloc_setitem(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
pser1 = pser + 1
kser1 = kser + 1
pser_another = pd.Series([1, 2, 3], index=["cobra", "viper", "sidewinder"])
kser_another = ks.from_pandas(pser_another)
kser.iloc[[0, 1, 2]] = -kser_another
pser.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[1, 2]] = -kser_another
kser.iloc[[0, 1, 2]] = 10 * kser_another
pser.iloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser.iloc[[0]] = 10 * kser_another
kser1.iloc[[0, 1, 2]] = -kser_another
pser1.iloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser1, pser1)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kser1.iloc[[1, 2]] = -kser_another
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}, index=["cobra", "viper", "sidewinder"])
kdf = ks.from_pandas(pdf)
pser = pdf.x
psery = pdf.y
kser = kdf.x
ksery = kdf.y
piloc = pser.iloc
kiloc = kser.iloc
kiloc[[0, 1, 2]] = -kser_another
piloc[[0, 1, 2]] = -pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# TODO: matching the behavior with pandas 1.2 and uncomment below test.
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[1, 2]] = -kser_another
kiloc[[0, 1, 2]] = 10 * kser_another
piloc[[0, 1, 2]] = 10 * pser_another
self.assert_eq(kser, pser)
self.assert_eq(kdf, pdf)
self.assert_eq(ksery, psery)
# with self.assertRaisesRegex(
# ValueError,
# "cannot set using a list-like indexer with a different length than the value",
# ):
# kiloc[[0]] = 10 * kser_another
def test_update(self):
pdf = pd.DataFrame({"x": [1, 2, 3], "y": [10, 20, 30]})
kdf = ks.from_pandas(pdf)
pser = pdf.x
kser = kdf.x
pser.update(pd.Series([4, 5, 6]))
kser.update(ks.Series([4, 5, 6]))
self.assert_eq(kser.sort_index(), pser.sort_index())
self.assert_eq(kdf.sort_index(), pdf.sort_index())
def test_where(self):
pdf1 = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [100, 200, 300, 400, 500]})
pdf2 = pd.DataFrame({"A": [0, -1, -2, -3, -4], "B": [-100, -200, -300, -400, -500]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 > 100), kdf1.where(kdf2 > 100).sort_index())
pdf1 = pd.DataFrame({"A": [-1, -2, -3, -4, -5], "B": [-100, -200, -300, -400, -500]})
pdf2 = pd.DataFrame({"A": [-10, -20, -30, -40, -50], "B": [-5, -4, -3, -2, -1]})
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
self.assert_eq(pdf1.where(pdf2 < -250), kdf1.where(kdf2 < -250).sort_index())
# multi-index columns
        pdf1 = pd.DataFrame(
            {("X", "A"): [0, 1, 2, 3, 4], ("X", "B"): [100, 200, 300, 400, 500]}
        )
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
        tm.assert_series_equal(ts, df.iloc[:, 0])
#!/usr/bin/python3
import numpy as np
import pandas as pd
import data.data_input as di
import package.solution as sol
import package.instance as inst
import pytups.tuplist as tl
import pytups.superdict as sd
import os
import random as rn
class Experiment(object):
"""
    This object represents the unification of both the input data (instance) and the solution.
    Each is represented by its own object.
    Its methods are mostly feasibility checks on the solution.
"""
def __init__(self, instance: inst.Instance, solution: sol.Solution):
"""
:param inst.Instance instance:
:param sol.Solution solution:
"""
self.instance = instance
self.solution = solution
@classmethod
def from_dir(cls, path, format='json', prefix="data_"):
files = [os.path.join(path, prefix + f + "." + format) for f in ['in', 'out']]
if not np.all([os.path.exists(f) for f in files]):
return None
instance = di.load_data(files[0])
solution = di.load_data(files[1])
return cls(inst.Instance(instance), sol.Solution(solution))
def to_dir(self, path, format='json', prefix='data_'):
di.export_data(path, self.instance.data, name=prefix + "in", file_type=format, exclude_aux=True)
di.export_data(path, self.solution.data, name=prefix + "out", file_type=format, exclude_aux=True)
return
@classmethod
def from_zipfile(cls, zipobj, path, format='json', prefix="data_"):
# files = [os.path.join(path, prefix + f + "." + format) for f in ['in', 'out']]
files = [path + '/' + prefix + f + "." + format for f in ['in', 'out']]
instance = di.load_data_zip(zipobj, files[0])
solution = di.load_data_zip(zipobj, files[1])
# print(files[0], files[1])
if not np.all([instance, solution]):
return None
return cls(inst.Instance(instance), sol.Solution(solution))
@staticmethod
def expand_resource_period(data, resource, period):
if resource not in data:
data[resource] = {}
if period not in data[resource]:
data[resource][period] = {}
return True
@staticmethod
def label_rt(time):
if time == "rut":
return 'used'
elif time == 'ret':
return 'elapsed'
else:
raise ValueError("time needs to be rut or ret")
def check_solution_count(self, **params):
return self.check_solution(**params).to_lendict()
def check_solution(self, list_tests=None, **params):
func_list = {
'candidates': self.check_resource_in_candidates
,'state': self.check_resource_state
,'resources': self.check_task_num_resources
,'usage': self.check_usage_consumption
,'elapsed': self.check_elapsed_consumption
,'min_assign': self.check_min_max_assignment
,'available': self.check_min_available
,'hours': self.check_min_flight_hours
,'start_periods': self.check_fixed_assignments
,'dist_maints': self.check_min_distance_maints
,'capacity': self.check_sub_maintenance_capacity
,'maint_size': self.check_maints_size
}
if list_tests is None:
list_tests = func_list.keys()
result = {k: func_list[k](**params) for k in list_tests}
return sd.SuperDict({k: v for k, v in result.items() if v})
# @profile
def check_sub_maintenance_capacity(self, ref_compare=0, deficit_only=True, periods=None, resources=None, **param):
"""
        :param ref_compare: if None, we return all remaining capacity;
            otherwise we use it to filter which entries to return
        :param deficit_only: if True, only return entries whose remaining
            capacity is below ref_compare
        :param periods: optional filter of periods to check for capacity
        :param resources: optional filter of resources to count
        :param param: for compatibility
        :return: {(maintenance type, period): remaining capacity}
"""
# we get the capacity per month
inst = self.instance
rem = inst.get_capacity_calendar(periods)
first, last = inst.get_param('start'), inst.get_param('end')
maintenances = inst.get_maintenances()
types = maintenances.get_property('type')
usage = maintenances.get_property('capacity_usage')
all_states_tuple = self.get_states()
if periods is not None:
periods = set(periods)
all_states_tuple = all_states_tuple.vfilter(lambda x: x[1] in periods)
else:
all_states_tuple = all_states_tuple.vfilter(lambda x: last >= x[1] >= first)
if resources is not None:
resources = set(resources)
all_states_tuple = all_states_tuple.vfilter(lambda v: v[0] in resources)
if not len(all_states_tuple):
if ref_compare is None:
return rem
return []
for res, period, maint in all_states_tuple:
_type = types[maint]
rem[_type, period] -= usage[maint]
if ref_compare is None or not deficit_only:
return rem
return rem.vfilter(lambda x: x < ref_compare)
@staticmethod
def sum_by_group(values, groups):
order = np.argsort(groups)
groups = groups[order]
values = values[order]
values.cumsum(out=values)
index = np.ones(len(groups), 'bool')
index[:-1] = groups[1:] != groups[:-1]
values = values[index]
groups = groups[index]
values[1:] = values[1:] - values[:-1]
return values, groups
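    # Added worked example for sum_by_group: with
    #   values = np.array([1, 3, 2, 4]) and groups = np.array([0, 0, 1, 1])
    # the argsort/cumsum trick above returns (array([4, 6]), array([0, 1])),
    # i.e. the per-group sums, ordered by sorted group label. Note that
    # `values` is modified in place because of cumsum(out=values).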
def check_task_num_resources(self, deficit_only=True, assign_missions=True, periods=None, resources=None, **params):
if not assign_missions:
return sd.SuperDict()
if periods is None:
periods = self.instance.get_periods().to_set()
else:
periods = set(periods)
task_reqs = self.instance.get_tasks('num_resource')
task_period_list = \
self.instance.get_task_period_list().\
vfilter(lambda v: v[1] in periods)
task_under_assigned = \
self.solution.get_task_num_resources(periods, resources).\
fill_with_default(task_period_list).\
kvapply(lambda k, v: task_reqs[k[0]] - v)
if not deficit_only:
return task_under_assigned
else:
return task_under_assigned.vfilter(lambda x: x > 0)
def check_resource_in_candidates(self, **params):
task_solution = self.solution.get_tasks()
if not len(task_solution):
return sd.SuperDict()
task_candidates = self.instance.get_task_candidates()
bad_assignment = {
(resource, period): task
for (resource, period), task in task_solution.items()
if resource not in task_candidates[task]
}
return sd.SuperDict.from_dict(bad_assignment)
def get_consumption(self):
hours = self.instance.get_tasks("consumption")
return {k: hours[v] for k, v in self.solution.get_tasks().items()}
def set_remainingtime(self, resource, period, time, value, maint='M'):
"""
This procedure *updates* the remaining time in the aux property of the solution.
:param str resource:
:param str period:
:param str time: ret or rut
:param float value: remaining time
:return: True
"""
tup = [time, maint, resource, period]
self.solution.data['aux'].set_m(*tup, value=value)
return True
def get_remainingtime(self, resource=None, period=None, time='rut', maint='M'):
try:
data = self.solution.data['aux'][time][maint]
if resource is None:
return data
data2 = data[resource]
if period is None:
return data2
return data2[period]
except KeyError:
return None
def update_resource_all(self, resource):
periods_to_update = self.instance.get_periods()
for t in ['rut', 'ret']:
self.update_time_usage(resource, periods_to_update, time=t)
def update_time_usage_all(self, resource, periods, time):
maints = self.instance.get_maintenances()
for m in maints:
self.update_time_usage(resource, periods, time=time, maint=m)
return True
def update_time_usage(self, resource, periods, previous_value=None, time='rut', maint='M'):
"""
This procedure *updates* the time of each period using set_remainingtime.
        It assumes none of the periods has a maintenance,
        so each period is either filled with a task or left empty.
        :param resource: a resource to update
        :param periods: an ordered list of consecutive periods to update
        :param previous_value: optional value for the remaining time before the first period
        :param time: rut or ret, depending on whether it's usage time or elapsed time
        :param maint: name of the maintenance whose remaining time is updated
        :return: True
"""
if self.instance.get_max_remaining_time(time, maint) is None or not periods:
# we do not update maints that do not check this
# if periods is empty: we have nothing to update
return True
if previous_value is None:
_period = self.instance.get_prev_period(periods[0])
previous_value = self.get_remainingtime(resource, _period, time, maint=maint)
for period in periods:
value = previous_value - self.get_consumption_individual(resource, period, time)
self.set_remainingtime(resource=resource, period=period,
time=time, value=value, maint=maint)
previous_value = value
return True
def get_consumption_individual(self, resource, period, time='rut'):
if time == 'ret':
return 1
task = self.solution.data['task'].get_m(resource, period)
if task is not None:
return self.instance.data['tasks'].get_m(task, 'consumption', default=0)
# here, we check for an optional overwriting of defaults from the solution...
consumption = self.solution.data.get_m('new_default', resource, period)
if consumption is not None:
return consumption
# now get the default consumption:
return self.instance.get_default_consumption(resource, period)
def get_non_maintenance_periods(self, resource=None, state_list=None):
"""
        :param resource: if not None, we filter to only provide this resource's info
        :param state_list: optional list of states treated as maintenance operations
        :return: a tuplist with the following structure:
            resource: [(resource, start_period1, end_period1), ..., (resource, start_periodN, end_periodN)]
            two consecutive periods being separated by a maintenance operation.
            It's built using the information of the maintenance operations.
"""
# TODO: change to:
# cycles_dict = self.get_all_maintenance_cycles(resource)
# return cycles_dict.to_tuplist()
first, last = self.instance.get_param('start'), self.instance.get_param('end')
maintenances = \
self.get_maintenance_periods(resource, state_list=state_list).\
to_dict(result_col=[1, 2])
if resource is None:
resources = self.instance.get_resources()
else:
resources = [resource]
# we initialize nomaint periods for resources that do not have a single maintenance:
nonmaintenances = [(r, first, last) for r in resources if r not in maintenances]
# now, we iterate over all maintenances to add the before and the after
for res in maintenances:
maints = sorted(maintenances[res], key=lambda x: x[0])
first_maint_start = maints[0][0]
last_maint_end = maints[-1][1]
if first_maint_start > first:
first_maint_start_prev = self.instance.get_prev_period(first_maint_start)
nonmaintenances.append((res, first, first_maint_start_prev))
for maint1, maint2 in zip(maints, maints[1:]):
start = self.instance.get_next_period(maint1[1])
end = self.instance.get_prev_period(maint2[0])
nonmaintenances.append((res, start, end))
if last_maint_end != last:
start = self.instance.get_next_period(last_maint_end)
nonmaintenances.append((res, start, last))
return tl.TupList(nonmaintenances)
def set_start_periods(self):
"""
        This function remakes the start assignments of tasks and states (maintenances).
        It edits the aux part of the solution data.
        :return: the dictionary that it assigns
"""
tasks_start = self.get_task_periods()
states_start = self.get_state_periods()
all_starts = tasks_start + states_start
starts = {(r, t): v for (r, t, v, _) in all_starts}
if 'aux' not in self.solution.data:
self.solution.data['aux'] = sd.SuperDict()
self.solution.data['aux']['start'] = sd.SuperDict.from_dict(starts)
return starts
def set_remaining_usage_time_all(self, time='rut', resource=None):
"""
Wrapper around set_remaining_usage_time to do all maints
:param time:
:param resource:
:return:
"""
maints = self.instance.get_maintenances()
return {m: self.set_remaining_usage_time(time=time, maint=m, resource=resource)
for m in maints if time in self.instance.get_maint_rt(m)}
def set_remaining_usage_time(self, time="rut", maint='M', resource=None):
"""
This function remakes the rut and ret times for all resources.
        It makes no assumptions about states or tasks.
        It edits the aux part of the solution data.
:param time: ret or rut
:param maint: type of maintenance
:param resource: optional filter of resources
:return: the dictionary that's assigned
"""
inst = self.instance
prev_month = inst.get_prev_period(self.instance.get_param('start'))
# initial = self.instance.get_resources(label)
initial = inst.get_initial_state(self.instance.label_rt(time), maint=maint, resource=resource)
# we initialize values for the start of the horizon
max_rem = inst.get_max_remaining_time(time=time, maint=maint)
depends_on = inst.data['maintenances'][maint]['depends_on']
for _res in initial:
self.set_remainingtime(resource=_res, period=prev_month, time=time, value=initial[_res], maint=maint)
# we update values during maintenances
maintenances = self.get_maintenance_periods(state_list=depends_on, resource=resource)
for _res, start, end in maintenances:
for period in inst.get_periods_range(start, end):
self.set_remainingtime(resource=_res, period=period, time=time, value=max_rem, maint=maint)
# we update values in between maintenances
non_maintenances = self.get_non_maintenance_periods(resource=resource, state_list=depends_on)
for _res, start, end in non_maintenances:
# print(resource, start, end)
periods = inst.get_periods_range(start, end)
self.update_time_usage(resource=_res, periods=periods, time=time, maint=maint)
return self.solution.data['aux'][time][maint]
def check_usage_consumption(self, **params):
return self.check_resource_consumption(time='rut', **params, min_value=-1)
def check_elapsed_consumption(self, **params):
return self.check_resource_consumption(time='ret', **params, min_value=0)
def check_resource_consumption(self, time='rut', recalculate=True, min_value=0, **params):
"""
This function (calculates and) checks the "remaining time" for all maintenances
:param time: calculate rut or ret
        :param recalculate: if False, use the cached rut and ret values instead of recomputing them
:param params: optional. compatibility
:return: {(maint, resource, period): remaining time}
"""
if recalculate:
rt_maint = self.set_remaining_usage_time_all(time=time)
else:
rt_maint = self.solution.data['aux'][time]
return sd.SuperDict(rt_maint).to_dictup().\
clean(func=lambda x: x is not None and x <= min_value)
def check_resource_state(self, **params):
task_solution = self.solution.get_tasks()
state_solution = self.solution.get_state_tuplist().take([0, 1])
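        # Added note: the structured-array intersection below flags
        # (resource, period) pairs that carry both a task assignment and a
        # maintenance state, which is infeasible; each offending pair maps to 1.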
task_solution_k = np.fromiter(task_solution.keys(),
dtype=[('A', '<U6'), ('T', 'U7')])
state_solution_k = np.asarray(state_solution,
dtype=[('A', '<U6'), ('T', 'U7')])
duplicated_states = \
np.intersect1d(task_solution_k, state_solution_k)
return sd.SuperDict({tuple(item): 1 for item in duplicated_states})
def check_min_max_assignment(self, **params):
"""
        :return: periods where the min or max assignment (including maintenance)
            is not respected, in format (resource, start, end, state): error.
            A negative error means the assignment is longer than the max;
            a positive one means it is shorter than the min.
"""
# TODO: do it with self.solution.get_schedule()
tasks = self.solution.get_tasks().to_tuplist()
maints = self.solution.get_state_tuplist()
previous = sd.SuperDict.from_dict(self.instance.get_resources("states")).\
to_dictup().to_tuplist()
min_assign = self.instance.get_min_assign()
max_assign = self.instance.get_max_assign()
num_periods = self.instance.get_param('num_period')
ct = self.instance.compare_tups
all_states = maints + tasks + previous
all_states_periods = \
tl.TupList(all_states).\
sorted(key=lambda v: (v[0], v[2], v[1])).\
to_start_finish(ct, sort=False)
first_period = self.instance.get_param('start')
last_period = self.instance.get_param('end')
incorrect = {}
for (resource, start, state, finish) in all_states_periods:
# periods that finish before the horizon
# or at the end are not checked
if finish < first_period or finish == last_period:
continue
size_period = len(self.instance.get_periods_range(start, finish))
if size_period < min_assign.get(state, 1):
incorrect[resource, start, finish, state] = min_assign[state] - size_period
elif size_period > max_assign.get(state, num_periods):
incorrect[resource, start, finish, state] = max_assign[state] - size_period
return sd.SuperDict(incorrect)
def check_fixed_assignments(self, **params):
first_period = self.instance.get_param('start')
last_period = self.instance.get_param('end')
state_tasks = self.solution.get_state_tasks().to_list()
fixed_states = self.instance.get_fixed_states()
fixed_states_h = \
fixed_states.\
vfilter(lambda x: first_period <= x[2] <= last_period).\
take([0, 2, 1])
diff_tups = set(fixed_states_h) - set(state_tasks)
return sd.SuperDict({k: 1 for k in diff_tups})
def check_min_available(self, deficit_only=True, periods=None, **params):
"""
:return: periods where the min availability is not guaranteed.
"""
if periods is None:
periods = self.instance.get_periods().to_set()
else:
periods = set(periods)
res_clusters = self.instance.get_cluster_candidates().list_reverse()
cluster_data = self.instance.get_cluster_constraints()
max_candidates = cluster_data['num'].kfilter(lambda k: k[1] in periods)
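        # Added note (reading of the chain below): for every (cluster, period),
        # count how many of the cluster's candidate resources are in
        # maintenance 'M', then return cluster_data['num'] minus that count;
        # negative values mean the minimum availability is not guaranteed.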
num_maintenances = \
self.get_states().\
vfilter(lambda v: v[1] in periods and v[2] in {'M'}). \
to_dict(None). \
vapply(lambda v: res_clusters[v[0]]). \
to_tuplist(). \
vapply(lambda v: (*v, res_clusters[v[0]])).\
to_dict(indices=[3, 1]).to_lendict()
over_assigned = max_candidates.kvapply(lambda k, v: v - num_maintenances.get(k, 0))
if deficit_only:
over_assigned = over_assigned.vfilter(lambda x: x < 0)
return over_assigned
def check_min_flight_hours(self, recalculate=True, deficit_only=True, periods=None, resources=None, **params):
"""
:param recalculate: if True, recalculate remaining usage times (rut) instead of using the cache
:param deficit_only: if True, return only the failed checks (negative deficits)
:param periods: optional filter for periods to check
:param resources: optional filter for resources to count
:param params: for compatibility
:return: dict (cluster, period) -> flight-hour balance against the minimum (negative = deficit)
"""
if recalculate:
ruts = self.set_remaining_usage_time(time='rut', maint='M')
else:
ruts = self.get_remainingtime(time='rut', maint='M')
if resources is not None:
ruts = ruts.filter(resources)
all_periods = self.instance.get_periods().to_set()
if periods is None:
periods = all_periods
else:
periods = set(periods) & all_periods
cluster_data = self.instance.get_cluster_constraints()
min_hours = cluster_data['hours']
clusters = self.instance.get_cluster_candidates().list_reverse()
ruts_dt = ruts.to_dictup()
data = [((c, p), h) for (r, p), h in ruts_dt.items()
for c in clusters[r] if p in periods]
keys, weights = zip(*data)
dict_keys = min_hours.keys_tl()
equiv = {k: pos for pos, k in enumerate(dict_keys)}
keys_int = np.array([equiv[k] for k in keys])
dict_values = np.bincount(keys_int, weights=weights)
hours_deficit2 = sd.SuperDict({k: v - min_hours[k] for k, v in zip(dict_keys, dict_values)})
if deficit_only:
hours_deficit2 = hours_deficit2.vfilter(lambda x: x < 0)
return hours_deficit2
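# Editor's note (illustrative sketch): the aggregation above relies on np.bincount with
# weights, which sums rut hours per integer-encoded (cluster, period) key, e.g.:
#   keys_int = np.array([0, 0, 1]); weights = np.array([10.0, 5.0, 7.0])
#   np.bincount(keys_int, weights=weights)  # -> array([15., 7.])
# If the highest-indexed keys never appear in the data the result is shorter than dict_keys
# and the zip silently drops them; passing minlength=len(dict_keys) would keep one bin per
# key (left here as an observation, not applied to the original code).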
def check_min_flight_hours_seminew(self, recalculate=True, deficit_only=True, periods=None, **params):
if recalculate:
ruts = self.set_remaining_usage_time(time='rut', maint='M')
else:
ruts = self.get_remainingtime(time='rut', maint='M')
all_periods = self.instance.get_periods().to_set()
if periods is None:
periods = all_periods
else:
periods = set(periods) & all_periods
cluster_data = self.instance.get_cluster_constraints()
min_hours = cluster_data['hours']
resources = self.instance.get_resources().keys_tl().vapply(int).sorted()
clusters = \
self.instance.get_cluster_candidates(). \
vapply(lambda v: set(int(vv) for vv in v)). \
vapply(lambda v: [r in v for r in resources]). \
vapply(np.array)
positions = self.instance.get_period_positions()
ruts_dt = ruts.to_dictup()
res_arr, periods_arr = zip(*ruts_dt.keys())
rut_arr = ruts_dt.values_l()
res_arr = np.array(res_arr, dtype='int')
periods_arr = np.array([positions[p] for p in periods_arr])
rut_arr = np.array(rut_arr)
def deficit(c, p, candidates):
pos = positions[p]
mask = (candidates[res_arr]) & (periods_arr == pos)
return np.sum(rut_arr[mask]) - min_hours[c, p]
hours_deficit = \
sd.SuperDict({
(c, p): deficit(c, p, candidates)
for c, candidates in clusters.items()
for p in periods
})
if deficit_only:
hours_deficit = hours_deficit.vfilter(lambda x: x < 0)
return hours_deficit
def check_maints_size(self, **params):
maints = self.instance.get_maintenances()
duration = maints.get_property('duration_periods')
inst = self.instance
start, end = inst.get_param('start'), inst.get_param('end')
m_s_tab_r = pd.DataFrame.from_records(self.get_state_periods().to_list(),
columns=['resource', 'start', 'maint', 'end'])
def dist_periods(series, series2):
return pd.Series(self.instance.get_dist_periods(p, p2) for p, p2 in zip(series, series2))
# TODO: this check was too strict but the model complied with it, apparently...
inside = (m_s_tab_r.start > start) & (m_s_tab_r.end < end)
m_s_tab = m_s_tab_r[inside].reset_index()
m_s_tab['dist'] = dist_periods(m_s_tab.start, m_s_tab.end) + 1
m_s_tab['duration'] = m_s_tab.maint.map(duration)
m_s_tab['value'] = m_s_tab.dist - m_s_tab.duration
error = m_s_tab[m_s_tab.value != 0]
result = error[['resource', 'start', 'value']].to_records(index=False)
return tl.TupList(result).to_dict(result_col=2, is_list=False)
def check_min_distance_maints(self, **params):
maints = self.instance.get_maintenances()
elapsed_time_size = maints.get_property('elapsed_time_size').clean(func=lambda x: x is not None)
first, last = self.instance.get_first_last_period()
_next = self.instance.get_next_period
def compare(tup, last_tup, pp):
return tup[0]!=last_tup[0] or tup[1]!=last_tup[1] or\
tup[3] > last_tup[3]
rets = \
elapsed_time_size.\
kapply(lambda m: self.get_remainingtime(time='ret', maint=m)).to_dictup()
# periods where the gap between consecutive maintenances is too short
ret_before_maint = \
rets.\
to_tuplist().sorted().\
to_start_finish(compare_tups=compare, sort=False, pp=2).\
vfilter(lambda x: x[4] < last).\
take([0, 1, 2, 4]).\
to_dict(result_col=None).\
kapply(lambda k: (rets[k[0], k[1], k[3]], k[0])).\
clean(func=lambda v: v[0] > elapsed_time_size[v[1]]).\
vapply(lambda v: v[0])
# maybe filter resources when getting states:
# ret_before_maint.keys_tl().take(1).unique2()  # result was unused; kept as a note
states = self.get_states().to_dict(result_col=2, is_list=False)
# here we filter the errors to the ones that involve the same
# maintenance done twice.
return \
ret_before_maint.\
kapply(lambda k: (k[1], _next(k[3]))).\
vapply(lambda v: states.get(v)).\
to_tuplist().\
vfilter(lambda x: x[0] == x[4]).\
take([0, 1, 2, 3]).\
to_dict(result_col=None).\
vapply(lambda v: ret_before_maint[v])
def get_objective_function(self, *args, **kwargs):
raise NotImplementedError("This is no longer supported in the master class")
def get_kpis(self):
raise NotImplementedError("This is no longer supported")
def export_solution(self, path, sheet_name='solution'):
tasks = self.solution.get_tasks().to_dictup()
hours = self.instance.get_tasks('consumption')
tasks_edited = [(t[0], t[1], '{} ({}h)'.format(t[2], hours[t[2]]))
for t in tasks]
statesMissions = self.solution.get_state_tuplist() + tasks_edited
table = | pd.DataFrame(statesMissions, columns=['resource', 'period', 'status']) | pandas.DataFrame |
import re
import pandas as pd
import spacy as sp
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from foobar.data_loader import load_all_stock_tags
def clean_text_col(df, col):
def text_processing(text):
text = str(text)
text = re.sub(r"@[^\s]+", "", text)  # remove handles
text = re.sub(r"http\S+", "", text) # remove URLS
text = " ".join(re.findall(r"\w+", text)) # remove special chars
text = re.sub(r"\s+[a-zA-Z]\s+", "", text) # remove single chars
text = re.sub(r"\s+", " ", text, flags=re.I) # multiple to single spaces
return text
df[col] = df[col].apply(text_processing)
df = df.dropna(subset=[col])
return df
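# Editor's note (illustrative sketch, hypothetical input): the cleaning above behaves roughly as
#   text_processing("@user check https://t.co/x $GME to the moon!!")
#   -> "check GME to the moon"
# handles and URLs are dropped, non-word characters removed, and whitespace collapsed.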
def perform_tag_extraction(df, col):
stock_tags_df = load_all_stock_tags()
df["tag"] = df[col].str.upper().str.split()
tags_df = df[["id", "tag"]].explode("tag")
tags_df = tags_df[tags_df["tag"].isin(stock_tags_df["finnhub_tags"])]
return tags_df.drop_duplicates()
def perform_entity_extraction(df, col):
nlps = sp.load("en_core_web_sm")
def entity_extraction(x):
_id, text = x["id"], x[col]
doc = nlps(text)
return [(_id, chunk.text) for chunk in doc.noun_chunks]
tags_sf = df[["id", col]].apply(entity_extraction, axis=1)
if tags_sf.empty:
return pd.DataFrame()
tags_sf = tags_sf.loc[tags_sf.astype(str) != "[]"]
tags_df = pd.DataFrame(tags_sf.explode().tolist(), columns=["post_id", "tag"])
tags_df["tag"] = tags_df["tag"].str.split()
tags_df = tags_df.explode("tag")
return tags_df
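# Editor's note (illustrative sketch): spaCy's noun_chunks yields noun phrases, so for a
# hypothetical post like "GME stock is going to the moon" it would return chunks such as
# "GME stock" and "the moon"; each chunk is then split into single tokens ("GME", "stock", ...)
# before being matched against the stock-tag list downstream.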
def filter_tags_by_stock_tags(tags_df):
tags_df["tag"] = tags_df["tag"].str.upper()
stock_tags_df = load_all_stock_tags()
if stock_tags_df.empty:
return pd.DataFrame()
print(stock_tags_df)
    tags_df = tags_df.loc[tags_df["tag"].isin(stock_tags_df["finnhub_tags"])]
return tags_df
def filter_by_cols(df, cols_list):
"""Keep only columns in cols_list
Note: potential comment columns include the following:
"associated_award",
"author",
"author_flair_background_color",
"author_flair_css_class",
"author_flair_richtext",
"author_flair_template_id",
"author_flair_text",
"author_flair_text_color",
"author_flair_type",
"author_fullname",
"author_patreon_flair",
"author_premium",
"awarders",
"body",
"collapsed_because_crowd_control",
"created_utc",
"gildings",
"id",
"is_submitter",
"link_id",
"locked",
"no_follow",
"parent_id",
"permalink",
"retrieved_on",
"score",
"send_replies",
"stickied",
"subreddit",
"subreddit_id",
"total_awards_received",
"treatment_tags",
"top_awarded_type",
"edited",
"distinguished",
"comment_type",
"author_cakeday",
"editable",
"media_metadata",
"""
cols_to_keep = [col for col in cols_list if col in df.columns]
return df[cols_to_keep]
def filter_bad_utcs(df, col):
return df[df[col].apply(lambda x: str(x).isdigit())]
def utc_to_datetime(df, col):
return | pd.to_datetime(df[col], unit="s") | pandas.to_datetime |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from subprocess import call
from orca import *
from orca.data import *
climate_indices = True
climate_forecasts = True
run_projection = True
consolidate_outputs = True
consolidate_inputs = False
#need climate data folders for this, which are too large for github (a few are present in repository for example)
#######Define a few parameters
SHA_shift = 0
ORO_shift = 0
FOL_shift = 0
index_exceedance_sac = 8
window_type = 'rolling'
window_length = 30
SHA_exceedance = {"W": 2, "AN": 2, "BN": 2, "D": 2, "C": 2}
ORO_exceedance = {"W": 2, "AN": 2, "BN": 2, "D": 2, "C": 2}
FOL_exceedance = {"W": 10, "AN": 10, "BN": 5, "D": 2, "C": 1}
call(['mkdir', 'orca/data/scenario_runs'])
call(['mkdir', 'orca/data/climate_results'])
call(['mkdir', 'orca/data/climate_input_forecasts'])
with open('orca/data/scenario_names.txt') as f:
scenarios = f.read().splitlines()
i = 0
for sc in scenarios:
i+=1
print('projection # %s' %i)
call(['mkdir', 'orca/data/scenario_runs/%s'%sc])
if climate_indices:
input_df = pd.read_csv('orca/data/input_climate_files/%s_input_data.csv'%sc, index_col = 0, parse_dates = True)
gains_loop_df = pd.read_csv('orca/data/historical_runs_data/gains_loops.csv', index_col = 0, parse_dates = True)
OMR_loop_df = pd.read_csv('orca/data/historical_runs_data/OMR_loops.csv', index_col = 0, parse_dates = True)
proj_ind_df = process_projection(input_df,gains_loop_df,OMR_loop_df,'orca/data/json_files/gains_regression.json','orca/data/json_files/inf_regression.json',window = window_type)
proj_ind_df.to_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc))
if climate_forecasts:
if not climate_indices:
proj_ind_df = pd.read_csv('orca/data/scenario_runs/%s/orca-data-processed-%s.csv'%(sc,sc), index_col = 0, parse_dates = True)
WYI_stats_file = pd.read_csv('orca/data/forecast_regressions/WYI_forcasting_regression_stats.csv', index_col = 0, parse_dates = True)
carryover_stats_file = pd.read_csv('orca/data/forecast_regressions/carryover_regression_statistics.csv', index_col = 0, parse_dates = True)
forc_df= projection_forecast(proj_ind_df,WYI_stats_file,carryover_stats_file,window_type,window_length, index_exceedance_sac)
forc_df.to_csv('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(sc,sc))
if run_projection:
model = Model('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(sc,sc), 'orca/data/historical_runs_data/results.csv',SHA_shift, ORO_shift, FOL_shift,sd='10-01-1999',projection = True, sim_gains = True) #climate scenario test
results = model.simulate() # takes a while... save results
results.to_csv('orca/data/scenario_runs/%s/%s-results.csv'%(sc,sc))
if consolidate_outputs:
result_ids =['SHA_storage','SHA_out','SHA_target','SHA_out_to_delta','SHA_tocs','FOL_storage','FOL_out',
'FOL_target','FOL_out_to_delta','FOL_tocs','ORO_storage','ORO_out','ORO_target','ORO_out_to_delta',
'ORO_tocs','DEL_in','DEL_out','DEL_TRP_pump','DEL_HRO_pump','SHA_sodd','SHA_spill',
'ORO_sodd','ORO_spill','FOL_sodd','FOL_spill', 'DEL_X2','ORO_forecast','FOL_forecast','SHA_forecast','DEL_SODD_CVP','DEL_SODD_SWP']
for obj in result_ids:
df = pd.DataFrame()
print(obj)
i = 0
for sc in scenarios:
i+=1
print('projection # %s' %i)
dfobj = pd.read_csv('orca/data/scenario_runs/%s/%s-results.csv'%(sc,sc), parse_dates = True, index_col = 0)
df['%s'%sc] = dfobj[obj]
df.to_csv('orca/data/climate_results/%s.csv'%obj)
if consolidate_inputs:
input_ids = ['TLG_fnf', 'FOL_fnf', 'MRC_fnf', 'MIL_fnf', 'NML_fnf', 'ORO_fnf',
'MKM_fnf', 'BND_fnf', 'NHG_fnf', 'SHA_fnf', 'YRS_fnf', 'BKL_swe',
'SHA_pr', 'ORO_pr', 'FOL_pr', 'SHA_tas', 'ORO_tas', 'FOL_tas',
'SHA_tasmax', 'ORO_tasmax', 'FOL_tasmax', 'SHA_tasmin', 'ORO_tasmin',
'FOL_tasmin', 'WY', 'DOWY', 'SR_WYI', 'SR_WYT', 'SR_WYT_rolling',
'SJR_WYI', 'SJR_WYT', '8RI', 'SHA_fci', 'ORO_fci', 'FOL_fci', 'GOL_swe',
'CSL_swe', 'HYS_swe', 'SCN_swe', 'RBB_swe', 'CAP_swe', 'RBP_swe',
'HMB_swe', 'FOR_swe', 'RTL_swe', 'GRZ_swe', 'SDF_swe', 'SLT_swe',
'MED_swe', 'BND_swe', 'ORO_swe', 'YRS_swe', 'FOL_swe', 'aprjul_slope',
'aprjul_intercept', 'aprjul_mean', 'aprjul_std', 'octmar_mean',
'octmar_std', 'octmar_intercept', 'octmar_slope', 'WYI_sim', 'WYT_sim',
'gains_sim', 'SHA_snowpack', 'SHA_cum_flow_to_date',
'SHA_remaining_flow', 'SHA_slope', 'SHA_intercept', 'SHA_mean',
'SHA_std', 'ORO_snowpack', 'ORO_cum_flow_to_date', 'ORO_remaining_flow',
'ORO_slope', 'ORO_intercept', 'ORO_mean', 'ORO_std', 'FOL_snowpack',
'FOL_cum_flow_to_date', 'FOL_remaining_flow', 'FOL_slope',
'FOL_intercept', 'FOL_mean', 'FOL_std', 'X2']
for obj in input_ids:
df = pd.DataFrame()
print(obj)
i = 0
for sc in scenarios:
i+=1
print('projection # %s' %i)
dfobj = | pd.read_csv('orca/data/scenario_runs/%s/orca-data-climate-forecasted-%s.csv'%(sc,sc), parse_dates = True, index_col = 0) | pandas.read_csv |
# Volatility Futures vs Equity Index Futures
import numpy as np
import pandas as pd
# import matplotlib.pyplot as plt
# import statsmodels.formula.api as sm
# import statsmodels.tsa.stattools as ts
# import statsmodels.tsa.vector_ar.vecm as vm
entryThreshold = 0.1
onewaytcost = 1 / 10000
# VX futures
vx = pd.read_csv('inputDataDaily_VX_20120507.csv')
vx['Date'] = pd.to_datetime(vx['Date'], format='%Y%m%d').dt.date # remove HH:MM:SS
vx.set_index('Date', inplace=True)
# VIX index
vix = pd.read_csv('VIX.csv')
vix['Date'] = | pd.to_datetime(vix['Date'], format='%Y-%m-%d') | pandas.to_datetime |
import os
from datetime import date
from dask.dataframe import DataFrame as DaskDataFrame
from numpy import nan, ndarray
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame, Series, Timedelta, Timestamp
from pandas.testing import assert_frame_equal, assert_series_equal
from pymove import (
DaskMoveDataFrame,
MoveDataFrame,
PandasDiscreteMoveDataFrame,
PandasMoveDataFrame,
read_csv,
)
from pymove.core.grid import Grid
from pymove.utils.constants import (
DATE,
DATETIME,
DAY,
DIST_PREV_TO_NEXT,
DIST_TO_PREV,
HOUR,
HOUR_SIN,
LATITUDE,
LOCAL_LABEL,
LONGITUDE,
PERIOD,
SITUATION,
SPEED_PREV_TO_NEXT,
TID,
TIME_PREV_TO_NEXT,
TRAJ_ID,
TYPE_DASK,
TYPE_PANDAS,
UID,
WEEK_END,
)
list_data = [
[39.984094, 116.319236, '2008-10-23 05:53:05', 1],
[39.984198, 116.319322, '2008-10-23 05:53:06', 1],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
[39.984224, 116.319402, '2008-10-23 05:53:11', 2],
]
str_data_default = """
lat,lon,datetime,id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_different = """
latitude,longitude,time,traj_id
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
str_data_missing = """
39.984093,116.319236,2008-10-23 05:53:05,4
39.9842,116.319322,2008-10-23 05:53:06,1
39.984222,116.319402,2008-10-23 05:53:11,2
39.984222,116.319402,2008-10-23 05:53:11,2
"""
def _default_move_df():
return MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
def _default_pandas_df():
return DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
def test_move_data_frame_from_list():
move_df = _default_move_df()
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_file(tmpdir):
d = tmpdir.mkdir('core')
file_default_columns = d.join('test_read_default.csv')
file_default_columns.write(str_data_default)
filename_default = os.path.join(
file_default_columns.dirname, file_default_columns.basename
)
move_df = read_csv(filename_default)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_different_columns = d.join('test_read_different.csv')
file_different_columns.write(str_data_different)
filename_diferent = os.path.join(
file_different_columns.dirname, file_different_columns.basename
)
move_df = read_csv(
filename_diferent,
latitude='latitude',
longitude='longitude',
datetime='time',
traj_id='traj_id',
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
file_missing_columns = d.join('test_read_missing.csv')
file_missing_columns.write(str_data_missing)
filename_missing = os.path.join(
file_missing_columns.dirname, file_missing_columns.basename
)
move_df = read_csv(
filename_missing, names=[LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_dict():
dict_data = {
LATITUDE: [39.984198, 39.984224, 39.984094],
LONGITUDE: [116.319402, 116.319322, 116.319402],
DATETIME: [
'2008-10-23 05:53:11',
'2008-10-23 05:53:06',
'2008-10-23 05:53:06',
],
}
move_df = MoveDataFrame(
data=dict_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_move_data_frame_from_data_frame():
df = _default_pandas_df()
move_df = MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
assert MoveDataFrame.has_columns(move_df)
try:
MoveDataFrame.validate_move_data_frame(move_df)
except Exception:
assert False
assert isinstance(move_df, PandasMoveDataFrame)
def test_attribute_error_from_data_frame():
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['laterr', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lonerr', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
df = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetimerr', 'id'],
index=[0, 1, 2, 3],
)
try:
MoveDataFrame(
data=df, latitude=LATITUDE, longitude=LONGITUDE, datetime=DATETIME
)
raise AssertionError(
'KeyError not raised by MoveDataFrame'
)
except KeyError:
pass
def test_lat():
move_df = _default_move_df()
lat = move_df.lat
srs = Series(
data=[39.984094, 39.984198, 39.984224, 39.984224],
index=[0, 1, 2, 3],
dtype='float64',
name='lat',
)
assert_series_equal(lat, srs)
def test_lon():
move_df = _default_move_df()
lon = move_df.lon
srs = Series(
data=[116.319236, 116.319322, 116.319402, 116.319402],
index=[0, 1, 2, 3],
dtype='float64',
name='lon',
)
assert_series_equal(lon, srs)
def test_datetime():
move_df = _default_move_df()
datetime = move_df.datetime
srs = Series(
data=[
'2008-10-23 05:53:05',
'2008-10-23 05:53:06',
'2008-10-23 05:53:11',
'2008-10-23 05:53:11',
],
index=[0, 1, 2, 3],
dtype='datetime64[ns]',
name='datetime',
)
assert_series_equal(datetime, srs)
def test_loc():
move_df = _default_move_df()
assert move_df.loc[0, TRAJ_ID] == 1
loc_ = move_df.loc[move_df[LONGITUDE] > 116.319321]
expected = DataFrame(
data=[
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[1, 2, 3],
)
assert_frame_equal(loc_, expected)
def test_iloc():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=0,
)
assert_series_equal(move_df.iloc[0], expected)
def test_at():
move_df = _default_move_df()
assert move_df.at[0, TRAJ_ID] == 1
def test_values():
move_df = _default_move_df()
expected = [
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
]
assert_array_equal(move_df.values, expected)
def test_columns():
move_df = _default_move_df()
assert_array_equal(
move_df.columns, [LATITUDE, LONGITUDE, DATETIME, TRAJ_ID]
)
def test_index():
move_df = _default_move_df()
assert_array_equal(move_df.index, [0, 1, 2, 3])
def test_dtypes():
move_df = _default_move_df()
expected = Series(
data=['float64', 'float64', '<M8[ns]', 'int64'],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.dtypes, expected)
def test_shape():
move_df = _default_move_df()
assert move_df.shape == (4, 4)
def test_len():
move_df = _default_move_df()
assert move_df.len() == 4
def test_unique():
move_df = _default_move_df()
assert_array_equal(move_df['id'].unique(), [1, 2])
def test_head():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1],
)
assert_frame_equal(move_df.head(2), expected)
def test_tail():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3],
)
assert_frame_equal(move_df.tail(2), expected)
def test_number_users():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert move_df.get_users_number() == 1
move_df[UID] = [1, 1, 2, 3]
assert move_df.get_users_number() == 3
def test_to_numpy():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_numpy(), ndarray)
def test_to_dict():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_dict(), dict)
def test_to_grid():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
g = move_df.to_grid(8)
assert isinstance(g, Grid)
def test_to_data_frame():
move_df = MoveDataFrame(
data=list_data,
latitude=LATITUDE,
longitude=LONGITUDE,
datetime=DATETIME,
traj_id=TRAJ_ID,
)
assert isinstance(move_df.to_data_frame(), DataFrame)
def test_to_discrete_move_df():
move_df = PandasDiscreteMoveDataFrame(
data={DATETIME: ['2020-01-01 01:08:29',
'2020-01-05 01:13:24',
'2020-01-06 02:21:53',
'2020-01-06 03:34:48',
'2020-01-08 05:55:41'],
LATITUDE: [3.754245,
3.150849,
3.754249,
3.165933,
3.920178],
LONGITUDE: [38.3456743,
38.6913486,
38.3456743,
38.2715962,
38.5161605],
TRAJ_ID: ['pwe-5089',
'xjt-1579',
'tre-1890',
'xjt-1579',
'pwe-5089'],
LOCAL_LABEL: [1, 4, 2, 16, 32]},
)
assert isinstance(
move_df.to_dicrete_move_df(), PandasDiscreteMoveDataFrame
)
def test_describe():
move_df = _default_move_df()
expected = DataFrame(
data=[
[4.0, 4.0, 4.0],
[39.984185, 116.31934049999998, 1.5],
[6.189237971348586e-05, 7.921910543639078e-05, 0.5773502691896257],
[39.984094, 116.319236, 1.0],
[39.984172, 116.3193005, 1.0],
[39.984211, 116.319362, 1.5],
[39.984224, 116.319402, 2.0],
[39.984224, 116.319402, 2.0],
],
columns=['lat', 'lon', 'id'],
index=['count', 'mean', 'std', 'min', '25%', '50%', '75%', 'max'],
)
assert_frame_equal(move_df.describe(), expected)
def test_memory_usage():
move_df = _default_move_df()
expected = Series(
data=[128, 32, 32, 32, 32],
index=['Index', 'lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.memory_usage(), expected)
def test_copy():
move_df = _default_move_df()
cp = move_df.copy()
assert_frame_equal(move_df, cp)
cp.at[0, TRAJ_ID] = 0
assert move_df.loc[0, TRAJ_ID] == 1
assert move_df.loc[0, TRAJ_ID] != cp.loc[0, TRAJ_ID]
def test_generate_tid_based_on_id_datetime():
move_df = _default_move_df()
new_move_df = move_df.generate_tid_based_on_id_datetime(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'12008102305',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'12008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'22008102305',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'tid'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TID not in move_df
move_df.generate_tid_based_on_id_datetime()
assert_frame_equal(move_df, expected)
def test_generate_date_features():
move_df = _default_move_df()
new_move_df = move_df.generate_date_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
date(2008, 10, 23),
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
date(2008, 10, 23),
],
],
columns=['lat', 'lon', 'datetime', 'id', 'date'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DATE not in move_df
move_df.generate_date_features()
assert_frame_equal(move_df, expected)
def test_generate_hour_features():
move_df = _default_move_df()
new_move_df = move_df.generate_hour_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 5],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 5],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR not in move_df
move_df.generate_hour_features()
assert_frame_equal(move_df, expected)
def test_generate_day_of_the_week_features():
move_df = _default_move_df()
new_move_df = move_df.generate_day_of_the_week_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Thursday',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Thursday',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'day'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DAY not in move_df
move_df.generate_day_of_the_week_features()
assert_frame_equal(move_df, expected)
def test_generate_weekend_features():
move_df = _default_move_df()
new_move_df = move_df.generate_weekend_features(inplace=False)
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1, 0],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2, 0],
],
columns=['lat', 'lon', 'datetime', 'id', 'weekend'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert WEEK_END not in move_df
move_df.generate_weekend_features()
assert_frame_equal(move_df, expected)
def test_generate_time_of_day_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_of_day_features(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
'Early morning',
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
'Early morning',
],
],
columns=['lat', 'lon', 'datetime', 'id', 'period'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert PERIOD not in move_df
move_df.generate_time_of_day_features()
assert_frame_equal(move_df, expected)
def test_generate_datetime_in_format_cyclical():
move_df = _default_move_df()
new_move_df = move_df.generate_datetime_in_format_cyclical(inplace=False)
expected = DataFrame(
data=[
[
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
[
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
2,
0.9790840876823229,
0.20345601305263375,
],
],
columns=['lat', 'lon', 'datetime', 'id', 'hour_sin', 'hour_cos'],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert HOUR_SIN not in move_df
move_df.generate_datetime_in_format_cyclical()
assert_frame_equal(move_df, expected)
def test_generate_dist_time_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_time_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
nan,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
1.0,
13.690153134343689,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
0.0,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'time_to_prev',
'speed_to_prev',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_TO_PREV not in move_df
move_df.generate_dist_time_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_dist_features():
move_df = _default_move_df()
new_move_df = move_df.generate_dist_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert DIST_PREV_TO_NEXT not in move_df
move_df.generate_dist_features()
assert_frame_equal(move_df, expected)
def test_generate_time_features():
move_df = _default_move_df()
new_move_df = move_df.generate_time_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
1.0,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
1.0,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'time_to_prev',
'time_to_next',
'time_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert TIME_PREV_TO_NEXT not in move_df
move_df.generate_time_features()
assert_frame_equal(move_df, expected)
def test_generate_speed_features():
move_df = _default_move_df()
new_move_df = move_df.generate_speed_features(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
nan,
nan,
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'speed_to_prev',
'speed_to_next',
'speed_prev_to_next',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SPEED_PREV_TO_NEXT not in move_df
move_df.generate_speed_features()
assert_frame_equal(move_df, expected)
def test_generate_move_and_stop_by_radius():
move_df = _default_move_df()
new_move_df = move_df.generate_move_and_stop_by_radius(inplace=False)
expected = DataFrame(
data=[
[
1,
39.984094,
116.319236,
Timestamp('2008-10-23 05:53:05'),
nan,
13.690153134343689,
nan,
'nan',
],
[
1,
39.984198,
116.319322,
Timestamp('2008-10-23 05:53:06'),
13.690153134343689,
nan,
nan,
'move',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
nan,
0.0,
nan,
'nan',
],
[
2,
39.984224,
116.319402,
Timestamp('2008-10-23 05:53:11'),
0.0,
nan,
nan,
'stop',
],
],
columns=[
'id',
'lat',
'lon',
'datetime',
'dist_to_prev',
'dist_to_next',
'dist_prev_to_next',
'situation',
],
index=[0, 1, 2, 3],
)
assert_frame_equal(new_move_df, expected)
assert isinstance(new_move_df, PandasMoveDataFrame)
assert SITUATION not in move_df
move_df.generate_move_and_stop_by_radius()
assert_frame_equal(move_df, expected)
def test_time_interval():
move_df = _default_move_df()
assert move_df.time_interval() == Timedelta('0 days 00:00:06')
def test_get_bbox():
move_df = _default_move_df()
assert_allclose(
move_df.get_bbox(), (39.984093, 116.31924, 39.984222, 116.319405)
)
def test_min():
move_df = _default_move_df()
expected = Series(
data=[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.min(), expected)
def test_max():
move_df = _default_move_df()
expected = Series(
data=[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
index=['lat', 'lon', 'datetime', 'id'],
dtype='object',
name=None,
)
assert_series_equal(move_df.max(), expected)
def test_count():
move_df = _default_move_df()
expected = Series(
data=[4, 4, 4, 4],
index=['lat', 'lon', 'datetime', 'id'],
dtype='int64',
name=None,
)
assert_series_equal(move_df.count(), expected)
def test_group_by():
move_df = _default_move_df()
expected = _default_pandas_df()
expected = expected.groupby('id').mean()
assert_frame_equal(move_df.groupby(TRAJ_ID).mean(), expected)
def test_select_dtypes():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236],
[39.984198, 116.319322],
[39.984224, 116.319402],
[39.984224, 116.319402],
],
columns=['lat', 'lon'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_df.select_dtypes(include='float64'), expected)
def test_astype():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39, 116, 1224741185000000000, 1],
[39, 116, 1224741186000000000, 1],
[39, 116, 1224741191000000000, 2],
[39, 116, 1224741191000000000, 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
result = move_df.astype('int64')
assert_frame_equal(result, expected)
def test_sort_values():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[2, 3, 0, 1],
)
assert_frame_equal(
move_df.sort_values(by=TRAJ_ID, ascending=False), expected
)
def test_reset_index():
move_df = _default_move_df()
move_df = move_df.loc[1:]
assert_array_equal(move_df.index, [1, 2, 3])
move_df.reset_index(inplace=True)
assert_array_equal(move_df.index, [0, 1, 2])
def test_set_index():
move_df = _default_move_df()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05')],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06')],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11')],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11')],
],
columns=['lat', 'lon', 'datetime'],
index=[1, 1, 2, 2],
)
expected.index.name = 'id'
assert_frame_equal(move_df.set_index('id'), expected)
try:
move_df.set_index('datetime', inplace=True)
assert False
except AttributeError:
assert True
def test_drop():
move_df = _default_move_df()
move_df[UID] = [1, 1, 2, 3]
move_test = move_df.drop(columns=[UID])
assert UID not in move_test
assert UID in move_df
assert isinstance(move_test, PandasMoveDataFrame)
move_test = move_df.drop(index=[0, 1])
assert move_test.len() == 2
assert isinstance(move_test, PandasMoveDataFrame)
move_df.drop(columns=[UID], inplace=True)
assert UID not in move_df
assert isinstance(move_df, PandasMoveDataFrame)
try:
move_df.drop(columns=[LATITUDE], inplace=True)
raise AssertionError(
'AttributeError not raised by MoveDataFrame'
)
except AttributeError:
pass
try:
move_df.drop(columns=[LONGITUDE], inplace=True)
raise AssertionError(
'AttributeError not raised by MoveDataFrame'
)
except AttributeError:
pass
try:
move_df.drop(columns=[DATETIME], inplace=True)
raise AssertionError(
'AttributeError not raised by MoveDataFrame'
)
except AttributeError:
pass
def test_duplicated():
move_df = _default_move_df()
expected = [False, True, False, True]
assert_array_equal(move_df.duplicated(TRAJ_ID), expected)
expected = [False, False, True, False]
assert_array_equal(
move_df.duplicated(subset=DATETIME, keep='last'), expected
)
def test_drop_duplicates():
move_df = _default_move_df()
move_test = move_df.drop_duplicates()
expected = DataFrame(
data=[
[39.984094, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2],
)
assert_frame_equal(move_test, expected)
assert isinstance(move_test, PandasMoveDataFrame)
assert move_df.len() == 4
move_df.drop_duplicates(inplace=True)
assert_frame_equal(move_test, expected)
assert isinstance(move_df, PandasMoveDataFrame)
assert move_df.len() == 3
def test_all():
move_df = _default_move_df()
move_df['teste'] = [False, False, True, True]
assert_array_equal(move_df.all(), [True, True, True, True, False])
assert_array_equal(move_df.all(axis=1), [False, False, True, True])
def test_any():
move_df = _default_move_df()
move_df['teste'] = [False, False, False, False]
assert_array_equal(move_df.any(), [True, True, True, True, False])
assert_array_equal(move_df.any(axis=1), [True, True, True, True])
def test_isna():
move_df = _default_move_df()
move_df.at[0, DATETIME] = nan
expected = DataFrame(
data=[
[False, False, True, False],
[False, False, False, False],
[False, False, False, False],
[False, False, False, False],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_df.isna(), expected)
def test_fillna():
move_df = _default_move_df()
move_df.at[0, LATITUDE] = nan
move_test = move_df.fillna(0)
expected = DataFrame(
data=[
[0, 116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[39.984198, 116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[39.984224, 116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lat', 'lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_test, expected)
assert isinstance(move_test, PandasMoveDataFrame)
assert move_df.isna().any(axis=None)
move_df.fillna(0, inplace=True)
assert_frame_equal(move_df, expected)
def test_dropna():
move_df = _default_move_df()
move_df.at[0, LATITUDE] = nan
move_test = move_df.dropna(axis=1)
expected = DataFrame(
data=[
[116.319236, Timestamp('2008-10-23 05:53:05'), 1],
[116.319322, Timestamp('2008-10-23 05:53:06'), 1],
[116.319402, Timestamp('2008-10-23 05:53:11'), 2],
[116.319402, Timestamp('2008-10-23 05:53:11'), 2],
],
columns=['lon', 'datetime', 'id'],
index=[0, 1, 2, 3],
)
assert_frame_equal(move_test, expected)
assert move_test.shape == (4, 3)
assert isinstance(move_test, DataFrame)
assert move_df.shape == (4, 4)
move_test = move_df.dropna(axis=0)
expected = DataFrame(
data=[
[39.984198, 116.319322, | Timestamp('2008-10-23 05:53:06') | pandas.Timestamp |
import numpy as np
import scipy
import matplotlib
import pandas as pd
import sklearn
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
from datetime import datetime
from loss_mse import loss_mse_warmup
from custom_generator import batch_generator
#Keras
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Input, Dense, GRU, Embedding
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
######### Load the data ############
dataset = pd.read_csv('dataset_PSE (1).csv', delimiter = ',')
#print(dataset)
######## Visualize the raw samples ###########
#Vds= dataset.iloc[:,3] # ZAP, raw, unstamped.
#visualize ZAP
#plt.figure()
#Vds.plot()
#plt.show()
#plt.savefig('RAW_Unstamped_ZAP.pdf')
############ Check and repair the data #############
# identify faulty records
#dataset['Godzina'].map(int)
dataset = dataset[dataset['Godzina'] != '2A']
# 2 records dropped
# data check
HCheck = dataset[dataset['Godzina'].map(int).isin([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24])]
#print(HCheck)
# 19669 records
############# Inspect the neighbourhood of the damaged records ##############
#dataset['DH'] = dataset['Data'].map(str)+ dataset['Godzina']
#db_ds = dataset[dataset['Godzina'] == '2A']
#print(db_ds)
#print(dataset)
#db_ds = dataset[dataset['Data'] == 20171029]
#print(db_ds)
#print(dataset)
########## Convert the sample labels to timestamps #############
timeStamps = pd.to_datetime(dataset.Data,format='%Y%m%d') + dataset.Godzina.astype('timedelta64[h]')
#print(timeStamps)
dataset['timeStamp'] = timeStamps
dataset = dataset.drop(['Godzina', 'Data'], axis=1)
dataset = dataset.reindex(columns = ['timeStamp', 'ZAP'])
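# Editor's note (illustrative sketch): the conversion above combines the integer date and the
# hour column into one timestamp, e.g. Data=20160101, Godzina='3' -> Timestamp('2016-01-01 03:00:00')
# (assuming the hour strings convert cleanly to whole-hour timedeltas).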
################ Index by date ###############
dataset.set_index('timeStamp', inplace=True)
############ Reindex to the full date range #################
timeIndexRange = pd.date_range('2016-01-01 3:00', '2018-04-01', freq='H')
dataset.index = pd.DatetimeIndex(dataset.index)
#dataset = dataset.reindex(timeIndexRange, fill_value=0)
dataset = dataset.reindex(timeIndexRange, fill_value=float('NaN'))
#print(dataset)
########## check the structure of the missing data ########
#missingVals = dataset[dataset['ZAP'].isnull()]
#print(missingVals)
############ fill NaN values (linear interpolation) #############
dataset = dataset.interpolate(method='linear')
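# Editor's note (illustrative sketch, hypothetical values): reindexing to the full hourly range
# inserts NaN rows for any missing hours, and interpolate(method='linear') fills them from the
# neighbours, e.g. 10:00 -> 100.0, 11:00 -> NaN, 12:00 -> 110.0 becomes 11:00 -> 105.0.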
########## Visualize the interpolation of the missing data #######
#print (dataset.loc((dataset['index'] > pd.to_datetime('2016-03-03 13:00:00')) & (dataset['index'] <= pd.to_datetime('2016-03-04 14:00:00'))))
anomallyIndexRange = pd.date_range('2016-03-03 1:00', '2016-03-04 23:00', freq='H')
#print (AnomallyIndexRange)
anomally = dataset.loc[anomallyIndexRange]
#plt.figure()
#anomally.plot()
#plt.show()
#plt.savefig('anomally_linear_ZAP.pdf')
####### Visualize the labeled samples ########
#weekly_summary = pd.DataFrame()
#weekly_summary['ZAP'] = dataset['ZAP'].resample('W').mean()
#print(dataset)
#plt.figure()
#dataset.plot()
#plt.show()
#plt.savefig('filled_with_linear_interpolation_ZAP.pdf')
#
#print(dataset.iloc[:,3])
#print (dataset.iloc[:,1])
#dataset['Godzina'] = dataset['Godzina'].resample(freq='H', periods=24)
#dataset['DH'] = dataset['Data'].map(str)+ dataset['Godzina']
#print(dataset)
########## shift the data to build the 24h-ahead target #####
shift_days = 1
shift_steps = shift_days * 24 # Number of hours.
df_pred = dataset['ZAP'].shift(-shift_steps)
#print(df_pred)
#print(dataset)
###### To NumPy
x_data = dataset.values[0:-shift_steps] ## original input data
y_data = df_pred.values[:-shift_steps] ## model targets, 24 samples ahead
y_data = y_data.reshape(-1, 1)
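# Editor's note: shift(-shift_steps) pairs each input row with the load 24 hours later, so after
# dropping the last 24 rows the arrays line up as x_data[t] = ZAP at hour t and
# y_data[t] = ZAP at hour t + 24, which is the supervised framing the GRU is trained on.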
#print (y_data)
num_train = len(x_data)
#print(num_train)
## Split the data into training and test sets
train_split = 0.9
num_train = int(train_split * num_train)
x_train = x_data[0:num_train]
x_test = x_data[num_train:]
len(x_train) + len(x_test)
y_train = y_data[0:num_train]
y_test = y_data[num_train:]
print ('X: ucz:'+str(len(x_train))+' test:'+str(len(x_test))+' suma:'+str(len(x_train) + len(x_test)))
print ('Y: ucz:'+str(len(y_train))+' test:'+str(len(y_test))+' suma:'+str(len(y_train) + len(y_test)))
# Scale the inputs to the [0, 1] range
print("X::")
print("Min:", np.min(x_train))
print("Max:", np.max(x_train))
x_scaler = MinMaxScaler()
x_train_scaled = x_scaler.fit_transform(x_train)
print("Min:", np.min(x_train_scaled))
print("Max:", np.max(x_train_scaled))
x_test_scaled = x_scaler.transform(x_test)
y_scaler = MinMaxScaler()
y_train_scaled = y_scaler.fit_transform(y_train)
y_test_scaled = y_scaler.transform(y_test)
##datagen ?
print(x_train_scaled.shape)
print(y_train_scaled.shape)
batch_size = 256
sequence_length = 24 * 7 * 8
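# Editor's note: 24 * 7 * 8 = 1344 hourly samples, i.e. each training sequence covers roughly 8 weeks.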
generator = batch_generator(batch_size=batch_size, sequence_length=sequence_length, num_train=num_train, x_train_scaled=x_train_scaled, y_train_scaled=y_train_scaled)
#x_batch, y_batch = next(generator)
#print(x_batch.shape)
#print(y_batch.shape)
##Val set
validation_data = (np.expand_dims(x_test_scaled, axis=0),
np.expand_dims(y_test_scaled, axis=0))
##model
model = Sequential()
model.add(GRU(units=32, return_sequences=True,input_shape=(None, 1,)))
model.add(Dense(1, activation='sigmoid'))
optimizer = RMSprop(lr=1e-3)
model.compile(loss=loss_mse_warmup, optimizer=optimizer)
model.summary()
##Checkpoints
path_checkpoint = '23_checkpoint.keras'
callback_checkpoint = ModelCheckpoint(filepath=path_checkpoint,
monitor='val_loss',
verbose=1,
save_weights_only=True,
save_best_only=True)
##EarlyStop
callback_early_stopping = EarlyStopping(monitor='val_loss',
patience=5, verbose=1)
## logs
callback_tensorboard = TensorBoard(log_dir='./23_logs/',
histogram_freq=0,
write_graph=False)
##learning rate
callback_reduce_lr = ReduceLROnPlateau(monitor='val_loss',
factor=0.1,
min_lr=1e-4,
patience=0,
verbose=1)
#callback vector
callbacks = [callback_early_stopping,
callback_checkpoint,
callback_tensorboard,
callback_reduce_lr]
##Train
# uncomment to train
#model.fit_generator(generator=generator,epochs=20, steps_per_epoch=100, validation_data=validation_data, callbacks=callbacks)
##load checkpoint
# Uncomment to use the previously trained model
try:
model.load_weights(path_checkpoint)
except Exception as error:
print("Error trying to load checkpoint.")
print(error)
##//Train
##test set
result = model.evaluate(x=np.expand_dims(x_test_scaled, axis=0),y=np.expand_dims(y_test_scaled, axis=0))
print("loss (test-set):", result)
## plotting
#print(len(x_train_scaled))
#plot_comparison(start_idx=0, length=17000, train=False)
x = np.expand_dims(x_train_scaled, axis=0)
y_pred = model.predict(x)
y_train_pred_rescaled = y_scaler.inverse_transform(y_pred[0])
x = np.expand_dims(x_test_scaled, axis=0)
y_pred = model.predict(x)
y_test_pred_rescaled = y_scaler.inverse_transform(y_pred[0])
df_y_train = pd.DataFrame(data=y_train_pred_rescaled[1:,0])
TIR = pd.date_range('2016-01-01 3:00', periods = len(df_y_train), freq='H')
df_y_train.index = TIR
df_y_train_true = | pd.DataFrame(data=y_train[1:,0]) | pandas.DataFrame |
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime
import datetime as dt
import os
from typing import Union
import numpy as np
import pandas as pd
import pytest
import pytz
from gs_quant.target.common import XRef, PricingLocation, Currency as CurrEnum
from numpy.testing import assert_allclose
from pandas.testing import assert_series_equal
from pandas.tseries.offsets import CustomBusinessDay
from pytz import timezone
from testfixtures import Replacer
from testfixtures.mock import Mock
import gs_quant.timeseries.measures as tm
import gs_quant.timeseries.measures_rates as tm_rates
from gs_quant.api.gs.assets import GsTemporalXRef, GsAssetApi, GsIdType, IdList, GsAsset
from gs_quant.api.gs.data import GsDataApi, MarketDataResponseFrame
from gs_quant.api.gs.data import QueryType
from gs_quant.data.core import DataContext
from gs_quant.data.dataset import Dataset
from gs_quant.data.fields import Fields
from gs_quant.errors import MqError, MqValueError, MqTypeError
from gs_quant.markets.securities import AssetClass, Cross, Index, Currency, SecurityMaster, Stock, \
Swap, CommodityNaturalGasHub
from gs_quant.session import GsSession, Environment
from gs_quant.test.timeseries.utils import mock_request
from gs_quant.timeseries import Returns
from gs_quant.timeseries.measures import BenchmarkType
_index = [pd.Timestamp('2019-01-01')]
_test_datasets = ('TEST_DATASET',)
def mock_empty_market_data_response():
df = MarketDataResponseFrame()
df.dataset_ids = ()
return df
def map_identifiers_default_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-LIBOR-BBA" in ids:
return {"USD-LIBOR-BBA": "MAPDB7QNB2TZVQ0E"}
elif "EUR-EURIBOR-TELERATE" in ids:
return {"EUR-EURIBOR-TELERATE": "MAJNQPFGN1EBDHAE"}
elif "GBP-LIBOR-BBA" in ids:
return {"GBP-LIBOR-BBA": "MAFYB8Z4R1377A19"}
elif "JPY-LIBOR-BBA" in ids:
return {"JPY-LIBOR-BBA": "MABMVE27EM8YZK33"}
elif "EUR OIS" in ids:
return {"EUR OIS": "MARFAGXDQRWM07Y2"}
def map_identifiers_swap_rate_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m" in ids:
return {"USD-3m": "MAAXGV0GZTW4GFNC"}
elif "EUR-6m" in ids:
return {"EUR-6m": "MA5WM2QWRVMYKDK0"}
elif "KRW" in ids:
return {"KRW": 'MAJ6SEQH3GT0GA2Z'}
def map_identifiers_inflation_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "CPI-UKRPI" in ids:
return {"CPI-UKRPI": "MAQ7ND0MBP2AVVQW"}
elif "CPI-CPXTEMU" in ids:
return {"CPI-CPXTEMU": "MAK1FHKH5P5GJSHH"}
def map_identifiers_cross_basis_mocker(input_type: Union[GsIdType, str],
output_type: Union[GsIdType, str],
ids: IdList,
as_of: dt.datetime = None,
multimap: bool = False,
limit: int = None,
**kwargs
) -> dict:
if "USD-3m/JPY-3m" in ids:
return {"USD-3m/JPY-3m": "MA99N6C1KF9078NM"}
elif "EUR-3m/USD-3m" in ids:
return {"EUR-3m/USD-3m": "MAXPKTXW2D4X6MFQ"}
elif "GBP-3m/USD-3m" in ids:
return {"GBP-3m/USD-3m": "MA8BZHQV3W32V63B"}
def get_data_policy_rate_expectation_mocker(
start: Union[dt.date, dt.datetime] = None,
end: Union[dt.date, dt.datetime] = None,
as_of: dt.datetime = None,
since: dt.datetime = None,
fields: Union[str, Fields] = None,
asset_id_type: str = None,
**kwargs) -> pd.DataFrame:
if 'meetingNumber' in kwargs:
if kwargs['meetingNumber'] == 0:
return mock_meeting_spot()
elif 'meeting_date' in kwargs:
if kwargs['meeting_date'] == dt.date(2019, 10, 24):
return mock_meeting_spot()
return mock_meeting_expectation()
def test_parse_meeting_date():
assert tm.parse_meeting_date(5) == ''
assert tm.parse_meeting_date('') == ''
assert tm.parse_meeting_date('test') == ''
assert tm.parse_meeting_date('2019-09-01') == dt.date(2019, 9, 1)
def test_currency_to_default_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_default_mocker)
asset_id_list = ["MAZ7RWC904JYHYPS", "MAJNQPFGN1EBDHAE", "MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8",
"MA4B66MW5E27U8P32SB"]
correct_mapping = ["MAPDB7QNB2TZVQ0E", "MAJNQPFGN1EBDHAE", "MAFYB8Z4R1377A19", "MABMVE27EM8YZK33",
"MA4J1YB8XZP2BPT8", "MA4B66MW5E27U8P32SB"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_default_swap_rate_asset(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_swap_rate_mocker)
asset_id_list = ['MAZ7RWC904JYHYPS', 'MAJNQPFGN1EBDHAE', 'MAJ6SEQH3GT0GA2Z']
correct_mapping = ['MAAXGV0GZTW4GFNC', 'MA5WM2QWRVMYKDK0', 'MAJ6SEQH3GT0GA2Z']
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_default_swap_rate_asset(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_currency_to_inflation_benchmark_rate(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_inflation_mocker)
asset_id_list = ["MA66CZBQJST05XKG", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
correct_mapping = ["MAQ7ND0MBP2AVVQW", "MAK1FHKH5P5GJSHH", "MA4J1YB8XZP2BPT8"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.currency_to_inflation_benchmark_rate(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.currency_to_inflation_benchmark_rate('MA66CZBQJST05XKG') == 'MA66CZBQJST05XKG'
def test_cross_to_basis(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=map_identifiers_cross_basis_mocker)
asset_id_list = ["MAYJPCVVF2RWXCES", "MA4B66MW5E27U8P32SB", "nobbid"]
correct_mapping = ["MA99N6C1KF9078NM", "MA4B66MW5E27U8P32SB", "nobbid"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_basis(asset_id_list[i])
assert correct_id == correct_mapping[i]
# Test that the same id is returned when a TypeError is raised
mocker.patch.object(GsAssetApi, 'map_identifiers', side_effect=TypeError('Test'))
assert tm.cross_to_basis('MAYJPCVVF2RWXCES') == 'MAYJPCVVF2RWXCES'
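# Walks the supported currencies and checks the hard-coded currency-to-TDAPI swap-rate asset mapping
# by stubbing the asset's BBID identifier.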
def test_currency_to_tdapi_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA25DW5ZGC1BSC8Y', 'NOK')
bbid_mock.return_value = 'NOK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
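        # NOK is not in the TDAPI mapping; the call above only exercises the fallback path and its result is not asserted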
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAFRSWPAF5QPNTP2' == correct_id
bbid_mock.return_value = 'CHF'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAW25BGQJH9P6DPT' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAA9MVX15AJNQCVG' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA6QCAP9B7ABS9HA' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAEE219J5ZP0ZKRK' == correct_id
bbid_mock.return_value = 'SEK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAETMVTPNP3199A5' == correct_id
bbid_mock.return_value = 'HKD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MABRNGY8XRFVC36N' == correct_id
bbid_mock.return_value = 'NZD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAH16NHE1HBN0FBZ' == correct_id
bbid_mock.return_value = 'AUD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAY8147CRK0ZP53B' == correct_id
bbid_mock.return_value = 'CAD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MANJ8SS88WJ6N28Q' == correct_id
bbid_mock.return_value = 'KRW'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAP55AXG5SQVS6C5' == correct_id
bbid_mock.return_value = 'INR'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA20JHJXN1PD5HGE' == correct_id
bbid_mock.return_value = 'CNY'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA4K1D8HH2R0RQY5' == correct_id
bbid_mock.return_value = 'SGD'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MA5CQFHYBPH9E5BS' == correct_id
bbid_mock.return_value = 'DKK'
correct_id = tm_rates._currency_to_tdapi_swap_rate_asset(asset)
assert 'MAF131NKWVRESFYA' == correct_id
asset = Currency('MA890', 'PLN')
bbid_mock.return_value = 'PLN'
assert 'MA890' == tm_rates._currency_to_tdapi_swap_rate_asset(asset)
replace.restore()
def test_currency_to_tdapi_basis_swap_rate_asset(mocker):
replace = Replacer()
mocker.patch.object(GsSession.__class__, 'current',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=mock_request)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
with tm.PricingContext(dt.date.today()):
asset = Currency('MA890', 'NOK')
bbid_mock.return_value = 'NOK'
assert 'MA890' == tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
asset = Currency('MAZ7RWC904JYHYPS', 'USD')
bbid_mock.return_value = 'USD'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAQB1PGEJFCET3GG' == correct_id
bbid_mock.return_value = 'EUR'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAGRG2VT11GQ2RQ9' == correct_id
bbid_mock.return_value = 'GBP'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAHCYNB3V75JC5Q8' == correct_id
bbid_mock.return_value = 'JPY'
correct_id = tm_rates._currency_to_tdapi_basis_swap_rate_asset(asset)
assert 'MAXVRBEZCJVH0C4V' == correct_id
replace.restore()
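# _check_clearing_house should default to LCH, accept both strings and enum members, and reject unknown venues.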
def test_check_clearing_house():
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house('lch')
assert tm_rates._ClearingHouse.CME == tm_rates._check_clearing_house(tm_rates._ClearingHouse.CME)
assert tm_rates._ClearingHouse.LCH == tm_rates._check_clearing_house(None)
invalid_ch = ['NYSE']
for ch in invalid_ch:
with pytest.raises(MqError):
tm_rates._check_clearing_house(ch)
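# CSA terms are only populated for the OIS benchmarks (Fed Funds, EuroSTR); IBOR-style legs yield an empty dict.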
def test_get_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
assert dict(csaTerms='USD-1') == tm_rates._get_swap_csa_terms('USD', fed_funds_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_swap_csa_terms('EUR', estr_index)
assert {} == tm_rates._get_swap_csa_terms('EUR', euribor_index)
assert {} == tm_rates._get_swap_csa_terms('USD', usd_libor_index)
def test_get_basis_swap_csa_terms():
euribor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EURIBOR.value]
usd_libor_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.LIBOR.value]
fed_funds_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.Fed_Funds.value]
sofr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD'][BenchmarkType.SOFR.value]
estr_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EUROSTR.value]
eonia_index = tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['EUR'][BenchmarkType.EONIA.value]
assert dict(csaTerms='USD-1') == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, sofr_index)
assert dict(csaTerms='EUR-EuroSTR') == tm_rates._get_basis_swap_csa_terms('EUR', estr_index, eonia_index)
assert {} == tm_rates._get_basis_swap_csa_terms('EUR', eonia_index, euribor_index)
assert {} == tm_rates._get_basis_swap_csa_terms('USD', fed_funds_index, usd_libor_index)
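# _match_floating_tenors should copy the LIBOR leg's designated maturity onto the other leg and leave
# same-benchmark swaps unchanged.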
def test_match_floating_tenors():
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['SOFR'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_payer_designated_maturity='1y',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_receiver_designated_maturity='3m')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_payer_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['GBP']['SONIA'],
asset_parameters_receiver_designated_maturity='1y')
assert '3m' == tm_rates._match_floating_tenors(swap_args)['asset_parameters_receiver_designated_maturity']
swap_args = dict(asset_parameters_payer_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=tm_rates.CURRENCY_TO_SWAP_RATE_BENCHMARK['USD']['LIBOR'],
asset_parameters_receiver_designated_maturity='6m')
assert swap_args == tm_rates._match_floating_tenors(swap_args)
def test_get_term_struct_date(mocker):
today = datetime.datetime.today()
biz_day = CustomBusinessDay()
assert today == tm_rates._get_term_struct_date(tenor=today, index=today, business_day=biz_day)
date_index = datetime.datetime(2020, 7, 31, 0, 0)
assert date_index == tm_rates._get_term_struct_date(tenor='2020-07-31', index=date_index, business_day=biz_day)
assert date_index == tm_rates._get_term_struct_date(tenor='0b', index=date_index, business_day=biz_day)
assert datetime.datetime(2021, 7, 30, 0, 0) == tm_rates._get_term_struct_date(tenor='1y', index=date_index,
business_day=biz_day)
def test_cross_stored_direction_for_fx_vol(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_stored_direction_for_fx_vol(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross_for_fx_forecast(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
asset_id_list = ["MAYJPCVVF2RWXCES", "MATGYV0J9MPX534Z"]
correct_mapping = ["MATGYV0J9MPX534Z", "MATGYV0J9MPX534Z"]
with tm.PricingContext(dt.date.today()):
for i in range(len(asset_id_list)):
correct_id = tm.cross_to_usd_based_cross(asset_id_list[i])
assert correct_id == correct_mapping[i]
def test_cross_to_usd_based_cross(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_to_usd_based_cross(Cross('FUN', 'EURUSD'))
replace.restore()
def test_cross_stored_direction(mocker):
mocker.patch.object(GsSession.__class__, 'default_value',
return_value=GsSession.get(Environment.QA, 'client_id', 'secret'))
mocker.patch.object(GsSession.current, '_get', side_effect=mock_request)
mocker.patch.object(GsSession.current, '_post', side_effect=mock_request)
mocker.patch.object(SecurityMaster, 'get_asset', side_effect=TypeError('unsupported'))
replace = Replacer()
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'HELLO'
assert 'FUN' == tm.cross_stored_direction_for_fx_vol(Cross('FUN', 'EURUSD'))
replace.restore()
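# _get_tdapi_rates_assets should return a single matching ID, raise MqValueError on ambiguous or empty
# matches, and return every ID when no filters are supplied.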
def test_get_tdapi_rates_assets(mocker):
mock_asset_1 = GsAsset(asset_class='Rate', id='MAW25BGQJH9P6DPT', type_='Swap', name='Test_asset')
mock_asset_2 = GsAsset(asset_class='Rate', id='MAA9MVX15AJNQCVG', type_='Swap', name='Test_asset')
mock_asset_3 = GsAsset(asset_class='Rate', id='MANQHVYC30AZFT7R', type_='BasisSwap', name='Test_asset')
replace = Replacer()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1]
assert 'MAW25BGQJH9P6DPT' == tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict(asset_parameters_termination_date='10y', asset_parameters_effective_date='0b')
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = []
with pytest.raises(MqValueError):
tm_rates._get_tdapi_rates_assets()
replace.restore()
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_1, mock_asset_2]
kwargs = dict()
assert ['MAW25BGQJH9P6DPT', 'MAA9MVX15AJNQCVG'] == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
    # Match the SOFR leg's maturity to the LIBOR leg and flip the legs to find the right asset
kwargs = dict(type='BasisSwap', asset_parameters_termination_date='10y',
asset_parameters_payer_rate_option=BenchmarkType.LIBOR,
asset_parameters_payer_designated_maturity='3m',
asset_parameters_receiver_rate_option=BenchmarkType.SOFR,
asset_parameters_receiver_designated_maturity='1y',
asset_parameters_clearing_house='lch', asset_parameters_effective_date='Spot',
asset_parameters_notional_currency='USD',
pricing_location='NYC')
assets = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
assets.return_value = [mock_asset_3]
assert 'MANQHVYC30AZFT7R' == tm_rates._get_tdapi_rates_assets(**kwargs)
replace.restore()
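# Spot-checks the per-currency swap-leg defaults: benchmark index, floating-rate tenor and pricing location.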
def test_get_swap_leg_defaults():
result_dict = dict(currency=CurrEnum.JPY, benchmark_type='JPY-LIBOR-BBA', floating_rate_tenor='6m',
pricing_location=PricingLocation.TKO)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.JPY)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.USD, benchmark_type='USD-LIBOR-BBA', floating_rate_tenor='3m',
pricing_location=PricingLocation.NYC)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.USD)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.EUR, benchmark_type='EUR-EURIBOR-TELERATE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.EUR)
assert result_dict == defaults
result_dict = dict(currency=CurrEnum.SEK, benchmark_type='SEK-STIBOR-SIDE', floating_rate_tenor='6m',
pricing_location=PricingLocation.LDN)
defaults = tm_rates._get_swap_leg_defaults(CurrEnum.SEK)
assert result_dict == defaults
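# Forward tenors may be dates, month/year strings, low-numbered IMM/FRB codes or '0b'; anything else raises MqError.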
def test_check_forward_tenor():
valid_tenors = [datetime.date(2020, 1, 1), '1y', 'imm2', 'frb2', '1m', '0b']
for tenor in valid_tenors:
assert tenor == tm_rates._check_forward_tenor(tenor)
invalid_tenors = ['5yr', 'imm5', 'frb0']
for tenor in invalid_tenors:
with pytest.raises(MqError):
tm_rates._check_forward_tenor(tenor)
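# Market-data mocks: the helpers below return canned frames shaped like GsDataApi.get_market_data
# responses for the measure tests further down.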
def mock_commod(_cls, _q):
d = {
'price': [30, 30, 30, 30, 35.929686, 35.636039, 27.307498, 23.23177, 19.020833, 18.827291, 17.823749, 17.393958,
17.824999, 20.307603, 24.311249, 25.160103, 25.245728, 25.736873, 28.425206, 28.779789, 30.519996,
34.896348, 33.966973, 33.95489, 33.686348, 34.840307, 32.674163, 30.261665, 30, 30, 30]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-05-01', periods=31, freq='H', tz=timezone('UTC')))
df.dataset_ids = _test_datasets
return df
def mock_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
11.6311,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
"J20",
"K20",
"M20",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 9))
df.dataset_ids = _test_datasets
return df
def mock_fair_price(_cls, _q):
d = {
'fairPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_natgas_forward_price(_cls, _q):
d = {
'forwardPrice': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_fair_price_swap(_cls, _q):
d = {'fairPrice': [2.880]}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)]))
df.dataset_ids = _test_datasets
return df
def mock_implied_volatility(_cls, _q):
d = {
'impliedVolatility': [
2.880,
2.844,
2.726,
],
'contract': [
"F21",
"G21",
"H21",
]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 3))
df.dataset_ids = _test_datasets
return df
def mock_missing_bucket_forward_price(_cls, _q):
d = {
'forwardPrice': [
22.0039,
24.8436,
24.8436,
11.9882,
14.0188,
18.9234,
21.3654,
21.3654,
],
'quantityBucket': [
"PEAK",
"PEAK",
"PEAK",
"7X8",
"7X8",
"2X16H",
"2X16H",
"2X16H",
],
'contract': [
"J20",
"K20",
"M20",
"J20",
"K20",
"J20",
"K20",
"M20",
]
}
return pd.DataFrame(data=d, index=pd.to_datetime([datetime.date(2019, 1, 2)] * 8))
def mock_fx_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
d = {
'strikeReference': ['delta', 'spot', 'forward'],
'relativeStrike': [25, 100, 100],
'impliedVolatility': [5, 1, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2019-01-01', periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_fx_forecast(_cls, _q):
d = {
'fxForecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_delta(_cls, _q):
d = {
'relativeStrike': [25, -25, 0],
'impliedVolatility': [1, 5, 2],
'forecast': [1.1, 1.1, 1.1]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_fx_empty(_cls, _q):
d = {
'strikeReference': [],
'relativeStrike': [],
'impliedVolatility': []
}
df = MarketDataResponseFrame(data=d, index=[])
df.dataset_ids = _test_datasets
return df
def mock_fx_switch(_cls, _q, _n):
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_empty)
replace.restore()
return Cross('MA1889', 'ABC/XYZ')
def mock_curr(_cls, _q):
d = {
'swapAnnuity': [1, 2, 3],
'swapRate': [1, 2, 3],
'basisSwapRate': [1, 2, 3],
'swaptionVol': [1, 2, 3],
'atmFwdRate': [1, 2, 3],
'midcurveVol': [1, 2, 3],
'capFloorVol': [1, 2, 3],
'spreadOptionVol': [1, 2, 3],
'inflationSwapRate': [1, 2, 3],
'midcurveAtmFwdRate': [1, 2, 3],
'capFloorAtmFwdRate': [1, 2, 3],
'spreadOptionAtmFwdRate': [1, 2, 3],
'strike': [0.25, 0.5, 0.75]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_cross(_cls, _q):
d = {
'basis': [1, 2, 3],
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq(_cls, _q):
d = {
'relativeStrike': [0.75, 0.25, 0.5],
'impliedVolatility': [5, 1, 2],
'impliedCorrelation': [5, 1, 2],
'realizedCorrelation': [3.14, 2.71828, 1.44],
'averageImpliedVolatility': [5, 1, 2],
'averageImpliedVariance': [5, 1, 2],
'averageRealizedVolatility': [5, 1, 2],
'impliedVolatilityByDeltaStrike': [5, 1, 2],
'fundamentalMetric': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_vol(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
idx = [pd.Timestamp(datetime.datetime.now(pytz.UTC))]
return MarketDataResponseFrame({'impliedVolatility': [3]}, index=idx)
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.datetime.now(pytz.UTC).date() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_err(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
raise MqValueError('error while getting last')
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_vol_last_empty(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame()
d = {
'impliedVolatility': [5, 1, 2],
}
end = datetime.date.today() - datetime.timedelta(days=1)
df = MarketDataResponseFrame(data=d, index=pd.date_range(end=end, periods=3, freq='D'))
df.dataset_ids = _test_datasets
return df
def mock_eq_norm(_cls, _q):
d = {
'relativeStrike': [-4.0, 4.0, 0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_eq_spot(_cls, _q):
d = {
'relativeStrike': [0.75, 1.25, 1.0],
'impliedVolatility': [5, 1, 2]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_inc(_cls, _q):
d = {
'relativeStrike': [0.25, 0.75],
'impliedVolatility': [5, 1]
}
df = MarketDataResponseFrame(data=d, index=_index * 2)
df.dataset_ids = _test_datasets
return df
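# Central-bank meeting mocks consumed by the mock_request dispatcher above (spot, forward expectation,
# absolute and OIS spot rows).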
def mock_meeting_expectation():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2020, 1, 29)],
'endingDate': [dt.date(2020, 1, 29)],
'meetingNumber': [2],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2020, 1, 23)],
'value': [-0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_spot():
data_dict = MarketDataResponseFrame({'date': [dt.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Meeting Forward'],
'startingDate': [dt.date(2019, 10, 30)],
'endingDate': [dt.date(2019, 12, 18)],
'meetingNumber': [0],
'valuationDate': [dt.date(2019, 12, 6)],
'meetingDate': [dt.date(2019, 10, 24)],
'value': [-0.004522570525]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_meeting_absolute():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2', 'MARFAGXDQRWM07Y2'],
'location': ['NYC', 'NYC'],
'rateType': ['Meeting Forward', 'Meeting Forward'],
'startingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'endingDate': [datetime.date(2019, 10, 30), datetime.date(2020, 1, 29)],
'meetingNumber': [0, 2],
'valuationDate': [datetime.date(2019, 12, 6), datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 10, 24), datetime.date(2020, 1, 23)],
'value': [-0.004522570525, -0.004550907771]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_ois_spot():
data_dict = MarketDataResponseFrame({'date': [datetime.date(2019, 12, 6)],
'assetId': ['MARFAGXDQRWM07Y2'],
'location': ['NYC'],
'rateType': ['Spot'],
'startingDate': [datetime.date(2019, 12, 6)],
'endingDate': [datetime.date(2019, 12, 7)],
'meetingNumber': [-1],
'valuationDate': [datetime.date(2019, 12, 6)],
'meetingDate': [datetime.date(2019, 12, 6)],
'value': [-0.00455]
})
data_dict.dataset_ids = _test_datasets
return data_dict
def mock_esg(_cls, _q):
d = {
"esNumericScore": [2, 4, 6],
"esNumericPercentile": [81.2, 75.4, 65.7],
"esPolicyScore": [2, 4, 6],
"esPolicyPercentile": [81.2, 75.4, 65.7],
"esScore": [2, 4, 6],
"esPercentile": [81.2, 75.4, 65.7],
"esProductImpactScore": [2, 4, 6],
"esProductImpactPercentile": [81.2, 75.4, 65.7],
"gScore": [2, 4, 6],
"gPercentile": [81.2, 75.4, 65.7],
"esMomentumScore": [2, 4, 6],
"esMomentumPercentile": [81.2, 75.4, 65.7],
"gRegionalScore": [2, 4, 6],
"gRegionalPercentile": [81.2, 75.4, 65.7],
"controversyScore": [2, 4, 6],
"controversyPercentile": [81.2, 75.4, 65.7],
"esDisclosurePercentage": [49.2, 55.7, 98.4]
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
df.dataset_ids = _test_datasets
return df
def mock_index_positions_data(
asset_id,
start_date,
end_date,
fields=None,
position_type=None
):
return [
{'underlyingAssetId': 'MA3',
'netWeight': 0.1,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA1',
'netWeight': 0.6,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
},
{'underlyingAssetId': 'MA2',
'netWeight': 0.3,
'positionType': 'close',
'assetId': 'MA890',
'positionDate': '2020-01-01'
}
]
def mock_rating(_cls, _q):
d = {
'rating': ['Buy', 'Sell', 'Buy', 'Neutral'],
'convictionList': [1, 0, 0, 0]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_gsdeer_gsfeer(_cls, assetId, start_date):
d = {
'gsdeer': [1, 1.2, 1.1],
'gsfeer': [2, 1.8, 1.9],
'year': [2000, 2010, 2020],
'quarter': ['Q1', 'Q2', 'Q3']
}
df = MarketDataResponseFrame(data=d, index=_index * 3)
return df
def mock_factor_profile(_cls, _q):
d = {
'growthScore': [0.238, 0.234, 0.234, 0.230],
'financialReturnsScore': [0.982, 0.982, 0.982, 0.982],
'multipleScore': [0.204, 0.192, 0.190, 0.190],
'integratedScore': [0.672, 0.676, 0.676, 0.674]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
def mock_commodity_forecast(_cls, _q):
d = {
'forecastPeriod': ['3m', '3m', '3m', '3m'],
'forecastType': ['spotReturn', 'spotReturn', 'spotReturn', 'spotReturn'],
'commodityForecast': [1700, 1400, 1500, 1600]
}
df = MarketDataResponseFrame(data=d, index=pd.to_datetime([datetime.date(2020, 8, 13), datetime.date(2020, 8, 14),
datetime.date(2020, 8, 17), datetime.date(2020, 8, 18)]))
df.dataset_ids = _test_datasets
return df
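# Measure tests: each test swaps GsDataApi.get_market_data (and related lookups) for one of the mocks
# above and checks the derived series and dataset IDs.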
def test_skew():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_norm)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.NORMALIZED, 4)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_spot)
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock.return_value = mock_empty_market_data_response()
actual = tm.skew(mock_spx, '1m', tm.SkewReference.SPOT, 25)
assert actual.empty
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_inc)
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', tm.SkewReference.DELTA, 25)
replace.restore()
with pytest.raises(MqError):
tm.skew(mock_spx, '1m', None, 25)
def test_skew_fx():
replace = Replacer()
cross = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = cross
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_delta)
mock = cross
actual = tm.skew(mock, '1m', tm.SkewReference.DELTA, 25)
assert_series_equal(pd.Series([2.0], index=_index, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.DELTA, 25, real_time=True)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.SPOT, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.FORWARD, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', tm.SkewReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.skew(mock, '1m', None, 25)
replace.restore()
def test_implied_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol)
idx = pd.date_range(end=datetime.datetime.now(pytz.UTC).date(), periods=4, freq='D')
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2, 3], index=idx, name='impliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_NEUTRAL)
with pytest.raises(MqError):
tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL)
replace.restore()
def test_implied_vol_no_last():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
idx = pd.date_range(end=datetime.date.today() - datetime.timedelta(days=1), periods=3, freq='D')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_err)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq_vol_last_empty)
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
actual = tm.implied_volatility(mock_spx, '1m', tm.VolReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=idx, name='impliedVolatility'), pd.Series(actual))
replace.restore()
def test_implied_vol_fx():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
    # delta, delta-neutral, forward and spot (ATM) strike references all return the mocked series
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_vol)
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL, 25)
expected = pd.Series([5, 1, 2, 3], index=pd.date_range('2019-01-01', periods=4, freq='D'), name='impliedVolatility')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_PUT, 25)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_NEUTRAL)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
    # error cases: missing relative strike, NORMALIZED unsupported for FX, and non-ATM spot/forward strikes
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.DELTA_CALL)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.NORMALIZED, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.SPOT, 25)
with pytest.raises(MqError):
tm.implied_volatility(mock, '1m', tm.VolReference.FORWARD, 25)
replace.restore()
def test_fx_forecast():
replace = Replacer()
mock = Cross('MAA0NE9QX2ABETG6', 'USD/EUR')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='EURUSD', ))]
replace('gs_quant.markets.securities.SecurityMaster.get_asset', Mock()).return_value = mock
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
actual = tm.fx_forecast(mock, '12m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1.1, 1.1, 1.1], index=_index * 3, name='fxForecast'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fx_forecast(mock, '3m', real_time=True)
replace.restore()
def test_fx_forecast_inverse():
replace = Replacer()
get_cross = replace('gs_quant.timeseries.measures.cross_to_usd_based_cross', Mock())
get_cross.return_value = "MATGYV0J9MPX534Z"
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_fx_forecast)
mock = Cross("MAYJPCVVF2RWXCES", 'USD/JPY')
actual = tm.fx_forecast(mock, '3m')
assert_series_equal(pd.Series([1 / 1.1, 1 / 1.1, 1 / 1.1], index=_index * 3, name='fxForecast'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_vol_smile():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.FORWARD, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '5d')
assert_series_equal(pd.Series([5, 1, 2], index=[0.75, 0.25, 0.5]), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d')
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
with pytest.raises(NotImplementedError):
tm.vol_smile(mock_spx, '1m', tm.VolSmileReference.SPOT, '1d', real_time=True)
replace.restore()
def test_impl_corr():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.implied_correlation(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedCorrelation'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqError):
tm.implied_correlation(..., '1m', tm.EdrDataReference.DELTA_CALL, 50, '')
replace.restore()
def test_impl_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5,
composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
i_vol = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_in.csv'))
i_vol.index = pd.to_datetime(i_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = i_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_icorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.implied_correlation(spx, '1m', tm.EdrDataReference.DELTA_CALL, 0.5, 50, datetime.date(2020, 8, 31),
source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_real_corr():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(NotImplementedError):
tm.realized_correlation(spx, '1m', real_time=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.realized_correlation(spx, '1m')
assert_series_equal(pd.Series([3.14, 2.71828, 1.44], index=_index * 3), pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_real_corr_missing():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
d = {
'assetId': ['MA4B66MW5E27U8P32SB'] * 3,
'spot': [3000, 3100, 3050],
}
df = MarketDataResponseFrame(data=d, index=pd.date_range('2020-08-01', periods=3, freq='D'))
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', lambda *args, **kwargs: df)
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 50)
replace.restore()
def test_real_corr_n():
spx = Index('MA4B66MW5E27U8P32SB', AssetClass.Equity, 'SPX')
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', composition_date=datetime.date.today())
with pytest.raises(MqValueError):
tm.realized_correlation(spx, '1m', 200)
resources = os.path.join(os.path.dirname(__file__), '..', 'resources')
r_vol = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_in.csv'))
r_vol.index = pd.to_datetime(r_vol['date'])
weights = pd.read_csv(os.path.join(resources, 'SPX_50_weights.csv'))
weights.set_index('underlyingAssetId', inplace=True)
replace = Replacer()
market_data = replace('gs_quant.timeseries.econometrics.GsDataApi.get_market_data', Mock())
market_data.return_value = r_vol
constituents = replace('gs_quant.timeseries.measures._get_index_constituent_weights', Mock())
constituents.return_value = weights
expected = pd.read_csv(os.path.join(resources, 'SPX_50_rcorr_out.csv'))
expected.index = pd.to_datetime(expected['date'])
expected = expected['value']
actual = tm.realized_correlation(spx, '1m', 50, datetime.date(2020, 8, 31), source='PlotTool')
pd.testing.assert_series_equal(actual, expected, check_names=False)
replace.restore()
def test_cds_implied_vol():
replace = Replacer()
mock_cds = Index('MA890', AssetClass.Equity, 'CDS')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.DELTA_CALL, 10)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.cds_implied_volatility(mock_cds, '1m', '5y', tm.CdsVolReference.FORWARD, 100)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='impliedVolatilityByDeltaStrike'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cds_implied_volatility(..., '1m', '5y', tm.CdsVolReference.DELTA_PUT, 75, real_time=True)
replace.restore()
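# average_implied_volatility with top_n_of_index weights constituent vols by netWeight from the
# positions mock, renormalising when a constituent is missing on a given date.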
def test_avg_impl_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'impliedVolatility': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'impliedVolatility': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'impliedVolatility': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
mock_implied_vol = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_implied_vol.dataset_ids = _test_datasets
market_data_mock.return_value = mock_implied_vol
actual = tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25, 3, '1d')
assert_series_equal(pd.Series([1.4, 2.6, 3.33333],
index=pd.date_range(start='2020-01-01', periods=3), name='averageImpliedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
with pytest.raises(MqValueError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=None,
composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_implied_volatility(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75, top_n_of_index=101)
replace.restore()
def test_avg_realized_vol():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_realized_volatility(mock_spx, '1m')
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageRealizedVolatility'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
replace.restore()
df1 = pd.DataFrame(data={'spot': [1, 2, 3], 'assetId': ['MA1', 'MA1', 'MA1']},
index=pd.date_range(start='2020-01-01', periods=3))
df2 = pd.DataFrame(data={'spot': [2, 3, 4], 'assetId': ['MA2', 'MA2', 'MA2']},
index=pd.date_range(start='2020-01-01', periods=3))
df3 = pd.DataFrame(data={'spot': [2, 5], 'assetId': ['MA3', 'MA3']},
index=pd.date_range(start='2020-01-01', periods=2))
mock_spot = MarketDataResponseFrame(pd.concat([df1, df2, df3], join='inner'))
mock_spot.dataset_ids = _test_datasets
replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', mock_index_positions_data)
market_data_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_data_mock.return_value = mock_spot
actual = tm.average_realized_volatility(mock_spx, '2d', Returns.SIMPLE, 3, '1d')
assert_series_equal(pd.Series([392.874026], index=pd.date_range(start='2020-01-03', periods=1),
name='averageRealizedVolatility'),
pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', real_time=True)
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', composition_date='1d')
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.LOGARITHMIC)
with pytest.raises(NotImplementedError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 201)
replace.restore()
empty_positions_data_mock = replace('gs_quant.api.gs.assets.GsAssetApi.get_asset_positions_data', Mock())
empty_positions_data_mock.return_value = []
with pytest.raises(MqValueError):
tm.average_realized_volatility(mock_spx, '1w', Returns.SIMPLE, 5)
replace.restore()
def test_avg_impl_var():
replace = Replacer()
mock_spx = Index('MA890', AssetClass.Equity, 'SPX')
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_eq)
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_CALL, 25)
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
actual = tm.average_implied_variance(mock_spx, '1m', tm.EdrDataReference.DELTA_PUT, 75)
assert actual.dataset_ids == _test_datasets
assert_series_equal(pd.Series([5, 1, 2], index=_index * 3, name='averageImpliedVariance'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.average_implied_variance(..., '1m', tm.EdrDataReference.DELTA_PUT, 75, real_time=True)
replace.restore()
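# Rates measures: malformed tenors and unsupported benchmarks raise MqValueError; valid arguments
# return the mocked swapRate/basisSwapRate curve.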
def test_basis_swap_spread(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, forward_tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_spread(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['swap_tenor'] = '6y'
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_tenor'] = '6m'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['forward_tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = 'libor_3m'
with pytest.raises(MqValueError):
tm_rates.basis_swap_spread(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAQB1PGEJFCET3GG'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
args['reference_benchmark_type'] = BenchmarkType.SOFR
args['reference_tenor'] = '1y'
args['reference_benchmark_type'] = BenchmarkType.LIBOR
args['reference_tenor'] = '3m'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MA06ATQ9CM0DCZFC'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.basis_swap_spread(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='basisSwapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_rate(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'sonia'
with pytest.raises(MqValueError):
tm_rates.swap_rate(**args)
args['benchmark_type'] = 'fed_funds'
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'EUR'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAJNQPFGN1EBDHAE'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
args['asset'] = Currency('MAJNQPFGN1EBDHAE', 'EUR')
args['benchmark_type'] = 'estr'
actual = tm_rates.swap_rate(**args)
expected = tm.ExtendedSeries([1, 2, 3], index=_index * 3, name='swapRate')
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == _test_datasets
replace.restore()
def test_swap_annuity(mocker):
replace = Replacer()
args = dict(swap_tenor='10y', benchmark_type=None, floating_rate_tenor=None, forward_tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
    xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
    xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_annuity(..., '1y', real_time=True)
args['swap_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['swap_tenor'] = '10y'
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['floating_rate_tenor'] = '1y'
args['forward_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['forward_tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_annuity(**args)
args['benchmark_type'] = BenchmarkType.SOFR
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers = replace('gs_quant.timeseries.measures_rates._get_tdapi_rates_assets', Mock())
identifiers.return_value = {'MAZ7RWC904JYHYPS'}
mocker.patch.object(GsDataApi, 'get_market_data', return_value=mock_curr(None, None))
actual = tm_rates.swap_annuity(**args)
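    # expected mirrors the measure's scaling of the raw swapAnnuity values: abs(value * 1e4 / 1e8)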
expected = abs(tm.ExtendedSeries([1.0, 2.0, 3.0], index=_index * 3, name='swapAnnuity') * 1e4 / 1e8)
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_swap_term_structure():
replace = Replacer()
args = dict(benchmark_type=None, floating_rate_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
    mock_pln = Currency('MA891', 'PLN')
    xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
    xrefs.return_value = 'PLN'
    args['asset'] = mock_pln
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.swap_term_structure(..., '1y', real_time=True)
args['floating_rate_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['floating_rate_tenor'] = '3m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor_type'] = None
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = None
args['benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'swapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']}, index=_index)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'swapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = 'swap_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.swap_term_structure(**args)
args['tenor'] = '5y'
market_data_mock.return_value = pd.DataFrame()
df = pd.DataFrame(data=d, index=_index * 4)
assert tm_rates.swap_term_structure(**args).empty
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_basis_swap_term_structure():
replace = Replacer()
range_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
range_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
args = dict(spread_benchmark_type=None, spread_tenor=None,
reference_benchmark_type=None, reference_tenor=None, tenor_type=tm_rates._SwapTenorType.FORWARD_TENOR,
tenor='0b', real_time=False)
mock_nok = Currency('MA891', 'NOK')
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'NOK'
args['asset'] = mock_nok
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(**args)
mock_usd = Currency('MAZ7RWC904JYHYPS', 'USD')
args['asset'] = mock_usd
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
with pytest.raises(NotImplementedError):
tm_rates.basis_swap_term_structure(..., '1y', real_time=True)
args['spread_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_tenor'] = '3m'
args['reference_tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_tenor'] = '6m'
args['tenor_type'] = 'expiry'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor_type'] = 'forward_tenor'
args['tenor'] = '5yr'
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['tenor'] = None
args['spread_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['spread_benchmark_type'] = BenchmarkType.LIBOR
args['reference_benchmark_type'] = BenchmarkType.STIBOR
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['reference_benchmark_type'] = BenchmarkType.LIBOR
bd_mock = replace('gs_quant.data.dataset.Dataset.get_data', Mock())
bd_mock.return_value = pd.DataFrame(data=dict(date="2020-04-10", exchange="NYC", description="Good Friday"),
index=[pd.Timestamp('2020-04-10')])
args['pricing_date'] = datetime.date(2020, 4, 10)
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
args['pricing_date'] = None
xrefs = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
xrefs.return_value = 'USD'
identifiers_empty = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
identifiers_empty.return_value = {}
with pytest.raises(MqValueError):
tm_rates.basis_swap_term_structure(**args)
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.get_many_assets', Mock())
mock_asset = Currency('USD', name='USD')
mock_asset.id = 'MAEMPCXQG3T716EX'
mock_asset.exchange = 'OTC'
identifiers.return_value = [mock_asset]
d = {
'terminationTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
pricing_date_mock = replace('gs_quant.timeseries.measures_rates._range_from_pricing_date', Mock())
pricing_date_mock.return_value = [datetime.date(2019, 1, 1), datetime.date(2019, 1, 1)]
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
market_data_mock.return_value = pd.DataFrame()
assert tm_rates.basis_swap_term_structure(**args).empty
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
d = {
'effectiveTenor': ['1y', '2y', '3y', '4y'], 'basisSwapRate': [1, 2, 3, 4],
'assetId': ['MAEMPCXQG3T716EX', 'MAFRSWPAF5QPNTP2', 'MA88BXZ3TCTXTFW1', 'MAC4KAG9B9ZAZHFT']
}
bd_mock.return_value = pd.DataFrame()
market_data_mock = replace('gs_quant.timeseries.measures_rates._market_data_timed', Mock())
df = pd.DataFrame(data=d, index=_index * 4)
market_data_mock.return_value = df
args['tenor_type'] = tm_rates._SwapTenorType.SWAP_TENOR
args['tenor'] = '5y'
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1, 2, 3, 4], index=pd.to_datetime(['2020-01-01', '2021-01-01', '2021-12-31',
'2022-12-30']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
df = pd.DataFrame(data={'effectiveTenor': ['1y'], 'basisSwapRate': [1], 'assetId': ['MAEMPCXQG3T716EX']},
index=_index)
market_data_mock.return_value = df
with DataContext('2019-01-01', '2025-01-01'):
actual = tm_rates.basis_swap_term_structure(**args)
actual.dataset_ids = _test_datasets
expected = tm.ExtendedSeries([1], index=pd.to_datetime(['2020-01-01']))
expected.dataset_ids = _test_datasets
assert_series_equal(expected, actual, check_names=False)
assert actual.dataset_ids == expected.dataset_ids
replace.restore()
def test_cap_floor_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_vol(mock_usd, '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_vol(..., '5y', 50, real_time=True)
replace.restore()
def test_cap_floor_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.cap_floor_atm_fwd_rate(mock_usd, '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='capFloorAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.cap_floor_atm_fwd_rate(..., '5y', real_time=True)
replace.restore()
def test_spread_option_vol():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_vol(mock_usd, '3m', '10y', '5y', 50)
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionVol'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_vol(..., '3m', '10y', '5y', 50, real_time=True)
replace.restore()
def test_spread_option_atm_fwd_rate():
replace = Replacer()
mock_usd = Currency('MA890', 'USD')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='USD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-LIBOR-BBA': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.spread_option_atm_fwd_rate(mock_usd, '3m', '10y', '5y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='spreadOptionAtmFwdRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.spread_option_atm_fwd_rate(..., '3m', '10y', '5y', real_time=True)
replace.restore()
def test_zc_inflation_swap_rate():
replace = Replacer()
mock_gbp = Currency('MA890', 'GBP')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='GBP', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'CPI-UKRPI': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_curr)
actual = tm.zc_inflation_swap_rate(mock_gbp, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='inflationSwapRate'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.zc_inflation_swap_rate(..., '1y', real_time=True)
replace.restore()
def test_basis():
replace = Replacer()
mock_jpyusd = Cross('MA890', 'USD/JPY')
xrefs = replace('gs_quant.timeseries.measures.GsAssetApi.get_asset_xrefs', Mock())
xrefs.return_value = [GsTemporalXRef(dt.date(2019, 1, 1), dt.date(2952, 12, 31), XRef(bbid='JPYUSD', ))]
identifiers = replace('gs_quant.timeseries.measures.GsAssetApi.map_identifiers', Mock())
identifiers.return_value = {'USD-3m/JPY-3m': 'MA123'}
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_cross)
actual = tm.basis(mock_jpyusd, '1y')
assert_series_equal(pd.Series([1, 2, 3], index=_index * 3, name='basis'), pd.Series(actual))
assert actual.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.basis(..., '1y', real_time=True)
replace.restore()
def test_td():
cases = {'3d': pd.DateOffset(days=3), '9w': pd.DateOffset(weeks=9), '2m': pd.DateOffset(months=2),
'10y': pd.DateOffset(years=10)
}
for k, v in cases.items():
actual = tm._to_offset(k)
assert v == actual, f'expected {v}, got actual {actual}'
with pytest.raises(ValueError):
tm._to_offset('5z')
def test_pricing_range():
import datetime
given = datetime.date(2019, 4, 20)
s, e = tm._range_from_pricing_date('NYSE', given)
assert s == e == given
class MockDate(datetime.date):
@classmethod
def today(cls):
return cls(2019, 5, 25)
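    # MockDate pins datetime.date.today() to 2019-05-25 so the no-argument
    # pricing-date cases below are deterministic.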
# mock
replace = Replacer()
cbd = replace('gs_quant.timeseries.measures._get_custom_bd', Mock())
cbd.return_value = pd.tseries.offsets.BusinessDay()
today = replace('gs_quant.timeseries.measures.pd.Timestamp.today', Mock())
today.return_value = pd.Timestamp(2019, 5, 25)
gold = datetime.date
datetime.date = MockDate
# cases
s, e = tm._range_from_pricing_date('ANY')
assert s == pd.Timestamp(2019, 5, 24)
assert e == pd.Timestamp(2019, 5, 24)
s, e = tm._range_from_pricing_date('ANY', '3m')
assert s == pd.Timestamp(2019, 2, 22)
assert e == pd.Timestamp(2019, 2, 24)
s, e = tm._range_from_pricing_date('ANY', '3b')
assert s == e == pd.Timestamp(2019, 5, 22)
# restore
datetime.date = gold
replace.restore()
def test_var_swap_tenors():
session = GsSession.get(Environment.DEV, token='<PASSWORD>')
replace = Replacer()
get_mock = replace('gs_quant.session.GsSession._get', Mock())
get_mock.return_value = {
'data': [
{
'dataField': 'varSwap',
'filteredFields': [
{
'field': 'tenor',
'values': ['abc', 'xyc']
}
]
}
]
}
with session:
actual = tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
assert actual == ['abc', 'xyc']
get_mock.return_value = {
'data': []
}
with pytest.raises(MqError):
with session:
tm._var_swap_tenors(Index('MAXXX', AssetClass.Equity, 'XXX'))
replace.restore()
def test_tenor_to_month():
with pytest.raises(MqError):
tm._tenor_to_month('1d')
with pytest.raises(MqError):
tm._tenor_to_month('2w')
assert tm._tenor_to_month('3m') == 3
assert tm._tenor_to_month('4y') == 48
def test_month_to_tenor():
assert tm._month_to_tenor(36) == '3y'
assert tm._month_to_tenor(18) == '18m'
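    # Illustrative sketch only (an assumption, not the gs_quant implementation):
    # helpers consistent with the assertions above could look like
    #     def _tenor_to_month(tenor: str) -> int:
    #         if tenor.endswith('m'): return int(tenor[:-1])
    #         if tenor.endswith('y'): return int(tenor[:-1]) * 12
    #         raise MqError('only month/year tenors are supported')
    #     def _month_to_tenor(months: int) -> str:
    #         return f'{months // 12}y' if months % 12 == 0 else f'{months}m'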
def test_forward_var_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'varSwap': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'), datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVarTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_var_term(Cross('ABCDE', 'EURUSD'))
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_var_term(Index('MA123', AssetClass.Equity, '123'))
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_var_term(..., real_time=True)
replace.restore()
def _mock_var_swap_data(_cls, q):
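    # Stand-in for GsDataApi.get_market_data: return a single intraday 'Last' point
    # when the query asks for last-value measures, otherwise three daily varSwap rows.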
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4]}, index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
data = {
'varSwap': [1, 2, 3]
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
return out
def test_var_swap():
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_data)
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=pd.date_range("2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m')
assert actual.empty
replace.restore()
def _mock_var_swap_fwd(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')] * 2)
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
d2 = {
'varSwap': [1.5, 2.5, 3.5],
'tenor': ['13m'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df2 = MarketDataResponseFrame(data=d2, index=idx)
out = pd.concat([df1, df2])
out.dataset_ids = _test_datasets
return out
def _mock_var_swap_1t(_cls, q):
queries = q.get('queries', [])
if len(queries) > 0 and 'Last' in queries[0]['measures']:
return MarketDataResponseFrame({'varSwap': [4, 4.5], 'tenor': ['1y', '13m']},
index=[pd.Timestamp('2019-01-04T12:00:00Z')])
idx = pd.date_range(start="2019-01-01", periods=3, freq="D")
d1 = {
'varSwap': [1, 2, 3],
'tenor': ['1y'] * 3
}
df1 = MarketDataResponseFrame(data=d1, index=idx)
df1.dataset_ids = _test_datasets
return df1
def test_var_swap_fwd():
# bad input
with pytest.raises(MqError):
tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', 500)
# regular
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_fwd)
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '1y', '13m']
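    # The expected values below are consistent with the forward-variance identity
    # sigma_fwd**2 * (T2 - T1) = sigma2**2 * T2 - sigma1**2 * T1 with T in months
    # (13m vs. 1y): e.g. sqrt(1.5**2 * 13 - 1**2 * 12) = sqrt(17.25) ~= 4.1533.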
expected = pd.Series([4.1533, 5.7663, 7.1589, 8.4410], name='varSwap',
index=pd.date_range(start="2019-01-01", periods=4, freq="D"))
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
# no data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no data for a tenor
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', _mock_var_swap_1t)
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# no such tenors
tenors_mock.return_value = []
actual = tm.var_swap(Index('MA123', AssetClass.Equity, '123'), '1m', '1y')
assert actual.empty
assert actual.dataset_ids == ()
# finish
replace.restore()
def _var_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'varSwap': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='varSwap')
expected = pd.Series([1, 2, 3, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _var_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.var_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def _var_term_fwd():
idx = pd.date_range('2018-01-01', periods=2, freq='D')
def mock_var_swap(_asset, tenor, _forward_start_date, **_kwargs):
if tenor == '1m':
series = tm.ExtendedSeries([1, 2], idx, name='varSwap')
series.dataset_ids = _test_datasets
elif tenor == '2m':
series = tm.ExtendedSeries([3, 4], idx, name='varSwap')
series.dataset_ids = _test_datasets
else:
series = tm.ExtendedSeries()
series.dataset_ids = ()
return series
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.var_swap', Mock())
market_mock.side_effect = mock_var_swap
tenors_mock = replace('gs_quant.timeseries.measures._var_swap_tenors', Mock())
tenors_mock.return_value = ['1m', '2m', '3m']
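    # With forward_start_date='1m', each tenor's last observation (2 and 4) appears
    # to be re-indexed to the tenor offset from the final data date: 2018-01-02 + 1m/2m.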
actual = tm.var_term(Index('MA123', AssetClass.Equity, '123'), forward_start_date='1m')
idx = pd.DatetimeIndex(['2018-02-02', '2018-03-02'], name='varSwap')
expected = pd.Series([2, 4], name='varSwap', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual), check_names=False)
assert actual.dataset_ids == _test_datasets
market_mock.assert_called()
replace.restore()
return actual
def test_var_term():
with DataContext('2018-01-01', '2019-01-01'):
_var_term_typical()
_var_term_empty()
_var_term_fwd()
with DataContext('2019-01-01', '2019-07-04'):
_var_term_fwd()
with DataContext('2018-01-16', '2018-12-31'):
out = _var_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(MqError):
tm.var_term(..., pricing_date=300)
def test_forward_vol():
idx = pd.DatetimeIndex([datetime.date(2020, 5, 1), datetime.date(2020, 5, 2)] * 4)
data = {
'impliedVolatility': [2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5],
'tenor': ['1m', '1m', '2m', '2m', '3m', '3m', '4m', '4m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([5.58659, 5.47723], name='forwardVol',
index=pd.to_datetime(['2020-05-01', '2020-05-02']))
with DataContext('2020-01-01', '2020-09-01'):
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
with DataContext('2020-01-01', '2020-09-01'):
actual_fx = tm.forward_vol(Cross('ABCDE', 'EURUSD'), '1m', '2m', tm.VolReference.SPOT, 100)
assert_series_equal(expected, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# no data for required tenor
market_mock.reset_mock()
market_mock.return_value = MarketDataResponseFrame(data={'impliedVolatility': [2.1, 3.1, 5.1],
'tenor': ['1m', '2m', '4m']},
index=[datetime.date(2020, 5, 1)] * 3)
actual = tm.forward_vol(Index('MA123', AssetClass.Equity, '123'), '1m', '2m', tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol(..., '1m', '2m', tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def test_forward_vol_term():
idx = pd.DatetimeIndex([datetime.date(2020, 4, 1), datetime.date(2020, 4, 2)] * 6)
data = {
'impliedVolatility': [1.1, 1, 2.1, 2, 3.1, 3, 4.1, 4, 5.1, 5, 6.1, 6],
'tenor': ['1w', '1w', '1m', '1m', '5w', '5w', '2m', '2m', '3m', '3m', '5m', '5m']
}
out = MarketDataResponseFrame(data=data, index=idx)
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
# Equity
expected = pd.Series([np.nan, 5.29150, 6.55744], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02'], name='expirationDate'))
with DataContext('2020-01-01', '2020-07-31'):
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100,
datetime.date(2020, 4, 2))
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
# FX
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
expected_fx = pd.Series([np.nan, 5.29150, 6.55744, 7.24569], name='forwardVolTerm',
index=pd.DatetimeIndex(['2020-05-01', '2020-06-02', '2020-07-02', '2020-09-02'],
name='expirationDate'))
with DataContext('2020-01-01', '2020-09-02'):
actual_fx = tm.forward_vol_term(Cross('ABCDE', 'EURUSD'), tm.VolReference.SPOT, 100)
assert_series_equal(expected_fx, pd.Series(actual_fx))
assert actual_fx.dataset_ids == _test_datasets
# no data
market_mock.reset_mock()
market_mock.return_value = mock_empty_market_data_response()
actual = tm.forward_vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.SPOT, 100)
assert actual.empty
# real-time
with pytest.raises(NotImplementedError):
tm.forward_vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
replace.restore()
def _vol_term_typical(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.vol_term(Index('MA123', AssetClass.Equity, '123'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _vol_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = MarketDataResponseFrame()
actual = tm.vol_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'), tm.VolReference.DELTA_CALL, 777)
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_vol_term():
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_typical(tm.VolReference.SPOT, 100)
_vol_term_typical(tm.VolReference.NORMALIZED, 4)
_vol_term_typical(tm.VolReference.DELTA_PUT, 50)
_vol_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _vol_term_typical(tm.VolReference.SPOT, 100)
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.vol_term(..., tm.VolReference.SPOT, 100, real_time=True)
with pytest.raises(MqError):
tm.vol_term(Index('MA123', AssetClass.Equity, '123'), tm.VolReference.DELTA_NEUTRAL, 0)
def _vol_term_fx(reference, value):
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'impliedVolatility': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
cross_mock = replace('gs_quant.timeseries.measures.cross_stored_direction_for_fx_vol', Mock())
cross_mock.return_value = 'EURUSD'
actual = tm.vol_term(Cross('ABCDE', 'EURUSD'), reference, value)
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='impliedVolatility', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def test_vol_term_fx():
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.SPOT, 50)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.NORMALIZED, 1)
with pytest.raises(MqError):
tm.vol_term(Cross('MABLUE', 'BLUE'), tm.VolReference.DELTA_NEUTRAL, 1)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_CALL, 50)
with DataContext('2018-01-01', '2019-01-01'):
_vol_term_fx(tm.VolReference.DELTA_PUT, 50)
def _fwd_term_typical():
assert DataContext.current_is_set
data = {
'tenor': ['1w', '2w', '1y', '2y'],
'forward': [1, 2, 3, 4]
}
out = MarketDataResponseFrame(data=data, index=pd.DatetimeIndex(['2018-01-01'] * 4))
out.dataset_ids = _test_datasets
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = out
actual = tm.fwd_term(Index('MA123', AssetClass.Equity, '123'))
idx = pd.DatetimeIndex(['2018-01-08', '2018-01-15', '2019-01-01', '2020-01-01'], name='expirationDate')
expected = pd.Series([1, 2, 3, 4], name='forward', index=idx)
expected = expected.loc[DataContext.current.start_date: DataContext.current.end_date]
if expected.empty:
assert actual.empty
else:
assert_series_equal(expected, pd.Series(actual))
assert actual.dataset_ids == _test_datasets
market_mock.assert_called_once()
replace.restore()
return actual
def _fwd_term_empty():
replace = Replacer()
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
actual = tm.fwd_term(Index('MAXYZ', AssetClass.Equity, 'XYZ'))
assert actual.empty
assert actual.dataset_ids == ()
market_mock.assert_called_once()
replace.restore()
def test_fwd_term():
with DataContext('2018-01-01', '2019-01-01'):
_fwd_term_typical()
_fwd_term_empty()
with DataContext('2018-01-16', '2018-12-31'):
out = _fwd_term_typical()
assert out.empty
assert out.dataset_ids == _test_datasets
with pytest.raises(NotImplementedError):
tm.fwd_term(..., real_time=True)
def test_bucketize_price():
target = {
'7x24': [27.323461],
'offpeak': [26.004816],
'peak': [27.982783],
'7x8': [26.004816],
'2x16h': [],
'monthly': [],
'CAISO 7x24': [26.953743375],
'CAISO peak': [29.547952562499997],
'MISO 7x24': [27.076390749999998],
'MISO offpeak': [25.263605624999997],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_commod)
mock_pjm = Index('MA001', AssetClass.Commod, 'PJM')
mock_caiso = Index('MA002', AssetClass.Commod, 'CAISO')
mock_miso = Index('MA003', AssetClass.Commod, 'MISO')
with DataContext(datetime.date(2019, 5, 1), datetime.date(2019, 5, 1)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['MISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['MISO offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'CAISO'
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['CAISO 7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_caiso, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['CAISO peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
bbid_mock.return_value = 'PJM'
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='offpeak')
assert_series_equal(pd.Series(target['offpeak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='peak')
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='7x8')
assert_series_equal(pd.Series(target['7x8'],
index=[datetime.date(2019, 5, 1)],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', bucket='2x16h')
assert_series_equal(pd.Series(target['2x16h'],
index=[],
name='price'),
pd.Series(actual))
actual = tm.bucketize_price(mock_pjm, 'LMP', granularity='m', bucket='7X24')
assert_series_equal(pd.Series(target['monthly'],
index=[],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='7X24', real_time=True)
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_caiso, 'LMP', bucket='weekday')
with pytest.raises(ValueError):
tm.bucketize_price(mock_pjm, 'LMP', granularity='yearly')
replace.restore()
# No market data
market_mock = replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', Mock())
market_mock.return_value = mock_empty_market_data_response()
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'MISO'
actual = tm.bucketize_price(mock_miso, 'LMP', bucket='7x24')
assert_series_equal(pd.Series(dtype='float64'), pd.Series(actual))
replace.restore()
def test_forward_price():
# US Power
target = {
'7x24': [19.46101],
'peak': [23.86745],
'J20 7x24': [18.11768888888889],
'J20-K20 7x24': [19.283921311475414],
'J20-K20 offpeak': [15.82870707070707],
'J20-K20 7x8': [13.020144262295084],
}
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_forward_price)
mock_spp = Index('MA001', AssetClass.Commod, 'SPP')
with DataContext(datetime.date(2019, 1, 2), datetime.date(2019, 1, 2)):
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='PEAK'
)
assert_series_equal(pd.Series(target['peak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['J20-K20 7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='offpeak'
)
assert_series_equal(pd.Series(target['J20-K20 offpeak'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='J20-K20',
bucket='7x8'
)
assert_series_equal(pd.Series(target['J20-K20 7x8'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
actual = tm.forward_price(mock_spp,
price_method='lmp',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(target['7x24'],
index=[datetime.date(2019, 1, 2)],
name='price'),
pd.Series(actual))
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='5Q20',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='Invalid',
bucket='PEAK'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='3H20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='F20-I20',
bucket='7x24'
)
with pytest.raises(ValueError):
tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2H20',
bucket='7x24',
real_time=True
)
replace.restore()
replace = Replacer()
replace('gs_quant.timeseries.measures.GsDataApi.get_market_data', mock_missing_bucket_forward_price)
bbid_mock = replace('gs_quant.timeseries.measures.Asset.get_identifier', Mock())
bbid_mock.return_value = 'SPP'
    # mark for the '7x8' bucket is missing, so forward_price should return an empty series
    actual = tm.forward_price(mock_spp,
price_method='LMP',
contract_range='2Q20',
bucket='7x24'
)
assert_series_equal(pd.Series(), | pd.Series(actual) | pandas.Series |
import numpy as np
import pandas as pd
def compute_date_difference(df: pd.DataFrame) -> pd.DataFrame:
df.construction_year = pd.to_datetime(df.construction_year, format='%Y')
df.date_recorded = pd.to_datetime(df.date_recorded, format='%Y/%m/%d')
df['date_diff'] = (df.date_recorded - df.construction_year).dt.days
df = (
df.query('date_diff > 0')
.assign(log_date_diff = np.log(df.date_diff))
)
return df
def prepare_data(xfile: str, yfile: str) -> pd.DataFrame:
df = pd.read_csv(xfile).set_index('id')
y = pd.read_csv(yfile).set_index('id')
    # Simplifying the problem to binary (working vs. not working); can generalize in the future
label_map = {'functional': 1, 'functional needs repair': 1,
'non functional': 0}
payment_map = {'monthly': 1, 'never pay': 0, 'unknown': 0, 'annually': 1,
'per bucket': 1, 'on failure': 1, 'other': 1}
quantity_map = {'dry': 'dry', 'unknown': 'dry', 'enough': 'enough',
'seasonal': 'enough', 'insufficient': 'insufficient'}
df = (
df.query('longitude != 0 & population > 0')
.query('construction_year != 0')
.assign(log_population = np.log(df['population']))
.join(y, on='id', how='inner')
.rename(columns={'status_group': 'working_well'})
.replace({'working_well': label_map})
.rename(columns={'payment_type': 'requires_payment'})
.replace({'requires_payment': payment_map})
.replace({'quantity': quantity_map})
.pipe(compute_date_difference)
)
    # The model will work with integer values representing the administrative
    # regions, so I remap them from strings to numbers
unique_regions = np.sort(df.region.unique())
region_map = dict(zip(unique_regions, range(len(unique_regions))))
df['region_code'] = df.region.map(region_map)
# After investigating in the Pluto notebooks, I'm only going to work with
# a subset of the columns (also removing the LGA & Ward administrative
# levels)
cols = ['region', 'region_code', 'quantity', 'source', 'latitude',
'longitude', 'log_population', 'waterpoint_type', 'log_date_diff',
'requires_payment', 'working_well']
df = df.filter(items=cols)
# To work with the Bayesian logistic regression model, we must
# one-hot-encode the categorical features
one_hot_features = ['quantity', 'source', 'waterpoint_type']
df = | pd.get_dummies(df, columns=one_hot_features) | pandas.get_dummies |
import datetime
import pathlib
import pickle
from io import BytesIO
from unittest.mock import MagicMock, patch
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pytest
import yaml
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.pipeline import Pipeline
from ml_tooling import Model
from ml_tooling.data import Dataset
from ml_tooling.logging import Log
from ml_tooling.metrics import Metrics, Metric
from ml_tooling.result import Result
from ml_tooling.search import Real
from ml_tooling.storage import FileStorage
from ml_tooling.transformers import DFStandardScaler, DFFeatureUnion
from ml_tooling.utils import MLToolingError, DatasetError
plt.switch_backend("agg")
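# Use a non-interactive matplotlib backend so plotting code in these tests runs headless.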
class TestBaseClass:
def test_is_properties_works(
self, classifier: Model, regression: Model, pipeline_linear: Pipeline
):
assert classifier.is_regressor is False
assert classifier.is_classifier is True
assert regression.is_regressor is True
assert regression.is_classifier is False
assert classifier.is_pipeline is False
assert regression.is_pipeline is False
pipeline = Model(pipeline_linear)
assert pipeline.is_pipeline is True
def test_instantiate_model_with_non_estimator_pipeline_fails(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
with pytest.raises(
MLToolingError,
match="You passed a Pipeline without an estimator as the last step",
):
Model(example_pipe)
def test_instantiate_model_with_feature_pipeline_sets_estimator_correctly(self):
example_pipe = Pipeline([("scale", DFStandardScaler)])
clf = LinearRegression()
model = Model(clf, feature_pipeline=example_pipe)
expected = Pipeline([("features", example_pipe), ("estimator", clf)])
assert model.estimator.steps == expected.steps
def test_instantiate_model_with_other_object_fails(self):
with pytest.raises(
MLToolingError,
match="Expected a Pipeline or Estimator - got <class 'dict'>",
):
Model({})
def test_default_metric_getter_works_as_expected_classifier(self):
rf = Model(RandomForestClassifier(n_estimators=10))
assert rf.config.CLASSIFIER_METRIC == "accuracy"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "accuracy"
rf.default_metric = "fowlkes_mallows_score"
assert rf.config.CLASSIFIER_METRIC == "fowlkes_mallows_score"
assert rf.config.REGRESSION_METRIC == "r2"
assert rf.default_metric == "fowlkes_mallows_score"
rf.config.reset_config()
def test_default_metric_getter_works_as_expected_regressor(self):
linreg = Model(LinearRegression())
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "r2"
assert linreg.default_metric == "r2"
linreg.default_metric = "neg_mean_squared_error"
assert linreg.config.CLASSIFIER_METRIC == "accuracy"
assert linreg.config.REGRESSION_METRIC == "neg_mean_squared_error"
assert linreg.default_metric == "neg_mean_squared_error"
linreg.config.reset_config()
def test_default_metric_works_as_expected_without_pipeline(self):
rf = Model(RandomForestClassifier(n_estimators=10))
linreg = Model(LinearRegression())
assert "accuracy" == rf.default_metric
assert "r2" == linreg.default_metric
rf.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == rf.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
rf.config.reset_config()
linreg.config.reset_config()
def test_default_metric_works_as_expected_with_pipeline(
self, pipeline_logistic: Pipeline, pipeline_linear: Pipeline
):
logreg = Model(pipeline_logistic)
linreg = Model(pipeline_linear)
assert "accuracy" == logreg.default_metric
assert "r2" == linreg.default_metric
logreg.config.CLASSIFIER_METRIC = "fowlkes_mallows_score"
linreg.config.REGRESSION_METRIC = "neg_mean_squared_error"
assert "fowlkes_mallows_score" == logreg.default_metric
assert "neg_mean_squared_error" == linreg.default_metric
logreg.config.reset_config()
linreg.config.reset_config()
def test_regression_model_can_be_saved(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
classifier.score_estimator(train_iris_dataset)
load_storage = FileStorage(tmp_path)
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
loaded_model = classifier.load_estimator(saved_model_path, storage=load_storage)
assert loaded_model.estimator.get_params() == classifier.estimator.get_params()
def test_regression_model_filename_is_generated_correctly(
self, classifier: Model, tmp_path: pathlib.Path, train_iris_dataset
):
storage = FileStorage(tmp_path)
saved_model_path = classifier.save_estimator(storage)
assert saved_model_path.exists()
assert datetime.datetime.strptime(
saved_model_path.stem, f"{classifier.estimator_name}_%Y_%m_%d_%H_%M_%S_%f"
)
def test_save_model_saves_pipeline_correctly(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
saved_model_path = model.save_estimator(FileStorage(tmp_path))
assert saved_model_path.exists()
@patch("ml_tooling.logging.log_estimator.get_git_hash")
def test_save_estimator_saves_logging_dir_correctly(
self, mock_hash: MagicMock, classifier: Model, tmp_path: pathlib.Path
):
mock_hash.return_value = "1234"
with classifier.log(str(tmp_path)):
expected_file = classifier.save_estimator(FileStorage(tmp_path))
assert expected_file.exists()
assert (
"LogisticRegression" in [str(file) for file in tmp_path.rglob("*.yaml")][0]
)
mock_hash.assert_called_once()
def test_save_estimator_with_prod_flag_saves_correctly(self, classifier: Model):
mock_storage = MagicMock()
classifier.save_estimator(mock_storage, prod=True)
mock_storage.save.assert_called_once_with(
classifier.estimator, "production_model.pkl", prod=True
)
def test_save_estimator_uses_default_storage_if_no_storage_is_passed(
self, tmp_path: pathlib.Path, classifier: Model
):
classifier.config.ESTIMATOR_DIR = tmp_path
classifier.save_estimator()
models = classifier.config.default_storage.get_list()
assert len(models) == 1
new_classifier = Model.load_estimator(models[0])
assert (
classifier.estimator.get_params() == new_classifier.estimator.get_params()
)
@patch("ml_tooling.baseclass.import_path")
def test_can_load_production_estimator(
self, mock_path: MagicMock, classifier: Model
):
buffer = BytesIO()
pickle.dump(classifier.estimator, buffer)
buffer.seek(0)
mock_path.return_value.__enter__.return_value = buffer
model = Model.load_production_estimator("test")
assert isinstance(model, Model)
assert isinstance(model.estimator, BaseEstimator)
def test_log_context_manager_works_as_expected(self, regression: Model):
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
with regression.log("test"):
assert regression.config.LOG is True
assert "test" == regression.config.RUN_DIR.name
assert "runs" == regression.config.RUN_DIR.parent.name
assert regression.config.LOG is False
assert "runs" == regression.config.RUN_DIR.name
assert "test" not in regression.config.RUN_DIR.parts
def test_log_context_manager_logs_when_scoring_model(
self, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(LinearRegression())
runs = tmp_path / "runs"
with model.log(str(runs)):
result = model.score_estimator(train_iris_dataset)
for file in runs.rglob("LinearRegression_*"):
with file.open() as f:
log_result = yaml.safe_load(f)
assert result.metrics.score == log_result["metrics"]["r2"]
assert result.model.estimator_name == log_result["estimator_name"]
def test_test_models_logs_when_given_dir(
self, tmp_path: pathlib.Path, train_iris_dataset
):
test_models_log = tmp_path / "test_estimators"
Model.test_estimators(
train_iris_dataset,
[
RandomForestClassifier(n_estimators=10),
DummyClassifier(strategy="prior"),
],
log_dir=str(test_models_log),
metrics="accuracy",
)
for file in test_models_log.rglob("*.yaml"):
with file.open() as f:
result = yaml.safe_load(f)
model_name = result["model_name"]
assert model_name in {
"IrisData_RandomForestClassifier",
"IrisData_DummyClassifier",
}
def test_dump_serializes_correctly_without_pipeline(self, regression: Model):
serialized_model = regression.to_dict()
expected = [
{
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
}
]
assert serialized_model == expected
def test_dump_serializes_correctly_with_pipeline(self, pipeline_linear: Pipeline):
serialized_model = Model(pipeline_linear).to_dict()
expected = [
{
"name": "scale",
"module": "sklearn.preprocessing._data",
"classname": "StandardScaler",
"params": {"copy": True, "with_mean": True, "with_std": True},
},
{
"name": "estimator",
"module": "sklearn.linear_model._base",
"classname": "LinearRegression",
"params": {
"copy_X": True,
"fit_intercept": True,
"n_jobs": None,
"normalize": False,
},
},
]
assert serialized_model == expected
def test_to_dict_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion
):
model = Model(feature_union_classifier)
result = model.to_dict()
assert len(result) == 2
union = result[0]
assert union["name"] == "features"
assert len(union["params"]) == 2
pipe1 = union["params"][0]
pipe2 = union["params"][1]
assert pipe1["name"] == "pipe1"
select1 = pipe1["params"][0]
scale1 = pipe1["params"][1]
assert select1["name"] == "select"
assert select1["classname"] == "Select"
assert select1["params"] == {
"columns": ["sepal length (cm)", "sepal width (cm)"]
}
assert scale1["name"] == "scale"
assert scale1["classname"] == "DFStandardScaler"
assert scale1["params"] == {"copy": True, "with_mean": True, "with_std": True}
assert pipe2["name"] == "pipe2"
select2 = pipe2["params"][0]
scale2 = pipe2["params"][1]
assert select2["name"] == "select"
assert select2["classname"] == "Select"
assert select2["params"] == {
"columns": ["petal length (cm)", "petal width (cm)"]
}
assert scale2["name"] == "scale"
assert scale2["classname"] == "DFStandardScaler"
assert scale2["params"] == {"copy": True, "with_mean": True, "with_std": True}
def test_from_yaml_serializes_correctly_with_feature_union(
self, feature_union_classifier: DFFeatureUnion, tmp_path: pathlib.Path
):
model = Model(feature_union_classifier)
result = model.to_dict()
log = Log(
name="test", metrics=Metrics.from_list(["accuracy"]), estimator=result
)
log.save_log(tmp_path)
new_model = Model.from_yaml(log.output_path)
assert len(new_model.estimator.steps[0][1].transformer_list) == 2
new_steps = new_model.estimator.steps
old_steps = model.estimator.steps
assert new_steps[0][0] == old_steps[0][0]
assert isinstance(new_steps[0][1], type(old_steps[0][1]))
new_union = new_steps[0][1].transformer_list
old_union = old_steps[0][1].transformer_list
assert len(new_union) == len(old_union)
for new_transform, old_transform in zip(new_union, old_union):
assert new_transform[1].steps[0][0] == old_transform[1].steps[0][0]
assert (
new_transform[1].steps[0][1].get_params()
== old_transform[1].steps[0][1].get_params()
)
def test_can_load_serialized_model_from_pipeline(
self, pipeline_linear: Pipeline, tmp_path: pathlib.Path
):
model = Model(pipeline_linear)
log = Log(
name="test",
estimator=model.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
for model1, model2 in zip(model.estimator.steps, model2.estimator.steps):
assert model1[0] == model2[0]
assert model1[1].get_params() == model2[1].get_params()
def test_can_load_serialized_model_from_estimator(
self, classifier: Model, tmp_path: pathlib.Path
):
log = Log(
name="test",
estimator=classifier.to_dict(),
metrics=Metrics([Metric("accuracy", score=1.0)]),
)
log.save_log(tmp_path)
model2 = Model.from_yaml(log.output_path)
assert model2.estimator.get_params() == classifier.estimator.get_params()
class TestTrainEstimator:
def test_train_model_sets_result_to_none(
self, regression: Model, train_iris_dataset
):
assert regression.result is not None
regression.train_estimator(train_iris_dataset)
assert regression.result is None
def test_train_model_followed_by_score_model_returns_correctly(
self, pipeline_logistic: Pipeline, train_iris_dataset
):
model = Model(pipeline_logistic)
model.train_estimator(train_iris_dataset)
model.score_estimator(train_iris_dataset)
assert isinstance(model.result, Result)
def test_train_model_errors_correctly_when_not_scored(
self, pipeline_logistic: Pipeline, tmp_path: pathlib.Path, train_iris_dataset
):
model = Model(pipeline_logistic)
with pytest.raises(MLToolingError, match="You haven't scored the estimator"):
with model.log(str(tmp_path)):
model.train_estimator(train_iris_dataset)
model.save_estimator(FileStorage(tmp_path))
def test_can_score_estimator_with_no_y_value(self):
class DummyEstimator(BaseEstimator, RegressorMixin):
def __init__(self):
self.average = None
def fit(self, x, y=None):
self.average = np.mean(x, axis=0)
return self
def predict(self, x):
return self.average
class DummyData(Dataset):
def load_training_data(self):
return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]}), None
def load_prediction_data(self, *args, **kwargs):
return pd.DataFrame({"col1": [1, 2, 3, 4], "col2": [4, 5, 6, 7]})
model = Model(DummyEstimator())
data = DummyData()
model.train_estimator(data)
assert np.all(np.isclose(model.estimator.average, np.array([2.5, 5.5])))
with pytest.raises(DatasetError, match="The dataset does not define a y value"):
data.create_train_test()
class TestScoreEstimator:
def test_score_estimator_creates_train_test_data(
self, boston_dataset, train_boston_dataset
):
model = Model(LinearRegression())
data = boston_dataset()
model.score_estimator(data)
test = train_boston_dataset
pd.testing.assert_frame_equal(data.test_x, test.test_x)
assert np.array_equal(data.test_y, test.test_y)
pd.testing.assert_frame_equal(data.train_x, test.train_x)
assert np.array_equal(data.train_y, test.train_y)
def test_score_estimator_creates_train_test_data_classification(
self, iris_dataset, train_iris_dataset
):
model = Model(LogisticRegression())
data = iris_dataset()
model.score_estimator(data)
test = train_iris_dataset
pd.testing.assert_frame_equal(data.test_x, test.test_x)
assert np.array_equal(data.test_y, test.test_y)
pd.testing.assert_frame_equal(data.train_x, test.train_x)
assert np.array_equal(data.train_y, test.train_y)
def test_score_estimator_creates_train_test_data_with_changed_config(
self, boston_dataset
):
model = Model(LinearRegression())
model.config.RANDOM_STATE = 1
model.config.TEST_SIZE = 0.5
model.config.TRAIN_TEST_SHUFFLE = False
data = boston_dataset()
model.score_estimator(data)
test = boston_dataset()
test.create_train_test(stratify=False, shuffle=False, seed=1, test_size=0.5)
| pd.testing.assert_frame_equal(data.test_x, test.test_x) | pandas.testing.assert_frame_equal |
import json
import numpy as np
import pandas as pd
import xarray as xr
import cubepy
from pyplan_engine.classes.evaluators.BaseEvaluator import BaseEvaluator
from pyplan_engine.common.classes.filterChoices import filterChoices
from pyplan_engine.common.classes.indexValuesReq import IndexValuesReq
from cubepy.cube import kindToString, safemax, safemean, safemin, safesum
class CubepyEvaluator(BaseEvaluator):
PAGESIZE = 100
def evaluateNode(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
if isinstance(result, cubepy.Cube):
return self.cubeEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
elif isinstance(result, cubepy.Index):
return self.indexEvaluate(result, nodeDic, nodeId, dims, rows, columns, summaryBy, bottomTotal, rightTotal, fromRow, toRow)
def cubeEvaluate(self, result, nodeDic, nodeId, dims=None, rows=None, columns=None, summaryBy="sum", bottomTotal=False, rightTotal=False, fromRow=0, toRow=0):
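        # Build the grid view of a cube node: pick the aggregation function from
        # summaryBy, apply the dims/rows/columns filters, reduce the cube to the
        # requested row and column dimensions, then optionally append total
        # row/column and sanitise inf/nan values.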
sby = safesum
if summaryBy == 'avg':
sby = safemean
elif summaryBy == 'max':
sby = safemax
elif summaryBy == 'min':
sby = safemin
if (fromRow is None) or int(fromRow) <= 0:
fromRow = 1
if (toRow is None) or int(toRow) < 1:
toRow = 100
fromRow = int(fromRow)
toRow = int(toRow)
result = self.applyHierarchy(
result, nodeDic, nodeId, dims, rows, columns, sby)
_filters = []
_rows = []
_columns = []
if not rows is None:
for row in rows:
if self.hasDim(result, str(row["field"])):
_rows.append(str(row["field"]))
self.addToFilter(nodeDic, row, _filters)
if not columns is None:
for column in columns:
if self.hasDim(result, str(column["field"])):
_columns.append(str(column["field"]))
self.addToFilter(nodeDic, column, _filters)
if not dims is None:
for dim in dims:
if self.hasDim(result, str(dim["field"])):
self.addToFilter(nodeDic, dim, _filters)
tmp = None
if len(_rows) == 0 and len(_columns) == 0 and result.ndim > 0:
#_rows.append( result.dims[0] )
tmp = cubepy.Cube([], result.filter(_filters).reduce(sby))
else:
tmp = result.filter(_filters).reduce(sby, keep=(
_rows + _columns)).transpose(_rows + _columns)
finalValues = tmp.values
finalIndexes = []
if tmp.ndim > 0:
finalIndexes = tmp.axes[0].values
finalColumns = ["Total"]
if tmp.ndim == 2:
finalColumns = tmp.axes[1].values
        # Add totals
_totalRow = None
if bottomTotal and len(_rows) > 0:
# add total row
#finalIndexes = np.append(finalIndexes,"Total")
if tmp.ndim == 1:
_totalRow = finalValues.sum(axis=0).reshape(1)
#finalValues = np.append( finalValues, finalValues.sum(axis=0).reshape(1), axis=0)
else:
_totalRow = finalValues.sum(
axis=0).reshape(1, len(finalValues[0]))
_totalRow = _totalRow[0]
if rightTotal:
_totalRow = np.append(_totalRow, finalValues.sum())
if rightTotal and len(_columns) > 0:
# add total column
if tmp.ndim == 1:
finalIndexes = np.append(finalIndexes, "Total")
finalValues = np.append(
finalValues, finalValues.sum(axis=0).reshape(1), axis=0)
else:
finalColumns = np.append(finalColumns, "Total")
finalValues = np.append(finalValues, finalValues.sum(
axis=1).reshape(len(finalValues), 1), axis=1)
        # with a single dimension
        # check for inf values
if kindToString(finalValues.dtype.kind) == "numeric":
if np.isinf(finalValues).any():
finalValues[np.isinf(finalValues)] = None
            # check if there are nan values
# if np.isnan(finalValues).any():
if | pd.isnull(finalValues) | pandas.isnull |
"""
Testing the ``modelchain`` module.
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_turbine as wt
import windpowerlib.modelchain as mc
class TestModelChain:
@classmethod
def setup_class(self):
"""Setup default values"""
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200',
'power_curve': pd.DataFrame(
data={'value': [0.0, 4200 * 1000],
'wind_speed': [0.0, 25.0]})}
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack((temperature_2m, temperature_10m, pressure_0m,
wind_speed_8m, wind_speed_10m, roughness_length)),
index=[0, 1],
columns=[np.array(['temperature', 'temperature', 'pressure',
'wind_speed', 'wind_speed',
'roughness_length']),
np.array([2, 10, 0, 8, 10, 0])])
def test_temperature_hub(self):
# Test modelchain with temperature_model='linear_gradient'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with temperature_model='interpolation_extrapolation'
test_mc_2 = mc.ModelChain(
wt.WindTurbine(**self.test_turbine),
temperature_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature']),
np.array([2, 10])])
# temperature_10m is closer to hub height than temperature_2m
temp_exp = pd.Series(data=[266.415, 265.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 243.5])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([10, 200])]
temp_exp = pd.Series(data=[266.415, 267.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 267.052632])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# temperature at hub height
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([100, 10])]
temp_exp = pd.Series(data=[267, 268], name=100)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
def test_density_hub(self):
# Test modelchain with density_model='barometric'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with density_model='ideal_gas'
test_mc_2 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='ideal_gas')
# Test modelchain with density_model='interpolation_extrapolation'
test_mc_3 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m,
pressure_0m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature',
'pressure']),
np.array([2, 10, 0])])
# temperature_10m is closer to hub height than temperature_2m
rho_exp = pd.Series(data=[1.30591, 1.30919])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
rho_exp = pd.Series(data=[1.30595575725, 1.30923554056])
assert_series_equal(test_mc_2.density_hub(weather_df), rho_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature',
'pressure']),
np.array([10, 200, 0])]
rho_exp = pd.Series(data=[1.30591, 1.29940])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
rho_exp = pd.Series(data=[1.30595575725, 1.29944375221])
assert_series_equal(test_mc_2.density_hub(weather_df), rho_exp)
# temperature at hub height
weather_df.columns = [np.array(['temperature', 'temperature',
'pressure']),
np.array([100, 10, 0])]
rho_exp = pd.Series(data=[1.30305, 1.29657])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
# density interpolation
density_10m = np.array([[1.30591], [1.29940]])
density_150m = np.array([[1.30305], [1.29657]])
weather_df = pd.DataFrame(np.hstack((density_10m,
density_150m)),
index=[0, 1],
columns=[np.array(['density',
'density']),
np.array([10, 150])])
rho_exp = | pd.Series(data=[1.304071, 1.297581]) | pandas.Series |
import pandas as pd
import numpy as np
import pymc3 as pm
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
import theano.tensor as tt
def fit_spindle_density_prior():
# data from Purcell
data = [[85, 177],
[89, 148],
[93, 115],
[98, 71],
[105, 42],
[117, 20],
[134, 17],
[148, 27],
[157, 39],
[165, 53],
[170, 68],
[174, 84],
[180, 102],
[184, 123],
[190, 143],
[196, 156],
[202, 165],
[210, 173],
[217, 176],
[222, 177]]
xscale = [0, 4]
yscale = [0, 800]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Beta('spindle_density', alpha=a, beta=b, observed=sample_data)
trace = pm.sample(2000)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Beta('spindle_density_mean_params', alpha=a_est, beta=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
# pm.traceplot(trace)
# plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True)
x = data_df['x']
y = data_df['y']*len(samples)*(x[1]-x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
sp_per_epoch = xscale[1]*outcome['spindle_density_mean_params']*25/60
counts, bins, patches = plt.hist(sp_per_epoch, np.arange(0, 8)-0.5, density=True)
sns.distplot(sp_per_epoch, kde=True, hist=False)
plt.show()
print(counts, bins)
def fit_spindle_duration():
data = [
[78, 163],
[80, 30],
[81, 15],
[83, 6],
[86, 8],
[91, 26],
[101, 51],
[114, 85],
[124, 105],
[137, 126],
[150, 139],
[164, 150],
[177, 156],
[194, 160],
[208, 163]
]
xscale = [0.4, 2]
yscale = [0, 4000]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Gamma('spindle_duration', alpha=a, beta=b, observed=sample_data)
trace = pm.sample(2000, njobs=1)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Gamma('spindle_density_mean_params', alpha=a_est, beta=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
pm.traceplot(trace)
plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True)
x = data_df['x']
y = data_df['y'] * len(samples) * (x[1] - x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
return samples*(2-0.4)+0.4
def fit_spindle_refractory():
data = [[88, 317],
[118, 99],
[125, 93],
[131, 97],
[137, 115],
[144, 143],
[151, 194],
[158, 223],
[175, 245],
[197, 265],
[239, 287],
[285, 297],
[355, 304],
[432, 307],
[454, 313]]
xscale = [0, 30]
yscale = [0, 0.08]
data_df = get_target_curve(data, xscale, yscale, scale=False)
sample_data = np.random.choice(a=data_df['x'], p=data_df['y'], size=1000)
with pm.Model() as model:
a = pm.HalfNormal('a', 100*10)
b = pm.HalfNormal('b', 100*10)
pm.Wald('spindle_duration', mu=a, lam=b, observed=sample_data)
trace = pm.sample(2000, njobs=1)
summary_df = pm.summary(trace)
a_est = summary_df.loc['a', 'mean']
b_est = summary_df.loc['b', 'mean']
n_samples = 10000
with pm.Model() as model:
pm.Wald('spindle_density_mean_params', mu=a_est, lam=b_est)
outcome = pm.sample(n_samples, njobs=1, nchains=1)
# pm.traceplot(trace)
# plt.show()
samples = outcome['spindle_density_mean_params']
sns.distplot(samples, kde=True, bins=100)
x = data_df['x']
y = data_df['y'] * len(samples) * (x[1] - x[0])
sns.lineplot(x, y)
plt.show()
print(summary_df)
return samples*30+0.5
def get_samples_for_refractory():
samples = fit_spindle_refractory() + fit_spindle_duration()
pd.DataFrame({'samples': samples}).to_pickle('../data/raw/refractory_prior_samples.pkl')
def fit_refractory_minus_duration():
sample_data = | pd.read_pickle('../data/raw/refractory_prior_samples.pkl') | pandas.read_pickle |
"""Live or test trading account"""
import re
import requests
import numpy as np
import pandas as pd
from binance.client import Client
from models.exchange.binance import AuthAPI as BAuthAPI, PublicAPI as BPublicAPI
from models.exchange.coinbase_pro import AuthAPI as CBAuthAPI
class TradingAccount():
def __init__(self, app=None):
"""Trading account object model
Parameters
----------
app : object
PyCryptoBot object
"""
# config needs to be a dictionary, empty or otherwise
if app is None:
raise TypeError('App is not a PyCryptoBot object.')
if app.getExchange() == 'binance':
self.client = Client(app.getAPIKey(), app.getAPISecret(), { 'verify': False, 'timeout': 20 })
# if trading account is for testing it will be instantiated with a balance of 1000
self.balance = pd.DataFrame([
[ 'QUOTE', 1000, 0, 1000 ],
[ 'BASE', 0, 0, 0 ]],
columns=['currency','balance','hold','available'])
self.app = app
if app.isLive() == 1:
self.mode = 'live'
else:
self.mode = 'test'
self.orders = pd.DataFrame()
def __convertStatus(self, val):
if val == 'filled':
return 'done'
else:
return val
def _checkMarketSyntax(self, market):
"""Check that the market is syntactically correct
Parameters
----------
market : str
market to check
"""
if self.app.getExchange() == 'coinbasepro' and market != '':
p = re.compile(r"^[1-9A-Z]{2,5}\-[1-9A-Z]{2,5}$")
if not p.match(market):
raise TypeError('Coinbase Pro market is invalid.')
elif self.app.getExchange() == 'binance':
p = re.compile(r"^[A-Z]{6,12}$")
if not p.match(market):
raise TypeError('Binance market is invalid.')
def getOrders(self, market='', action='', status='all'):
"""Retrieves orders either live or simulation
Parameters
----------
market : str, optional
Filters orders by market
action : str, optional
Filters orders by action
status : str
Filters orders by status, defaults to 'all'
"""
# validate market is syntactically correct
self._checkMarketSyntax(market)
if action != '':
# validate action is either a buy or sell
if action not in ['buy', 'sell']:
raise ValueError('Invalid order action.')
# validate status is open, pending, done, active, filled or all
if status not in ['open', 'pending', 'done', 'active', 'all', 'filled']:
raise ValueError('Invalid order status.')
if self.app.getExchange() == 'binance':
if self.mode == 'live':
# if config is provided and live connect to Binance account portfolio
model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIURL())
# retrieve orders from live Binance account portfolio
self.orders = model.getOrders(market, action, status)
return self.orders
else:
# return dummy orders
if market == '':
return self.orders
else:
return self.orders[self.orders['market'] == market]
if self.app.getExchange() == 'coinbasepro':
if self.mode == 'live':
# if config is provided and live connect to Coinbase Pro account portfolio
model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())
# retrieve orders from live Coinbase Pro account portfolio
self.orders = model.getOrders(market, action, status)
return self.orders
else:
# return dummy orders
if market == '':
return self.orders
else:
return self.orders[self.orders['market'] == market]
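# Hedged usage sketch (added comment; the `app` config object and the Coinbase Pro
# market shown are assumptions, not part of this module):
#   account = TradingAccount(app)
#   done_buys = account.getOrders(market='BTC-GBP', action='buy', status='done')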
def getBalance(self, currency=''):
"""Retrieves balance either live or simulation
Parameters
----------
currency: str, optional
Filters orders by currency
"""
if self.app.getExchange() == 'binance':
if self.mode == 'live':
model = BAuthAPI(self.app.getAPIKey(), self.app.getAPISecret())
df = model.getAccount()
if isinstance(df, pd.DataFrame):
if currency == '':
# retrieve all balances
return df
else:
# retrieve balance of specified currency
df_filtered = df[df['currency'] == currency]['available']
if len(df_filtered) == 0:
# return nil balance if no positive balance was found
return 0.0
else:
# return balance of specified currency (if positive)
if currency in ['EUR', 'GBP', 'USD']:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))
else:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))
else:
return 0.0
else:
# return dummy balances
if currency == '':
# retrieve all balances
return self.balance
else:
if self.app.getExchange() == 'binance':
self.balance = self.balance.replace('QUOTE', currency)
else:
# replace QUOTE and BASE placeholders
if currency in ['EUR','GBP','USD']:
self.balance = self.balance.replace('QUOTE', currency)
else:
self.balance = self.balance.replace('BASE', currency)
if self.balance.currency[self.balance.currency.isin([currency])].empty:
self.balance.loc[len(self.balance)] = [currency, 0, 0, 0]
# retrieve balance of specified currency
df = self.balance
df_filtered = df[df['currency'] == currency]['available']
if len(df_filtered) == 0:
# return nil balance if no positive balance was found
return 0.0
else:
# return balance of specified currency (if positive)
if currency in ['EUR', 'GBP', 'USD']:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))
else:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))
else:
if self.mode == 'live':
# if config is provided and live connect to Coinbase Pro account portfolio
model = CBAuthAPI(self.app.getAPIKey(), self.app.getAPISecret(), self.app.getAPIPassphrase(), self.app.getAPIURL())
if currency == '':
# retrieve all balances
return model.getAccounts()[['currency', 'balance', 'hold', 'available']]
else:
df = model.getAccounts()
# retrieve balance of specified currency
df_filtered = df[df['currency'] == currency]['available']
if len(df_filtered) == 0:
# return nil balance if no positive balance was found
return 0.0
else:
# return balance of specified currency (if positive)
if currency in ['EUR','GBP','USD']:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))
else:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))
else:
# return dummy balances
if currency == '':
# retrieve all balances
return self.balance
else:
# replace QUOTE and BASE placeholders
if currency in ['EUR','GBP','USD']:
self.balance = self.balance.replace('QUOTE', currency)
elif currency in ['BCH','BTC','ETH','LTC','XLM']:
self.balance = self.balance.replace('BASE', currency)
if self.balance.currency[self.balance.currency.isin([currency])].empty:
self.balance.loc[len(self.balance)] = [currency,0,0,0]
# retrieve balance of specified currency
df = self.balance
df_filtered = df[df['currency'] == currency]['available']
if len(df_filtered) == 0:
# return nil balance if no positive balance was found
return 0.0
else:
# return balance of specified currency (if positive)
if currency in ['EUR','GBP','USD']:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 2))
else:
return float(self.app.truncate(float(df[df['currency'] == currency]['available'].values[0]), 4))
def saveTrackerCSV(self, market='', save_file='tracker.csv'):
"""Saves order tracker to CSV
Parameters
----------
market : str, optional
Filters orders by market
save_file : str
Output CSV file
"""
# validate market is syntactically correct
self._checkMarketSyntax(market)
if self.mode == 'live':
if self.app.getExchange() == 'coinbasepro':
# retrieve orders from live Coinbase Pro account portfolio
df = self.getOrders(market, '', 'done')
elif self.app.getExchange() == 'binance':
# retrieve orders from live Binance account portfolio
df = self.getOrders(market, '', 'done')
else:
df = | pd.DataFrame() | pandas.DataFrame |
from pathlib import Path
from typing import List
import pandas as pd
from settings.conf import (LOCAL_DATASETS_DIR, LOCAL_DIR, blacklisted,
false_positives)
from strategies.ppb import extract
from utils import list_directory
from utils.pages import check_page_orientation
def validate_files(directory: Path) -> List[Path]:
valid_files: List[Path] = []
for f in list_directory(directory):
if f.suffix != ".pdf":
continue
if f.name in false_positives:
valid_files.append(f)
continue
if f.name in blacklisted:
continue
valid = check_page_orientation(directory, f)
valid_files.append(valid)
return valid_files
def main() -> None:
files = validate_files(LOCAL_DIR)
files_len = len(files)
assets_list = []
liabs_list = []
loss_list = []
profit_list = []
for i, file in enumerate(files, start=1):
print(f"[INFO]: attempting {file.name}")
dfs = extract(file)
assets_list.append(dfs[0])
liabs_list.append(dfs[1])
loss_list.append(dfs[2])
profit_list.append(dfs[3])
print(f"[OK]: {i}/{files_len} - {file.name} done...")
print("[INFO]: All files parsed, creating DataFrames...")
assets = pd.concat(assets_list).sort_index()
liabs = | pd.concat(liabs_list) | pandas.concat |
# Import modules
import abc
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
from math import floor
from itertools import chain
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.layers import *
from tensorflow.keras import Sequential
from tensorflow.keras import regularizers
from typeguard import typechecked
from sklearn.cluster import KMeans, SpectralClustering, MiniBatchKMeans
from skimage.feature import hog
from skimage.color import rgb2gray
from scipy.cluster.vq import vq
import matplotlib.pyplot as plt
#######################################################
class alAlgo(metaclass=abc.ABCMeta):
"""
alAlgo() Documentation:
--------------------------
Purpose
----------
Parent class that will be used for making new Active Learning algo classes.
Currently, the class is very sparse. Will make adjustments as the project continues.
Attributes
----------
algo_name : str
used to keep track of name of algo in engine.log
sample_log : dict
tracks what samples are chosen each round, places sample ids in list within dict
round : int
tracks what round algo is on
predict_to_sample : bool
bool that determines whether or not the algo needs the predictions of the model to choose which samples to label
Methods
-------
@classmethod
__subclasshook__(cls, subclass):
Used to check if custom child class of alAlgo is properly made
reset(self):
set round=0 and sample_log={}
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, algo_name="NA"):
self.algo_name = algo_name
self.round = 0
self.sample_log = {}
self.predict_to_sample = False
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, '__call__') and
callable(subclass.__call__) or
NotImplemented)
def reset(self):
self.round = 0
self.sample_log = {}
@abc.abstractmethod
def __call__(self, cache: list, n: int, yh):
""" Selects which samples to get labels for """
raise NotImplementedError
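# --- Hedged illustration (added example, not part of the original API) ---------
# A minimal sketch of how a concrete child class plugs into alAlgo: it only has
# to implement __call__ and return a list of sample ids drawn from `cache`.
# The class name and the trivial "first n" selection rule are made up here.
class _FirstNDemo(alAlgo):
    def __init__(self):
        super().__init__(algo_name="FirstN (demo)")
        self.predict_to_sample = False
        self.feature_set = False
        self.single_output = False

    def __call__(self, cache: list, n: int, yh=None) -> list:
        batch = cache[:n]  # trivially pick the first n unlabeled ids
        self.sample_log[str(self.round)] = batch  # same bookkeeping as the real algos
        self.round += 1
        return batch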
#######################################################
class marginConfidence(alAlgo):
"""
marginConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula MC(x)=(1-(P(y1*|x)-P(y2*|x)))
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Margin Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate MC(x) values
yh_vals = yh.iloc[:, 1:].values
MC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
sample[::-1].sort()
y1, y2 = sample[0], sample[1]
mc_val = 1 - (y1 - y2)
MC_vals.append(mc_val)
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["MC", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(MC_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n largest MC vals
n_largest = yh.nlargest(n, 'MC')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
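# Hedged worked example (added for illustration): margin confidence rewards flat
# posteriors. For softmax rows [0.6, 0.3, 0.1] and [0.4, 0.35, 0.25] the scores
# are 1-(0.6-0.3)=0.70 and 1-(0.4-0.35)=0.95, so the more ambiguous row is queried first.
def _demo_margin_confidence(probs=None):
    probs = np.array([[0.6, 0.3, 0.1], [0.4, 0.35, 0.25]]) if probs is None else probs
    ordered = np.sort(probs, axis=1)[:, ::-1]      # each row sorted descending
    return 1 - (ordered[:, 0] - ordered[:, 1])     # -> array([0.70, 0.95])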
#######################################################
class leastConfidence(alAlgo):
"""
leastConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula LC(x)=(1-P(y*|x))*(n/(n-1))
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Least Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate LC(x) values
yh_vals = yh.iloc[:, 1:].values
LC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
lc = (1 - np.amax(sample)) * (yh_vals.shape[1] / (yh_vals.shape[1] - 1))
LC_vals.append((lc))
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["LC", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(LC_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n largest LC vals
n_largest = yh.nlargest(n, 'LC')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
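# Hedged worked example (added for illustration): least confidence scales the
# top-probability gap by n/(n-1). For [0.6, 0.3, 0.1] with 3 classes the score
# is (1-0.6)*3/2 = 0.60; for the confident row [0.9, 0.05, 0.05] it is 0.15.
def _demo_least_confidence(probs=None):
    probs = np.array([[0.6, 0.3, 0.1], [0.9, 0.05, 0.05]]) if probs is None else probs
    n_classes = probs.shape[1]
    return (1 - probs.max(axis=1)) * (n_classes / (n_classes - 1))  # -> [0.60, 0.15]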
#######################################################
class uniformSample(alAlgo):
"""
uniformSample(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Randomly samples over a uniform distribution of passed cache of data ids.
Use as a baseline to compare the performance of your active learning algorithms.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Passive")
self.predict_to_sample = False
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh=None) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Select from uniform distributions data ID's from given cache
idx = random.sample(range(0, len(cache)), n)
batch = [cache[i] for i in idx]
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("Selected samples: ")
print(idx)
print("\n")
# Increment round
self.round += 1
return batch
#######################################################
class ratioConfidence(alAlgo):
"""
ratioConfidence(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula RC(x) = P(y1*|x) / P(y2*|x), the ratio of the two highest class probabilities
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Ratio Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate RC(x) values
yh_vals = yh.iloc[:, 1:].values
RC_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
sample[::-1].sort()
y1, y2 = sample[0], sample[1]
if y2 == 0:
RC_vals.append(100)
else:
RC_vals.append(y1 / y2)
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["RC", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(RC_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n smallest RC vals
n_smallest = yh.nsmallest(n, 'RC')
batch = n_smallest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
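# Hedged worked example (added for illustration): ratio confidence queries the
# *smallest* ratios, i.e. rows whose two top probabilities are nearly tied.
# [0.6, 0.3, 0.1] scores 0.6/0.3 = 2.0; [0.45, 0.44, 0.11] scores ~1.02 and is
# therefore selected first.
def _demo_ratio_confidence(probs=None):
    probs = np.array([[0.6, 0.3, 0.1], [0.45, 0.44, 0.11]]) if probs is None else probs
    ordered = np.sort(probs, axis=1)[:, ::-1]
    return ordered[:, 0] / ordered[:, 1]  # -> array([2.0, ~1.02])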
#######################################################
class entropy(alAlgo):
"""
entropy(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Score samples by predictions through formula ent(x) = -sum(P(y|x) * log_2 P(y|x)) / log_2(n_classes)
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self):
super().__init__(algo_name="Ratio Confidence")
self.predict_to_sample = True
self.feature_set = False
self.single_output = False
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate ent(x) values
yh_vals = yh.iloc[:, 1:].values
ent_vals = []
for i in range(yh_vals.shape[0]):
sample = yh_vals[i, :]
log_probs = sample * np.log2(sample)  # multiply each probability by its base-2 log
raw_entropy = 0 - np.sum(log_probs)
normalized_entropy = raw_entropy / np.log2(len(sample))
ent_vals.append(normalized_entropy)
target_col_names = ["y" + str(i) for i in range(yh_vals.shape[1])]
yh_col_names = ["ENT", "ID"] + target_col_names
yh = pd.concat([pd.DataFrame(ent_vals), yh], axis=1)
yh.columns = yh_col_names
# Get ids of n largest ENT vals
n_largest = yh.nlargest(n, 'ENT')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
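# Hedged worked example (added for illustration): normalised entropy is 1.0 for
# a uniform posterior and shrinks as the model becomes confident, e.g. ~0.36
# for [0.9, 0.05, 0.05].
def _demo_normalized_entropy(probs=None):
    probs = np.array([[1/3, 1/3, 1/3], [0.9, 0.05, 0.05]]) if probs is None else probs
    raw = -np.sum(probs * np.log2(probs), axis=1)
    return raw / np.log2(probs.shape[1])  # -> array([1.0, ~0.36])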
#######################################################
class DAL(alAlgo):
"""
DAL(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class. Trains a binary discriminator on labeled-vs-unlabeled feature embeddings and queries the cache samples scored most likely to be unlabeled.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, input_dim=None):
super().__init__(algo_name="DAL")
self.predict_to_sample = False
self.feature_set = True
self.single_output = False
if input_dim == None:
raise ValueError("Must pass input dim as int to use DAL")
self.input_dim = input_dim
self.model = self.getBinaryClassifier()
self.opt = tf.keras.optimizers.Adam(lr=0.0001)
self.loss = tf.keras.losses.categorical_crossentropy
self.loss = tf.keras.losses.cosine_similarity
# self.loss = tf.keras.losses.kl_divergence
def getBinaryClassifier(self):
model = Sequential(name="BC")
model.add(Dense(120, activation='relu', input_dim=self.input_dim))
model.add(Dense(60, activation='relu'))
#model.add(Dense(2, activation='softmax'))
model.add(Dense(2, activation='sigmoid'))
return model
@tf.function
def grad(self, inputs, targets):
with tf.GradientTape() as tape:
loss_value = self.loss(self.model(inputs, training=True), targets)
return loss_value, tape.gradient(loss_value, self.model.trainable_variables)
@tf.function
def trainBatch(self, inputs, targets) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.grad(inputs, targets)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value
@tf.function
def predict(self, inputs):
""" Used for predicting with model but does not have labels """
yh = self.model(inputs)
return yh
def trainBinaryClassifier(self, dataset, batch_size):
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Test resetting binary classifier each round
#self.model = self.getBinaryClassifier()
# Run batches
print("Training DAL Binary Classifier")
for i in tqdm(range(50)):
for batch in range(floor(dataset.shape[0] / batch_size)):
data = dataset[batch_size * batch:batch_size * (batch + 1), :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
data = dataset[(-1) * remainder_samples:, :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
total_loss = list(chain(*total_loss))
val_avg_loss = sum(total_loss) / len(total_loss)
total_loss = []
print("DAL binary classifier loss: {}".format(val_avg_loss))
def inferBinaryClassifier(self, inputs):
yh = self.model(inputs)
return yh
def resetBinayClassifier(self):
pass
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate LC(x) values
yh_vals = yh.iloc[:, 1].values
yh_col_names = ["yh", "ID"]
yh = pd.concat([pd.DataFrame(yh_vals), pd.DataFrame(cache)], axis=1)
yh.columns = yh_col_names
# Get ids of n largest discriminator scores
n_largest = yh.nlargest(n, 'yh')
batch = n_largest["ID"].to_list()
#input("p")
#sort_yh = yh.sort_values(by=['yh'])
#plt.scatter(range(0,len(sort_yh)),sort_yh["yh"].values)
#plt.show()
#input("p")
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
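# Hedged usage sketch (added example; the engine wiring, array shapes and the
# labeled=[1,0]/unlabeled=[0,1] encoding are assumptions, not part of this
# module): DAL is driven by code that (1) stacks labeled and unlabeled
# embeddings with one-hot targets, (2) trains the discriminator, (3) scores the
# unlabeled cache and (4) asks __call__ for the next batch of ids.
def _demo_dal_round(labeled_X, unlabeled_X, cache_ids, n=10, batch_size=32):
    algo = DAL(input_dim=labeled_X.shape[1])
    targets = np.vstack([np.tile([1.0, 0.0], (len(labeled_X), 1)),
                         np.tile([0.0, 1.0], (len(unlabeled_X), 1))])
    dataset = np.hstack([np.vstack([labeled_X, unlabeled_X]), targets])
    np.random.shuffle(dataset)  # mix labeled/unlabeled rows before mini-batching
    algo.trainBinaryClassifier(dataset, batch_size)
    yh = pd.DataFrame(algo.inferBinaryClassifier(unlabeled_X).numpy())
    return algo(cache_ids, n, yh)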
#######################################################
class DALratio(alAlgo):
"""
DAL(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, input_dim=None):
super().__init__(algo_name="DALratio")
self.predict_to_sample = False
self.feature_set = True
self.single_output = False
if input_dim == None:
raise ValueError("Must pass input dim as int to use DAL")
self.input_dim = input_dim
self.model = self.getBinaryClassifier()
self.opt = tf.keras.optimizers.Adam(lr=0.0001)
self.loss = tf.keras.losses.categorical_crossentropy
def getBinaryClassifier(self):
model = Sequential(name="Binary Classifier")
model.add(Dense(128, activation='elu', input_dim=self.input_dim))
model.add(Dropout(.1))
model.add(Dense(2, activation='softmax'))
return model
def grad(self, inputs, targets):
with tf.GradientTape() as tape:
loss_value = self.loss(self.model(inputs, training=True), targets)
return loss_value, tape.gradient(loss_value, self.model.trainable_variables)
def trainBatch(self, inputs, targets) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.grad(inputs, targets)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value
def predict(self, inputs):
""" Used for predicting with model but does not have labels """
yh = self.model(inputs)
return yh
def trainBinaryClassifier(self, dataset, batch_size):
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Run batches
print("Training DAL Binary Classifier")
for i in tqdm(range(50)):
for batch in range(floor(dataset.shape[0] / batch_size)):
data = dataset[batch_size * batch:batch_size * (batch + 1), :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
data = dataset[(-1) * remainder_samples:, :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
total_loss = list(chain(*total_loss))
val_avg_loss = sum(total_loss) / len(total_loss)
total_loss = []
print("DAL binary classifier loss: {}".format(val_avg_loss))
def inferBinaryClassifier(self, inputs):
yh = self.model(inputs)
return yh
def resetBinayClassifier(self):
pass
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate LC(x) values
yh1_vals = yh.iloc[:, 0].values
yh2_vals = yh.iloc[:, 1].values
yh_vals = np.absolute(yh1_vals - yh2_vals)
yh_col_names = ["yh", "ID"]
yh = pd.concat([pd.DataFrame(yh_vals), pd.DataFrame(cache)], axis=1)
yh.columns = yh_col_names
# Get ids of the n smallest absolute differences
n_smallest = yh.nsmallest(n, 'yh')
batch = n_smallest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
#######################################################
class OC(alAlgo):
"""
OC(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, input_dim=None):
super().__init__(algo_name="OC")
self.predict_to_sample = False
self.feature_set = True
self.single_output = False
if input_dim == None:
raise ValueError("Must pass input dim as int to use DAL")
self.input_dim = input_dim
self.k = 500
self.opt = tf.keras.optimizers.RMSprop(lr=0.0001)
# self.loss = tf.keras.metrics.Mean()
self.loss = tf.keras.losses.categorical_crossentropy
self.model = self.getBinaryClassifier()
def getBinaryClassifier(self):
inputs = tf.keras.Input((self.input_dim,))
out = tf.keras.layers.Dense(self.k, activation='relu', use_bias=False, name='certificates')(inputs)
model = tf.keras.models.Model(inputs=[inputs], outputs=out, name='ONC')
return model
def grad(self, inputs):
with tf.GradientTape() as tape:
y_hat = self.model(inputs, training=True)
# compute the loss
# error = tf.math.reduce_mean(tf.math.square(y_hat))
error = self.loss(y_hat, tf.zeros(y_hat.shape) + .0001)
error = tf.cast(error, dtype=tf.dtypes.float64)
W = self.model.layers[1].get_weights()[0] # Equation 4.
W = tf.linalg.matmul(tf.transpose(W), W)
W = tf.cast(W, dtype=tf.dtypes.float64)
penalty = tf.math.square(W - tf.eye(self.k, dtype=tf.dtypes.float64)) * 10
penalty = tf.math.reduce_mean(penalty)
error = error + penalty
loss_value = self.loss(y_hat, tf.zeros(y_hat.shape) + .0001)
return loss_value, tape.gradient(error, self.model.trainable_variables)
def trainBatch(self, inputs) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.grad(inputs)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
# self.model.layers[1].get_weights()[0] = tf.clip_by_value(self.model.layers[1].get_weights()[0],-.01,.01,)
return loss_value
def predict(self, inputs):
""" Used for predicting with model but does not have labels """
yh = self.model(inputs)
return yh
def trainBinaryClassifier(self, dataset, batch_size):
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Run batches
print("Training DALOC")
for i in tqdm(range(300)):
for batch in range(floor(dataset.shape[0] / batch_size)):
X = dataset[batch_size * batch:batch_size * (batch + 1), :]
loss = self.trainBatch(X)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
X = dataset[(-1) * remainder_samples:, :]
loss = self.trainBatch(X)
total_loss.append(loss)
# val_avg_loss = sum(total_loss) / len(total_loss)
val_avg_loss = 0
print("DAL binary classifier loss: {}".format(val_avg_loss))
def inferBinaryClassifier(self, inputs):
yh = self.model(inputs)
return yh
def resetBinayClassifier(self):
pass
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate OC(x) values
yh_vals = yh.values
# Calculate epistemic uncertainty
scores = tf.math.reduce_mean(tf.math.square(yh_vals), axis=1).numpy()
yh_col_names = ["yh", "ID"]
yh = pd.concat([pd.DataFrame(scores), pd.DataFrame(cache)], axis=1)
yh.columns = yh_col_names
# Get ids
yh = yh.sort_values(by=['yh'])
# median_index = yh[yh["yh"] == yh["yh"].quantile(.95, interpolation='lower')]
# median_index = median_index.index.values[0]
# n_largest = list(random.sample(range(median_index, yh.shape[0]), n))
# n_largest = yh.iloc[n_largest,:]
n_largest = yh.iloc[-n:, :]
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
#######################################################
class AADA(alAlgo):
"""
AADA(alAlgo) Documentation:
--------------------------
Purpose
----------
Custom active learning class, inherits alAlgo class.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, input_dim=None):
super().__init__(algo_name="AADA")
self.predict_to_sample = False
self.feature_set = True
self.single_output = True
if input_dim == None:
raise ValueError("Must pass input dim as int to use AADA")
self.input_dim = input_dim
self.model = self.getBinaryClassifier()
self.opt = tf.keras.optimizers.RMSprop(lr=0.00005)
# self.loss = tf.keras.losses.categorical_crossentropy
self.loss = tf.keras.losses.mean_absolute_error
def getBinaryClassifier(self):
model = Sequential(name="AADA")
model.add(Dense(128, activation='elu', input_dim=self.input_dim))
# model.add(Dropout(.05))
model.add(Dense(1, activation='linear'))
# model.add(Dense(1, activation='linear'))
return model
def grad(self, inputs, targets):
with tf.GradientTape() as tape:
yh = self.model(inputs, training=True)
loss_value = tf.math.reduce_mean(targets * yh)
# x_source = inputs[0]
# x_target = inputs[1]
# yh_source = self.model(x_source, training=True)
# yh_target = self.model(x_target, training=True)
# loss_value = tf.math.reduce_mean(yh_source)
# loss_value = loss_value - tf.math.reduce_mean(yh_target)
# loss_value = tf.math.reduce_mean(tf.math.log(yh_source+.01))
# loss_value = -(loss_value + tf.math.reduce_mean(tf.math.log(1.01 - yh_target)))
return loss_value, tape.gradient(loss_value, self.model.trainable_variables)
def trainBatch(self, inputs, targets) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.grad(inputs, targets)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value
@tf.function
def predict(self, inputs):
""" Used for predicting with model but does not have labels """
yh = self.model(inputs)
return yh
def trainBinaryClassifier(self, dataset, batch_size):
# source_data = dataset[0]
# target_data = dataset[1]
# remainder_samples = target_data.shape[0] % batch_size
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Run batches
print("Training AADA Classifier")
for i in tqdm(range(100)):
for batch in range(floor(dataset.shape[0] / batch_size)):
data = dataset[batch_size * batch:batch_size * (batch + 1), :]
X, y = data[:, :-1], data[:, -1]
loss = self.trainBatch(X, y)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
data = dataset[(-1) * remainder_samples:, :]
X, y = data[:, :-1], data[:, -1]
loss = self.trainBatch(X, y)
total_loss.append(loss)
np.random.shuffle(dataset)
# total_loss = list(chain(*total_loss))
# val_avg_loss = sum(total_loss) / len(total_loss)
total_loss = []
print("DAL binary classifier loss: {}".format(0))
def inferBinaryClassifier(self, inputs):
yh = self.model(inputs)
return yh
def resetBinayClassifier(self):
pass
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate LC(x) values
yh_vals = yh.values
# yh_vals = (1-yh_vals)/yh_vals
yh_col_names = ["yh", "ID"]
yh = pd.concat([pd.DataFrame(yh_vals), pd.DataFrame(cache)], axis=1)
yh.columns = yh_col_names
print(yh_vals)
# Get ids of n largest LC vals
n_largest = yh.nlargest(n, 'yh')
# n_largest = yh.nsmallest(n, 'yh')
batch = n_largest["ID"].to_list()
# Log which samples were used for that round
self.sample_log[str(self.round)] = batch
print("\n")
print("Round {} selected samples: {}".format(self.round, batch))
print("\n")
# Increment round
self.round += 1
return batch
#######################################################
class DALOC(alAlgo):
"""
DALOC(alAlgo) Documentation:
--------------------------
Purpose
----------
DALOC implementation trains a binary classifier to discern between unlabeled and labeled data.
OC's are also trained on the labeled data. The binary classifier takes in all of the unlabeled data
and then outputs softmax scores for uncertainty. I then select the top 90th quantile of values and
from there select the top 'n' values based on OC scores.
Attributes
----------
predict_to_sample : bool
Determines if algo needs models prediction on cache to determine what samples from the cache to return
Methods
-------
@abc.abstractmethod
__call__(self, cache: list, n: int, yh):
Empty function that is required to be declared in custom child class. Allows for algo
to be called to pick which samples to return based on algo criteria.
"""
def __init__(self, input_dim=None):
super().__init__(algo_name="DALOC")
self.predict_to_sample = False
self.feature_set = True
self.single_output = False
if input_dim == None:
raise ValueError("Must pass input dim as int to use DAL")
self.input_dim = input_dim
self.k = 500
self.opt = tf.keras.optimizers.RMSprop(lr=0.001)
self.loss = tf.keras.losses.categorical_crossentropy
self.model = self.getBinaryClassifier()
self.OC = self.getOC()
def getBinaryClassifier(self):
model = Sequential(name="binary class")
model.add(Dense(128, activation='elu', input_dim=self.input_dim))
model.add(Dropout(.05))
model.add(Dense(2, activation='softmax'))
return model
def getOC(self):
inputs = tf.keras.Input((self.input_dim,))
out = tf.keras.layers.Dense(self.k, activation='relu', use_bias=False, name='certificates')(inputs)
model = tf.keras.models.Model(inputs=[inputs], outputs=out, name='OC')
return model
def gradOC(self, inputs):
with tf.GradientTape() as tape:
y_hat = self.OC(inputs, training=True)
# compute the loss
error = tf.math.reduce_mean(tf.math.square(y_hat))
error = tf.cast(error, dtype=tf.dtypes.float64)
W = self.OC.layers[1].get_weights()[0] # Equation 4.
W = tf.linalg.matmul(tf.transpose(W), W)
W = tf.cast(W, dtype=tf.dtypes.float64)
penalty = tf.math.square(W - tf.eye(self.k, dtype=tf.dtypes.float64)) * 10
penalty = tf.math.reduce_mean(penalty)
error = error + penalty
return error, tape.gradient(error, self.OC.trainable_variables)
def grad(self, inputs, targets):
with tf.GradientTape() as tape:
loss_value = self.loss(self.model(inputs, training=True), targets)
return loss_value, tape.gradient(loss_value, self.model.trainable_variables)
def trainBatch(self, inputs, targets) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.grad(inputs, targets)
self.opt.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value
def predict(self, inputs):
""" Used for predicting with model but does not have labels """
yh = self.model(inputs)
return yh
def trainOCBatch(self, inputs) -> float:
""" Calculates loss and gradients for batch of data and applies update to weights """
loss_value, grads = self.gradOC(inputs)
self.opt.apply_gradients(zip(grads, self.OC.trainable_variables))
return loss_value
def trainOC(self, dataset, batch_size):
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Run batches
print("Training OC")
for i in tqdm(range(300)):
for batch in range(floor(dataset.shape[0] / batch_size)):
X = dataset[batch_size * batch:batch_size * (batch + 1), :]
loss = self.trainOCBatch(X)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
X = dataset[(-1) * remainder_samples:, :]
loss = self.trainOCBatch(X)
total_loss.append(loss)
val_avg_loss = sum(total_loss) / len(total_loss)
print("OC loss: {}".format(val_avg_loss))
def trainBinaryClassifier(self, dataset, batch_size):
remainder_samples = dataset.shape[0] % batch_size # Calculate number of remainder samples from batches
total_loss = []
# Run batches
print("Training DAL Binary Classifier")
for i in tqdm(range(50)):
for batch in range(floor(dataset.shape[0] / batch_size)):
data = dataset[batch_size * batch:batch_size * (batch + 1), :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
# Run remainders
if remainder_samples > 0:
data = dataset[(-1) * remainder_samples:, :]
X, y = data[:, :-2], data[:, -2:]
loss = self.trainBatch(X, y)
total_loss.append(loss)
total_loss = list(chain(*total_loss))
val_avg_loss = sum(total_loss) / len(total_loss)
total_loss = []
print("DAL binary classifier loss: {}".format(val_avg_loss))
def inferBinaryClassifier(self, inputs):
yh = self.model(inputs)
return yh
def inferOC(self, inputs):
yh = self.OC(inputs)
return yh
def resetBinayClassifier(self):
pass
def __call__(self, cache: list, n: int, yh) -> list:
# Check if embedded cache, then cache is available for the round
if any(isinstance(i, list) for i in cache):
try:
cache = cache[self.round]
except:
raise ValueError("Active Learning Algo has iterated through each round\'s unlabled cache.")
# Check if sample size is to large for cache
if len(cache) < n:
raise ValueError("Sample size n is larger than length of round's cache")
# Calculate OC(x) values
bc_vals = yh.iloc[:, 1].values
oc_vals = yh.iloc[:, -1].values
yh_col_names = ["bc", "oc", "ID"]
yh = pd.concat([pd.DataFrame(bc_vals), pd.DataFrame(oc_vals), | pd.DataFrame(cache) | pandas.DataFrame |
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = | tm.choice(['Male', 'Female'], size=n) | pandas.util.testing.choice |
import numpy as np
import pandas as pd
import itertools
import operator
import copy
import matplotlib.pyplot as plt
import seaborn as sns
get_ipython().magic('matplotlib inline')
sns.set(style="white", color_codes=True)
# imported ARIMA from statsmodels pkg
from statsmodels.tsa.arima_model import ARIMA
# helper functions
from ...utils import print_static_rmse, print_dynamic_rmse
from ...models.ar_based.param_finder import find_lowest_pq
import pdb
def build_arima_model(ts_df, metric='aic', p_max=3, d_max=1, q_max=3,
forecast_period=2, method='mle', verbose=0):
"""
This builds a Non Seasonal ARIMA model given a Univariate time series dataframe with time
as the Index, ts_df can be a dataframe with one column only or a single array. Dont send
Multiple Columns!!! Include only that variable that is a Time Series. DO NOT include
Non-Stationary data. Make sure your Time Series is "Stationary"!! If not, this
will give spurious results, since it automatically builds a Non-Seasonal model,
you need not give it a Seasonal True/False flag.
"metric": You can give it any of the following metrics as criteria: AIC, BIC, Deviance,
Log-likelihood. Optionally, you can give it a fit method as one of the following:
{'css-mle','mle','css'}
"""
p_min = 0
d_min = 0
q_min = 0
# Initialize a DataFrame to store the results
iteration = 0
results_dict = {}
################################################################################
####### YOU MUST Absolutely set this parameter correctly as "levels". If not,
#### YOU WILL GET DIFFERENCED PREDICTIONS WHICH ARE FIENDISHLY DIFFICULT TO UNDO.
#### If you set this to levels, then you can do any order of differencing and
#### ARIMA will give you predictions on the same level as the original values.
################################################################################
pred_type = 'levels'
#########################################################################
ts_train = ts_df[:-forecast_period]
ts_test = ts_df[-forecast_period:]
if verbose == 1:
print('Data Set split into train %s and test %s for Cross Validation Purposes'
% (ts_train.shape, ts_test.shape))
#########################################################################
if ts_train.dtype == 'int64':
ts_train = ts_train.astype(float)
for d_val in range(d_min, d_max+1):
print('\nDifferencing = %d' % d_val)
results_bic = pd.DataFrame(index=['AR{}'.format(i) for i in range(p_min, p_max+1)],
columns=['MA{}'.format(i) for i in range(q_min, q_max+1)])
for p_val, q_val in itertools.product(range(p_min, p_max+1), range(q_min, q_max+1)):
if p_val == 0 and d_val == 0 and q_val == 0:
results_bic.loc['AR{}'.format(p_val), 'MA{}'.format(q_val)] = np.nan
continue
else:
try:
model = ARIMA(ts_train, order=(p_val, d_val, q_val))
results = model.fit(transparams=False, method=method)
results_bic.loc['AR{}'.format(p_val), 'MA{}'.format(q_val)] = eval('results.' + metric)
if iteration % 10 == 0:
print(' Iteration %d completed...' % iteration)
iteration += 1
if iteration >= 100:
print(' Ending Iterations at %d' % iteration)
break
except:
iteration += 1
continue
results_bic = results_bic[results_bic.columns].astype(float)
interim_d = copy.deepcopy(d_val)
interim_p, interim_q, interim_bic = find_lowest_pq(results_bic)
if verbose == 1:
fig, ax = plt.subplots(figsize=(20, 10))
ax = sns.heatmap(results_bic,
mask=results_bic.isnull(),
ax=ax,
annot=True,
fmt='.0f')
ax.set_title(metric)
results_dict[str(interim_p) + ' ' + str(interim_d) + ' ' + str(interim_q)] = interim_bic
best_bic = min(results_dict.items(), key=operator.itemgetter(1))[1]
best_pdq = min(results_dict.items(), key=operator.itemgetter(1))[0]
best_p = int(best_pdq.split(' ')[0])
best_d = int(best_pdq.split(' ')[1])
best_q = int(best_pdq.split(' ')[2])
print('\nBest model is: Non Seasonal ARIMA(%d,%d,%d), %s = %0.3f' % (best_p, best_d, best_q,metric, best_bic))
bestmodel = ARIMA(ts_train, order=(best_p, best_d, best_q))
print('#### Fitting best model for full data set now. Will take time... ######')
try:
results = bestmodel.fit(transparams=True, method=method)
except:
results = bestmodel.fit(transparams=False, method=method)
### this is needed for static forecasts ####################
y_truth = ts_train[:]
y_forecasted = results.predict(typ='levels')
concatenated = | pd.concat([y_truth, y_forecasted], axis=1, keys=['original', 'predicted']) | pandas.concat |
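# --- Hedged usage sketch (added for illustration; not part of the original source).
# It assumes a stationary, univariate series indexed by date; the column name is made
# up, and the function's return value is not shown in this excerpt, so it is simply
# returned as-is.
def _arima_usage_sketch():
    import numpy as np
    import pandas as pd
    idx = pd.date_range("2018-01-01", periods=48, freq="M")
    ts_df = pd.DataFrame({"y": np.random.randn(48)}, index=idx)
    # metric can be 'aic', 'bic', etc.; method is one of {'css-mle', 'mle', 'css'}
    return build_arima_model(ts_df, metric="aic", p_max=3, d_max=1, q_max=3,
                             forecast_period=2, method="mle", verbose=0)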
import numpy
import pandas
import spacy
in_data = pandas.read_excel('./data/source.xlsx')
array = in_data['Text'].values
nlp = spacy.load('en_core_web_sm')
# Step 2. Make our data (with the entity vocabulary as the columns)
start = True
start_len = 0
j = 0
result = []
columns = []
for y in array:
doc = nlp(y)
enha = {}
for x in doc.ents:
token = x.text
code = x.label_
if token in list(enha.keys()):
if code not in enha[token]:
enha[token].append(code)
else:
enha[token] = [code]
# h = list(enha.keys())
add_c = []
for kk in enha.keys():
for vv in enha[kk]:
appie = "[{}]_['{}']".format(kk, vv)
add_c.append(appie)
outers = [z for z in add_c if z not in columns]
columns = columns + outers
h = outers
if len(h) > 0:
start_len = start_len + len(h)
if start:
start = False
else:
for g in range(len(result)):
gle = len(h)
result[g] = numpy.concatenate((result[g], numpy.zeros(shape=(1, gle))), axis=1)
values = numpy.zeros(shape=(1, start_len))
for key in enha.keys():
for value in enha[key]:
appi = "[{}]_['{}']".format(key, value)
ix = columns.index(appi)
values[0, ix] = 1
result.append(values)
else:
result.append(numpy.zeros(shape=(1, start_len)))
result = numpy.concatenate(result, axis=0)
data = | pandas.DataFrame(data=result, columns=columns) | pandas.DataFrame |
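# --- Hedged mini-example (added for illustration; not part of the original source).
# It shows the "[token]_['LABEL']" column keys built by the loop above for a single
# document; the sentence is made up and the entity labels depend on the spaCy model.
_demo_doc = nlp("Apple opened a new office in London in 2019.")
_demo_cols = ["[{}]_['{}']".format(ent.text, ent.label_) for ent in _demo_doc.ents]
# e.g. ["[Apple]_['ORG']", "[London]_['GPE']", "[2019]_['DATE']"]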
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from tqdm import tqdm as pb
import datetime
import re
import warnings
import matplotlib.pyplot as plt
import pylab as mpl
from docx import Document
from docx.shared import Pt
from data_source import local_source
def concat_ts_codes(df):  # concatenate all TS_CODE values in df into a query-condition string
result = ''
for code in df["TS_CODE"]:
result = result + 'TS_CODE = "' + code + '" or '
result = result[:-4]
return result
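# Hedged mini-example (added for illustration; not part of the original source).
# The ticker codes below are made up:
_demo_condition = concat_ts_codes(pd.DataFrame({"TS_CODE": ["000001.SZ", "600000.SH"]}))
# _demo_condition == 'TS_CODE = "000001.SZ" or TS_CODE = "600000.SH"'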
def drop_duplicates_keep_nonnan(df,subset):  # keep the row with the fewest NaNs; currently unused
warnings.filterwarnings("ignore")
subset_values = []
df_result = pd.DataFrame(columns=df.columns)
for i in range(len(df)):
subset_value = list(df[subset].iloc[i,:])
if subset_value not in subset_values: subset_values.append(subset_value)
for subset_value in subset_values:
df_sub = df[(df[subset]==subset_value).product(axis=1)==1]
df_sub["nan_count"] = 0
df_sub.loc[:,"nan_count"] = df_sub.isnull().sum(axis=1)
df_sub.sort_values(by='nan_count',ascending=True, inplace=True)
df_sub = pd.DataFrame(df_sub.iloc[0,:]).T
df_result = pd.concat([df_result, df_sub],axis=0)
warnings.filterwarnings("default")
return df_result
#tester = pd.DataFrame([[1,1,5,5,5],[1,1,5,np.nan,np.nan],[2,2,5,5,5],[2,2,np.nan,5,5],[2,1,np.nan,np.nan,np.nan]],columns=['a','b','c','d','e'])
#tester2 = drop_duplicates_keep_nonnan(df=tester, subset=['a','b'])
def Find_Comparibles(ts_code, df_ind):  # select comparable companies by total-market-value difference (could be improved); the input df needs END_DATE, INDUSTRY and TOTAL_MV columns
stocks_used = df_ind.copy()
stocks_used["END_DATE"] = stocks_used["END_DATE"].astype(int)
last_end_date = max(stocks_used["END_DATE"])
stocks_used = stocks_used[stocks_used["END_DATE"]==last_end_date]
stocks_used["TOTAL_MV_diff"] = abs( stocks_used["TOTAL_MV"] - stocks_used.loc[stocks_used["TS_CODE"]==ts_code, "TOTAL_MV"].iloc[0] )
stocks_used.sort_values(by="TOTAL_MV_diff", ascending=True,inplace=True)
stocks_used = stocks_used[1:3]
return list(stocks_used["TS_CODE"])
def RatioComparation_Plotter(df,var_name, year=5):
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1,1,1)
ax.set_title("{var_name}趋势图".format(var_name=var_name))
ax.set_xlabel("年份",labelpad=0, position=(0.5,1))
ax.set_ylabel("{var_name}".format(var_name=var_name),labelpad=0, position=(1,0.5))
for stock in df["TS_CODE"].unique():
x = df.loc[df["TS_CODE"]==stock,"END_DATE_year"].iloc[(-1*year):]
y = df.loc[df["TS_CODE"]==stock, var_name].iloc[(-1*year):]
ax.plot(x,y,linewidth='1',label="{stock}".format(stock=stock))
ax.legend(loc="upper right",bbox_to_anchor=(1.4,1),shadow=True)
plt.show()
def FSA_Initializer(ts_code):
basic = local_source.get_stock_list(condition='TS_CODE = '+'"'+ts_code+'"')
ind = basic["INDUSTRY"].iloc[0]
stocks_ind = local_source.get_stock_list(condition='INDUSTRY = '+'"'+ind+'"')
ts_codes_ind = concat_ts_codes(stocks_ind)
quotations_monthly_ind = local_source.get_quotations_monthly(cols='TRADE_DATE,TS_CODE,CLOSE',condition=ts_codes_ind).sort_values(by="TRADE_DATE", ascending=True)
quotations_monthly_ind.rename(columns={'TRADE_DATE':'END_DATE'}, inplace = True)
stock_indicators_daily_ind = local_source.get_stock_indicators_daily(cols='TRADE_DATE,TS_CODE,TOTAL_SHARE',condition=ts_codes_ind).sort_values(by="TRADE_DATE", ascending=True)
stock_indicators_daily_ind.rename(columns={'TRADE_DATE':'END_DATE'}, inplace = True)
financial_indicators_ind = local_source.get_financial_indicators(condition=ts_codes_ind).sort_values(by="END_DATE", ascending=True)
stocks_ind = pd.merge(financial_indicators_ind,stocks_ind, on=['TS_CODE'], how="left")
stocks_ind = pd.merge(stocks_ind, quotations_monthly_ind, on=['TS_CODE','END_DATE'], how="left")
stocks_ind = | pd.merge(stocks_ind, stock_indicators_daily_ind, on=['TS_CODE','END_DATE'], how="left") | pandas.merge |
from IMLearn.utils import split_train_test
from IMLearn.learners.regressors import LinearRegression
from IMLearn.metrics.loss_functions import mean_square_error
from typing import NoReturn
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
import plotly.io as pio
pio.templates.default = "simple_white"
# columns by type:
ZERO_AND_ABOVE = ["bathrooms", "floors", "sqft_above", "sqft_basement",
"yr_renovated"]
ONLY_POSITIVE = ["price", "sqft_living", "sqft_lot", "floors", "yr_built",
"sqft_living15", "sqft_lot15"]
def load_data(filename: str):
"""
Load house prices dataset and preprocess data.
Parameters
----------
filename: str
Path to house prices dataset
Returns
-------
Design matrix and response vector (prices) - either as a single
DataFrame or a Tuple[DataFrame, Series]
"""
# raise NotImplementedError()
full_data = | pd.read_csv(filename) | pandas.read_csv |
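# --- Hedged sketch (added for illustration; not part of the original source).
# One plausible way the ZERO_AND_ABOVE / ONLY_POSITIVE lists above could be used to
# drop invalid rows; the actual preprocessing in load_data is not shown in this excerpt.
def _drop_invalid_rows(df: pd.DataFrame) -> pd.DataFrame:
    df = df.dropna()
    for col in ZERO_AND_ABOVE:
        df = df[df[col] >= 0]   # these columns may be zero but not negative
    for col in ONLY_POSITIVE:
        df = df[df[col] > 0]    # these columns must be strictly positive
    return df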
import html2text
import requests
import pandas as pd
import os
from Property import Property
class DataSource:
def __init__(self, region='Auckland', district='Auckland-City', suburb='Parnell'):
self.region = region.lower()
self.district = district.lower()
self.suburb = suburb.lower()
self.Property = Property()
# Public method to request page content and convert it to plain text
def page2text(self, page_num = 1):
"""
Convert html to text
"""
base_url = 'https://www.trademe.co.nz/a/property/residential/rent'
url = base_url + '/' + self.region + '/' + self.district + '/' + self.suburb
res = requests.get(url)
res.encoding = 'utf-8'
content = html2text.html2text(res.text)
return content
# Public method to convert plain text to pandas dataframe
def text2df(self, content):
apts_map = self.text2apts_map(content)
df = | pd.DataFrame(apts_map) | pandas.DataFrame |
import os
import pandas as pd
import re
from io import BytesIO
from urllib.request import urlopen
from zipfile import ZipFile
from requests_html import HTMLSession
def get_fia_data(force: bool = False):
if not os.path.exists('data/fia'):
os.makedirs('data/fia')
html_list = ["https://www.fia.com/documents/season/season-2020-1059",
"https://www.fia.com/documents/season/season-2019-971"]
races = | pd.read_csv('data/ergast/races.csv') | pandas.read_csv |
import os
import spotipy
import spotipy.util as util
import pandas as pd
def load_environment():
from dotenv import load_dotenv
load_dotenv()
username = os.getenv("USR")
client_id = os.getenv("ID")
client_secret = os.getenv("SECRET")
redirect_uri = os.getenv("URI")
return username, client_id, client_secret, redirect_uri
def get_audio_features(infile, outfile, username, client_id, client_secret, redirect_uri):
scope = 'user-read-private user-read-playback-state user-modify-playback-state'
# Erase catche and prompt for user permission
try:
token = util.prompt_for_user_token(username,
scope,
client_id=client_id,
client_secret=client_secret,
redirect_uri=redirect_uri)
except Exception:
os.remove(f".cache-{username}")
token = util.prompt_for_user_token(username, scope)
# Create spotify object
sp = spotipy.Spotify(auth=token)
user = sp.current_user()
displayName = user['display_name']
print(">>> Hello", displayName)
data = | pd.read_csv(infile) | pandas.read_csv |
import warnings
import anndata
import numpy as np
from packaging import version
import pandas as pd
import scipy as sp
import traceback
from scipy import sparse
from sklearn.preprocessing import StandardScaler
import igraph as ig
import leidenalg
import time
from sklearn.decomposition import PCA
import os
import gc
from glob import glob
import scanpy as sc
import scanpy.external as sce
import samalg.utilities as ut
import scipy as sp
from samalg import SAM
import backend.common.compute.diffexp_generic as diffexp_generic
from flask import jsonify, current_app, session
from backend.common.colors import convert_anndata_category_colors_to_cxg_category_colors
from backend.common.constants import Axis
from backend.server.common.corpora import corpora_get_props_from_anndata
from backend.common.errors import DatasetAccessError
from anndata import AnnData
from backend.server.data_common.data_adaptor import DataAdaptor
from backend.common.fbs.matrix import encode_matrix_fbs
from functools import partial
import backend.server.common.rest as common_rest
import json
from backend.common.utils.utils import jsonify_numpy
import signal
import pickle
import base64
from hashlib import blake2b
from functools import wraps
from os.path import exists
from numba import njit, prange
from numba.core import types
from numba.typed import Dict
from sklearn.utils import check_array, check_random_state, sparsefuncs as sf
from sklearn.utils.validation import _check_psd_eigenvalues
from sklearn.utils.extmath import svd_flip
import ray, threading
import psutil
global process_count
process_count = 0
anndata_version = version.parse(str(anndata.__version__)).release
def _init_arpack_v0(size, random_state):
random_state = check_random_state(random_state)
v0 = random_state.uniform(-1, 1, size)
return v0
def kpca(XL,npcs=150,seed=0,which='LA'):
random_init = _init_arpack_v0(XL.shape[1],seed)
w, u = sp.sparse.linalg.eigsh(XL, which=which, k=npcs, v0=random_init)
u, _ = svd_flip(u,np.zeros_like(u).T)
indices = w.argsort()[::-1]
w = w[indices]
u = u[:, indices]*w**0.5
w= _check_psd_eigenvalues(w,enable_warnings=False)
return u
def kernel_svd(K, k=100, seed=0):
K = check_array(K, accept_sparse=['csr', 'csc'])
K=(K+K.T)/2
H = get_centering_operator(K)
XL = get_linear_operator((H,K,H))
return kpca(XL,npcs=k,seed=seed)
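# Hedged usage sketch (added for illustration; not part of the original source).
# kernel_svd symmetrizes K, double-centers it as H K H through lazy LinearOperators,
# and returns the top-k kernel-PCA embedding (columns scaled by sqrt(eigenvalue)).
# The sizes below are illustrative only.
def _kernel_svd_demo():
    A = np.random.rand(200, 30)
    K = A @ A.T                      # a small dense PSD kernel matrix
    embedding = kernel_svd(K, k=10)  # -> array of shape (200, 10)
    return embedding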
def get_centering_operator(X):
ones = np.ones(X.shape[0])[None, :].dot
onesT = np.ones(X.shape[0])[:, None].dot
O = sp.sparse.diags(np.ones(X.shape[0])).tocsr()
def p(x):
return O.dot(x) - onesT(ones(x))/X.shape[0]
H = sp.sparse.linalg.LinearOperator(
matvec=p,
dtype=X.dtype,
matmat=p,
shape=(X.shape[0],X.shape[0]),
rmatvec=p,
rmatmat=p,
)
return H
def get_linear_operator(matrices):
def p(x):
v = matrices[-1].dot(x)
for m in matrices[::-1][1:]:
v = m.dot(v)
return v
def pt(x):
v = matrices[0].T.dot(x)
for m in matrices[1:]:
v = m.T.dot(v)
return v
H = sp.sparse.linalg.LinearOperator(
matvec=p,
dtype=matrices[0].dtype,
matmat=p,
shape=(matrices[0].shape[0],matrices[-1].shape[1]),
rmatvec=pt,
rmatmat=pt,
)
return H
def sparse_knn(D, k):
D1 = D.tocoo()
idr = np.argsort(D1.row)
D1.row[:] = D1.row[idr]
D1.col[:] = D1.col[idr]
D1.data[:] = D1.data[idr]
_, ind = np.unique(D1.row, return_index=True)
ind = np.append(ind, D1.data.size)
for i in range(ind.size - 1):
idx = np.argsort(D1.data[ind[i] : ind[i + 1]])# - mu[D1.col[ind[i] : ind[i+1]]])
if idx.size > k:
idx = idx[:-k] if k > 0 else idx
D1.data[np.arange(ind[i], ind[i + 1])[idx]] = 0
D1.eliminate_zeros()
return D1
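# Hedged sketch (added for illustration; not part of the original source).
# sparse_knn keeps, for each row of a sparse matrix, only the k largest stored values
# and zeroes out (then eliminates) the rest.
def _sparse_knn_demo():
    D = sp.sparse.random(50, 50, density=0.2, format="csr", random_state=0)
    D_k = sparse_knn(D, k=5)   # at most 5 stored entries remain per row
    return D_k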
def mima(X1):
x,y = X1.nonzero()
data = X1.data
mi = X1.min(0).A.flatten()
ma = X1.max(0).A.flatten()
X1.data[:] = (data - mi[y])/(ma[y]-mi[y])
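# Hedged sketch (added for illustration; not part of the original source).
# mima() rescales the stored (nonzero) values of each column in place to that
# column's [min, max] range (implicit zeros included), so values end up in [0, 1].
def _mima_demo():
    X = sp.sparse.random(30, 4, density=0.4, format="csr", random_state=2)
    mima(X)
    return X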
def desktop_mode_only(f):
@wraps(f)
def decorated(*args, **kwargs):
if current_app.hosted_mode:
return jsonify({'message' : 'Feature only available in desktop mode.'}), 401
return f(*args, **kwargs)
return decorated
def auth0_token_required(f):
@wraps(f)
def decorated(*args, **kwargs):
token = 'excxg_profile' in session
# return 401 if token is not passed
if not token and current_app.hosted_mode:
return jsonify({'message' : 'Authorization missing.'}), 401
return f(*args, **kwargs)
return decorated
def anndata_version_is_pre_070():
major = anndata_version[0]
minor = anndata_version[1] if len(anndata_version) > 1 else 0
return major == 0 and minor < 7
def _callback_fn(res,ws,cfn,data,post_processing,tstart,pid):
if post_processing is not None:
res = post_processing(res)
d = {"response": res,"cfn": cfn, "fail": False}
d.update(data)
try:
ws.send(jsonify_numpy(d))
except Exception as e:
traceback.print_exception(type(e), e, e.__traceback__)
print("Process count:",pid,"Time elsapsed:",time.time()-tstart,"seconds")
def _multiprocessing_wrapper(da,ws,fn,cfn,data,post_processing,*args):
shm,shm_csc = da.shm_layers_csr,da.shm_layers_csc
global process_count
process_count = process_count + 1
_new_callback_fn = partial(_callback_fn,ws=ws,cfn=cfn,data=data,post_processing=post_processing,tstart=time.time(),pid=process_count)
_new_error_fn = partial(_error_callback,ws=ws, cfn=cfn)
args+=(shm,shm_csc)
if ray.is_initialized():
def _ray_getter(i):
try:
_new_callback_fn(ray.get(i))
except Exception as e:
_new_error_fn(e)
thread = threading.Thread(target=_ray_getter,args=(fn.remote(*args),))
thread.start()
else:
try:
_new_callback_fn(fn._function(*args))
except Exception as e:
_new_error_fn(e)
def _error_callback(e, ws, cfn):
ws.send(jsonify_numpy({"fail": True, "cfn": cfn}))
traceback.print_exception(type(e), e, e.__traceback__)
def sparse_scaler(X,scale=False, mode="OBS", mu=None, std=None):
if scale:
x,y = X.nonzero()
if mode == "OBS":
s = std[y]
s[s==0]=1
X.data[:] = (X.data - mu[y]) / s
X.data[X.data>10]=10
else:
s = std[x]
s[s==0]=1
X.data[:] = (X.data - mu[x]) / s
X.data[X.data>10]=10
X.data[X.data<0]=0
def _sp_scaler(X, mu):
_,y = X.nonzero()
X.data[:] = X.data - mu[y]
X.data[X.data>10]=10
X.data[X.data<0]=0
X.eliminate_zeros()
@ray.remote
def compute_diffexp_ttest(layer,tMean,tMeanSq,obs_mask_A,obs_mask_B,fname, multiplex, userID, scale, tMeanObs, tMeanSqObs,shm,shm_csc):
iA = np.where(obs_mask_A)[0]
iB = np.where(obs_mask_B)[0]
niA = np.where(np.invert(np.in1d(np.arange(obs_mask_A.size),iA)))[0]
niB = np.where(np.invert(np.in1d(np.arange(obs_mask_A.size),iB)))[0]
nA = iA.size
nB = iB.size
mode = userID.split("/")[-1].split("\\")[-1]
CUTOFF = 60000
mu = tMeanObs
std = (tMeanSqObs**2 - mu**2)
std[std<0]=0
std=std**0.5
if nA + nB == obs_mask_A.size:
if nA < nB:
if (nA < CUTOFF):
XI = _read_shmem(shm,shm_csc,layer,format="csr",mode=mode)
XS = XI[iA]
sparse_scaler(XS,scale=scale,mode=mode,mu=mu,std=std)
n = XI.shape[0]
meanA,vA = sf.mean_variance_axis(XS,axis=0)
meanAsq = vA-meanA**2
meanAsq[meanAsq<0]=0
else:
XI = _read_shmem(shm, shm_csc, layer, format="csc", mode=mode)
n = XI.shape[0]
meanA,meanAsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iA,niA,
mu=mu,std=std,mode=mode,scale=scale)
meanA/=nA
meanAsq/=nA
vA = meanAsq - meanA**2
vA[vA<0]=0
meanB = (tMean*n - meanA*nA) / nB
meanBsq = (tMeanSq*n - meanAsq*nA) / nB
vB = meanBsq - meanB**2
else:
if (nB < CUTOFF):
XI = _read_shmem(shm, shm_csc, layer, format="csr", mode=mode)
XS = XI[iB]
sparse_scaler(XS,scale=scale,mode=mode,mu=mu,std=std)
n = XI.shape[0]
meanB,vB = sf.mean_variance_axis(XS,axis=0)
meanBsq = vB-meanB**2
meanBsq[meanBsq<0]=0
else:
XI = _read_shmem(shm, shm_csc, layer, format="csc", mode=mode)
n = XI.shape[0]
meanB,meanBsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iB,niB,
mu=mu,std=std,mode=mode,scale=scale)
meanB/=nB
meanBsq/=nB
vB = meanBsq - meanB**2
vB[vB<0]=0
meanA = (tMean*n - meanB*nB) / nA
meanAsq = (tMeanSq*n - meanBsq*nB) / nA
vA = meanAsq - meanA**2
else:
if (nA < CUTOFF):
XI = _read_shmem(shm, shm_csc, layer, format="csr", mode=mode)
XS = XI[iA]
sparse_scaler(XS,scale=scale,mode=mode,mu=mu,std=std)
n = XI.shape[0]
meanA,vA = sf.mean_variance_axis(XS,axis=0)
else:
XI = _read_shmem(shm, shm_csc, layer, format="csc", mode=mode)
n = XI.shape[0]
meanA,meanAsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iA,niA,
mu=mu,std=std,mode=mode,scale=scale)
meanA/=nA
meanAsq/=nA
vA = meanAsq - meanA**2
vA[vA<0]=0
if (nB < CUTOFF):
XI = _read_shmem(shm, shm_csc, layer, format="csr", mode=mode)
XS = XI[iB]
sparse_scaler(XS,scale=scale,mode=mode,mu=mu,std=std)
n = XI.shape[0]
meanB,vB = sf.mean_variance_axis(XS,axis=0)
else:
XI = _read_shmem(shm, shm_csc, layer, format="csc", mode=mode)
n = XI.shape[0]
meanB,meanBsq = _partial_summer(XI.data,XI.indices,XI.indptr,XI.shape[1],iB,niB,
mu=mu,std=std,mode=mode,scale=scale)
meanB/=nB
meanBsq/=nB
vB = meanBsq - meanB**2
vB[vB<0]=0
res = diffexp_generic.diffexp_ttest(meanA,vA,nA,meanB,vB,nB)
fname2 = fname.split("_output.p")[0]+"_sg.p"
if multiplex:
pickle_dumper(res['positive'],fname)
pickle_dumper(list(np.arange(150)),fname2)
else:
pickle_dumper(res['positive'],fname)
pickle_dumper(res['negative'],fname.replace('Pop1 high','Pop2 high'))
pickle_dumper(list(np.arange(150)),fname2)
pickle_dumper(list(np.arange(150)),fname2.replace('Pop1 high','Pop2 high'))
m = {}
for k in res.keys():
m[k] = res[k][:150]
return m
def pickle_loader(fn):
with open(fn,"rb") as f:
x = pickle.load(f)
return x
@ray.remote
def save_data(AnnDataDict,labelNames,cids,currentLayout,obs_mask,userID,ihm, shm, shm_csc):
#direc
fnames = glob(f"{userID}/emb/*.p")
name = currentLayout.split(';')[-1]
embs = {}
nnms = {}
params={}
for f in fnames:
n = f.split('/')[-1].split('\\')[-1][:-2]
if name == n.split(';')[-1] or (';;' not in currentLayout and ';;' not in n):
if exists(f) and exists(f"{userID}/nnm/{n}.p") and exists(f"{userID}/params/{n}.p"):
embs[n] = pickle_loader(f)
nnms[n] = pickle_loader(f"{userID}/nnm/{n}.p")
params[n] = pickle_loader(f"{userID}/params/{n}.p")
elif exists(f):
embs[n] = pickle_loader(f)
X = embs[currentLayout]
f = np.isnan(X).sum(1)==0
filt = np.logical_and(f,obs_mask)
mode = userID.split("/")[-1].split("\\")[-1]
X = _read_shmem(shm,shm_csc,"X",format="csr",mode=mode)
v = pickle_loader(f"{userID}/var/name_0.p")
adata = AnnData(X = X[filt])
adata.var_names = pd.Index(v)
adata.obs_names = | pd.Index(cids[filt]) | pandas.Index |
# -*- coding: utf-8 -*-
"""Datareader for cell testers and potentiostats.
This module is used for loading data and databases created by different cell
testers. Currently it only accepts arbin-type .res files (MS Access format) as
raw data files, but we intend to implement more types soon. It also creates
processed files in the hdf5-format.
Example:
>>> d = CellpyData()
>>> d.loadcell(raw_files=["file1.res", "file2.res"])  # loads and merges the runs
>>> voltage_curves = d.get_cap()
>>> d.save("mytest.hdf")
"""
import os
from pathlib import Path
import logging
import sys
import collections
import warnings
import csv
import itertools
import time
import copy
import numpy as np
import pandas as pd
from pandas.errors import PerformanceWarning
from scipy import interpolate
from cellpy.parameters import prms
from cellpy.parameters.legacy import internal_settings as old_settings
from cellpy.exceptions import WrongFileVersion, DeprecatedFeature, NullData
from cellpy.parameters.internal_settings import (
get_headers_summary,
get_cellpy_units,
get_headers_normal,
get_headers_step_table,
ATTRS_CELLPYFILE,
ATTRS_DATASET,
ATTRS_DATASET_DEEP,
ATTRS_CELLPYDATA,
)
from cellpy.readers.core import (
FileID,
Cell,
CELLPY_FILE_VERSION,
MINIMUM_CELLPY_FILE_VERSION,
xldate_as_datetime,
interpolate_y_on_x,
identify_last_data_point,
)
HEADERS_NORMAL = get_headers_normal()
HEADERS_SUMMARY = get_headers_summary()
HEADERS_STEP_TABLE = get_headers_step_table()
# TODO: @jepe - performance warnings - mixed types within cols (pytables)
performance_warning_level = "ignore" # "ignore", "error"
warnings.filterwarnings(
performance_warning_level, category=pd.io.pytables.PerformanceWarning
)
pd.set_option("mode.chained_assignment", None) # "raise", "warn", None
module_logger = logging.getLogger(__name__)
class CellpyData(object):
"""Main class for working and storing data.
This class is the main work-horse for cellpy where all the functions for
reading, selecting, and tweaking your data are located. It also contains the
header definitions, both for the cellpy hdf5 format, and for the various
cell-tester file-formats that can be read. The class can contain
several cell-tests and each test is stored in a list. If you see what I mean...
Attributes:
cells (list): list of DataSet objects.
"""
def __str__(self):
txt = "<CellpyData>\n"
if self.name:
txt += f"name: {self.name}\n"
if self.table_names:
txt += f"table_names: {self.table_names}\n"
if self.tester:
txt += f"tester: {self.tester}\n"
if self.cells:
txt += "datasets: [ ->\n"
for i, d in enumerate(self.cells):
txt += f" ({i})\n"
for t in str(d).split("\n"):
txt += " "
txt += t
txt += "\n"
txt += "\n"
txt += "]"
else:
txt += "datasets: []"
txt += "\n"
return txt
def __bool__(self):
if self.cells:
return True
else:
return False
def __init__(
self,
filenames=None,
selected_scans=None,
profile=False,
filestatuschecker=None, # "modified"
fetch_one_liners=False,
tester=None,
initialize=False,
):
"""CellpyData object
Args:
filenames: list of files to load.
selected_scans:
profile: experimental feature.
filestatuschecker: property to compare cellpy and raw-files;
default read from prms-file.
fetch_one_liners: experimental feature.
tester: instrument used (e.g. "arbin") (checks prms-file as
default).
initialize: create a dummy (empty) dataset; defaults to False.
"""
if tester is None:
self.tester = prms.Instruments.tester
else:
self.tester = tester
self.loader = None # this will be set in the function set_instrument
self.logger = logging.getLogger(__name__)
self.logger.debug("created CellpyData instance")
self.name = None
self.profile = profile
self.minimum_selection = {}
if filestatuschecker is None:
self.filestatuschecker = prms.Reader.filestatuschecker
else:
self.filestatuschecker = filestatuschecker
self.forced_errors = 0
self.summary_exists = False
if not filenames:
self.file_names = []
else:
self.file_names = filenames
if not self._is_listtype(self.file_names):
self.file_names = [self.file_names]
if not selected_scans:
self.selected_scans = []
else:
self.selected_scans = selected_scans
if not self._is_listtype(self.selected_scans):
self.selected_scans = [self.selected_scans]
self.cells = []
self.status_datasets = []
self.selected_cell_number = 0
self.number_of_datasets = 0
self.capacity_modifiers = ["reset"]
self.list_of_step_types = [
"charge",
"discharge",
"cv_charge",
"cv_discharge",
"taper_charge",
"taper_discharge",
"charge_cv",
"discharge_cv",
"ocvrlx_up",
"ocvrlx_down",
"ir",
"rest",
"not_known",
]
# - options
self.force_step_table_creation = prms.Reader.force_step_table_creation
self.force_all = prms.Reader.force_all
self.sep = prms.Reader.sep
self._cycle_mode = prms.Reader.cycle_mode
# self.max_res_filesize = prms.Reader.max_res_filesize
self.load_only_summary = prms.Reader.load_only_summary
self.select_minimal = prms.Reader.select_minimal
# self.chunk_size = prms.Reader.chunk_size # 100000
# self.max_chunks = prms.Reader.max_chunks
# self.last_chunk = prms.Reader.last_chunk
self.limit_loaded_cycles = prms.Reader.limit_loaded_cycles
# self.load_until_error = prms.Reader.load_until_error
self.ensure_step_table = prms.Reader.ensure_step_table
self.daniel_number = prms.Reader.daniel_number
# self.raw_datadir = prms.Reader.raw_datadir
self.raw_datadir = prms.Paths.rawdatadir
# self.cellpy_datadir = prms.Reader.cellpy_datadir
self.cellpy_datadir = prms.Paths.cellpydatadir
# search in prm-file for res and hdf5 dirs in loadcell:
self.auto_dirs = prms.Reader.auto_dirs
# - headers and instruments
self.headers_normal = get_headers_normal()
self.headers_summary = get_headers_summary()
self.headers_step_table = get_headers_step_table()
self.table_names = None # dictionary defined in set_instruments
self.set_instrument()
# - units used by cellpy
self.cellpy_units = get_cellpy_units()
if initialize:
self.initialize()
def initialize(self):
self.logger.debug("Initializing...")
self.cells.append(Cell())
@property
def cell(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
cell = self.cells[self.selected_cell_number]
return cell
@cell.setter
def cell(self, new_cell):
self.cells[self.selected_cell_number] = new_cell
@property
def dataset(self):
"""returns the DataSet instance"""
# could insert a try-except thingy here...
warnings.warn(
"The .dataset property is deprecated, please use .cell instead.",
DeprecationWarning,
)
cell = self.cells[self.selected_cell_number]
return cell
@property
def empty(self):
"""gives False if the CellpyData object is empty (or un-functional)"""
return not self.check()
@classmethod
def vacant(cls, cell=None):
"""Create a CellpyData instance.
Args:
cell (CellpyData instance): the attributes from the cell will be copied
to the new Cellpydata instance.
Returns:
CellpyData instance.
"""
new_cell = cls(initialize=True)
if cell is not None:
for attr in ATTRS_DATASET:
value = getattr(cell.cell, attr)
setattr(new_cell.cell, attr, value)
for attr in ATTRS_DATASET_DEEP:
value = getattr(cell.cell, attr)
setattr(new_cell.cell, attr, copy.deepcopy(value))
for attr in ATTRS_CELLPYDATA:
value = getattr(cell, attr)
setattr(new_cell, attr, value)
return new_cell
def split(self, cycle=None):
"""Split experiment (CellpyData object) into two sub-experiments. if cycle
is not give, it will split on the median cycle number"""
if isinstance(cycle, int) or cycle is None:
return self.split_many(base_cycles=cycle)
def drop_from(self, cycle=None):
"""Select first part of experiment (CellpyData object) up to cycle number
'cycle'"""
if isinstance(cycle, int):
c1, c2 = self.split_many(base_cycles=cycle)
return c1
def drop_to(self, cycle=None):
"""Select last part of experiment (CellpyData object) from cycle number
'cycle'"""
if isinstance(cycle, int):
c1, c2 = self.split_many(base_cycles=cycle)
return c2
def drop_edges(self, start, end):
"""Select middle part of experiment (CellpyData object) from cycle
number 'start' to 'end"""
if end < start:
raise ValueError("end cannot be larger than start")
if end == start:
raise ValueError("end cannot be the same as start")
return self.split_many([start, end])[1]
def split_many(self, base_cycles=None):
"""Split experiment (CellpyData object) into several sub-experiments.
Args:
base_cycles (int or list of ints): cycle(s) to do the split on.
Returns:
List of CellpyData objects
"""
h_summary_index = HEADERS_SUMMARY.cycle_index
h_raw_index = HEADERS_NORMAL.cycle_index_txt
h_step_cycle = HEADERS_STEP_TABLE.cycle
if base_cycles is None:
all_cycles = self.get_cycle_numbers()
base_cycles = int(np.median(all_cycles))
cells = list()
if not isinstance(base_cycles, (list, tuple)):
base_cycles = [base_cycles]
dataset = self.cell
steptable = dataset.steps
data = dataset.raw
summary = dataset.summary
# In case Cycle_Index has been promoted to index [#index]
if h_summary_index not in summary.columns:
summary = summary.reset_index(drop=False)
for b_cycle in base_cycles:
steptable0, steptable = [
steptable[steptable[h_step_cycle] < b_cycle],
steptable[steptable[h_step_cycle] >= b_cycle],
]
data0, data = [
data[data[h_raw_index] < b_cycle],
data[data[h_raw_index] >= b_cycle],
]
summary0, summary = [
summary[summary[h_summary_index] < b_cycle],
summary[summary[h_summary_index] >= b_cycle],
]
new_cell = CellpyData.vacant(cell=self)
old_cell = CellpyData.vacant(cell=self)
new_cell.cell.steps = steptable0
new_cell.cell.raw = data0
new_cell.cell.summary = summary0
new_cell.cell = identify_last_data_point(new_cell.cell)
old_cell.cell.steps = steptable
old_cell.cell.raw = data
old_cell.cell.summary = summary
old_cell.cell = identify_last_data_point(old_cell.cell)
cells.append(new_cell)
cells.append(old_cell)
return cells
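# Hedged usage sketch (added for illustration; not part of the original source),
# in the doctest style used elsewhere in this class:
# >>> first_part, last_part = cell_data.split(cycle=50)
# >>> sub_experiments = cell_data.split_many(base_cycles=[20, 100])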
# TODO: @jepe - merge the _set_xxinstrument methods into one method
def set_instrument(self, instrument=None):
"""Set the instrument (i.e. tell cellpy the file-type you use).
Args:
instrument: (str) in ["arbin", "bio-logic-csv", "bio-logic-bin",...]
Sets the instrument used for obtaining the data (i.e. sets fileformat)
"""
if instrument is None:
instrument = self.tester
self.logger.debug(f"Setting instrument: {instrument}")
if instrument in ["arbin", "arbin_res"]:
from cellpy.readers.instruments.arbin import ArbinLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "arbin"
elif instrument == "arbin_sql":
warnings.warn(f"{instrument} not implemented yet")
self.tester = "arbin"
elif instrument in ["pec", "pec_csv"]:
warnings.warn("Experimental! Not ready for production!")
from cellpy.readers.instruments.pec import PECLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "pec"
elif instrument in ["biologics", "biologics_mpr"]:
from cellpy.readers.instruments.biologics_mpr import MprLoader as RawLoader
warnings.warn("Experimental! Not ready for production!")
self._set_instrument(RawLoader)
self.tester = "biologic"
elif instrument == "custom":
from cellpy.readers.instruments.custom import CustomLoader as RawLoader
self._set_instrument(RawLoader)
self.tester = "custom"
else:
raise Exception(f"option does not exist: '{instrument}'")
def _set_instrument(self, loader_class):
self.loader_class = loader_class()
# ----- get information --------------------------
self.raw_units = self.loader_class.get_raw_units()
self.raw_limits = self.loader_class.get_raw_limits()
# ----- create the loader ------------------------
self.loader = self.loader_class.loader
def _create_logger(self):
from cellpy import log
self.logger = logging.getLogger(__name__)
log.setup_logging(default_level="DEBUG")
def set_cycle_mode(self, cycle_mode):
"""set the cycle mode"""
# TODO: remove this
warnings.warn(
"deprecated - use it as a property instead, e.g.: cycle_mode = 'anode'",
DeprecationWarning,
)
self._cycle_mode = cycle_mode
@property
def cycle_mode(self):
return self._cycle_mode
@cycle_mode.setter
def cycle_mode(self, cycle_mode):
self.logger.debug(f"-> cycle_mode: {cycle_mode}")
self._cycle_mode = cycle_mode
def set_raw_datadir(self, directory=None):
"""Set the directory containing .res-files.
Used for setting directory for looking for res-files.@
A valid directory name is required.
Args:
directory (str): path to res-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/Arbindata"
>>> d.set_raw_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info(directory)
self.logger.info("Directory does not exist")
return
self.raw_datadir = directory
def set_cellpy_datadir(self, directory=None):
"""Set the directory containing .hdf5-files.
Used for setting directory for looking for hdf5-files.
A valid directory name is required.
Args:
directory (str): path to hdf5-directory
Example:
>>> d = CellpyData()
>>> directory = "MyData/HDF5"
>>> d.set_cellpy_datadir(directory)
"""
if directory is None:
self.logger.info("No directory name given")
return
if not os.path.isdir(directory):
self.logger.info("Directory does not exist")
return
self.cellpy_datadir = directory
def check_file_ids(self, rawfiles, cellpyfile, detailed=False):
"""Check the stats for the files (raw-data and cellpy hdf5).
This function checks if the hdf5 file and the res-files have the same
timestamps etc. to find out whether the .res files need to be loaded again.
Args:
cellpyfile (str): filename of the cellpy hdf5-file.
rawfiles (list of str): name(s) of raw-data file(s).
detailed (bool): return a dict containing True or False for each
individual raw-file
Returns:
If detailed is False:
False if the raw files are newer than the cellpy hdf5-file
(update needed).
True if update is not needed.
If detailed is True it returns a dict containing True or False for each
individual raw-file.
"""
txt = "Checking file ids - using '%s'" % self.filestatuschecker
self.logger.info(txt)
ids_cellpy_file = self._check_cellpy_file(cellpyfile)
self.logger.debug(f"cellpyfile ids: {ids_cellpy_file}")
if not ids_cellpy_file:
# self.logger.debug("hdf5 file does not exist - needs updating")
return False
ids_raw = self._check_raw(rawfiles)
if detailed:
similar = self._parse_ids(ids_raw, ids_cellpy_file)
return similar
else:
similar = self._compare_ids(ids_raw, ids_cellpy_file)
if not similar:
# self.logger.debug("hdf5 file needs updating")
return False
else:
# self.logger.debug("hdf5 file is updated")
return True
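# Hedged usage sketch (added for illustration; not part of the original source),
# in the doctest style used elsewhere in this class; the file names are made up:
# >>> up_to_date = cell_data.check_file_ids(["run01.res"], "run01.h5")
# >>> per_file_status = cell_data.check_file_ids(["run01.res"], "run01.h5", detailed=True)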
def _check_raw(self, file_names, abort_on_missing=False):
"""Get the file-ids for the res_files."""
strip_file_names = True
check_on = self.filestatuschecker
if not self._is_listtype(file_names):
file_names = [file_names]
ids = dict()
for f in file_names:
self.logger.debug(f"checking res file {f}")
fid = FileID(f)
# self.logger.debug(fid)
if fid.name is None:
warnings.warn(f"file does not exist: {f}")
if abort_on_missing:
sys.exit(-1)
else:
if strip_file_names:
name = os.path.basename(f)
else:
name = f
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
def _check_cellpy_file(self, filename):
"""Get the file-ids for the cellpy_file."""
strip_filenames = True
parent_level = prms._cellpyfile_root
fid_dir = prms._cellpyfile_fid
check_on = self.filestatuschecker
self.logger.debug("checking cellpy-file")
self.logger.debug(filename)
if not os.path.isfile(filename):
self.logger.debug("cellpy-file does not exist")
return None
try:
store = pd.HDFStore(filename)
except Exception as e:
self.logger.debug(f"could not open cellpy-file ({e})")
return None
try:
fidtable = store.select(parent_level + fid_dir)
except KeyError:
self.logger.warning("no fidtable -" " you should update your hdf5-file")
fidtable = None
finally:
store.close()
if fidtable is not None:
raw_data_files, raw_data_files_length = self._convert2fid_list(fidtable)
txt = "contains %i res-files" % (len(raw_data_files))
self.logger.debug(txt)
ids = dict()
for fid in raw_data_files:
full_name = fid.full_name
size = fid.size
mod = fid.last_modified
self.logger.debug(f"fileID information for: {full_name}")
self.logger.debug(f" modified: {mod}")
self.logger.debug(f" size: {size}")
if strip_filenames:
name = os.path.basename(full_name)
else:
name = full_name
if check_on == "size":
ids[name] = int(fid.size)
elif check_on == "modified":
ids[name] = int(fid.last_modified)
else:
ids[name] = int(fid.last_accessed)
return ids
else:
return None
@staticmethod
def _compare_ids(ids_res, ids_cellpy_file):
similar = True
l_res = len(ids_res)
l_cellpy = len(ids_cellpy_file)
if l_res == l_cellpy and l_cellpy > 0:
for name, value in list(ids_res.items()):
if ids_cellpy_file[name] != value:
similar = False
else:
similar = False
return similar
@staticmethod
def _parse_ids(ids_raw, ids_cellpy_file):
similar = dict()
for name in ids_raw:
v_cellpy = ids_cellpy_file.get(name, None)
v_raw = ids_raw[name]
similar[name] = False
if v_raw is not None:
if v_raw == v_cellpy:
similar[name] = True
return similar
def loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
only_summary=False,
force_raw=False,
use_cellpy_stat_file=None,
**kwargs,
):
"""Loads data for given cells.
Args:
raw_files (list): name of res-files
cellpy_file (path): name of cellpy-file
mass (float): mass of electrode or active material
summary_on_raw (bool): use raw-file for summary
summary_ir (bool): summarize ir
summary_ocv (bool): summarize ocv steps
summary_end_v (bool): summarize end voltage
only_summary (bool): get only the summary of the runs
force_raw (bool): only use raw-files
use_cellpy_stat_file (bool): use stat file if creating summary
from raw
**kwargs: passed to from_raw
Example:
>>> srnos = my_dbreader.select_batch("testing_new_solvent")
>>> cell_datas = []
>>> for srno in srnos:
>>> ... my_run_name = my_dbreader.get_cell_name(srno)
>>> ... mass = my_dbreader.get_mass(srno)
>>> ... rawfiles, cellpyfiles = \
>>> ... filefinder.search_for_files(my_run_name)
>>> ... cell_data = cellreader.CellpyData()
>>> ... cell_data.loadcell(raw_files=rawfiles,
>>> ... cellpy_file=cellpyfiles)
>>> ... cell_data.set_mass(mass)
>>> ... if not cell_data.summary_exists:
>>> ... cell_data.make_summary() # etc. etc.
>>> ... cell_datas.append(cell_data)
>>>
"""
# This is a part of a dramatic API change. It will not be possible to
# load more than one set of datasets (i.e. one single cellpy-file or
# several raw-files that will be automatically merged)
# TODO @jepe Make setting or prm so that it is possible to update only new data
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None:
similar = False
elif force_raw:
similar = False
else:
similar = self.check_file_ids(raw_files, cellpy_file)
self.logger.debug("checked if the files were similar")
if only_summary:
self.load_only_summary = True
else:
self.load_only_summary = False
if not similar:
self.logger.debug("cellpy file(s) needs updating - loading raw")
self.logger.info("Loading raw-file")
self.logger.debug(raw_files)
self.from_raw(raw_files, **kwargs)
self.logger.debug("loaded files")
# Check if the run was loaded ([] if empty)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
nom_cap = kwargs.pop("nom_cap", None)
if nom_cap is not None:
self.set_nom_cap(nom_cap)
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
# nom_cap=nom_cap,
)
else:
self.logger.warning("Empty run!")
else:
self.load(cellpy_file)
if mass:
self.set_mass(mass)
return self
def dev_update_loadcell(
self,
raw_files,
cellpy_file=None,
mass=None,
summary_on_raw=False,
summary_ir=True,
summary_ocv=False,
summary_end_v=True,
force_raw=False,
use_cellpy_stat_file=None,
nom_cap=None,
):
self.logger.info("Started cellpy.cellreader.loadcell")
if cellpy_file is None or force_raw:
similar = None
else:
similar = self.check_file_ids(raw_files, cellpy_file, detailed=True)
self.logger.debug("checked if the files were similar")
if similar is None:
# forcing to load only raw_files
self.from_raw(raw_files)
if self.status_datasets:
if mass:
self.set_mass(mass)
if summary_on_raw:
self.make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
nom_cap=nom_cap,
)
else:
self.logger.warning("Empty run!")
return self
self.load(cellpy_file)
if mass:
self.set_mass(mass)
if all(similar.values()):
self.logger.info("Everything is up to date")
return
start_file = True
for i, f in enumerate(raw_files):
f = Path(f)
if not similar[f.name] and start_file:
try:
last_data_point = self.cell.raw_data_files[i].last_data_point
except IndexError:
last_data_point = 0
self.dev_update_from_raw(
file_names=f, data_points=[last_data_point, None]
)
self.cell = self.dev_update_merge()
elif not similar[f.name]:
try:
last_data_point = self.cell.raw_data_files[i].last_data_point
except IndexError:
last_data_point = 0
self.dev_update_from_raw(
file_names=f, data_points=[last_data_point, None]
)
self.merge()
start_file = False
self.dev_update_make_steps()
self.dev_update_make_summary(
all_tests=False,
find_ocv=summary_ocv,
find_ir=summary_ir,
find_end_voltage=summary_end_v,
use_cellpy_stat_file=use_cellpy_stat_file,
)
return self
def dev_update(self, file_names=None, **kwargs):
print("NOT FINISHED YET - but close")
if len(self.cell.raw_data_files) != 1:
self.logger.warning(
"Merged cell. But can only update based on the last file"
)
print(self.cell.raw_data_files)
for fid in self.cell.raw_data_files:
print(fid)
last = self.cell.raw_data_files[0].last_data_point
self.dev_update_from_raw(
file_names=file_names, data_points=[last, None], **kwargs
)
print("lets try to merge")
self.cell = self.dev_update_merge()
print("now it is time to update the step table")
self.dev_update_make_steps()
print("and finally, lets update the summary")
self.dev_update_make_summary()
def dev_update_merge(self):
print("NOT FINISHED YET - but very close")
number_of_tests = len(self.cells)
if number_of_tests != 2:
self.logger.warning(
"Cannot merge if you do not have exactly two cell-objects"
)
return
t1, t2 = self.cells
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
cycle_index_header = self.headers_normal.cycle_index_txt
if not t1.raw.empty:
t1.raw = t1.raw.iloc[:-1]
raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
test.no_cycles = max(raw2[cycle_index_header])
test.raw = raw2
else:
test.no_cycles = max(t2.raw[cycle_index_header])
test = t2
self.logger.debug(" -> merged with new dataset")
return test
def dev_update_make_steps(self, **kwargs):
old_steps = self.cell.steps.iloc[:-1]
# Note! hard-coding header name (might fail if changing default headers)
from_data_point = self.cell.steps.iloc[-1].point_first
new_steps = self.make_step_table(from_data_point=from_data_point, **kwargs)
merged_steps = pd.concat([old_steps, new_steps]).reset_index(drop=True)
self.cell.steps = merged_steps
def dev_update_make_summary(self, **kwargs):
print("NOT FINISHED YET - but not critical")
# Update not implemented yet, running full summary calculations for now.
# For later:
# old_summary = self.cell.summary.iloc[:-1]
cycle_index_header = self.headers_summary.cycle_index
from_cycle = self.cell.summary.iloc[-1][cycle_index_header]
self.make_summary(from_cycle=from_cycle, **kwargs)
# For later:
# (Remark! need to solve how to merge culumated columns)
# new_summary = self.make_summary(from_cycle=from_cycle)
# merged_summary = pd.concat([old_summary, new_summary]).reset_index(drop=True)
# self.cell.summary = merged_summary
def dev_update_from_raw(self, file_names=None, data_points=None, **kwargs):
"""This method is under development. Using this to develop updating files
with only new data.
"""
print("NOT FINISHED YET - but very close")
if file_names:
self.file_names = file_names
if file_names is None:
self.logger.info(
"No filename given and no stored in the file_names "
"attribute. Returning None"
)
return None
if not isinstance(self.file_names, (list, tuple)):
self.file_names = [file_names]
raw_file_loader = self.loader
set_number = 0
test = None
self.logger.debug("start iterating through file(s)")
print(self.file_names)
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
# get a list of cellpy.readers.core.Cell objects
test = raw_file_loader(f, data_points=data_points, **kwargs)
# remark that the bounds are included (i.e. the first datapoint
# is 5000).
self.logger.debug("added the data set - merging file info")
# raw_data_file = copy.deepcopy(test[set_number].raw_data_files[0])
# file_size = test[set_number].raw_data_files_length[0]
# test[set_number].raw_data_files.append(raw_data_file)
# test[set_number].raw_data_files_length.append(file_size)
# return test
self.cells.append(test[set_number])
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
Other keywords depending on loader:
[ArbinLoader]:
bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c)
to skip loading.
dataset_number (int): the data set number to select if you are dealing
with arbin files with more than one data-set.
data_points (tuple of ints): load only data from data_point[0] to
data_point[1] (use None for infinite). NOT IMPLEMENTED YET.
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() used to implement loading several
# datasets (using list of lists as input), however it is now deprecated.
if file_names:
self.file_names = file_names
if not isinstance(self.file_names, (list, tuple)):
self.file_names = [file_names]
# file_type = self.tester
raw_file_loader = self.loader
# test is currently a list of tests - this option will be removed in the future
# so set_number is hard-coded to 0, i.e. actual-test is always test[0]
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
# retrieving the first cell data (e.g. first file)
if test is None:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
# appending cell data file to existing
else:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
# retrieving file info in a for-loop in case of multiple files
# Remark!
# - the raw_data_files attribute is a list
# - the raw_data_files_length attribute is a list
# The reason for this choice is not clear anymore, but
# let us keep it like this for now
self.logger.debug("added the data set - merging file info")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError(
"Too many files to merge - "
"could be a p2-p3 zip thing"
)
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
self.logger.debug(
"the first dataset (or only dataset) loaded from the raw data file is empty"
)
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.cells.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def from_res(self, filenames=None, check_file_type=True):
"""Convenience function for loading arbin-type data into the
datastructure.
Args:
filenames: ((lists of) list of raw-file names): uses
cellpy.file_names if None.
If list-of-list, it loads each list into separate datasets.
The files in the inner list will be merged.
check_file_type (bool): check file type if True
(res-, or cellpy-format)
"""
raise DeprecatedFeature
def _validate_datasets(self, level=0):
self.logger.debug("validating test")
level = 0
# simple validation for finding empty datasets - should be expanded to
# find not-complete datasets, datasets with missing prms etc
v = []
if level == 0:
for test in self.cells:
# check that it contains all the necessary headers
# (and add missing ones)
# test = self._clean_up_normal_table(test)
# check that the test is not empty
v.append(self._is_not_empty_dataset(test))
self.logger.debug(f"validation array: {v}")
return v
def check(self):
"""Returns False if no datasets exists or if one or more of the datasets
are empty"""
if len(self.status_datasets) == 0:
return False
if all(self.status_datasets):
return True
return False
# TODO: maybe consider being a bit more concise (re-implement)
def _is_not_empty_dataset(self, dataset):
if dataset is self._empty_dataset():
return False
else:
return True
# TODO: check if this is useful and if it is rename, if not delete
def _clean_up_normal_table(self, test=None, dataset_number=None):
# check that test contains all the necessary headers
# (and add missing ones)
raise NotImplementedError
# TODO: this is used for the check-datasetnr-thing. Will soon be obsolete?
def _report_empty_dataset(self):
self.logger.info("Empty set")
@staticmethod
def _empty_dataset():
return None
def _invent_a_name(self, filename=None, override=False):
if filename is None:
self.name = "nameless"
return
if self.name and not override:
return
path = Path(filename)
self.name = path.with_suffix("").name
def partial_load(self, **kwargs):
"""Load only a selected part of the cellpy file."""
raise NotImplementedError
def link(self, **kwargs):
"""Create a link to a cellpy file.
If the file is very big, it is sometimes better to work with the data
out of memory (i.e. on disk). A CellpyData object with a linked file
will in most cases work as a normal object. However, some of the methods
might be disabled. And it will be slower.
Notes:
2020.02.08 - maybe this functionality is not needed and can be replaced
by using dask or similar?
"""
raise NotImplementedError
def dev_load(self, cellpy_file, parent_level=None, return_cls=True, accept_old=False):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level. Warning! Deprecating this soon!
return_cls (bool): Return the class.
accept_old (bool): Accept loading old cellpy-file versions.
Instead of raising WrongFileVersion it only issues a warning.
Returns:
cellpy.CellPyData class if return_cls is True
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._dev_load_hdf5(cellpy_file, parent_level, accept_old)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def load(self, cellpy_file, parent_level=None, return_cls=True, accept_old=False):
"""Loads a cellpy file.
Args:
cellpy_file (path, str): Full path to the cellpy file.
parent_level (str, optional): Parent level. Warning! Deprecating this soon!
return_cls (bool): Return the class.
accept_old (bool): Accept loading old cellpy-file versions.
Instead of raising WrongFileVersion it only issues a warning.
Returns:
cellpy.CellPyData class if return_cls is True
"""
try:
self.logger.debug("loading cellpy-file (hdf5):")
self.logger.debug(cellpy_file)
new_datasets = self._load_hdf5(cellpy_file, parent_level, accept_old)
self.logger.debug("cellpy-file loaded")
except AttributeError:
new_datasets = []
self.logger.warning(
"This cellpy-file version is not supported by"
"current reader (try to update cellpy)."
)
if new_datasets:
for dataset in new_datasets:
self.cells.append(dataset)
else:
# raise LoadError
self.logger.warning("Could not load")
self.logger.warning(str(cellpy_file))
self.number_of_datasets = len(self.cells)
self.status_datasets = self._validate_datasets()
self._invent_a_name(cellpy_file)
if return_cls:
return self
def _get_cellpy_file_version(self, filename, meta_dir="/info", parent_level=None):
if parent_level is None:
parent_level = prms._cellpyfile_root
with pd.HDFStore(filename) as store:
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError:
raise WrongFileVersion(
"This file is VERY old - cannot read file version number"
)
try:
cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
warnings.warn(f"Unhandled exception raised: {e}")
return 0
return cellpy_file_version
def _dev_load_hdf5(self, filename, parent_level=None, accept_old=False):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData"). DeprecationWarning!
accept_old (bool): accept old file versions.
Returns:
loaded datasets (DataSet-object)
"""
CELLPY_FILE_VERSION = 6
HEADERS_SUMMARY["cycle_index"] = "cycle_index"
HEADERS_SUMMARY["discharge_capacity"] = "discharge_capacity_mAh_g"
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
f"Using non-default parent label for the " f"hdf-store: {parent_level}"
)
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError(f"File does not exist: {filename}")
cellpy_file_version = self._get_cellpy_file_version(filename)
if cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too new: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or upgrade your cellpy!"
)
elif cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or downgrade your cellpy!"
)
elif cellpy_file_version < CELLPY_FILE_VERSION:
if accept_old:
self.logger.debug(f"old cellpy file version {cellpy_file_version}")
self.logger.debug(f"filename: {filename}")
self.logger.warning(f"Loading old file-type. It is recommended that you remake the step table and the "
f"summary table.")
new_data = self._load_old_hdf5(filename, cellpy_file_version)
else:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Try loading setting accept_old=True"
)
else:
self.logger.debug(f"Loading {filename} :: v{cellpy_file_version}")
new_data = self._load_hdf5_current_version(filename)
return new_data
def _load_hdf5(self, filename, parent_level=None, accept_old=False):
"""Load a cellpy-file.
Args:
filename (str): Name of the cellpy file.
parent_level (str) (optional): name of the parent level
(defaults to "CellpyData"). DeprecationWarning!
accept_old (bool): accept old file versions.
Returns:
loaded datasets (DataSet-object)
"""
if parent_level is None:
parent_level = prms._cellpyfile_root
if parent_level != prms._cellpyfile_root:
self.logger.debug(
f"Using non-default parent label for the " f"hdf-store: {parent_level}"
)
if not os.path.isfile(filename):
self.logger.info(f"File does not exist: {filename}")
raise IOError(f"File does not exist: {filename}")
cellpy_file_version = self._get_cellpy_file_version(filename)
if cellpy_file_version > CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too new: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or upgrade your cellpy!"
)
elif cellpy_file_version < MINIMUM_CELLPY_FILE_VERSION:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Reload from raw or downgrade your cellpy!"
)
elif cellpy_file_version < CELLPY_FILE_VERSION:
if accept_old:
self.logger.debug(f"old cellpy file version {cellpy_file_version}")
self.logger.debug(f"filename: {filename}")
new_data = self._load_old_hdf5(filename, cellpy_file_version)
else:
raise WrongFileVersion(
f"File format too old: {filename} :: version: {cellpy_file_version}"
f"Try loading setting accept_old=True"
)
else:
self.logger.debug(f"Loading {filename} :: v{cellpy_file_version}")
new_data = self._load_hdf5_current_version(filename)
return new_data
def _load_hdf5_current_version(self, filename, meta_dir="/info", parent_level=None):
if parent_level is None:
parent_level = prms._cellpyfile_root
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
fid_dir = prms._cellpyfile_fid
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(data, parent_level, step_dir, store)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
(data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
def _load_hdf5_v5(self, filename):
parent_level = "CellpyData"
raw_dir = "/raw"
step_dir = "/steps"
summary_dir = "/summary"
fid_dir = "/fid"
meta_dir = "/info"
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
self._check_keys_in_cellpy_file(
meta_dir, parent_level, raw_dir, store, summary_dir
)
self._extract_summary_from_cellpy_file(
data, parent_level, store, summary_dir
)
self._extract_raw_from_cellpy_file(data, parent_level, raw_dir, store)
self._extract_steps_from_cellpy_file(data, parent_level, step_dir, store)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
if fid_table_selected:
(data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
# this does not yet allow multiple sets
self.logger.debug("loaded new test")
new_tests = [
data
] # but cellpy is ready when that time comes (if it ever happens)
return new_tests
def _load_old_hdf5(self, filename, cellpy_file_version):
if cellpy_file_version < 5:
new_data = self._load_old_hdf5_v3_to_v4(filename)
elif cellpy_file_version == 5:
new_data = self._load_hdf5_v5(filename)
else:
raise WrongFileVersion(f"version {cellpy_file_version} is not supported")
if cellpy_file_version < 6:
self.logger.debug("legacy cellpy file version needs translation")
new_data = old_settings.translate_headers(new_data, cellpy_file_version)
return new_data
def _load_old_hdf5_v3_to_v4(self, filename):
parent_level = "CellpyData"
meta_dir = "/info"
_raw_dir = "/dfdata"
_step_dir = "/step_table"
_summary_dir = "/dfsummary"
_fid_dir = "/fidtable"
with pd.HDFStore(filename) as store:
data, meta_table = self._create_initial_data_set_from_cellpy_file(
meta_dir, parent_level, store
)
self._check_keys_in_cellpy_file(
meta_dir, parent_level, _raw_dir, store, _summary_dir
)
self._extract_summary_from_cellpy_file(data, parent_level, store, _summary_dir)
self._extract_raw_from_cellpy_file(data, parent_level, _raw_dir, store)
self._extract_steps_from_cellpy_file(data, parent_level, _step_dir, store)
fid_table, fid_table_selected = self._extract_fids_from_cellpy_file(
_fid_dir, parent_level, store
)
self._extract_meta_from_cellpy_file(data, meta_table, filename)
warnings.warn(
"Loaded old cellpy-file version (<5). " "Please update and save again."
)
if fid_table_selected:
(data.raw_data_files, data.raw_data_files_length,) = self._convert2fid_list(
fid_table
)
else:
data.raw_data_files = None
data.raw_data_files_length = None
new_tests = [data]
return new_tests
def _create_initial_data_set_from_cellpy_file(self, meta_dir, parent_level, store):
# Remark that this function is run before selecting loading method
# based on version. If you change the meta_dir prm to something else than
# "/info" it will most likely fail.
# Remark! Used for versions 3, 4, 5
data = Cell()
meta_table = None
try:
meta_table = store.select(parent_level + meta_dir)
except KeyError as e:
self.logger.info("This file is VERY old - no info given here")
self.logger.info("You should convert the files to a newer version!")
self.logger.debug(e)
return data, meta_table
try:
data.cellpy_file_version = self._extract_from_dict(
meta_table, "cellpy_file_version"
)
except Exception as e:
data.cellpy_file_version = 0
warnings.warn(f"Unhandled exception raised: {e}")
return data, meta_table
self.logger.debug(f"cellpy file version. {data.cellpy_file_version}")
return data, meta_table
def _check_keys_in_cellpy_file(
self, meta_dir, parent_level, raw_dir, store, summary_dir
):
required_keys = [raw_dir, summary_dir, meta_dir]
required_keys = ["/" + parent_level + _ for _ in required_keys]
for key in required_keys:
if key not in store.keys():
self.logger.info(
f"This cellpy-file is not good enough - "
f"at least one key is missing: {key}"
)
raise Exception(
f"OH MY GOD! At least one crucial key is missing {key}!"
)
self.logger.debug(f"Keys in current cellpy-file: {store.keys()}")
@staticmethod
def _extract_raw_from_cellpy_file(data, parent_level, raw_dir, store):
data.raw = store.select(parent_level + raw_dir)
@staticmethod
def _extract_summary_from_cellpy_file(data, parent_level, store, summary_dir):
data.summary = store.select(parent_level + summary_dir)
def _extract_fids_from_cellpy_file(self, fid_dir, parent_level, store):
self.logger.debug(f"Extracting fid table from {fid_dir} in hdf5 store")
try:
fid_table = store.select(
parent_level + fid_dir
) # remark! changed spelling from
# lower letter to camel-case!
fid_table_selected = True
except Exception as e:
self.logger.debug(e)
self.logger.debug("could not get fid from cellpy-file")
fid_table = []
warnings.warn("no fid_table - you should update your cellpy-file")
fid_table_selected = False
return fid_table, fid_table_selected
def _extract_steps_from_cellpy_file(self, data, parent_level, step_dir, store):
try:
data.steps = store.select(parent_level + step_dir)
except Exception as e:
            self.logger.debug("could not get steps from cellpy-file")
data.steps = pd.DataFrame()
warnings.warn(f"Unhandled exception raised: {e}")
def _extract_meta_from_cellpy_file(self, data, meta_table, filename):
# get attributes from meta table
# remark! could also utilise the pandas to dictionary method directly
# for example: meta_table.T.to_dict()
# Maybe a good task for someone who would like to learn more about
# how cellpy works..
for attribute in ATTRS_CELLPYFILE:
value = self._extract_from_dict(meta_table, attribute)
# some fixes due to errors propagated into the cellpy-files
if attribute == "creator":
if not isinstance(value, str):
value = "no_name"
if attribute == "test_no":
if not isinstance(value, (int, float)):
value = 0
setattr(data, attribute, value)
if data.mass is None:
data.mass = 1.0
else:
data.mass_given = True
data.loaded_from = str(filename)
# hack to allow the renaming of tests to datasets
try:
name = self._extract_from_dict_hard(meta_table, "name")
if not isinstance(name, str):
name = "no_name"
data.name = name
except KeyError:
self.logger.debug(f"missing key in meta table: {name}")
print(meta_table)
warnings.warn("OLD-TYPE: Recommend to save in new format!")
try:
name = self._extract_from_dict(meta_table, "test_name")
except Exception as e:
name = "no_name"
self.logger.debug("name set to 'no_name")
warnings.warn(f"Unhandled exception raised: {e}")
data.name = name
# unpacking the raw data limits
for key in data.raw_limits:
try:
data.raw_limits[key] = self._extract_from_dict_hard(meta_table, key)
except KeyError:
self.logger.debug(f"missing key in meta_table: {key}")
warnings.warn("OLD-TYPE: Recommend to save in new format!")
@staticmethod
def _extract_from_dict(t, x, default_value=None):
try:
value = t[x].values
if value:
value = value[0]
except KeyError:
value = default_value
return value
@staticmethod
def _extract_from_dict_hard(t, x):
value = t[x].values
if value:
value = value[0]
return value
def _create_infotable(self, dataset_number=None):
# needed for saving class/DataSet to hdf5
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
test = self.get_cell(dataset_number)
infotable = collections.OrderedDict()
for attribute in ATTRS_CELLPYFILE:
value = getattr(test, attribute)
infotable[attribute] = [value]
infotable["cellpy_file_version"] = [CELLPY_FILE_VERSION]
limits = test.raw_limits
for key in limits:
infotable[key] = limits[key]
infotable = pd.DataFrame(infotable)
self.logger.debug("_create_infotable: fid")
fidtable = collections.OrderedDict()
fidtable["raw_data_name"] = []
fidtable["raw_data_full_name"] = []
fidtable["raw_data_size"] = []
fidtable["raw_data_last_modified"] = []
fidtable["raw_data_last_accessed"] = []
fidtable["raw_data_last_info_changed"] = []
fidtable["raw_data_location"] = []
fidtable["raw_data_files_length"] = []
fidtable["last_data_point"] = []
fids = test.raw_data_files
fidtable["raw_data_fid"] = fids
if fids:
for fid, length in zip(fids, test.raw_data_files_length):
fidtable["raw_data_name"].append(fid.name)
fidtable["raw_data_full_name"].append(fid.full_name)
fidtable["raw_data_size"].append(fid.size)
fidtable["raw_data_last_modified"].append(fid.last_modified)
fidtable["raw_data_last_accessed"].append(fid.last_accessed)
fidtable["raw_data_last_info_changed"].append(fid.last_info_changed)
fidtable["raw_data_location"].append(fid.location)
fidtable["raw_data_files_length"].append(length)
fidtable["last_data_point"].append(fid.last_data_point)
else:
warnings.warn("seems you lost info about your raw-data (missing fids)")
fidtable = pd.DataFrame(fidtable)
return infotable, fidtable
def _convert2fid_list(self, tbl):
self.logger.debug("converting loaded fidtable to FileID object")
fids = []
lengths = []
min_amount = 0
for counter, item in enumerate(tbl["raw_data_name"]):
fid = FileID()
fid.name = item
fid.full_name = tbl["raw_data_full_name"][counter]
fid.size = tbl["raw_data_size"][counter]
fid.last_modified = tbl["raw_data_last_modified"][counter]
fid.last_accessed = tbl["raw_data_last_accessed"][counter]
fid.last_info_changed = tbl["raw_data_last_info_changed"][counter]
fid.location = tbl["raw_data_location"][counter]
length = tbl["raw_data_files_length"][counter]
if "last_data_point" in tbl.columns:
fid.last_data_point = tbl["last_data_point"][counter]
else:
fid.last_data_point = 0
fids.append(fid)
lengths.append(length)
min_amount = 1
if min_amount < 1:
self.logger.debug("info about raw files missing")
return fids, lengths
def merge(self, datasets=None, separate_datasets=False):
"""This function merges datasets into one set."""
self.logger.info("Merging")
if separate_datasets:
            warnings.warn(
                "The option separate_datasets=True is "
                "not implemented yet. Performing merging, but "
                "neglecting the option."
            )
else:
if datasets is None:
datasets = list(range(len(self.cells)))
first = True
for dataset_number in datasets:
if first:
dataset = self.cells[dataset_number]
first = False
else:
dataset = self._append(dataset, self.cells[dataset_number])
for raw_data_file, file_size in zip(
self.cells[dataset_number].raw_data_files,
self.cells[dataset_number].raw_data_files_length,
):
dataset.raw_data_files.append(raw_data_file)
dataset.raw_data_files_length.append(file_size)
self.cells = [dataset]
self.number_of_datasets = 1
return self
def _append(self, t1, t2, merge_summary=True, merge_step_table=True):
self.logger.debug(
f"merging two datasets (merge summary = {merge_summary}) "
f"(merge step table = {merge_step_table})"
)
if t1.raw.empty:
self.logger.debug("OBS! the first dataset is empty")
if t2.raw.empty:
t1.merged = True
self.logger.debug("the second dataset was empty")
self.logger.debug(" -> merged contains only first")
return t1
test = t1
# finding diff of time
start_time_1 = t1.start_datetime
start_time_2 = t2.start_datetime
diff_time = xldate_as_datetime(start_time_2) - xldate_as_datetime(start_time_1)
diff_time = diff_time.total_seconds()
if diff_time < 0:
self.logger.warning("Wow! your new dataset is older than the old!")
self.logger.debug(f"diff time: {diff_time}")
sort_key = self.headers_normal.datetime_txt # DateTime
# mod data points for set 2
data_point_header = self.headers_normal.data_point_txt
try:
last_data_point = max(t1.raw[data_point_header])
except ValueError:
last_data_point = 0
t2.raw[data_point_header] = t2.raw[data_point_header] + last_data_point
# mod cycle index for set 2
cycle_index_header = self.headers_normal.cycle_index_txt
try:
last_cycle = max(t1.raw[cycle_index_header])
except ValueError:
last_cycle = 0
t2.raw[cycle_index_header] = t2.raw[cycle_index_header] + last_cycle
# mod test time for set 2
test_time_header = self.headers_normal.test_time_txt
t2.raw[test_time_header] = t2.raw[test_time_header] + diff_time
# merging
if not t1.raw.empty:
raw2 = pd.concat([t1.raw, t2.raw], ignore_index=True)
# checking if we already have made a summary file of these datasets
# (to be used if merging summaries (but not properly implemented yet))
if t1.summary_made and t2.summary_made:
dfsummary_made = True
else:
dfsummary_made = False
# checking if we already have made step tables for these datasets
if t1.steps_made and t2.steps_made:
step_table_made = True
else:
step_table_made = False
if merge_summary:
# check if (self-made) summary exists.
self_made_summary = True
try:
test_it = t1.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
try:
test_it = t2.summary[cycle_index_header]
except KeyError as e:
self_made_summary = False
if self_made_summary:
# mod cycle index for set 2
last_cycle = max(t1.summary[cycle_index_header])
t2.summary[cycle_index_header] = (
t2.summary[cycle_index_header] + last_cycle
)
# mod test time for set 2
t2.summary[test_time_header] = (
t2.summary[test_time_header] + diff_time
)
                    # to-do: mod all the cumsum stuff in the summary
                    # (best to re-make the summary after merging)
else:
t2.summary[data_point_header] = (
t2.summary[data_point_header] + last_data_point
)
summary2 = pd.concat([t1.summary, t2.summary], ignore_index=True)
test.summary = summary2
if merge_step_table:
if step_table_made:
cycle_index_header = self.headers_normal.cycle_index_txt
t2.steps[self.headers_step_table.cycle] = (
                        t2.steps[self.headers_step_table.cycle] + last_cycle
)
steps2 = pd.concat([t1.steps, t2.steps], ignore_index=True)
test.steps = steps2
else:
self.logger.debug(
"could not merge step tables "
"(non-existing) -"
"create them first!"
)
test.no_cycles = max(raw2[cycle_index_header])
test.raw = raw2
else:
test.no_cycles = max(t2.raw[cycle_index_header])
test = t2
test.merged = True
self.logger.debug(" -> merged with new dataset")
# TODO: @jepe - update merging for more variables
return test
# --------------iterate-and-find-in-data-----------------------------------
# TODO: make this obsolete (somehow)
def _validate_dataset_number(self, n, check_for_empty=True):
# Returns dataset_number (or None if empty)
# Remark! _is_not_empty_dataset returns True or False
if not len(self.cells):
self.logger.info(
"Can't see any datasets! Are you sure you have " "loaded anything?"
)
return
if n is not None:
v = n
else:
if self.selected_cell_number is None:
v = 0
else:
v = self.selected_cell_number
if check_for_empty:
not_empty = self._is_not_empty_dataset(self.cells[v])
if not_empty:
return v
else:
return None
else:
return v
# TODO: check if this can be moved to helpers
def _validate_step_table(self, dataset_number=None, simple=False):
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
step_index_header = self.headers_normal.step_index_txt
self.logger.debug("-validating step table")
d = self.cells[dataset_number].raw
s = self.cells[dataset_number].steps
if not self.cells[dataset_number].steps_made:
return False
no_cycles_raw = np.amax(d[self.headers_normal.cycle_index_txt])
headers_step_table = self.headers_step_table
no_cycles_step_table = np.amax(s[headers_step_table.cycle])
if simple:
self.logger.debug(" (simple)")
if no_cycles_raw == no_cycles_step_table:
return True
else:
return False
else:
validated = True
if no_cycles_raw != no_cycles_step_table:
self.logger.debug(" differ in no. of cycles")
validated = False
else:
for j in range(1, no_cycles_raw + 1):
cycle_number = j
no_steps_raw = len(
np.unique(
d.loc[
d[self.headers_normal.cycle_index_txt] == cycle_number,
self.headers_normal.step_index_txt,
]
)
)
no_steps_step_table = len(
s.loc[
s[headers_step_table.cycle] == cycle_number,
headers_step_table.step,
]
)
if no_steps_raw != no_steps_step_table:
validated = False
# txt = ("Error in step table "
# "(cycle: %i) d: %i, s:%i)" % (
# cycle_number,
# no_steps_raw,
# no_steps_steps
# )
# )
#
# self.logger.debug(txt)
return validated
def print_steps(self, dataset_number=None):
"""Print the step table."""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
st = self.cells[dataset_number].steps
print(st)
def get_step_numbers(
self,
steptype="charge",
allctypes=True,
pdtype=False,
cycle_number=None,
dataset_number=None,
trim_taper_steps=None,
steps_to_skip=None,
steptable=None,
):
# TODO: @jepe - include sub_steps here
# TODO: @jepe - include option for not selecting taper steps here
"""Get the step numbers of selected type.
Returns the selected step_numbers for the selected type of step(s).
Args:
steptype (string): string identifying type of step.
allctypes (bool): get all types of charge (or discharge).
pdtype (bool): return results as pandas.DataFrame
cycle_number (int): selected cycle, selects all if not set.
dataset_number (int): test number (default first)
(usually not used).
trim_taper_steps (integer): number of taper steps to skip (counted
from the end, i.e. 1 means skip last step in each cycle).
steps_to_skip (list): step numbers that should not be included.
steptable (pandas.DataFrame): optional steptable
Returns:
A dictionary containing a list of step numbers corresponding
to the selected steptype for the cycle(s).
Returns a pandas.DataFrame instead of a dict of lists if pdtype is
set to True. The frame is a sub-set of the step-table frame
(i.e. all the same columns, only filtered by rows).
Example:
>>> my_charge_steps = CellpyData.get_step_numbers(
>>> "charge",
>>> cycle_number = 3
>>> )
>>> print my_charge_steps
{3: [5,8]}
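            The step-table subset can also be returned as a DataFrame
            (a sketch; the shown column names assume the default
            step-table headers):
            >>> df = CellpyData.get_step_numbers("discharge", pdtype=True)
            >>> df[["cycle", "step", "type"]].head()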
"""
t0 = time.time()
self.logger.debug("Trying to get step-types")
if steps_to_skip is None:
steps_to_skip = []
if steptable is None:
self.logger.debug("steptable=None")
dataset_number = self._validate_dataset_number(dataset_number)
self.logger.debug(f"dt 1: {time.time() - t0}")
if dataset_number is None:
self._report_empty_dataset()
return
if not self.cells[dataset_number].steps_made:
self.logger.debug("steps is not made")
if self.force_step_table_creation or self.force_all:
self.logger.debug("creating step_table for")
self.logger.debug(self.cells[dataset_number].loaded_from)
# print "CREAING STEP-TABLE"
self.make_step_table(dataset_number=dataset_number)
else:
self.logger.info(
"ERROR! Cannot use get_steps: create step_table first"
)
self.logger.info("You could use find_step_numbers method instead")
self.logger.info("(but I don't recommend it)")
return None
# check if steptype is valid
steptype = steptype.lower()
steptypes = []
helper_step_types = ["ocv", "charge_discharge"]
valid_step_type = True
self.logger.debug(f"dt 2: {time.time() - t0}")
if steptype in self.list_of_step_types:
steptypes.append(steptype)
else:
txt = "%s is not a valid core steptype" % steptype
if steptype in helper_step_types:
txt = "but a helper steptype"
if steptype == "ocv":
steptypes.append("ocvrlx_up")
steptypes.append("ocvrlx_down")
elif steptype == "charge_discharge":
steptypes.append("charge")
steptypes.append("discharge")
else:
valid_step_type = False
self.logger.debug(txt)
if not valid_step_type:
return None
# in case of selection allctypes, then modify charge, discharge
if allctypes:
add_these = []
for st in steptypes:
if st in ["charge", "discharge"]:
st1 = st + "_cv"
add_these.append(st1)
st1 = "cv_" + st
add_these.append(st1)
for st in add_these:
steptypes.append(st)
# self.logger.debug("Your steptypes:")
# self.logger.debug(steptypes)
if steptable is None:
st = self.cells[dataset_number].steps
else:
st = steptable
shdr = self.headers_step_table
# retrieving cycle numbers
self.logger.debug(f"dt 3: {time.time() - t0}")
if cycle_number is None:
cycle_numbers = self.get_cycle_numbers(dataset_number, steptable=steptable)
else:
if isinstance(cycle_number, (list, tuple)):
cycle_numbers = cycle_number
else:
cycle_numbers = [cycle_number]
if trim_taper_steps is not None:
trim_taper_steps = -trim_taper_steps
self.logger.debug("taper steps to trim given")
if pdtype:
self.logger.debug("Return pandas dataframe.")
if trim_taper_steps:
self.logger.info(
"Trimming taper steps is currently not"
"possible when returning pd.DataFrame. "
"Do it manually insteaD."
)
out = st[st[shdr.type].isin(steptypes) & st[shdr.cycle].isin(cycle_numbers)]
return out
# if not pdtype, return a dict instead
# self.logger.debug("out as dict; out[cycle] = [s1,s2,...]")
# self.logger.debug("(same behaviour as find_step_numbers)")
# self.logger.debug("return dict of lists")
# self.logger.warning(
# "returning dict will be deprecated",
# )
out = dict()
self.logger.debug(f"return a dict")
self.logger.debug(f"dt 4: {time.time() - t0}")
for cycle in cycle_numbers:
steplist = []
for s in steptypes:
step = st[(st[shdr.type] == s) & (st[shdr.cycle] == cycle)][
shdr.step
].tolist()
for newstep in step[:trim_taper_steps]:
if newstep in steps_to_skip:
self.logger.debug(f"skipping step {newstep}")
else:
steplist.append(int(newstep))
if not steplist:
steplist = [0]
out[cycle] = steplist
self.logger.debug(f"dt tot: {time.time() - t0}")
return out
def load_step_specifications(self, file_name, short=False, dataset_number=None):
""" Load a table that contains step-type definitions.
This function loads a file containing a specification for each step or
for each (cycle_number, step_number) combinations if short==False. The
step_cycle specifications that are allowed are stored in the variable
cellreader.list_of_step_types.
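        Example:
            A sketch of a long-format specification file (the separator is
            taken from prms.Reader.sep; file name and values are made up):
            cycle;step;type
            1;1;charge
            1;2;rest
            >>> c.load_step_specifications("step_specs.csv")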
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
# if short:
# # the table only consists of steps (not cycle,step pairs) assuming
# # that the step numbers uniquely defines step type (this is true
# # for arbin at least).
# raise NotImplementedError
step_specs = pd.read_csv(file_name, sep=prms.Reader.sep)
if "step" not in step_specs.columns:
self.logger.info("Missing column: step")
raise IOError
if "type" not in step_specs.columns:
self.logger.info("Missing column: type")
raise IOError
if not short and "cycle" not in step_specs.columns:
self.logger.info("Missing column: cycle")
raise IOError
self.make_step_table(step_specifications=step_specs, short=short)
def _sort_data(self, dataset):
# TODO: [# index]
if self.headers_normal.data_point_txt in dataset.raw.columns:
dataset.raw = dataset.raw.sort_values(
self.headers_normal.data_point_txt
).reset_index()
return dataset
self.logger.debug("_sort_data: no datapoint header to sort by")
    def _ustep(self, n):
        # Build a "unique step" counter: incremented every time the step
        # number changes (used by make_step_table when all_steps=True).
        un = []
        c = 0
n = n.diff()
for i in n:
if i != 0:
c += 1
un.append(c)
self.logger.debug("created u-steps")
return un
def make_step_table(
self,
step_specifications=None,
short=False,
profiling=False,
all_steps=False,
add_c_rate=True,
skip_steps=None,
sort_rows=True,
dataset_number=None,
from_data_point=None,
):
""" Create a table (v.4) that contains summary information for each step.
This function creates a table containing information about the
different steps for each cycle and, based on that, decides what type of
step it is (e.g. charge) for each cycle.
The format of the steps is:
index: cycleno - stepno - sub-step-no - ustep
Time info (average, stdev, max, min, start, end, delta) -
Logging info (average, stdev, max, min, start, end, delta) -
Current info (average, stdev, max, min, start, end, delta) -
Voltage info (average, stdev, max, min, start, end, delta) -
Type (from pre-defined list) - SubType -
Info
Args:
step_specifications (pandas.DataFrame): step specifications
short (bool): step specifications in short format
profiling (bool): turn on profiling
all_steps (bool): investigate all steps including same steps within
one cycle (this is useful for e.g. GITT).
add_c_rate (bool): include a C-rate estimate in the steps
skip_steps (list of integers): list of step numbers that should not
be processed (future feature - not used yet).
sort_rows (bool): sort the rows after processing.
dataset_number: defaults to self.dataset_number
from_data_point (int): first data point to use
Returns:
None
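        Example:
            A minimal sketch (assumes raw data is already loaded; the shown
            column names follow the default step-table headers):
            >>> c.make_step_table()
            >>> c.cells[0].steps[["cycle", "step", "type"]].head()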
"""
# TODO: @jepe - include option for omitting steps
# TODO: @jepe - make it is possible to update only new data
time_00 = time.time()
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
if profiling:
print("PROFILING MAKE_STEP_TABLE".center(80, "="))
def first(x):
return x.iloc[0]
def last(x):
return x.iloc[-1]
def delta(x):
if x.iloc[0] == 0.0:
# starts from a zero value
difference = 100.0 * x.iloc[-1]
else:
difference = (x.iloc[-1] - x.iloc[0]) * 100 / abs(x.iloc[0])
return difference
nhdr = self.headers_normal
shdr = self.headers_step_table
if from_data_point is not None:
df = self.cells[dataset_number].raw.loc[
self.cells[dataset_number].raw[nhdr.data_point_txt] >= from_data_point
]
else:
df = self.cells[dataset_number].raw
# df[shdr.internal_resistance_change] = \
# df[nhdr.internal_resistance_txt].pct_change()
# selecting only the most important columns from raw:
keep = [
nhdr.data_point_txt,
nhdr.test_time_txt,
nhdr.step_time_txt,
nhdr.step_index_txt,
nhdr.cycle_index_txt,
nhdr.current_txt,
nhdr.voltage_txt,
nhdr.ref_voltage_txt,
nhdr.charge_capacity_txt,
nhdr.discharge_capacity_txt,
nhdr.internal_resistance_txt,
# "ir_pct_change"
]
# only use col-names that exist:
keep = [col for col in keep if col in df.columns]
df = df[keep]
# preparing for implementation of sub_steps (will come in the future):
df[nhdr.sub_step_index_txt] = 1
# using headers as defined in the internal_settings.py file
rename_dict = {
nhdr.cycle_index_txt: shdr.cycle,
nhdr.step_index_txt: shdr.step,
nhdr.sub_step_index_txt: shdr.sub_step,
nhdr.data_point_txt: shdr.point,
nhdr.test_time_txt: shdr.test_time,
nhdr.step_time_txt: shdr.step_time,
nhdr.current_txt: shdr.current,
nhdr.voltage_txt: shdr.voltage,
nhdr.charge_capacity_txt: shdr.charge,
nhdr.discharge_capacity_txt: shdr.discharge,
nhdr.internal_resistance_txt: shdr.internal_resistance,
}
df = df.rename(columns=rename_dict)
by = [shdr.cycle, shdr.step, shdr.sub_step]
if skip_steps is not None:
self.logger.debug(f"omitting steps {skip_steps}")
df = df.loc[~df[shdr.step].isin(skip_steps)]
if all_steps:
by.append(shdr.ustep)
df[shdr.ustep] = self._ustep(df[shdr.step])
self.logger.debug(f"groupby: {by}")
if profiling:
time_01 = time.time()
gf = df.groupby(by=by)
df_steps = gf.agg(
[np.mean, np.std, np.amin, np.amax, first, last, delta]
).rename(columns={"amin": "min", "amax": "max", "mean": "avr"})
# TODO: [#index]
df_steps = df_steps.reset_index()
if profiling:
print(f"*** groupby-agg: {time.time() - time_01} s")
time_01 = time.time()
# new cols
# column with C-rates:
if add_c_rate:
nom_cap = self.cells[dataset_number].nom_cap
mass = self.cells[dataset_number].mass
spec_conv_factor = self.get_converter_to_specific()
self.logger.debug(f"c-rate: nom_cap={nom_cap} spec_conv={spec_conv_factor}")
df_steps[shdr.rate_avr] = abs(
round(
df_steps.loc[:, (shdr.current, "avr")]
/ (nom_cap / spec_conv_factor),
2,
)
)
df_steps[shdr.type] = np.nan
df_steps[shdr.sub_type] = np.nan
df_steps[shdr.info] = np.nan
if step_specifications is None:
current_limit_value_hard = self.raw_limits["current_hard"]
current_limit_value_soft = self.raw_limits["current_soft"]
stable_current_limit_hard = self.raw_limits["stable_current_hard"]
stable_current_limit_soft = self.raw_limits["stable_current_soft"]
stable_voltage_limit_hard = self.raw_limits["stable_voltage_hard"]
stable_voltage_limit_soft = self.raw_limits["stable_voltage_soft"]
stable_charge_limit_hard = self.raw_limits["stable_charge_hard"]
stable_charge_limit_soft = self.raw_limits["stable_charge_soft"]
ir_change_limit = self.raw_limits["ir_change"]
mask_no_current_hard = (
df_steps.loc[:, (shdr.current, "max")].abs()
+ df_steps.loc[:, (shdr.current, "min")].abs()
) < current_limit_value_hard / 2
mask_voltage_down = (
df_steps.loc[:, (shdr.voltage, "delta")] < -stable_voltage_limit_hard
)
mask_voltage_up = (
df_steps.loc[:, (shdr.voltage, "delta")] > stable_voltage_limit_hard
)
mask_voltage_stable = (
df_steps.loc[:, (shdr.voltage, "delta")].abs()
< stable_voltage_limit_hard
)
mask_current_down = (
df_steps.loc[:, (shdr.current, "delta")] < -stable_current_limit_soft
)
mask_current_up = (
df_steps.loc[:, (shdr.current, "delta")] > stable_current_limit_soft
)
mask_current_negative = (
df_steps.loc[:, (shdr.current, "avr")] < -current_limit_value_hard
)
mask_current_positive = (
df_steps.loc[:, (shdr.current, "avr")] > current_limit_value_hard
)
mask_galvanostatic = (
df_steps.loc[:, (shdr.current, "delta")].abs()
< stable_current_limit_soft
)
mask_charge_changed = (
df_steps.loc[:, (shdr.charge, "delta")].abs() > stable_charge_limit_hard
)
mask_discharge_changed = (
df_steps.loc[:, (shdr.discharge, "delta")].abs()
> stable_charge_limit_hard
)
mask_no_change = (
(df_steps.loc[:, (shdr.voltage, "delta")] == 0)
& (df_steps.loc[:, (shdr.current, "delta")] == 0)
& (df_steps.loc[:, (shdr.charge, "delta")] == 0)
& (df_steps.loc[:, (shdr.discharge, "delta")] == 0)
)
# TODO: make an option for only checking unique steps
# e.g.
# df_x = df_steps.where.steps.are.unique
self.logger.debug("masking and labelling steps")
df_steps.loc[mask_no_current_hard & mask_voltage_stable, shdr.type] = "rest"
df_steps.loc[
mask_no_current_hard & mask_voltage_up, shdr.type
] = "ocvrlx_up"
df_steps.loc[
mask_no_current_hard & mask_voltage_down, shdr.type
] = "ocvrlx_down"
df_steps.loc[
mask_discharge_changed & mask_current_negative, shdr.type
] = "discharge"
df_steps.loc[
mask_charge_changed & mask_current_positive, shdr.type
] = "charge"
df_steps.loc[
mask_voltage_stable & mask_current_negative & mask_current_down,
shdr.type,
] = "cv_discharge"
df_steps.loc[
mask_voltage_stable & mask_current_positive & mask_current_down,
shdr.type,
] = "cv_charge"
# --- internal resistance ----
df_steps.loc[mask_no_change, shdr.type] = "ir"
# assumes that IR is stored in just one row
# --- sub-step-txt -----------
df_steps[shdr.sub_type] = None
# --- CV steps ----
# "voltametry_charge"
# mask_charge_changed
# mask_voltage_up
# (could also include abs-delta-cumsum current)
# "voltametry_discharge"
# mask_discharge_changed
# mask_voltage_down
if profiling:
print(f"*** masking: {time.time() - time_01} s")
time_01 = time.time()
else:
self.logger.debug("parsing custom step definition")
if not short:
self.logger.debug("using long format (cycle,step)")
for row in step_specifications.itertuples():
df_steps.loc[
(df_steps[shdr.step] == row.step)
& (df_steps[shdr.cycle] == row.cycle),
"type",
] = row.type
df_steps.loc[
(df_steps[shdr.step] == row.step)
& (df_steps[shdr.cycle] == row.cycle),
"info",
] = row.info
else:
self.logger.debug("using short format (step)")
for row in step_specifications.itertuples():
df_steps.loc[df_steps[shdr.step] == row.step, "type"] = row.type
df_steps.loc[df_steps[shdr.step] == row.step, "info"] = row.info
if profiling:
print(f"*** introspect: {time.time() - time_01} s")
        # check if all the steps got categorized
self.logger.debug("looking for un-categorized steps")
empty_rows = df_steps.loc[df_steps[shdr.type].isnull()]
if not empty_rows.empty:
logging.warning(
f"found {len(empty_rows)}"
f":{len(df_steps)} non-categorized steps "
f"(please, check your raw-limits)"
)
# logging.debug(empty_rows)
        # flatten (possibly remove in the future,
        # maybe we will implement multi-indexed tables)
self.logger.debug(f"flatten columns")
if profiling:
time_01 = time.time()
flat_cols = []
for col in df_steps.columns:
if isinstance(col, tuple):
if col[-1]:
col = "_".join(col)
else:
col = col[0]
flat_cols.append(col)
df_steps.columns = flat_cols
if sort_rows:
self.logger.debug("sorting the step rows")
# TODO: [#index]
df_steps = df_steps.sort_values(by=shdr.test_time + "_first").reset_index()
if profiling:
print(f"*** flattening: {time.time() - time_01} s")
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
if from_data_point is not None:
return df_steps
else:
self.cells[dataset_number].steps = df_steps
return self
def select_steps(self, step_dict, append_df=False, dataset_number=None):
"""Select steps (not documented yet)."""
raise DeprecatedFeature
def _select_step(self, cycle, step, dataset_number=None):
# TODO: @jepe - insert sub_step here
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
test = self.cells[dataset_number]
# check if columns exist
c_txt = self.headers_normal.cycle_index_txt
s_txt = self.headers_normal.step_index_txt
y_txt = self.headers_normal.voltage_txt
x_txt = self.headers_normal.discharge_capacity_txt # jepe fix
# no_cycles=np.amax(test.raw[c_txt])
# print d.columns
if not any(test.raw.columns == c_txt):
self.logger.info("ERROR - cannot find %s" % c_txt)
sys.exit(-1)
if not any(test.raw.columns == s_txt):
self.logger.info("ERROR - cannot find %s" % s_txt)
sys.exit(-1)
# self.logger.debug(f"selecting cycle {cycle} step {step}")
v = test.raw[(test.raw[c_txt] == cycle) & (test.raw[s_txt] == step)]
if self.is_empty(v):
self.logger.debug("empty dataframe")
return None
else:
return v
def populate_step_dict(self, step, dataset_number=None):
"""Returns a dict with cycle numbers as keys
and corresponding steps (list) as values."""
raise DeprecatedFeature
def _export_cycles(
self,
dataset_number,
setname=None,
sep=None,
outname=None,
shifted=False,
method=None,
shift=0.0,
last_cycle=None,
):
# export voltage - capacity curves to .csv file
self.logger.debug("START exporing cycles")
time_00 = time.time()
lastname = "_cycles.csv"
if sep is None:
sep = self.sep
if outname is None:
outname = setname + lastname
self.logger.debug(f"outname: {outname}")
list_of_cycles = self.get_cycle_numbers(dataset_number=dataset_number)
self.logger.debug(f"you have {len(list_of_cycles)} cycles")
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
self.logger.debug(f"only processing up to cycle {last_cycle}")
self.logger.debug(f"you have {len(list_of_cycles)}" f"cycles to process")
out_data = []
c = None
if not method:
method = "back-and-forth"
if shifted:
method = "back-and-forth"
shift = 0.0
_last = 0.0
self.logger.debug(f"number of cycles: {len(list_of_cycles)}")
for cycle in list_of_cycles:
try:
if shifted and c is not None:
shift = _last
# print(f"shifted = {shift}, first={_first}")
df = self.get_cap(
cycle, dataset_number=dataset_number, method=method, shift=shift
)
if df.empty:
self.logger.debug("NoneType from get_cap")
else:
c = df["capacity"]
v = df["voltage"]
_last = c.iat[-1]
_first = c.iat[0]
c = c.tolist()
v = v.tolist()
header_x = "cap cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
c.insert(0, header_x)
v.insert(0, header_y)
out_data.append(c)
out_data.append(v)
# txt = "extracted cycle %i" % cycle
# self.logger.debug(txt)
except IndexError as e:
txt = "Could not extract cycle %i" % cycle
self.logger.info(txt)
self.logger.debug(e)
# Saving cycles in one .csv file (x,y,x,y,x,y...)
# print "saving the file with delimiter '%s' " % (sep)
self.logger.debug("writing cycles to file")
with open(outname, "w", newline="") as f:
writer = csv.writer(f, delimiter=sep)
writer.writerows(itertools.zip_longest(*out_data))
            # star (or asterisk) means transpose (writing cols instead of rows)
self.logger.info(f"The file {outname} was created")
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
self.logger.debug("END exporting cycles")
# TODO: remove this
def _export_cycles_old(
self,
dataset_number,
setname=None,
sep=None,
outname=None,
shifted=False,
method=None,
shift=0.0,
last_cycle=None,
):
# export voltage - capacity curves to .csv file
self.logger.debug("*** OLD EXPORT-CYCLES METHOD***")
lastname = "_cycles.csv"
if sep is None:
sep = self.sep
if outname is None:
outname = setname + lastname
list_of_cycles = self.get_cycle_numbers(dataset_number=dataset_number)
self.logger.debug(f"you have {len(list_of_cycles)} cycles")
if last_cycle is not None:
list_of_cycles = [c for c in list_of_cycles if c <= int(last_cycle)]
self.logger.debug(f"only processing up to cycle {last_cycle}")
self.logger.debug(f"you have {len(list_of_cycles)}" f"cycles to process")
out_data = []
c = None
if not method:
method = "back-and-forth"
if shifted:
method = "back-and-forth"
shift = 0.0
_last = 0.0
for cycle in list_of_cycles:
try:
if shifted and c is not None:
shift = _last
# print(f"shifted = {shift}, first={_first}")
c, v = self.get_cap(
cycle, dataset_number=dataset_number, method=method, shift=shift
)
if c is None:
self.logger.debug("NoneType from get_cap")
else:
_last = c.iat[-1]
_first = c.iat[0]
c = c.tolist()
v = v.tolist()
header_x = "cap cycle_no %i" % cycle
header_y = "voltage cycle_no %i" % cycle
c.insert(0, header_x)
v.insert(0, header_y)
out_data.append(c)
out_data.append(v)
# txt = "extracted cycle %i" % cycle
# self.logger.debug(txt)
except IndexError as e:
txt = "Could not extract cycle %i" % cycle
self.logger.info(txt)
self.logger.debug(e)
# Saving cycles in one .csv file (x,y,x,y,x,y...)
# print "saving the file with delimiter '%s' " % (sep)
self.logger.debug("writing cycles to file")
with open(outname, "w", newline="") as f:
writer = csv.writer(f, delimiter=sep)
writer.writerows(itertools.zip_longest(*out_data))
            # star (or asterisk) means transpose (writing cols instead of rows)
self.logger.info(f"The file {outname} was created")
def _export_normal(self, data, setname=None, sep=None, outname=None):
time_00 = time.time()
lastname = "_normal.csv"
if sep is None:
sep = self.sep
if outname is None:
outname = setname + lastname
txt = outname
try:
data.raw.to_csv(outname, sep=sep)
txt += " OK"
except Exception as e:
txt += " Could not save it!"
self.logger.debug(e)
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.info(txt)
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
def _export_stats(self, data, setname=None, sep=None, outname=None):
time_00 = time.time()
lastname = "_stats.csv"
if sep is None:
sep = self.sep
if outname is None:
outname = setname + lastname
txt = outname
try:
data.summary.to_csv(outname, sep=sep)
txt += " OK"
except Exception as e:
txt += " Could not save it!"
self.logger.debug(e)
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.info(txt)
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
def _export_steptable(self, data, setname=None, sep=None, outname=None):
time_00 = time.time()
lastname = "_steps.csv"
if sep is None:
sep = self.sep
if outname is None:
outname = setname + lastname
txt = outname
try:
data.steps.to_csv(outname, sep=sep)
txt += " OK"
except Exception as e:
txt += " Could not save it!"
self.logger.debug(e)
warnings.warn(f"Unhandled exception raised: {e}")
self.logger.info(txt)
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
def to_csv(
self,
datadir=None,
sep=None,
cycles=False,
raw=True,
summary=True,
shifted=False,
method=None,
shift=0.0,
last_cycle=None,
):
"""Saves the data as .csv file(s).
Args:
datadir: folder where to save the data (uses current folder if not
given).
sep: the separator to use in the csv file
(defaults to CellpyData.sep).
cycles: (bool) export voltage-capacity curves if True.
raw: (bool) export raw-data if True.
summary: (bool) export summary if True.
shifted (bool): export with cumulated shift.
method (string): how the curves are given
"back-and-forth" - standard back and forth; discharge
(or charge) reversed from where charge (or
discharge) ends.
"forth" - discharge (or charge) continues along x-axis.
"forth-and-forth" - discharge (or charge) also starts at 0 (or
shift if not shift=0.0)
shift: start-value for charge (or discharge)
last_cycle: process only up to this cycle (if not None).
Returns: Nothing
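        Example:
            A minimal sketch (the folder name is hypothetical):
            >>> c.to_csv(datadir="processed_data", cycles=True, raw=False)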
"""
if sep is None:
sep = self.sep
self.logger.debug("saving to csv")
dataset_number = -1
for data in self.cells:
dataset_number += 1
if not self._is_not_empty_dataset(data):
self.logger.info("to_csv -")
self.logger.info("empty test [%i]" % dataset_number)
self.logger.info("not saved!")
else:
if isinstance(data.loaded_from, (list, tuple)):
txt = "merged file"
txt += "using first file as basename"
self.logger.debug(txt)
no_merged_sets = len(data.loaded_from)
no_merged_sets = "_merged_" + str(no_merged_sets).zfill(3)
filename = data.loaded_from[0]
else:
filename = data.loaded_from
no_merged_sets = ""
firstname, extension = os.path.splitext(filename)
firstname += no_merged_sets
if datadir:
firstname = os.path.join(datadir, os.path.basename(firstname))
if raw:
outname_normal = firstname + "_normal.csv"
self._export_normal(data, outname=outname_normal, sep=sep)
if data.steps_made is True:
outname_steps = firstname + "_steps.csv"
self._export_steptable(data, outname=outname_steps, sep=sep)
else:
self.logger.debug("steps_made is not True")
if summary:
outname_stats = firstname + "_stats.csv"
self._export_stats(data, outname=outname_stats, sep=sep)
if cycles:
outname_cycles = firstname + "_cycles.csv"
self._export_cycles(
outname=outname_cycles,
dataset_number=dataset_number,
sep=sep,
shifted=shifted,
method=method,
shift=shift,
last_cycle=last_cycle,
)
def save(
self,
filename,
dataset_number=None,
force=False,
overwrite=True,
extension="h5",
ensure_step_table=None,
):
"""Save the data structure to cellpy-format.
Args:
filename: (str or pathlib.Path) the name you want to give the file
dataset_number: (int) if you have several datasets, chose the one
you want (probably leave this untouched)
force: (bool) save a file even if the summary is not made yet
(not recommended)
overwrite: (bool) save the new version of the file even if old one
exists.
extension: (str) filename extension.
ensure_step_table: (bool) make step-table if missing.
Returns: Nothing at all.
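        Example:
            A minimal sketch (the file name is hypothetical; the summary and
            step-table should already be made, or use force=True):
            >>> c.save("20180101_cell01_cc_01.h5")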
"""
self.logger.debug(f"Trying to save cellpy-file to {filename}")
self.logger.info(f" -> {filename}")
if ensure_step_table is None:
ensure_step_table = self.ensure_step_table
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self.logger.info("Saving test failed!")
self._report_empty_dataset()
return
test = self.get_cell(dataset_number)
summary_made = test.summary_made
if not summary_made and not force:
self.logger.info(
"You should not save datasets without making a summary first!"
)
self.logger.info("If you really want to do it, use save with force=True")
return
step_table_made = test.steps_made
if not step_table_made and not force and not ensure_step_table:
self.logger.info(
"You should not save datasets without making a step-table first!"
)
self.logger.info("If you really want to do it, use save with force=True")
return
if not os.path.splitext(filename)[-1]:
outfile_all = filename + "." + extension
else:
outfile_all = filename
if os.path.isfile(outfile_all):
self.logger.debug("Outfile exists")
if overwrite:
self.logger.debug("overwrite = True")
try:
os.remove(outfile_all)
except PermissionError as e:
self.logger.info("Could not over write old file")
self.logger.info(e)
return
else:
self.logger.info("Save (hdf5): file exist - did not save", end=" ")
self.logger.info(outfile_all)
return
if ensure_step_table:
self.logger.debug("ensure_step_table is on")
if not test.steps_made:
self.logger.debug("save: creating step table")
self.make_step_table(dataset_number=dataset_number)
# This method can probably be updated using pandas transpose trick
self.logger.debug("trying to make infotable")
infotbl, fidtbl = self._create_infotable(dataset_number=dataset_number)
root = prms._cellpyfile_root
if CELLPY_FILE_VERSION > 4:
raw_dir = prms._cellpyfile_raw
step_dir = prms._cellpyfile_step
summary_dir = prms._cellpyfile_summary
meta_dir = "/info"
fid_dir = prms._cellpyfile_fid
else:
raw_dir = "/raw"
step_dir = "/step_table"
summary_dir = "/dfsummary"
meta_dir = "/info"
fid_dir = "/fidtable"
self.logger.debug("trying to save to hdf5")
txt = "\nHDF5 file: %s" % outfile_all
self.logger.debug(txt)
warnings.simplefilter("ignore", PerformanceWarning)
try:
store = pd.HDFStore(
outfile_all,
complib=prms._cellpyfile_complib,
complevel=prms._cellpyfile_complevel,
)
self.logger.debug("trying to put raw data")
self.logger.debug(" - lets set Data_Point as index")
hdr_data_point = self.headers_normal.data_point_txt
if test.raw.index.name != hdr_data_point:
test.raw = test.raw.set_index(hdr_data_point, drop=False)
store.put(root + raw_dir, test.raw, format=prms._cellpyfile_raw_format)
self.logger.debug(" raw -> hdf5 OK")
self.logger.debug("trying to put summary")
store.put(
root + summary_dir, test.summary, format=prms._cellpyfile_summary_format
)
self.logger.debug(" summary -> hdf5 OK")
self.logger.debug("trying to put meta data")
store.put(
root + meta_dir, infotbl, format=prms._cellpyfile_infotable_format
)
self.logger.debug(" meta -> hdf5 OK")
self.logger.debug("trying to put fidtable")
store.put(root + fid_dir, fidtbl, format=prms._cellpyfile_fidtable_format)
self.logger.debug(" fid -> hdf5 OK")
self.logger.debug("trying to put step")
try:
store.put(
root + step_dir, test.steps, format=prms._cellpyfile_stepdata_format
)
self.logger.debug(" step -> hdf5 OK")
except TypeError:
test = self._fix_dtype_step_table(test)
store.put(
root + step_dir, test.steps, format=prms._cellpyfile_stepdata_format
)
self.logger.debug(" fixed step -> hdf5 OK")
# creating indexes
# hdr_data_point = self.headers_normal.data_point_txt
# hdr_cycle_steptable = self.headers_step_table.cycle
# hdr_cycle_normal = self.headers_normal.cycle_index_txt
# store.create_table_index(root + "/raw", columns=[hdr_data_point],
# optlevel=9, kind='full')
finally:
store.close()
self.logger.debug(" all -> hdf5 OK")
warnings.simplefilter("default", PerformanceWarning)
# del store
# --------------helper-functions--------------------------------------------
def _fix_dtype_step_table(self, dataset):
hst = get_headers_step_table()
try:
cols = dataset.steps.columns
except AttributeError:
self.logger.info("Could not extract columns from steps")
return
for col in cols:
if col not in [hst.cycle, hst.sub_step, hst.info]:
dataset.steps[col] = dataset.steps[col].apply(pd.to_numeric)
else:
dataset.steps[col] = dataset.steps[col].astype("str")
return dataset
# TODO: check if this is useful and if it is rename, if not delete
def _cap_mod_summary(self, summary, capacity_modifier="reset"):
# modifies the summary table
time_00 = time.time()
discharge_title = self.headers_normal.discharge_capacity_txt
charge_title = self.headers_normal.charge_capacity_txt
chargecap = 0.0
dischargecap = 0.0
# TODO: @jepe - use pd.loc[row,column]
if capacity_modifier == "reset":
for index, row in summary.iterrows():
dischargecap_2 = row[discharge_title]
summary.loc[index, discharge_title] = dischargecap_2 - dischargecap
dischargecap = dischargecap_2
chargecap_2 = row[charge_title]
summary.loc[index, charge_title] = chargecap_2 - chargecap
chargecap = chargecap_2
else:
raise NotImplementedError
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
return summary
# TODO: check if this is useful and if it is rename, if not delete
def _cap_mod_normal(
self, dataset_number=None, capacity_modifier="reset", allctypes=True
):
# modifies the normal table
time_00 = time.time()
self.logger.debug("Not properly checked yet! Use with caution!")
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
step_index_header = self.headers_normal.step_index_txt
discharge_index_header = self.headers_normal.discharge_capacity_txt
discharge_energy_index_header = self.headers_normal.discharge_energy_txt
charge_index_header = self.headers_normal.charge_capacity_txt
charge_energy_index_header = self.headers_normal.charge_energy_txt
raw = self.cells[dataset_number].raw
chargecap = 0.0
dischargecap = 0.0
if capacity_modifier == "reset":
# discharge cycles
no_cycles = np.amax(raw[cycle_index_header])
for j in range(1, no_cycles + 1):
cap_type = "discharge"
e_header = discharge_energy_index_header
cap_header = discharge_index_header
discharge_cycles = self.get_step_numbers(
steptype=cap_type,
allctypes=allctypes,
cycle_number=j,
dataset_number=dataset_number,
)
steps = discharge_cycles[j]
txt = "Cycle %i (discharge): " % j
self.logger.debug(txt)
# TODO: @jepe - use pd.loc[row,column] e.g. pd.loc[:,"charge_cap"]
# for col or pd.loc[(pd.["step"]==1),"x"]
selection = (raw[cycle_index_header] == j) & (
raw[step_index_header].isin(steps)
)
c0 = raw[selection].iloc[0][cap_header]
e0 = raw[selection].iloc[0][e_header]
raw.loc[selection, cap_header] = raw.loc[selection, cap_header] - c0
raw.loc[selection, e_header] = raw.loc[selection, e_header] - e0
cap_type = "charge"
e_header = charge_energy_index_header
cap_header = charge_index_header
charge_cycles = self.get_step_numbers(
steptype=cap_type,
allctypes=allctypes,
cycle_number=j,
dataset_number=dataset_number,
)
steps = charge_cycles[j]
txt = "Cycle %i (charge): " % j
self.logger.debug(txt)
selection = (raw[cycle_index_header] == j) & (
raw[step_index_header].isin(steps)
)
if any(selection):
c0 = raw[selection].iloc[0][cap_header]
e0 = raw[selection].iloc[0][e_header]
raw.loc[selection, cap_header] = raw.loc[selection, cap_header] - c0
raw.loc[selection, e_header] = raw.loc[selection, e_header] - e0
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
def get_number_of_tests(self):
return self.number_of_datasets
def get_mass(self, set_number=None):
set_number = self._validate_dataset_number(set_number)
if set_number is None:
self._report_empty_dataset()
return
if not self.cells[set_number].mass_given:
self.logger.info("No mass")
return self.cells[set_number].mass
def get_cell(self, n=0):
# TODO: remove me
return self.cells[n]
def sget_voltage(self, cycle, step, set_number=None):
"""Returns voltage for cycle, step.
Convenience function; same as issuing
raw[(raw[cycle_index_header] == cycle) &
(raw[step_index_header] == step)][voltage_header]
Args:
cycle: cycle number
step: step number
set_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty
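        Example:
            A minimal sketch (cycle and step numbers are made up):
            >>> v = c.sget_voltage(cycle=3, step=5)
            >>> v.mean()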
"""
time_00 = time.time()
set_number = self._validate_dataset_number(set_number)
if set_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
voltage_header = self.headers_normal.voltage_txt
step_index_header = self.headers_normal.step_index_txt
test = self.cells[set_number].raw
if isinstance(step, (list, tuple)):
            warnings.warn(f"The variable step is a list. Should be an integer. {step}")
step = step[0]
c = test[
(test[cycle_index_header] == cycle) & (test[step_index_header] == step)
]
self.logger.debug(f"(dt: {(time.time() - time_00):4.2f}s)")
if not self.is_empty(c):
v = c[voltage_header]
return v
else:
return None
# TODO: make this
def sget_current(self, cycle, step, set_number=None):
raise NotImplementedError
def get_voltage(self, cycle=None, dataset_number=None, full=True):
"""Returns voltage (in V).
Args:
cycle: cycle number (all cycles if None)
dataset_number: first dataset if None
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
            pandas.Series (or list of pandas.Series if cycle=None or full=False)
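        Example:
            A minimal sketch:
            >>> v3 = c.get_voltage(cycle=3)
            >>> v_all = c.get_voltage()  # full pandas.Series for all cycles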
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
voltage_header = self.headers_normal.voltage_txt
# step_index_header = self.headers_normal.step_index_txt
test = self.cells[dataset_number].raw
if cycle:
self.logger.debug("getting voltage curve for cycle")
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[voltage_header]
return v
else:
if not full:
self.logger.debug("getting list of voltage-curves for all cycles")
v = []
no_cycles = np.amax(test[cycle_index_header])
for j in range(1, no_cycles + 1):
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[voltage_header])
else:
self.logger.debug("getting frame of all voltage-curves")
v = test[voltage_header]
return v
def get_current(self, cycle=None, dataset_number=None, full=True):
"""Returns current (in mA).
Args:
cycle: cycle number (all cycles if None)
dataset_number: first dataset if None
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
            pandas.Series (or list of pandas.Series if cycle=None or full=False)
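        Example:
            A minimal sketch:
            >>> i3 = c.get_current(cycle=3)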
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
current_header = self.headers_normal.current_txt
# step_index_header = self.headers_normal.step_index_txt
test = self.cells[dataset_number].raw
if cycle:
self.logger.debug(f"getting current for cycle {cycle}")
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[current_header]
return v
else:
if not full:
self.logger.debug("getting a list of current-curves for all cycles")
v = []
no_cycles = np.amax(test[cycle_index_header])
for j in range(1, no_cycles + 1):
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[current_header])
else:
self.logger.debug("getting all current-curves ")
v = test[current_header]
return v
def sget_steptime(self, cycle, step, dataset_number=None):
"""Returns step time for cycle, step.
Convenience function; same as issuing
raw[(raw[cycle_index_header] == cycle) &
(raw[step_index_header] == step)][step_time_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series or None if empty
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
step_time_header = self.headers_normal.step_time_txt
step_index_header = self.headers_normal.step_index_txt
test = self.cells[dataset_number].raw
if isinstance(step, (list, tuple)):
warnings.warn(f"The variable step is a list. Should be an integer. {step}")
step = step[0]
c = test.loc[
(test[cycle_index_header] == cycle) & (test[step_index_header] == step), :
]
if not self.is_empty(c):
t = c[step_time_header]
return t
else:
return None
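    # Hedged usage sketch (illustration only, assuming `cell_data` is a loaded
    # instance of this class): the sget_* helpers select on both cycle and step.
    #
    #     t = cell_data.sget_steptime(cycle=3, step=2)    # step-time Series, or None if empty
    #     ts = cell_data.sget_timestamp(cycle=3, step=2)  # analogous helper defined below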
def sget_timestamp(self, cycle, step, dataset_number=None):
"""Returns timestamp for cycle, step.
Convenience function; same as issuing
raw[(raw[cycle_index_header] == cycle) &
(raw[step_index_header] == step)][timestamp_header]
Args:
cycle: cycle number
step: step number
dataset_number: the dataset number (automatic selection if None)
Returns:
pandas.Series
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
timestamp_header = self.headers_normal.test_time_txt
step_index_header = self.headers_normal.step_index_txt
test = self.cells[dataset_number].raw
if isinstance(step, (list, tuple)):
            warnings.warn(f"The variable step is a list. Should be an integer. {step}")
step = step[0]
c = test[
(test[cycle_index_header] == cycle) & (test[step_index_header] == step)
]
if not self.is_empty(c):
t = c[timestamp_header]
return t
else:
            return pd.Series(dtype="float64")
def get_datetime(self, cycle=None, dataset_number=None, full=True):
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
datetime_header = self.headers_normal.datetime_txt
        v = pd.Series(dtype="float64")
test = self.cells[dataset_number].raw
if cycle:
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[datetime_header]
else:
if not full:
self.logger.debug("getting datetime for all cycles")
v = []
cycles = self.get_cycle_numbers()
for j in cycles:
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[datetime_header])
else:
self.logger.debug("returning full datetime col")
v = test[datetime_header]
return v
def get_timestamp(
self, cycle=None, dataset_number=None, in_minutes=False, full=True
):
"""Returns timestamps (in sec or minutes (if in_minutes==True)).
Args:
cycle: cycle number (all if None)
dataset_number: first dataset if None
in_minutes: return values in minutes instead of seconds if True
full: valid only for cycle=None (i.e. all cycles), returns the full
pandas.Series if True, else a list of pandas.Series
Returns:
            pandas.Series (or list of pandas.Series if cycle=None and full=False)
"""
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cycle_index_header = self.headers_normal.cycle_index_txt
timestamp_header = self.headers_normal.test_time_txt
        v = pd.Series(dtype="float64")
test = self.cells[dataset_number].raw
if cycle:
c = test[(test[cycle_index_header] == cycle)]
if not self.is_empty(c):
v = c[timestamp_header]
else:
if not full:
self.logger.debug("getting timestapm for all cycles")
v = []
cycles = self.get_cycle_numbers()
for j in cycles:
txt = "Cycle %i: " % j
self.logger.debug(txt)
c = test[(test[cycle_index_header] == j)]
v.append(c[timestamp_header])
else:
self.logger.debug("returning full timestamp col")
v = test[timestamp_header]
        if in_minutes and v is not None:
            v /= 60.0
return v
def get_dcap(self, cycle=None, dataset_number=None, **kwargs):
"""Returns discharge_capacity (in mAh/g), and voltage."""
# TODO - jepe: should return a DataFrame as default
# but remark that we then have to update e.g. batch_helpers.py
# TODO - jepe: change needed: should not use
# dataset_number as parameter
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
dc, v = self._get_cap(cycle, dataset_number, "discharge", **kwargs)
return dc, v
def get_ccap(self, cycle=None, dataset_number=None, **kwargs):
"""Returns charge_capacity (in mAh/g), and voltage."""
# TODO - jepe: should return a DataFrame as default
# but remark that we then have to update e.g. batch_helpers.py
# TODO - jepe: change needed: should not use
# dataset_number as parameter
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
cc, v = self._get_cap(cycle, dataset_number, "charge", **kwargs)
return cc, v
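    # Hedged usage sketch (illustration only): get_ccap/get_dcap return a
    # (capacity, voltage) pair per cycle, which can be plotted directly.
    # `cell_data` and `plt` (matplotlib.pyplot) are assumed to exist here.
    #
    #     cc, cv = cell_data.get_ccap(cycle=5)
    #     dc, dv = cell_data.get_dcap(cycle=5)
    #     plt.plot(cc, cv, label="charge")
    #     plt.plot(dc, dv, label="discharge")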
def get_cap(
self,
cycle=None,
dataset_number=None,
method="back-and-forth",
shift=0.0,
categorical_column=False,
label_cycle_number=False,
split=False,
interpolated=False,
dx=0.1,
number_of_points=None,
ignore_errors=True,
dynamic=False,
inter_cycle_shift=True,
**kwargs,
):
"""Gets the capacity for the run.
Args:
cycle (int): cycle number.
method (string): how the curves are given
"back-and-forth" - standard back and forth; discharge
(or charge) reversed from where charge (or discharge) ends.
"forth" - discharge (or charge) continues along x-axis.
"forth-and-forth" - discharge (or charge) also starts at 0
(or shift if not shift=0.0)
shift: start-value for charge (or discharge) (typically used when
plotting shifted-capacity).
categorical_column: add a categorical column showing if it is
charge or discharge.
dataset_number (int): test number (default first)
(usually not used).
label_cycle_number (bool): add column for cycle number
(tidy format).
split (bool): return a list of c and v instead of the default
that is to return them combined in a DataFrame. This is only
possible for some specific combinations of options (neither
categorical_column=True or label_cycle_number=True are
allowed).
interpolated (bool): set to True if you would like to get
interpolated data (typically if you want to save disk space
or memory). Defaults to False.
dx (float): the step used when interpolating.
number_of_points (int): number of points to use (over-rides dx)
for interpolation (i.e. the length of the interpolated data).
ignore_errors (bool): don't break out of loop if an error occurs.
dynamic: for dynamic retrieving data from cellpy-file.
[NOT IMPLEMENTED YET]
inter_cycle_shift (bool): cumulative shifts between consecutive
cycles. Defaults to True.
Returns:
pandas.DataFrame ((cycle) voltage, capacity, (direction (-1, 1)))
unless split is explicitly set to True. Then it returns a tuple
with capacity (mAh/g) and voltage.
"""
# TODO: add option for adding a nan between charge and discharge
dataset_number = self._validate_dataset_number(dataset_number)
if dataset_number is None:
self._report_empty_dataset()
return
# if cycle is not given, then this function should
# iterate through cycles
if cycle is None:
cycle = self.get_cycle_numbers()
        if not isinstance(cycle, (collections.abc.Iterable,)):
cycle = [cycle]
if split and not (categorical_column or label_cycle_number):
return_dataframe = False
else:
return_dataframe = True
method = method.lower()
if method not in ["back-and-forth", "forth", "forth-and-forth"]:
warnings.warn(
f"method '{method}' is not a valid option "
f"- setting to 'back-and-forth'"
)
method = "back-and-forth"
capacity = None
voltage = None
cycle_df = pd.DataFrame()
initial = True
for current_cycle in cycle:
error = False
# self.logger.debug(f"processing cycle {current_cycle}")
try:
cc, cv = self.get_ccap(current_cycle, dataset_number, **kwargs)
dc, dv = self.get_dcap(current_cycle, dataset_number, **kwargs)
except NullData as e:
error = True
self.logger.debug(e)
if not ignore_errors:
self.logger.debug("breaking out of loop")
break
if not error:
if cc.empty:
self.logger.debug("get_ccap returns empty cc Series")
if dc.empty:
self.logger.debug("get_ccap returns empty dc Series")
if initial:
# self.logger.debug("(initial cycle)")
prev_end = shift
initial = False
if self._cycle_mode == "anode":
_first_step_c = dc
_first_step_v = dv
_last_step_c = cc
_last_step_v = cv
else:
_first_step_c = cc
_first_step_v = cv
_last_step_c = dc
_last_step_v = dv
if method == "back-and-forth":
_last = np.amax(_first_step_c)
# should change amax to last point
_first = None
_new_first = None
if not inter_cycle_shift:
prev_end = 0.0
if _last_step_c is not None:
_last_step_c = _last - _last_step_c + prev_end
else:
self.logger.debug("no last charge step found")
if _first_step_c is not None:
_first = _first_step_c.iat[0]
_first_step_c += prev_end
_new_first = _first_step_c.iat[0]
else:
self.logger.debug("probably empty (_first_step_c is None)")
# self.logger.debug(f"current shifts used: prev_end = {prev_end}")
# self.logger.debug(f"shifting start from {_first} to "
# f"{_new_first}")
prev_end = np.amin(_last_step_c)
# should change amin to last point
elif method == "forth":
_last = np.amax(_first_step_c)
# should change amax to last point
if _last_step_c is not None:
_last_step_c += _last + prev_end
else:
self.logger.debug("no last charge step found")
if _first_step_c is not None:
_first_step_c += prev_end
else:
self.logger.debug("no first charge step found")
prev_end = np.amax(_last_step_c)
                    # should change amax to last point
elif method == "forth-and-forth":
if _last_step_c is not None:
_last_step_c += shift
else:
self.logger.debug("no last charge step found")
if _first_step_c is not None:
_first_step_c += shift
else:
self.logger.debug("no first charge step found")
if return_dataframe:
try:
_first_df = pd.DataFrame(
{
"voltage": _first_step_v.values,
"capacity": _first_step_c.values,
}
)
if interpolated:
_first_df = interpolate_y_on_x(
_first_df,
y="capacity",
x="voltage",
dx=dx,
number_of_points=number_of_points,
direction=-1,
)
if categorical_column:
_first_df["direction"] = -1
_last_df = pd.DataFrame(
{
"voltage": _last_step_v.values,
"capacity": _last_step_c.values,
}
)
if interpolated:
_last_df = interpolate_y_on_x(
_last_df,
y="capacity",
x="voltage",
dx=dx,
number_of_points=number_of_points,
direction=1,
)
if categorical_column:
_last_df["direction"] = 1
except AttributeError:
self.logger.info(f"Could not extract cycle {current_cycle}")
else:
c = pd.concat([_first_df, _last_df], axis=0)
if label_cycle_number:
c.insert(0, "cycle", current_cycle)
# c["cycle"] = current_cycle
# c = c[["cycle", "voltage", "capacity", "direction"]]
if cycle_df.empty:
cycle_df = c
else:
                            cycle_df = pd.concat([cycle_df, c], axis=0)
import numpy as np
import pandas as pd
import pickle
import scipy.sparse
import tensorflow as tf
from typing import Union, List
import os
from tcellmatch.models.models_ffn import ModelBiRnn, ModelSa, ModelConv, ModelLinear, ModelNoseq
from tcellmatch.models.model_inception import ModelInception
from tcellmatch.estimators.additional_metrics import pr_global, pr_label, auc_global, auc_label, \
deviation_global, deviation_label
from tcellmatch.estimators.estimator_base import EstimatorBase
from tcellmatch.estimators.losses import WeightedBinaryCrossentropy
from tcellmatch.estimators.metrics import custom_r2, custom_logr2
class EstimatorFfn(EstimatorBase):
model: tf.keras.Model
model_hyperparam: dict
train_hyperparam: dict
history: dict
evaluations: dict
evaluations_custom: dict
def __init__(
self,
model_name=None
):
EstimatorBase.__init__(self=self)
self.model_name = model_name
self.model_hyperparam = None
self.train_hyperparam = None
self.wbce_weight = None
# Training and evaluation output containers.
self.history = None
self.results_test = None
self.predictions = None
self.evaluations = None
self.evaluations_custom = None
def _out_activation(self, loss) -> str:
""" Decide whether network output activation
This decision is based on the loss function.
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:return: How network output transformed:
- "categorical_crossentropy", "cce": softmax
- "binary_crossentropy", "bce": sigmoid
- "weighted_binary_crossentropy", "wbce": sigmoid
- "mean_squared_error", "mse": linear
- "mean_squared_logarithmic_error", "msle": exp
- "poisson", "pois": exp
"""
if loss.lower() in ["categorical_crossentropy", "cce"]:
return "softmax"
elif loss.lower() in ["binary_crossentropy", "bce"]:
return "sigmoid"
elif loss.lower() in ["weighted_binary_crossentropy", "wbce"]:
return "linear" # Cost function expect logits.
elif loss.lower() in ["mean_squared_error", "mse"]:
return "linear"
elif loss.lower() in ["mean_squared_logarithmic_error", "msle"]:
return "exponential"
elif loss.lower() in ["poisson", "pois"]:
return "exponential"
else:
raise ValueError("Loss %s not recognized." % loss)
def set_wbce_weight(self, weight):
""" Overwrites automatically computed weight that is chosen based on training data.
:param weight: Weight to use.
:return:
"""
self.wbce_weight = weight
def build_bilstm(
self,
topology: List[int],
split: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
optimize_for_gpu: bool = True,
dtype: str = "float32"
):
""" Build a BiLSTM-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
self._build_sequential(
model="bilstm",
topology=topology,
split=split,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
residual_connection=residual_connection,
dropout=dropout,
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing,
optimize_for_gpu=optimize_for_gpu,
dtype=dtype
)
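    # Hedged usage sketch (illustration only): building and training a BiLSTM
    # estimator. `ffn` is a hypothetical EstimatorFfn instance whose training data
    # (x_train, covariates_train, y_train) has already been read in.
    #
    #     ffn = EstimatorFfn(model_name="bilstm_example")
    #     # ... load training data via the reading methods of EstimatorBase ...
    #     ffn.build_bilstm(topology=[32, 32], aa_embedding_dim=0, loss="wbce")
    #     ffn.train(epochs=100, batch_size=128)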
def build_bigru(
self,
topology: List[int],
split: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
optimize_for_gpu: bool = True,
dtype: str = "float32"
):
""" Build a BiGRU-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.s
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
self._build_sequential(
model="bigru",
topology=topology,
split=split,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
residual_connection=residual_connection,
dropout=dropout,
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing,
optimize_for_gpu=optimize_for_gpu,
dtype=dtype
)
def _build_sequential(
self,
model: str,
topology: List[int],
split: bool,
aa_embedding_dim: Union[None, int],
depth_final_dense: int,
residual_connection: bool,
dropout: float,
optimizer: str,
lr: float,
loss: str,
label_smoothing: float,
optimize_for_gpu: bool,
dtype: str = "float32"
):
""" Build a BiLSTM-based feed-forward model to use in the estimator.
:param topology: The depth of each bilstm layer (length of feature vector)
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param optimize_for_gpu: Whether to choose implementation optimized for GPU.
:param dtype:
:return:
"""
# Save model settings:
self.model_hyperparam = {
"model": model,
"topology": topology,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"residual_connection": residual_connection,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"optimize_for_gpu": optimize_for_gpu,
"dtype": dtype
}
self.model = ModelBiRnn(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
model=model.lower(),
labels_dim=self.y_train.shape[1],
topology=topology,
split=split,
residual_connection=residual_connection,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
out_activation=self._out_activation(loss=loss),
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_self_attention(
self,
attention_size: List[int],
attention_heads: List[int],
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
residual_connection: bool = False,
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param attention_size: hidden size for attention, could be divided by attention_heads.
:param attention_heads: number of heads in attention.
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings:
self.model_hyperparam = {
"model": "selfattention",
"attention_size": attention_size,
"attention_heads": attention_heads,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"residual_connection": residual_connection,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelSa(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
attention_size=attention_size,
attention_heads=attention_heads,
residual_connection=residual_connection,
split=split,
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss),
depth_final_dense=depth_final_dense,
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_conv(
self,
activations: List[str],
filter_widths: List[int],
filters: List[int],
strides: Union[List[Union[int, None]], None] = None,
pool_sizes: Union[List[Union[int, None]], None] = None,
pool_strides: Union[List[Union[int, None]], None] = None,
batch_norm: bool = False,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param activations: Activation function. Refer to documentation of tf.keras.layers.Conv2D
:param filter_widths: Number of neurons per filter. Refer to documentation of tf.keras.layers.Conv2D
        :param filters: Number of filters / output channels. Refer to documentation of tf.keras.layers.Conv2D
:param strides: Stride size for convolution on sequence. Refer to documentation of tf.keras.layers.Conv2D
:param pool_sizes: Size of max-pooling, ie. number of output nodes to pool over.
Refer to documentation of tf.keras.layers.MaxPool2D:pool_size
:param pool_strides: Stride of max-pooling.
Refer to documentation of tf.keras.layers.MaxPool2D:strides
:param batch_norm: Whether to perform batch normalization.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "conv",
"activations": activations,
"filter_widths": filter_widths,
"filters": filters,
"strides": strides,
"pool_sizes": pool_sizes,
"pool_strides": pool_strides,
"batch_norm": batch_norm,
"split": split,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelConv(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
activations=activations,
filter_widths=filter_widths,
filters=filters,
strides=strides,
pool_sizes=pool_sizes,
pool_strides=pool_strides,
batch_norm=batch_norm,
split=split,
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss),
depth_final_dense=depth_final_dense,
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
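    # Hedged usage sketch (illustration only): the per-layer lists passed to
    # build_conv must have the same length, one entry per convolutional layer.
    #
    #     ffn.build_conv(
    #         activations=["relu", "relu"],
    #         filter_widths=[3, 3],
    #         filters=[16, 32],
    #         strides=[1, 1],
    #         pool_sizes=[2, None],
    #         pool_strides=[2, None],
    #         loss="bce",
    #     )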
def build_inception(
self,
n_filters_1x1: List[int],
n_filters_out: List[int],
n_hidden: int = 10,
residual_connection: bool = True,
aa_embedding_dim: Union[None, int] = None,
depth_final_dense: int = 1,
final_pool: str = "average",
dropout: float = 0.0,
split: bool = False,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a self-attention-based feed-forward model to use in the estimator.
:param n_filters_1x1:
:param n_filters_out:
:param n_hidden:
:param residual_connection: apply residual connection or not.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param depth_final_dense: Number of final densely connected layers. They all have labels_dim number of units
and relu activation functions, apart from the last, which has either linear or sigmoid activation,
depending on out_probabilities.
:param final_pool:
:param dropout: Drop-out rate for training.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "inception",
"n_filters_1x1": n_filters_1x1,
"n_filters_out": n_filters_out,
"n_hidden": n_hidden,
"split": split,
"final_pool": final_pool,
"residual_connection": residual_connection,
"aa_embedding_dim": aa_embedding_dim,
"depth_final_dense": depth_final_dense,
"dropout": dropout,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"dtype": dtype
}
# Build model.
self.model = ModelInception(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
n_filters_1x1=n_filters_1x1,
n_filters_out=n_filters_out,
n_hidden=n_hidden,
split=split,
final_pool=final_pool,
residual_connection=residual_connection,
aa_embedding_dim=aa_embedding_dim,
depth_final_dense=depth_final_dense,
out_activation=self._out_activation(loss=loss),
dropout=dropout
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_linear(
self,
aa_embedding_dim: Union[None, int] = None,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a linear feed-forward model to use in the estimator.
:param aa_embedding_dim: Dimension of the linear amino acid embedding, ie number of 1x1 convolutional filters.
This is set to the input dimension if aa_embedding_dim==0.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "linear",
"aa_embedding_dim": aa_embedding_dim,
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelLinear(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
aa_embedding_dim=aa_embedding_dim,
out_activation=self._out_activation(loss=loss)
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def build_noseq(
self,
optimizer: str = "adam",
lr: float = 0.005,
loss: str = "bce",
label_smoothing: float = 0,
dtype: str = "float32"
):
""" Build a dense feed-forward model to use in the estimator that does not include the sequence data.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for single boolean binding events with binary crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:param dtype:
:return:
"""
# Save model settings.
self.model_hyperparam = {
"model": "noseq",
"optimizer": optimizer,
"lr": lr,
"loss": loss,
"label_smoothing": label_smoothing,
"dtype": dtype
}
# Build model.
self.model = ModelNoseq(
input_shapes=(
self.x_train.shape[1],
self.x_train.shape[2],
self.x_train.shape[3],
self.covariates_train.shape[1],
self.tcr_len
),
labels_dim=self.y_train.shape[1],
out_activation=self._out_activation(loss=loss)
)
self._compile_model(
optimizer=optimizer,
lr=lr,
loss=loss,
label_smoothing=label_smoothing
)
def _compile_model(
self,
optimizer,
lr,
loss,
label_smoothing: float = 0
):
""" Shared model building code across model classes.
:param optimizer: str optimizer name or instance of tf.keras.optimizers
:param loss: loss name
- "categorical_crossentropy", "cce" for multiple boolean binding events with categorical crossentropy loss.
- "binary_crossentropy", "bce" for multiple boolean binding events with binary crossentropy loss.
- "weighted_binary_crossentropy", "wbce" for multiple boolean binding events with
weighted binary crossentropy loss.
- "mean_squared_error", "mse" for continuous value prediction with mean squared error loss.
- "mean_squared_logarithmic_error", "msle" for continuous value prediction with mean squared
logarithmic error loss.
- "poisson", "pois" for count value prediction based on Poisson log-likelihood.
:param label_smoothing: Fraction of the label interval to take out during smoothing. The labels are mapped
            from [0, 1] into [label_smoothing/2, 1-label_smoothing/2] through the following transform:
f(x) = x*(1-label_smoothing) + 0.5*label_smoothing
:return:
"""
# Instantiate loss.
if loss.lower() in ["categorical_crossentropy", "cce"]:
tf_loss = tf.keras.losses.CategoricalCrossentropy(
from_logits=False,
label_smoothing=label_smoothing
)
metric_class = "categorical_crossentropy"
elif loss.lower() in ["binary_crossentropy", "bce"]:
tf_loss = tf.keras.losses.BinaryCrossentropy(
from_logits=False,
label_smoothing=label_smoothing
)
metric_class = "binary_crossentropy"
elif loss.lower() in ["weighted_binary_crossentropy", "wbce"]:
tf_loss = WeightedBinaryCrossentropy(
weight_positives=1./self.frac_positives - 1. if self.wbce_weight is None else self.wbce_weight,
label_smoothing=label_smoothing
)
metric_class = "binary_crossentropy"
elif loss.lower() in ["mean_squared_error", "mse"]:
tf_loss = tf.keras.losses.MeanSquaredError()
metric_class = "real"
elif loss.lower() in ["mean_squared_logarithmic_error", "msle"]:
tf_loss = tf.keras.losses.MeanSquaredLogarithmicError()
metric_class = "real"
elif loss.lower() in ["poisson", "pois"]:
tf_loss = tf.keras.losses.Poisson() # only in tf>=1.14.1
metric_class = "real"
else:
raise ValueError("Loss %s not recognized." % loss)
# Assemble metrics.
if metric_class == "categorical_crossentropy":
metrics = [
tf.keras.metrics.CategoricalAccuracy(name="keras_acc"),
tf.keras.metrics.Precision(name="keras_precision"),
tf.keras.metrics.Recall(name="keras_recall"),
tf.keras.metrics.AUC(name="keras_auc"),
tf.keras.metrics.FalseNegatives(name="keras_fn"),
tf.keras.metrics.FalsePositives(name="keras_fp"),
tf.keras.metrics.TrueNegatives(name="keras_tn"),
tf.keras.metrics.TruePositives(name="keras_tp"),
tf.keras.metrics.CategoricalCrossentropy(name="keras_ce", from_logits=False, label_smoothing=0)
]
elif metric_class == "binary_crossentropy":
metrics = [
tf.keras.metrics.BinaryAccuracy(name="keras_acc"),
tf.keras.metrics.Precision(name="keras_precision"),
tf.keras.metrics.Recall(name="keras_recall"),
tf.keras.metrics.AUC(name="keras_auc"),
tf.keras.metrics.FalseNegatives(name="keras_fn"),
tf.keras.metrics.FalsePositives(name="keras_fp"),
tf.keras.metrics.TrueNegatives(name="keras_tn"),
tf.keras.metrics.TruePositives(name="keras_tp"),
tf.keras.metrics.BinaryCrossentropy(name="keras_ce", from_logits=False, label_smoothing=0)
]
elif metric_class == "real":
metrics = [
tf.keras.metrics.MeanSquaredError(name="keras_mse"),
tf.keras.metrics.RootMeanSquaredError(name="keras_rmse"),
tf.keras.metrics.MeanSquaredLogarithmicError(name="keras_msle"),
tf.keras.metrics.Poisson(name="keras_poisson"),
tf.keras.metrics.CosineSimilarity(name="keras_cosine"),
custom_r2,
custom_logr2
]
else:
assert False
# Build optimizer:
if optimizer.lower() == "adam":
            optimizer = tf.keras.optimizers.Adam(lr=lr)
else:
raise ValueError("optimizer %s not recognized" % optimizer)
# Compile model.
self.model.training_model.compile(
loss=tf_loss,
optimizer=optimizer,
metrics=metrics
)
def train(
self,
epochs: int = 1000,
batch_size: int = 128,
max_steps_per_epoch: int = 100,
validation_split=0.1,
validation_batch_size: int = 256,
max_validation_steps: int = 100,
patience: int = 20,
lr_schedule_min_lr: float = 1e-5,
lr_schedule_factor: float = 0.2,
lr_schedule_patience: int = 5,
log_dir: Union[str, None] = None,
use_existing_eval_partition: bool = False
):
""" Train model.
Uses validation loss and maximum number of epochs as termination criteria.
:param epochs: refer to tf.keras.models.Model.fit() documentation
        :param max_steps_per_epoch: Maximum number of steps per epoch (caps the number of batches drawn per epoch).
:param batch_size: refer to tf.keras.models.Model.fit() documentation
:param validation_split: refer to tf.keras.models.Model.fit() documentation
:param validation_batch_size: Number of validation data observations to evaluate evaluation metrics on.
        :param max_validation_steps: Maximum number of validation steps per epoch.
:param patience: refer to tf.keras.models.Model.fit() documentation
:param lr_schedule_min_lr: Minimum learning rate for learning rate reduction schedule.
:param lr_schedule_factor: Factor to reduce learning rate by within learning rate reduction schedule
            when plateau is reached.
:param lr_schedule_patience: Patience for learning rate reduction in learning rate reduction schedule.
:param log_dir: Directory to save tensorboard callback to. Disabled if None.
        :param use_existing_eval_partition: Whether to use the existing training-evaluation partition of the data. The index
            vectors are expected in self.idx_train and self.idx_val.
:return:
"""
# Save training settings to allow model restoring.
self.train_hyperparam = {
"epochs": epochs,
"batch_size": batch_size,
"validation_split": validation_split,
"validation_batch_size": validation_batch_size,
"patience": patience,
"lr_schedule_min_lr": lr_schedule_min_lr,
"lr_schedule_factor": lr_schedule_factor,
"lr_schedule_patience": lr_schedule_patience,
"log_dir": log_dir
}
# Set callbacks.
cbs = [
tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
patience=patience,
restore_best_weights=True
),
tf.keras.callbacks.ReduceLROnPlateau(
monitor='val_loss',
factor=lr_schedule_factor,
patience=lr_schedule_patience,
min_lr=lr_schedule_min_lr
)
]
if log_dir is not None:
cbs.append(tf.keras.callbacks.TensorBoard(
log_dir=log_dir,
histogram_freq=0,
write_graph=False,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None,
embeddings_data=None,
update_freq='epoch'
))
# Split data into training and evaluation.
if use_existing_eval_partition:
idx_val = np.array([self.idx_train_val.tolist().index(x)
for x in self.idx_train_val if x in self.idx_val])
idx_train = np.array([self.idx_train_val.tolist().index(x)
for x in self.idx_train_val if x in self.idx_train])
else:
# Split training data into training and evaluation.
# Perform this splitting based on clonotypes.
clones = np.unique(self.clone_train)
clones_eval = clones[np.random.choice(
a=np.arange(0, clones.shape[0]),
size=round(clones.shape[0] * validation_split),
replace=False
)]
clones_train = np.array([x for x in clones if x not in clones_eval])
# Collect observations by clone partition:
idx_val = np.where([x in clones_eval for x in self.clone_train])[0]
idx_train = np.where([x in clones_train for x in self.clone_train])[0]
# Save partitions in terms of original indexing.
self.idx_train = self.idx_train_val[idx_train]
self.idx_val = self.idx_train_val[idx_val]
# Assert that split is exclusive and complete:
assert len(set(clones_eval).intersection(set(clones_train))) == 0, \
"ERROR: train-test assignment was not exclusive on level of clones"
assert len(set(idx_val).intersection(set(idx_train))) == 0, \
"ERROR: train-test assignment was not exclusive on level of cells"
assert len(clones_eval) + len(clones_train) == len(clones), \
"ERROR: train-test split was not complete on the level of clones"
assert len(idx_val) + len(idx_train) == len(self.clone_train), \
"ERROR: train-test split was not complete on the level of cells"
print("Number of observations in evaluation data: %i" % len(idx_val))
print("Number of observations in training data: %i" % len(idx_train))
# Build Datasets for each training and evaluation data to feed iterators for each to model fitting.
train_dataset = tf.data.Dataset.from_tensor_slices((
(self.x_train[idx_train], self.covariates_train[idx_train]),
self.y_train[idx_train]
#self.sample_weight_train[idx_train]
)).shuffle(buffer_size=len(idx_train), reshuffle_each_iteration=True).\
repeat().batch(batch_size).prefetch(1)
eval_dataset = tf.data.Dataset.from_tensor_slices((
(self.x_train[idx_val], self.covariates_train[idx_val]),
self.y_train[idx_val]
)).shuffle(buffer_size=len(idx_val), reshuffle_each_iteration=True).\
repeat().batch(validation_batch_size).prefetch(1)
steps_per_epoch = min(max(len(idx_train) // batch_size, 1), max_steps_per_epoch)
validation_steps = min(max(len(idx_val) // validation_batch_size, 1), max_validation_steps)
# Fit model and save summary of fitting.
if len(self.x_train.shape) != 4:
raise ValueError("input shape should be [?,1,pos,feature]")
self.history = self.model.training_model.fit(
x=train_dataset,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
validation_data=eval_dataset,
validation_steps=validation_steps,
callbacks=cbs,
verbose=2
).history
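    # Hedged note on the split above (illustration only): validation observations
    # are held out by clonotype, not by cell, so all cells of a clone land on the
    # same side of the train/validation split. A minimal call could look like:
    #
    #     ffn.train(epochs=500, batch_size=128, validation_split=0.1, patience=20)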
@property
def idx_train_in_train_val(self):
return np.intersect1d(self.idx_train_val, self.idx_train, return_indices=True)[1]
@property
def idx_val_in_train_val(self):
return np.intersect1d(self.idx_train_val, self.idx_val, return_indices=True)[1]
def evaluate(
self,
batch_size: int = 1024
):
""" Evaluate loss on test data.
:param batch_size: Batch size for evaluation.
:return:
"""
results_test = self.evaluate_any(
x=self.x_test,
covar=self.covariates_test,
y=self.y_test,
batch_size=batch_size
)
results_val = self.evaluate_any(
x=self.x_train[self.idx_val_in_train_val],
covar=self.covariates_train[self.idx_val_in_train_val],
y=self.y_train[self.idx_val_in_train_val],
batch_size=batch_size,
)
results_train = self.evaluate_any(
x=self.x_train[self.idx_train_in_train_val],
covar=self.covariates_train[self.idx_train_in_train_val],
y=self.y_train[self.idx_train_in_train_val],
batch_size=batch_size,
)
self.evaluations = {
"test": results_test,
"val": results_val,
"train": results_train
}
def evaluate_any(
self,
x,
covar,
y,
batch_size: int = 1024,
):
""" Evaluate loss on supplied data.
:param batch_size: Batch size for evaluation.
:return: Dictionary of metrics
"""
results = self.model.training_model.evaluate(
x=(x, covar),
y=y,
batch_size=batch_size,
verbose=0
)
return dict(zip(self.model.training_model.metrics_names, results))
def evaluate_custom(
self,
classification_metrics: bool = True,
regression_metrics: bool = False,
transform: str = None
):
""" Obtain custom evaluation metrics for classification task on train, val and test data.
"""
results_test = self.evaluate_custom_any(
yhat=self.predict_any(x=self.x_test, covar=self.covariates_test, batch_size=1024),
yobs=self.y_test,
nc=self.nc_test,
labels=np.asarray(self.peptide_seqs_test),
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
results_val = self.evaluate_custom_any(
yhat=self.predict_any(
x=self.x_train[self.idx_val_in_train_val],
covar=self.covariates_train[self.idx_val_in_train_val],
batch_size=1024
),
yobs=self.y_train[self.idx_val_in_train_val],
nc=self.nc_train[self.idx_val_in_train_val] if self.nc_train is not None else None,
labels=np.asarray(self.peptide_seqs_train)[self.idx_val_in_train_val] \
if self.peptide_seqs_train is not None else None,
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
results_train = self.evaluate_custom_any(
yhat=self.predict_any(
x=self.x_train[self.idx_train_in_train_val],
covar=self.covariates_train[self.idx_train_in_train_val],
batch_size=1024
),
yobs=self.y_train[self.idx_train_in_train_val],
nc=self.nc_train[self.idx_train_in_train_val] if self.nc_train is not None else None,
labels=np.asarray(self.peptide_seqs_train)[self.idx_train_in_train_val] \
if self.peptide_seqs_train is not None else None,
labels_unique=self.peptide_seqs_unique,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform
)
self.evaluations_custom = {
"test": results_test,
"val": results_val,
"train": results_train
}
def _evaluate_custom_any(
self,
yhat,
yobs,
nc,
classification_metrics: bool,
regression_metrics: bool,
labels=None,
labels_unique=None,
transform_flavour: str = None
):
""" Obtain custom evaluation metrics for classification task on any data.
"""
metrics_global = {}
metrics_local = {}
if regression_metrics:
mse_global, msle_global, r2_global, r2log_global = deviation_global(
y_hat=[yhat], y_obs=[yobs]
)
mse_label, msle_label, r2_label, r2log_label = deviation_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
metrics_global.update({
"mse": mse_global,
"msle": msle_global,
"r2": r2_global,
"r2log": r2log_global
})
metrics_local.update({
"mse": mse_label,
"msle": msle_label,
"r2": r2_label,
"r2log": r2log_label
})
if classification_metrics:
if transform_flavour is not None:
yhat, yobs = self.transform_predictions_any(
yhat=yhat,
yobs=yobs,
nc=nc,
flavour=transform_flavour
)
score_auc_global = auc_global(y_hat=[yhat], y_obs=[yobs])
prec_global, rec_global, tp_global, tn_global, fp_global, fn_global = pr_global(
y_hat=[yhat], y_obs=[yobs]
)
score_auc_label = auc_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
prec_label, rec_label, tp_label, tn_label, fp_label, fn_label = pr_label(
y_hat=[yhat], y_obs=[yobs], labels=[labels], labels_unique=labels_unique
)
metrics_global.update({
"auc": score_auc_global,
"prec": prec_global,
"rec": rec_global,
"tp": tp_global,
"tn": tn_global,
"fp": fp_global,
"fn": fn_global
})
metrics_local.update({
"auc": score_auc_label,
"prec": prec_label,
"rec": rec_label,
"tp": tp_label,
"tn": tn_label,
"fp": fp_label,
"fn": fn_label
})
return {
"global": metrics_global,
"local": metrics_local
}
def evaluate_custom_any(
self,
yhat,
yobs,
nc,
labels=None,
labels_unique=None,
classification_metrics: bool = True,
regression_metrics: bool = False,
transform_flavour: str = None
):
"""
Obtain custom evaluation metrics for classification task.
        Ignores labels as samples are not structured by labels (i.e. one sample contains observations on all labels).
:param yhat:
:param yobs:
:param nc:
:param labels:
:param transform_flavour:
:return:
"""
return self._evaluate_custom_any(
yhat=yhat,
yobs=yobs,
nc=nc,
classification_metrics=classification_metrics,
regression_metrics=regression_metrics,
transform_flavour=transform_flavour,
labels=None,
labels_unique=None
)
def predict(
self,
batch_size: int = 128
):
""" Predict labels on test data.
:param batch_size: Batch size for evaluation.
:return:
"""
self.predictions = self.model.training_model.predict(
x=(self.x_test, self.covariates_test),
batch_size=batch_size
)
def predict_any(
self,
x,
covar,
batch_size: int = 128
):
""" Predict labels on any data.
:param batch_size: Batch size for evaluation.
:return:
"""
return self.model.training_model.predict(
x=(x, covar),
batch_size=batch_size,
verbose=0
)
def transform_predictions_any(
self,
yhat,
yobs,
nc,
flavour="10x_cd8_v1"
):
""" Transform model predictions and ground truth labels on test data.
Transform predictions and self.y_test
- "10x_cd8" Use this setting to transform the real valued output of a network trained with MSE loss
into probability space by using the bound/unbound classifier published with the 10x data set:
An antigen is bound if it has (1) at least 10 counts and (2) at least 5 times more counts
than the highest observed negative control and (3) is the highest count pMHC.
Requires negative controls to be defined during reading.
:param flavour: Type of transform to use, see function description.
:return:
"""
if flavour == "10x_cd8_v1":
if self.model_hyperparam["loss"] not in ["mse", "msle", "poisson"]:
raise ValueError("Do not use transform_predictions with flavour=='10x_cd8_v1' on a model fit "
"with a loss that is not mse, msle or poisson.")
if nc.shape[1] == 0:
raise ValueError("Negative controls were not set, supply these during data reading.")
predictions_new = np.zeros(yhat.shape)
idx_bound_predictions = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(nc[i, :])
# At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(yhat)]
for i, j in enumerate(idx_bound_predictions):
if len(j) > 0:
                    predictions_new[i, j[-1]] = 1.  # Choose last label if two labels are called.
yhat = predictions_new
y_test_new = np.zeros(yobs.shape)
idx_bound_y = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(nc[i, :])
# At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(yobs)]
for i, j in enumerate(idx_bound_y):
if len(j) > 0:
                    y_test_new[i, j[-1]] = 1.  # Choose last label if two labels are called.
yobs = y_test_new
else:
raise ValueError("flavour %s not recognized" % flavour)
return yhat, yobs
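    # Worked example of the "10x_cd8_v1" rule above (comment only, hypothetical
    # numbers): for a cell with counts [3, 40, 12] over three pMHCs and a highest
    # negative-control count of 6, only the second entry passes all three criteria
    # (40 > 10, 40 is the cell maximum, 40 > 5 * 6), so the transformed label
    # vector becomes [0, 1, 0].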
def transform_predictions(
self,
flavour="10x_cd8_v1"
):
""" Transform model predictions and ground truth labels on test data.
Transform predictions and self.y_test
- "10x_cd8" Use this setting to transform the real valued output of a network trained with MSE loss
into probability space by using the bound/unbound classifier published with the 10x data set:
An antigen is bound if it has (1) at least 10 counts and (2) at least 5 times more counts
than the highest observed negative control and (3) is the highest count pMHC.
Requires negative controls to be defined during reading.
:param flavour: Type of transform to use, see function description.
:return:
"""
if flavour == "10x_cd8_v1":
if self.model_hyperparam["loss"] not in ["mse", "msle", "poisson"]:
raise ValueError("Do not use transform_predictions with flavour=='10x_cd8_v1' on a model fit "
"with a loss that is not mse, msle or poisson.")
if self.nc_test.shape[1] == 0:
raise ValueError("Negative controls were not set, supply these during data reading.")
predictions_new = np.zeros(self.predictions.shape)
idx_bound_predictions = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(self.nc_test[i, :]) # At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(self.predictions)]
for i, j in enumerate(idx_bound_predictions):
if len(j) > 0:
                    predictions_new[i, j[-1]] = 1.  # Choose last label if two labels are called.
self.predictions = predictions_new
y_test_new = np.zeros(self.y_test.shape)
idx_bound_y = [np.where(np.logical_and(
np.logical_and(x > 10., np.max(x) == x), # At least 10 UMIs and is maximum element of cell.
x > 5. * np.max(self.nc_test[i, :]) # At least 5x as many UMIs as highest negative control UMI count in cell.
))[0] for i, x in enumerate(self.y_test)]
for i, j in enumerate(idx_bound_y):
if len(j) > 0:
                    y_test_new[i, j[-1]] = 1.  # Choose last label if two labels are called.
self.y_test = y_test_new
else:
raise ValueError("flavour %s not recognized" % flavour)
def save_results(
self,
fn
):
""" Save training history, test loss and test predictions.
Will generate the following files:
- fn+"history.pkl": training history dictionary
- fn+"evaluations.npy": loss on test data
- fn+"evaluations_custom.npy": loss on test data
:param self:
:param fn: Path and file name prefix to write to.
:return:
"""
with open(fn + "_history.pkl", 'wb') as f:
pickle.dump(self.history, f)
with open(fn + "_evaluations.pkl", 'wb') as f:
pickle.dump(self.evaluations, f)
with open(fn + "_evaluations_custom.pkl", 'wb') as f:
pickle.dump(self.evaluations_custom, f)
if self.label_ids is not None:
pd.DataFrame({"label": self.label_ids}).to_csv(fn + "_labels.csv")
with open(fn + "_peptide_seqs_unique.pkl", 'wb') as f:
pickle.dump(self.peptide_seqs_unique, f)
def load_results(
self,
fn
):
""" Load training history, test loss and test predictions.
Will add the following entries to this instance from files:
- fn+"history.pkl": training history dictionary
- fn+"evaluations.npy": loss on test data
- fn+"evaluations_custom.npy": loss on test data
:param self:
:param fn: Path and file name prefix to read from.
:return:
"""
with open(fn + "_history.pkl", 'rb') as f:
self.history = pickle.load(f)
with open(fn + "_evaluations.pkl", 'rb') as f:
self.evaluations = pickle.load(f)
with open(fn + "_evaluations_custom.pkl", 'rb') as f:
self.evaluations_custom = pickle.load(f)
def save_model_full(
self,
fn,
reduce_size: bool = False,
save_yhat: bool = True,
save_train_data: bool = False
):
""" Save model settings, data and weights.
Saves all data necessary to perform full one-step model reloading with self.load_model().
:param self:
:param fn: Path and file name prefix to write to.
:param reduce_size: Whether to save storage efficient, ie only elements that are absolutely necessary.
:return:
"""
self.save_model(fn=fn)
self.save_estimator_args(fn=fn)
self.save_data(
fn=fn,
train=save_train_data,
test=True,
reduce_size=reduce_size
)
if save_yhat:
self.save_predictions(
fn=fn,
train=save_train_data,
test=True
)
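    # Hedged usage sketch (illustration only): a save/reload round trip with the
    # methods defined here, using a hypothetical path prefix.
    #
    #     ffn.save_model_full(fn="out/run01", save_yhat=True)
    #     ffn_new = EstimatorFfn()
    #     ffn_new.load_model_full(fn="out/run01")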
def save_model(
self,
fn
):
""" Save model weights.
:param self:
        :param fn: Path and file name prefix to write to. Weights are stored in TensorFlow checkpoint format.
:return:
"""
self.model.training_model.save_weights(fn, save_format="tf")
def load_model_full(
self,
fn: str = None,
fn_settings: str = None,
fn_data: str = None,
fn_model: str = None
):
""" Load entire model, this is possible if model weights, data and settings were stored.
:param self:
:param fn: Path and file name prefix to read model settings, data and model from.
:param fn_settings: Path and file name prefix to read model settings from.
:param fn_data: Path and file name prefix to read all fitting relevant data objects from.
:param fn_model: Path and file name prefix to read model weights from.
:return:
"""
if fn is not None:
fn_settings = fn
fn_data = fn
fn_model = fn
# Initialise model.
self.load_data(fn=fn_data)
self.load_model(
fn_settings=fn_settings,
fn_model=fn_model
)
def load_model(
self,
fn_settings: str,
fn_model: str
):
""" Load model from .tf weights.
:param self:
:param fn_settings: Path and file name prefix to read model settings from.
:param fn_model: Path and file name prefix to read model weights from.
:return:
"""
# Initialise model.
self.load_model_settings(fn=fn_settings)
self.initialise_model_from_settings()
self.model.training_model.load_weights(fn_model)
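# Illustrative usage sketch (not part of the original module; the file prefix
# "my_run" is hypothetical). save_model_full() writes settings, data and .tf
# weights under the prefix, and load_model_full() restores them in one step on
# a fresh estimator instance:
#
#   estimator.save_model_full(fn="my_run", save_train_data=False)
#   estimator_new.load_model_full(fn="my_run")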
def save_estimator_args(
self,
fn
):
""" Save model settings.
:param self:
:param fn: Path and file name prefix to write to.
:return:
"""
# Save model args.
with open(fn + "_model_args.pkl", 'wb') as f:
pickle.dump(self.model.args, f)
# Save model settings.
with open(fn + "_model_settings.pkl", 'wb') as f:
pickle.dump(self.model_hyperparam, f)
# Save training settings.
with open(fn + "_train_settings.pkl", 'wb') as f:
pickle.dump(self.train_hyperparam, f)
def load_model_settings(
self,
fn
):
""" Load model settings.
:param self:
:param fn: Path and file name prefix to read model settings from.
:return:
"""
# Load model settings.
with open(fn + "_model_settings.pkl", 'rb') as f:
self.model_hyperparam = pickle.load(f)
# Load training settings.
with open(fn + "_train_settings.pkl", 'rb') as f:
self.train_hyperparam = pickle.load(f)
def initialise_model_from_settings(self):
"""
:return:
"""
# Build model.
if self.model_hyperparam["model"].lower() in ["bilstm", "bigru"]:
self._build_sequential(
split=self.model_hyperparam["split"],
model=self.model_hyperparam["model"],
topology=self.model_hyperparam["topology"],
aa_embedding_dim=self.model_hyperparam["aa_embedding_dim"],
depth_final_dense=self.model_hyperparam["depth_final_dense"],
residual_connection=self.model_hyperparam["residual_connection"],
dropout=self.model_hyperparam["dropout"],
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
optimize_for_gpu=self.model_hyperparam["optimize_for_gpu"],
dtype=self.model_hyperparam["dtype"]
)
elif self.model_hyperparam["model"].lower() in ["sa", "selfattention"]:
self.build_self_attention(
attention_size=self.model_hyperparam["attention_size"],
attention_heads=self.model_hyperparam["attention_heads"],
aa_embedding_dim=self.model_hyperparam["aa_embedding_dim"],
depth_final_dense=self.model_hyperparam["depth_final_dense"],
residual_connection=self.model_hyperparam["residual_connection"],
dropout=self.model_hyperparam["dropout"],
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
dtype=self.model_hyperparam["dtype"]
)
elif self.model_hyperparam["model"].lower() in ["conv", "convolutional"]:
self.build_conv(
activations=self.model_hyperparam["activations"],
filter_widths=self.model_hyperparam["filter_widths"],
filters=self.model_hyperparam["filters"],
strides=self.model_hyperparam["strides"],
pool_sizes=self.model_hyperparam["pool_sizes"],
pool_strides=self.model_hyperparam["pool_strides"],
batch_norm=self.model_hyperparam["batch_norm"],
aa_embedding_dim=self.model_hyperparam["aa_embedding_dim"],
depth_final_dense=self.model_hyperparam["depth_final_dense"],
dropout=self.model_hyperparam["dropout"],
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
dtype=self.model_hyperparam["dtype"]
)
elif self.model_hyperparam["model"].lower() in ["inception"]:
self.build_inception(
split=self.model_hyperparam["split"],
n_filters_1x1=self.model_hyperparam["n_filters_1x1"],
n_filters_out=self.model_hyperparam["n_filters_out"],
n_hidden=self.model_hyperparam["n_hidden"],
final_pool=self.model_hyperparam["final_pool"],
residual_connection=self.model_hyperparam["residual_connection"],
aa_embedding_dim=self.model_hyperparam["aa_embedding_dim"],
depth_final_dense=self.model_hyperparam["depth_final_dense"],
dropout=self.model_hyperparam["dropout"],
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
dtype=self.model_hyperparam["dtype"]
)
elif self.model_hyperparam["model"].lower() in ["linear"]:
self.build_linear(
aa_embedding_dim=self.model_hyperparam["aa_embedding_dim"],
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
dtype=self.model_hyperparam["dtype"]
)
elif self.model_hyperparam["model"].lower() in ["noseq"]:
self.build_noseq(
optimizer=self.model_hyperparam["optimizer"],
lr=self.model_hyperparam["lr"],
loss=self.model_hyperparam["loss"],
label_smoothing=self.model_hyperparam["label_smoothing"],
dtype=self.model_hyperparam["dtype"]
)
else:
raise ValueError("model %s not recognized" % self.model_hyperparam["model"])
def save_weights_tonumpy(
self,
fn
):
""" Save model weights to pickled list of numpy arrays.
:param fn: Path and file name prefix to write to.
:return:
"""
weights = self.model.training_model.get_weights()
with open(fn + "_weights.pkl", 'wb') as f:
pickle.dump(weights, f)
def load_weights_asnumpy(
self,
fn
):
""" Load model weights.
:param fn: Path and file name prefix to read from.
:return: List of model weights as numpy arrays.
"""
with open(fn + "_weights.pkl", 'rb') as f:
weights = pickle.load(f)
return weights
def save_data(
self,
fn,
train: bool,
test: bool,
reduce_size: bool = False
):
""" Save train and test data.
:param fn: Path and file name prefix to write all fitting relevant data objects to.
:param train: Whether to save training data.
:param test: Whether to save test data.
:param reduce_size: Whether to save in a storage-efficient way, i.e. only elements that are absolutely necessary.
:return:
"""
if train:
if not reduce_size:
scipy.sparse.save_npz(
matrix=scipy.sparse.csr_matrix(np.reshape(self.x_train, [self.x_train.shape[0], -1])),
file=fn + "_x_train.npz"
)
np.save(arr=self.x_train.shape, file=fn + "_x_train_shape.npy")
if not reduce_size and self.covariates_train.shape[1] > 0:
if not isinstance(self.covariates_train, scipy.sparse.csr_matrix):
covariates_train = scipy.sparse.csr_matrix(np.reshape(
self.covariates_train,
[self.covariates_train.shape[0], -1]
))
else:
covariates_train = self.covariates_train
scipy.sparse.save_npz(matrix=covariates_train, file=fn + "_covariates_train.npz")
np.save(arr=self.covariates_train.shape, file=fn + "_covariates_train_shape.npy")
if not reduce_size:
if not isinstance(self.y_train, scipy.sparse.csr_matrix):
y_train = scipy.sparse.csr_matrix(self.y_train)
else:
y_train = self.y_train
scipy.sparse.save_npz(matrix=y_train, file=fn + "_y_train.npz")
np.save(arr=self.y_train.shape, file=fn + "_y_train_shape.npy")
if not reduce_size and self.nc_train is not None and self.nc_train.shape[1] > 0:
if not isinstance(self.nc_train, scipy.sparse.csr_matrix):
nc_train = scipy.sparse.csr_matrix(self.nc_train)
else:
nc_train = self.nc_train
scipy.sparse.save_npz(matrix=nc_train, file=fn + "_nc_train.npz")
if self.nc_train is not None:
np.save(arr=self.nc_train.shape, file=fn + "_nc_train_shape.npy")
else:
np.save(arr=np.array([None]), file=fn + "_nc_train_shape.npy")
np.save(arr=self.clone_train, file=fn + "_clone_train.npy")
if self.peptide_seqs_train is not None:
| pd.DataFrame({"antigen": self.peptide_seqs_train}) | pandas.DataFrame |
"""Module to run demo on streamlit"""
import cv2
import time
import beepy
import threading
import numpy as np
import pandas as pd
import streamlit as st
from datetime import date
import face_recognition as fr
class Camera:
'''
Camera object to get video from remote source
use read() method to read frames from video
'''
def __init__(self) -> None:
self.cam = cv2.VideoCapture(0)
def read(self):
_, frame = self.cam.read()
return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
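# Note: read() returns frames converted from OpenCV's BGR order to RGB, which is
# what face_recognition and st.image expect downstream. Minimal illustrative use:
#
#   cam = Camera()
#   rgb_frame = cam.read()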
class LCMS:
'''
class to represent Live Class Monitoring System
'''
# constructor to initialize class
def __init__(self) -> None:
self.processed = None # processed images to output
self.image = None # raw frame from webcam
self.counter = 0 # counter for frame processing
self.student_name = ""
self.tolerance = 0.6 # threshold for face recognition
self.ear_threshold = 0.26 # threshold for drowsiness detection
self.time_delta = 2 # time (in seconds) eyes must stay closed before drowsiness is detected
self.enc_ref = None # computed encoding for reference image
self.attendance_register = None # register to keep track of attendance
# initialize states
self.is_drowsy = False
self.is_present = False
self.eye_closed = None # time when eye closed
self.time = 0 # total session time
self.time_stamp = None
# method to setup class demo
def start(self):
# write header
st.header('LIVE CLASS MONITORING SYSTEM')
# create a text field to input student name
self.student_name = st.sidebar.text_input('Enter full name of student')
# Add a slider for tolerance
self.tolerance = st.sidebar.slider('Select tolerance value', 0.0, 1.0, 0.6)
# Add a slider for ear_threshold
self.ear_threshold = st.sidebar.slider('Select eye aspect ratio threshold value', 0.0, 1.0, 0.26)
# Add a slider for drowsiness detection time
self.time_delta = st.sidebar.slider('Select drowsiness detection time value', 0, 10, 2)
# first ask for student name
# if student name is provided then as for student image for reference
if len(self.student_name) > 0:
upload = st.sidebar.file_uploader('Choose an image...', type='jpg')
# once an image is uploaded, start the video for face recognition
if upload is not None:
ref_image = fr.load_image_file(upload)
# create dataframe to keep track of attendance
self.attendance_register = create_register(self.student_name)
# create a list of face encoding from student image
# save encoding to avoid repeating computation
self.enc_ref = fr.face_encodings(ref_image)
# run live monitoring system
self.run_live_monitoring()
# show attendance register at end
st.dataframe(self.attendance_register)
# method to process input video and produce resulting video
def run_live_monitoring(self):
'''
Runs facial recognition and drowsiness detection models on the live video feed
Arguments:
image: input image from camera
Output:
processed video on app with drowsiness alert
'''
# use a thread for playing sound in the background while the main thread executes the program
thread_sound = threading.Thread(target=beepy.beep, args=(6,), daemon=True) # play alarm sound when running
# capture frames from webcam
camera = Camera()
image_holder = st.empty()
# video is generated frame by frame
# each frame will be processed individually
# loop to run the model while video is available
while True:
try:
# read next frame from camera
self.image = camera.read()
# process current image
self.process()
# annotate image
self.annote()
# play alarm to wake-up drowsy student
if self.is_drowsy:
if not thread_sound.is_alive():
thread_sound = threading.Thread(target=beepy.beep, args=(6,), daemon=True)
thread_sound.start()
# output image
image_holder.image(self.processed)
except:
break
# at end of class add session time to the attendance register
self.mark_attendance(0, self.time)
# method to run all calculations in background
def process(self):
# process every 2nd frame for speedup
self.counter -= 1
if self.counter > 0:
return
# reset counter
self.counter = 2
self.face_recognition()
# update session time
if self.is_present:
if self.time_stamp is not None:
self.time += (time.time() - self.time_stamp)
self.time_stamp = time.time()
# check for drowsiness
self.drowsiness_detection()
# if student is not present in frame we can assume the student is not attending class
else:
# stop current session time for student
self.time_stamp = None # reset time stamp
def face_recognition(self):
'''
Given an image, performs face encoding and compares it with the given list of encodings.
If the distance between encodings is less than the tolerance, the student with the given
encoding is marked as present.
'''
self.is_present = False
try:
# reduce image size to speed up processing
image = cv2.resize(self.image, (0,0), None, 0.25, 0.25)
# find face locations
face_locations = fr.face_locations(image)
# encode the test image
enc_test = fr.face_encodings(image, face_locations)[0] # extract first encoding from the list
# compare a list of face encodings against a test encoding to see if they match
# euclidean distance for each face encoding is calculated and compared with tolerance value
# tolerance is the distance between faces to consider it a match
result = fr.face_distance(self.enc_ref,enc_test)
# get the index of minimum distance
min_dist_index = np.argmin(result)
# compare with tolerance value
if result[min_dist_index] <= self.tolerance:
self.is_present = True
except:
# face encoding failed: either no face is present in the image or no match was found within the tolerance limit
pass
def mark_attendance(self, index, session_time):
# add session time
prev_session_time_str = self.attendance_register.iloc[index][2]
# convert previous session time to int(in seconds) from string(h:mm:ss)
h, m, s = prev_session_time_str.split(':')
prev_time = int(h)*3600 + int(m)*60 + int(s)
# calculate new session time
new_time = prev_time + session_time
# convert new session time to string(h:mm:ss)
time_str = time.strftime('%H:%M:%S', time.gmtime(new_time))
self.attendance_register.iloc[[index],[2]] = time_str
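# Worked example of the conversion above (illustrative numbers): a stored value of
# '0:01:40' parses to 100 seconds; adding a 50-second session gives 150 seconds,
# and time.strftime('%H:%M:%S', time.gmtime(150)) writes it back as '00:02:30'.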
def ratio(self, points):
# from list of tuples calculate aspect ratio
# initialize default values for extreme points
left = 1000000
right = 0
up = 1000000
down = 0
# iterate over all points to find extreme points
for p in points:
if p[0] < left:
left = p[0]
if p[0] > right:
right = p[0]
if p[1] < up:
up = p[1]
if p[1] > down:
down = p[1]
# calculate aspect ratio
ratio = (down - up) / (right - left)
return ratio
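# Illustrative example for the aspect ratio above (hypothetical eye landmarks):
# points = [(10, 5), (20, 5), (15, 2), (15, 8)] span width 20 - 10 = 10 and
# height 8 - 2 = 6, so ratio = 6 / 10 = 0.6; closing the eye shrinks the height,
# pushing the ratio below self.ear_threshold.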
def drowsiness_detection(self):
'''
From the given image, detects facial features and extracts the eyes.
If the eye features are extracted, calculates the eye aspect ratio and returns the average of the ratios from both eyes.
With the eye aspect ratio and the threshold values for EAR and time, detects drowsiness.
'''
ear = 0.5 # default start ratio
try:
# reduce image size to speed up processing
image = cv2.resize(self.image, (0,0), None, 0.25, 0.25)
# find face locations
face_locations = fr.face_locations(image)
# get facial landmarks as dictionary
landmarks = fr.face_landmarks(image, face_locations)
# extract left and right eye points from landmarks
left_eye_points = landmarks[0]['left_eye']
right_eye_points = landmarks[0]['right_eye']
ear_left = self.ratio(left_eye_points)
ear_right = self.ratio(right_eye_points)
ear = (ear_left + ear_right)/2
except:
# unable to load facial features
return
if ear < self.ear_threshold:
# if eyes are closed there are 2 possibilities
# 1. it's blink
# 2. drowsiness
# first check for blink
if self.eye_closed is None:
# start timer for closed eye
self.eye_closed = time.time()
else:
# if eyes already closed, check for duration
# when duration is more than time_delta we consider it as drowsiness
if (time.time() - self.eye_closed) > self.time_delta:
# put drowsiness notification
self.is_drowsy = True
# reset timer
self.eye_closed = None
else:
self.is_drowsy = False
self.eye_closed = None
def drowsiness_alert(self):
'''Adds text in image for drowsiness alert'''
return cv2.putText(self.image,text='Drowsiness Alert!',org=(10,30),fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,color=(255,0,0),thickness=2)
def show_session_time(self):
'''Adds session time in image to indicate attendance is marked'''
time = "session time (in seconds): " + str(int(self.time))
return cv2.putText(self.image,text=time,org=(10,20),fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.5,color=(0,0,0),thickness=1)
# method to annotate the image after processing it
def annote(self):
# check states and annotate the image
if self.is_drowsy:
self.processed = self.drowsiness_alert()
elif self.is_present:
self.processed = self.show_session_time()
else:
self.processed = self.image
# function to create register
@st.cache(allow_output_mutation=True)
def create_register(name):
register = pd.DataFrame()
import os
import pandas as pd
import numpy as np
import nips15
folds_dir = 'models/jmlr/folds'
demographic = ['female', 'afram']
molecular = ['aca', 'scl']
pfvc_spec = {'t':'years_seen_full', 'y':'pfvc', 'x1':demographic, 'x2':demographic + molecular}
pfvc = pd.read_csv('data/benchmark_pfvc.csv')
#
# Copyright 2018 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import datetime
from datetime import timedelta
from functools import partial
from textwrap import dedent
from copy import deepcopy
import logbook
import toolz
from logbook import TestHandler, WARNING
from parameterized import parameterized
from six import iteritems, itervalues, string_types
from six.moves import range
from testfixtures import TempDirectory
import numpy as np
import pandas as pd
import pytz
from pandas.errors import PerformanceWarning
from trading_calendars import get_calendar, register_calendar
import zipline.api
from zipline.api import FixedSlippage
from zipline.assets import Equity, Future, Asset
from zipline.assets.continuous_futures import ContinuousFuture
from zipline.assets.synthetic import (
make_jagged_equity_info,
make_simple_equity_info,
)
from zipline.errors import (
AccountControlViolation,
CannotOrderDelistedAsset,
IncompatibleSlippageModel,
RegisterTradingControlPostInit,
ScheduleFunctionInvalidCalendar,
SetCancelPolicyPostInit,
SymbolNotFound,
TradingControlViolation,
UnsupportedCancelPolicy,
UnsupportedDatetimeFormat,
ZeroCapitalError
)
from zipline.finance.commission import PerShare, PerTrade
from zipline.finance.execution import LimitOrder
from zipline.finance.order import ORDER_STATUS
from zipline.finance.trading import SimulationParameters
from zipline.finance.asset_restrictions import (
Restriction,
HistoricalRestrictions,
StaticRestrictions,
RESTRICTION_STATES,
)
from zipline.finance.controls import AssetDateBounds
from zipline.testing import (
FakeDataPortal,
create_daily_df_for_asset,
create_data_portal_from_trade_history,
create_minute_df_for_asset,
make_test_handler,
make_trade_data_for_asset_info,
parameter_space,
str_to_seconds,
to_utc,
)
from zipline.testing import RecordBatchBlotter
import zipline.testing.fixtures as zf
from zipline.test_algorithms import (
access_account_in_init,
access_portfolio_in_init,
api_algo,
api_get_environment_algo,
api_symbol_algo,
handle_data_api,
handle_data_noop,
initialize_api,
initialize_noop,
noop_algo,
record_float_magic,
record_variables,
call_with_kwargs,
call_without_kwargs,
call_with_bad_kwargs_current,
call_with_bad_kwargs_history,
bad_type_history_assets,
bad_type_history_fields,
bad_type_history_bar_count,
bad_type_history_frequency,
bad_type_history_assets_kwarg_list,
bad_type_current_assets,
bad_type_current_fields,
bad_type_can_trade_assets,
bad_type_is_stale_assets,
bad_type_history_assets_kwarg,
bad_type_history_fields_kwarg,
bad_type_history_bar_count_kwarg,
bad_type_history_frequency_kwarg,
bad_type_current_assets_kwarg,
bad_type_current_fields_kwarg,
call_with_bad_kwargs_get_open_orders,
call_with_good_kwargs_get_open_orders,
call_with_no_kwargs_get_open_orders,
empty_positions,
no_handle_data,
)
from zipline.testing.predicates import assert_equal
from zipline.utils.api_support import ZiplineAPI
from zipline.utils.context_tricks import CallbackManager, nop_context
from zipline.utils.events import (
date_rules,
time_rules,
Always,
ComposedRule,
Never,
OncePerDay,
)
import zipline.utils.factory as factory
# Because test cases appear to reuse some resources.
_multiprocess_can_split_ = False
class TestRecord(zf.WithMakeAlgo, zf.ZiplineTestCase):
ASSET_FINDER_EQUITY_SIDS = (133,)
SIM_PARAMS_DATA_FREQUENCY = 'daily'
DATA_PORTAL_USE_MINUTE_DATA = False
def test_record_incr(self):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
zipline.api.record(name, self.incr, 'name2', 2, name3=self.incr)
output = self.run_algorithm(
initialize=initialize,
handle_data=handle_data,
)
np.testing.assert_array_equal(output['incr'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name'].values,
range(1, len(output) + 1))
np.testing.assert_array_equal(output['name2'].values,
[2] * len(output))
np.testing.assert_array_equal(output['name3'].values,
range(1, len(output) + 1))
class TestMiscellaneousAPI(zf.WithMakeAlgo, zf.ZiplineTestCase):
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
END_DATE = pd.Timestamp('2006-01-05', tz='UTC')
SIM_PARAMS_DATA_FREQUENCY = 'minute'
sids = 1, 2
# FIXME: Pass a benchmark source instead of this.
BENCHMARK_SID = None
@classmethod
def make_equity_info(cls):
return pd.concat((
make_simple_equity_info(cls.sids, '2002-02-1', '2007-01-01'),
pd.DataFrame.from_dict(
{3: {'symbol': 'PLAY',
'start_date': '2002-01-01',
'end_date': '2004-01-01',
'exchange': 'TEST'},
4: {'symbol': 'PLAY',
'start_date': '2005-01-01',
'end_date': '2006-01-01',
'exchange': 'TEST'}},
orient='index',
),
))
@classmethod
def make_futures_info(cls):
return pd.DataFrame.from_dict(
{
5: {
'symbol': 'CLG06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-01-20', tz='UTC'),
'exchange': 'TEST'
},
6: {
'root_symbol': 'CL',
'symbol': 'CLK06',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-04-20', tz='UTC'),
'exchange': 'TEST',
},
7: {
'symbol': 'CLQ06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-07-20', tz='UTC'),
'exchange': 'TEST',
},
8: {
'symbol': 'CLX06',
'root_symbol': 'CL',
'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
'expiration_date': pd.Timestamp('2006-10-20', tz='UTC'),
'exchange': 'TEST',
}
},
orient='index',
)
def test_cancel_policy_outside_init(self):
code = """
from zipline.api import cancel_policy, set_cancel_policy
def initialize(algo):
pass
def handle_data(algo, data):
set_cancel_policy(cancel_policy.NeverCancel())
"""
algo = self.make_algo(script=code)
with self.assertRaises(SetCancelPolicyPostInit):
algo.run()
def test_cancel_policy_invalid_param(self):
code = """
from zipline.api import set_cancel_policy
def initialize(algo):
set_cancel_policy("foo")
def handle_data(algo, data):
pass
"""
algo = self.make_algo(script=code)
with self.assertRaises(UnsupportedCancelPolicy):
algo.run()
def test_zipline_api_resolves_dynamically(self):
# Make a dummy algo.
algo = self.make_algo(
initialize=lambda context: None,
handle_data=lambda context, data: None,
)
# Verify that api methods get resolved dynamically by patching them out
# and then calling them
for method in algo.all_api_methods():
name = method.__name__
sentinel = object()
def fake_method(*args, **kwargs):
return sentinel
setattr(algo, name, fake_method)
with ZiplineAPI(algo):
self.assertIs(sentinel, getattr(zipline.api, name)())
def test_sid_datetime(self):
algo_text = """
from zipline.api import sid, get_datetime
def initialize(context):
pass
def handle_data(context, data):
aapl_dt = data.current(sid(1), "last_traded")
assert_equal(aapl_dt, get_datetime())
"""
self.run_algorithm(
script=algo_text,
namespace={'assert_equal': self.assertEqual},
)
def test_datetime_bad_params(self):
algo_text = """
from zipline.api import get_datetime
from pytz import timezone
def initialize(context):
pass
def handle_data(context, data):
get_datetime(timezone)
"""
algo = self.make_algo(script=algo_text)
with self.assertRaises(TypeError):
algo.run()
@parameterized.expand([
(-1000, 'invalid_base'),
(0, 'invalid_base'),
])
def test_invalid_capital_base(self, cap_base, name):
"""
Test that the appropriate error is being raised and orders aren't
filled for algos with capital base <= 0
"""
algo_text = """
def initialize(context):
pass
def handle_data(context, data):
order(sid(24), 1000)
"""
sim_params = SimulationParameters(
start_session=pd.Timestamp("2006-01-04", tz='UTC'),
end_session=pd.Timestamp("2006-01-06", tz='UTC'),
capital_base=cap_base,
data_frequency="minute",
trading_calendar=self.trading_calendar
)
with self.assertRaises(ZeroCapitalError) as exc:
# make_algo will trace to TradingAlgorithm,
# where the exception will be raised
self.make_algo(script=algo_text, sim_params=sim_params)
# Make sure the correct error was raised
error = exc.exception
self.assertEqual(str(error),
'initial capital base must be greater than zero')
def test_get_environment(self):
expected_env = {
'arena': 'backtest',
'data_frequency': 'minute',
'start': pd.Timestamp('2006-01-04 14:31:00+0000', tz='utc'),
'end': pd.Timestamp('2006-01-05 21:00:00+0000', tz='utc'),
'capital_base': 100000.0,
'platform': 'zipline'
}
def initialize(algo):
self.assertEqual('zipline', algo.get_environment())
self.assertEqual(expected_env, algo.get_environment('*'))
def handle_data(algo, data):
pass
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_get_open_orders(self):
def initialize(algo):
algo.minute = 0
def handle_data(algo, data):
if algo.minute == 0:
# Should be filled by the next minute
algo.order(algo.sid(1), 1)
# Won't be filled because the price is too low.
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
algo.order(
algo.sid(2), 1, style=LimitOrder(0.01, asset=algo.sid(2))
)
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [1, 2])
self.assertEqual(all_orders[1], algo.get_open_orders(1))
self.assertEqual(len(all_orders[1]), 1)
self.assertEqual(all_orders[2], algo.get_open_orders(2))
self.assertEqual(len(all_orders[2]), 3)
if algo.minute == 1:
# First order should have filled.
# Second order should still be open.
all_orders = algo.get_open_orders()
self.assertEqual(list(all_orders.keys()), [2])
self.assertEqual([], algo.get_open_orders(1))
orders_2 = algo.get_open_orders(2)
self.assertEqual(all_orders[2], orders_2)
self.assertEqual(len(all_orders[2]), 3)
for order_ in orders_2:
algo.cancel_order(order_)
all_orders = algo.get_open_orders()
self.assertEqual(all_orders, {})
algo.minute += 1
self.run_algorithm(initialize=initialize, handle_data=handle_data)
def test_schedule_function_custom_cal(self):
# run a simulation on the XSHG cal, and schedule a function
# using the China equities (CN_EQUITIES) cal
algotext = """
from zipline.api import (
schedule_function, get_datetime, time_rules, date_rules, calendars,
)
def initialize(context):
schedule_function(
func=log_nyse_open,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
calendar=calendars.CN_EQUITIES,
)
schedule_function(
func=log_nyse_close,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_close(),
calendar=calendars.CN_EQUITIES,
)
context.nyse_opens = []
context.nyse_closes = []
def log_nyse_open(context, data):
context.nyse_opens.append(get_datetime())
def log_nyse_close(context, data):
context.nyse_closes.append(get_datetime())
"""
algo = self.make_algo(
script=algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("XSHG"),
)
)
algo.run()
nyse = get_calendar("XSHG")
for minute in algo.nyse_opens:
# each minute should be a nyse session open
session_label = nyse.minute_to_session_label(minute)
session_open = nyse.session_open(session_label)
self.assertEqual(session_open, minute)
for minute in algo.nyse_closes:
# each minute should be a minute before a nyse session close
session_label = nyse.minute_to_session_label(minute)
session_close = nyse.session_close(session_label)
self.assertEqual(session_close - timedelta(minutes=1), minute)
# Test that passing an invalid calendar parameter raises an error.
erroring_algotext = dedent(
"""
from zipline.api import schedule_function
from trading_calendars import get_calendar
def initialize(context):
schedule_function(func=my_func, calendar=get_calendar('XNYS'))
def my_func(context, data):
pass
"""
)
algo = self.make_algo(
script=erroring_algotext,
sim_params=self.make_simparams(
trading_calendar=get_calendar("CMES"),
),
)
with self.assertRaises(ScheduleFunctionInvalidCalendar):
algo.run()
def test_schedule_function(self):
us_eastern = pytz.timezone('US/Eastern')
def incrementer(algo, data):
algo.func_called += 1
curdt = algo.get_datetime().tz_convert(pytz.utc)
self.assertEqual(
curdt,
us_eastern.localize(
datetime.datetime.combine(
curdt.date(),
datetime.time(9, 31)
),
),
)
def initialize(algo):
algo.func_called = 0
algo.days = 1
algo.date = None
algo.schedule_function(
func=incrementer,
date_rule=date_rules.every_day(),
time_rule=time_rules.market_open(),
)
def handle_data(algo, data):
if not algo.date:
algo.date = algo.get_datetime().date()
if algo.date < algo.get_datetime().date():
algo.days += 1
algo.date = algo.get_datetime().date()
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
)
algo.run()
self.assertEqual(algo.func_called, algo.days)
def test_event_context(self):
expected_data = []
collected_data_pre = []
collected_data_post = []
function_stack = []
def pre(data):
function_stack.append(pre)
collected_data_pre.append(data)
def post(data):
function_stack.append(post)
collected_data_post.append(data)
def initialize(context):
context.add_event(Always(), f)
context.add_event(Always(), g)
def handle_data(context, data):
function_stack.append(handle_data)
expected_data.append(data)
def f(context, data):
function_stack.append(f)
def g(context, data):
function_stack.append(g)
algo = self.make_algo(
initialize=initialize,
handle_data=handle_data,
create_event_context=CallbackManager(pre, post),
)
algo.run()
self.assertEqual(len(expected_data), 480)
self.assertEqual(collected_data_pre, expected_data)
self.assertEqual(collected_data_post, expected_data)
self.assertEqual(
len(function_stack),
2400,
'Incorrect number of functions called: %s != 2400' %
len(function_stack),
)
expected_functions = [pre, handle_data, f, g, post] * 60030
for n, (f, g) in enumerate(zip(function_stack, expected_functions)):
self.assertEqual(
f,
g,
'function at position %d was incorrect, expected %s but got %s'
% (n, g.__name__, f.__name__),
)
@parameterized.expand([
('daily',),
('minute'),
])
def test_schedule_function_rule_creation(self, mode):
def nop(*args, **kwargs):
return None
self.sim_params.data_frequency = mode
algo = self.make_algo(
initialize=nop,
handle_data=nop,
sim_params=self.sim_params,
)
# Schedule something for NOT Always.
# Compose two rules to ensure calendar is set properly.
algo.schedule_function(nop, time_rule=Never() & Always())
event_rule = algo.event_manager._events[1].rule
self.assertIsInstance(event_rule, OncePerDay)
self.assertEqual(event_rule.cal, algo.trading_calendar)
inner_rule = event_rule.rule
self.assertIsInstance(inner_rule, ComposedRule)
self.assertEqual(inner_rule.cal, algo.trading_calendar)
first = inner_rule.first
second = inner_rule.second
composer = inner_rule.composer
self.assertIsInstance(first, Always)
self.assertEqual(first.cal, algo.trading_calendar)
self.assertEqual(second.cal, algo.trading_calendar)
if mode == 'daily':
self.assertIsInstance(second, Always)
else:
self.assertIsInstance(second, ComposedRule)
self.assertIsInstance(second.first, Never)
self.assertEqual(second.first.cal, algo.trading_calendar)
self.assertIsInstance(second.second, Always)
self.assertEqual(second.second.cal, algo.trading_calendar)
self.assertIs(composer, ComposedRule.lazy_and)
def test_asset_lookup(self):
algo = self.make_algo()
# this date doesn't matter
start_session = pd.Timestamp("2000-01-01", tz="UTC")
# Test before either PLAY existed
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2001-12-01', tz='UTC')
)
with self.assertRaises(SymbolNotFound):
algo.symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.symbols('PLAY')
# Test when first PLAY exists
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2002-12-01', tz='UTC')
)
list_result = algo.symbols('PLAY')
self.assertEqual(3, list_result[0])
# Test after first PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2004-12-01', tz='UTC')
)
self.assertEqual(3, algo.symbol('PLAY'))
# Test after second PLAY begins
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2005-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
# Test after second PLAY ends
algo.sim_params = algo.sim_params.create_new(
start_session,
pd.Timestamp('2006-12-01', tz='UTC')
)
self.assertEqual(4, algo.symbol('PLAY'))
list_result = algo.symbols('PLAY')
self.assertEqual(4, list_result[0])
# Test lookup SID
self.assertIsInstance(algo.sid(3), Equity)
self.assertIsInstance(algo.sid(4), Equity)
# Supplying a non-string argument to symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.symbol(1)
with self.assertRaises(TypeError):
algo.symbol((1,))
with self.assertRaises(TypeError):
algo.symbol({1})
with self.assertRaises(TypeError):
algo.symbol([1])
with self.assertRaises(TypeError):
algo.symbol({'foo': 'bar'})
def test_future_symbol(self):
""" Tests the future_symbol API function.
"""
algo = self.make_algo()
algo.datetime = pd.Timestamp('2006-12-01', tz='UTC')
# Check that we get the correct fields for the CLG06 symbol
cl = algo.future_symbol('CLG06')
self.assertEqual(cl.sid, 5)
self.assertEqual(cl.symbol, 'CLG06')
self.assertEqual(cl.root_symbol, 'CL')
self.assertEqual(cl.start_date, pd.Timestamp('2005-12-01', tz='UTC'))
self.assertEqual(cl.notice_date, pd.Timestamp('2005-12-20', tz='UTC'))
self.assertEqual(cl.expiration_date,
pd.Timestamp('2006-01-20', tz='UTC'))
with self.assertRaises(SymbolNotFound):
algo.future_symbol('')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('PLAY')
with self.assertRaises(SymbolNotFound):
algo.future_symbol('FOOBAR')
# Supplying a non-string argument to future_symbol()
# should result in a TypeError.
with self.assertRaises(TypeError):
algo.future_symbol(1)
with self.assertRaises(TypeError):
algo.future_symbol((1,))
with self.assertRaises(TypeError):
algo.future_symbol({1})
with self.assertRaises(TypeError):
algo.future_symbol([1])
with self.assertRaises(TypeError):
algo.future_symbol({'foo': 'bar'})
class TestSetSymbolLookupDate(zf.WithMakeAlgo, zf.ZiplineTestCase):
# January 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30 31
START_DATE = pd.Timestamp('2006-01-04', tz='UTC')
import pandas as pd
class CryptoDataDownload:
def __init__(self):
self.url = "https://www.cryptodatadownload.com/cdd/"
def fetch_default(self, exchange_name, base_symbol, quote_symbol, timeframe, include_all_volumes=False):
filename = "{}_{}{}_{}.csv".format(exchange_name, quote_symbol, base_symbol, timeframe)
base_vc = "Volume {}".format(base_symbol)
new_base_vc = "volume_base"
quote_vc = "Volume {}".format(quote_symbol)
new_quote_vc = "volume_quote"
df = pd.read_csv(self.url + filename, skiprows=1)
df = df[::-1]
df = df.drop(["Symbol"], axis=1)
df = df.rename({base_vc: new_base_vc, quote_vc: new_quote_vc, "Date": "date"}, axis=1)
if "d" in timeframe:
df["date"] = | pd.to_datetime(df["date"]) | pandas.to_datetime |
from typing import Tuple, Optional, List, Union, Dict
from typing import Any # pylint: disable=unused-import
from collections import OrderedDict # pylint: disable=unused-import
from datetime import datetime
import logging
import xmltodict
import pandas as pd
import numpy as np
from toolz import get_in
from .utils import get_equity_price
log = logging.getLogger(__name__)
ALL_MODELS = '__all_models'
def _typify(_: str, key: str, value: str
) -> Tuple[str, Union[str, float]]:
if key.endswith(('time', 'date', 'Time', 'Date', 'conid')):
# Skip converting @tradeDate, @date, @tradeTime and @toDate
return key, value
try:
return key, float(value)
except (ValueError, TypeError):
return key, value
class FlexStatement:
def __init__(self, flex_report_path: str) -> None:
self.flex_report_path = flex_report_path
with open(flex_report_path) as f:
self.report = xmltodict.parse(
f.read(),
postprocessor=_typify)
statements = self.report['FlexQueryResponse']['FlexStatements']
# FlexStatements could be one or multiple in source,
# consolidate it into a list
if int(statements['@count']) == 1:
self.stmts = [statements['FlexStatement'], ]
else:
self.stmts = statements['FlexStatement']
@property
def models(self) -> List[str]:
# FIXME: Not all models are presented under ChangeInNav.
return [e['ChangeInNAV']['@model'] for e in self.stmts]
def stmts_for_model(self,
model: str = ALL_MODELS
) -> Dict[str, 'OrderedDict[str, Any]']:
return {e['ChangeInNAV']['@model']: e
for e in self.stmts
if model in (ALL_MODELS, e['ChangeInNAV']['@model'])}
def nav(self, model: str) -> Tuple[float, float]:
stmts = self.stmts_for_model(model)
starting_nav = sum([model['ChangeInNAV']['@startingValue']
for model in stmts.values()])
ending_nav = sum([model['ChangeInNAV']['@endingValue']
for model in stmts.values()])
return starting_nav, ending_nav
def cash_flow(self, model: str) -> pd.Series:
# FIXME: Internal transfers and external withdrawals/deposits are not
# differentiated by this function.
# FIXME: This function sums transfers within the same day, hence
# hiding those transactions where the incoming and outgoing amounts
# sum to 0.
# FIXME: Only the active models are represented here. The ones which
# were not active during the reporting period do not show up in the flex
# report, hence cannot be handled by this function.
stmt_funds = self.stmt_funds(model)
cash_flows = {}
for model_name, funds in stmt_funds.items():
cash_flow = funds[
funds['@activityCode'].isin(('DEP', 'WITH')) &
(funds['@levelOfDetail'] == 'BaseCurrency')]['@amount']
cash_flows[model_name] = cash_flow.groupby(cash_flow.index).sum()
cash_flows_df = pd.concat(cash_flows, axis=1) # type: pd.DataFrame
# Summarize the results so that the end result is a single series
# with deduplicated index without any zero values
cash_flows_series = cash_flows_df.sum(axis=1).sort_index()
cash_flows_series = cash_flows_series[cash_flows_series != 0]
cash_flows_series.name = 'cash_flow'
return cash_flows_series
def returns(self, model: str) -> pd.Series:
# FIXME: this function has problems with reports where multiple
# models are present.
# Problem 1: the first two months are not showing up unless the
# nav is calculated as:
# navs[model_name] = summary[
# (summary['@total'] != 0.0) |
# (summary['@totalLong'] != 0.0) |
# (summary['@totalShort'] != 0.0)]['@total']
# Problem with this nav calculation that it screws the return calc.
# for multi-model periods
# Problem 2: the end returns are not in sync with portfolio analyst
# report.
equity_summary = self.equity_summary(model)
# Filter where cash is zero:
# When the lookback period goes beyond account inception
# or there was a paper trading account reset,
# leading zeros are filling equity_summary, which disturbs
# return calculation.
# equity_summary = equity_summary[model]
navs = {}
for model_name, summary in equity_summary.items():
navs[model_name] = summary[
(summary['@cash'] != 0.0) |
(summary['@cashLong'] != 0.0) |
(summary['@cashShort'] != 0.0)]['@total']
nav = \
(pd.DataFrame(navs)
.fillna(0) # Fill NAs caused by joining multiple series
.resample('1D').sum() # Sum per day to avoid daily duplicates
.dropna() # Skip weekends
.sum(axis=1)) # Reduce multiple series into one
df = pd.DataFrame(data={'nav': nav})
cash_flow = self.cash_flow(model)
cash_flow = cash_flow.resample('1D').sum().dropna()
df['cash_flow'] = cash_flow
df['cash_flow'].fillna(0, inplace=True)
df['initial_value'] = df['nav'].shift(1)
df['end_value'] = df['nav'].shift(0)
# Time Weighted Return
df['returns'] = \
(df['end_value'] - df['initial_value'] - df['cash_flow']) \
/ (df['initial_value'])
# Replace initial NaN with 0.0
df['returns'].iloc[0] = 0
# TODO: Add credit interest, fees, dividends
return df['returns']
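# Worked example of the time-weighted return formula above (hypothetical numbers):
# with initial_value=100000, end_value=106000 and a cash_flow deposit of 5000 on
# the same day, returns = (106000 - 100000 - 5000) / 100000 = 0.01, i.e. a 1%
# return that is not inflated by the deposit.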
def flex_dict_to_df(self,
model: str,
keys: List[str],
date_field: Optional[Union[str, Tuple[str, str]]],
local_tz: str = 'US/Eastern'
) -> Dict[str, pd.DataFrame]:
"""Returns a Multi-Index DataFrame with the parsed flex report.
Top level keys are the models, second level keys are the fields from
the flex statement. If model is not set all the models are returned.
"""
# TODO: split date_field into date_field and time_field
stmts = self.stmts_for_model(model)
def to_df(stmt: 'OrderedDict[str, Any]') -> pd.DataFrame:
df = pd.DataFrame(get_in(keys, stmt))
if df.empty:
return df
if isinstance(date_field, tuple):
df.index = \
pd.to_datetime(df[date_field[0]] + ' ' + df[date_field[1]])
df.index = df.index.tz_localize(local_tz).tz_convert('UTC')
elif date_field:
df.index = pd.to_datetime(df[date_field])
df.index = df.index.tz_localize(local_tz).tz_convert('UTC')
else:
pass
df.sort_index(inplace=True)
return df
dict_of_dfs = {model_name: to_df(stmt)
for model_name, stmt in stmts.items()}
return dict_of_dfs
@staticmethod
def dict_of_dfs_to_multiindex_df(dict_of_dfs: Dict[str, pd.DataFrame]
) -> pd.DataFrame:
df = pd.concat(
dict_of_dfs.values(), axis=1, keys=dict_of_dfs.keys()
) # type: pd.DataFrame
return df
def equity_summary(self, model: str) -> Dict[str, pd.DataFrame]:
equity_summary = self.flex_dict_to_df(
model,
['EquitySummaryInBase', 'EquitySummaryByReportDateInBase'],
date_field='@reportDate', local_tz='UTC')
return equity_summary
def trades(self, model: str) -> Dict[str, pd.DataFrame]:
trades = self.flex_dict_to_df(
model,
['Trades', 'Trade'],
date_field=('@tradeDate', '@tradeTime'), local_tz='US/Eastern')
return trades
def prior_period(self, model: str) -> Dict[str, pd.DataFrame]:
return self.flex_dict_to_df(
model,
['PriorPeriodPositions', 'PriorPeriodPosition'],
date_field='@date', local_tz='UTC')
def stmt_funds(self, model: str) -> Dict[str, pd.DataFrame]:
return self.flex_dict_to_df(
model,
['StmtFunds', 'StatementOfFundsLine'],
date_field='@date', local_tz='UTC')
def securities(self, model: str) -> Dict[str, pd.DataFrame]:
return self.flex_dict_to_df(
model,
['SecuritiesInfo', 'SecurityInfo'],
date_field=None)
def open_positions(self, model: str) -> Dict[str, pd.DataFrame]:
return self.flex_dict_to_df(
model,
['OpenPositions', 'OpenPosition'],
date_field='@reportDate')
@staticmethod
def calc_daily_qty(final_qty: float,
trades: pd.Series,
start_date: datetime,
end_date: datetime) -> pd.Series:
"""Calculates the daily position quantities based on the final quantity
and the trades occurred during the period."""
df = pd.concat(
[pd.DataFrame(
data={'position': [np.nan, final_qty]},
index=[start_date, end_date]),
trades.to_frame('trade_qty')]) # type: pd.DataFrame
df.sort_index(inplace=True)
df = df.resample('1D').sum()
df.index.name = 'dt'
df.reset_index(inplace=True)
# Global fillna won't work with pandas 0.18:
# https://github.com/pandas-dev/pandas/issues/7630
df['trade_qty'].fillna(0, inplace=True)
df['position'].fillna(0, inplace=True)
# FIXME: looping is not nice
# https://stackoverflow.com/questions/34855859/
# is-there-a-way-in-pandas-to-use-previous-row-value-
# in-dataframe-apply-when-previ
for i in reversed(range(len(df)-1)):
df.loc[i, 'position'] = \
df.loc[i + 1, 'position'] - df.loc[i + 1, 'trade_qty']
df.index = df['dt']
df.index.name = None
return df['position']
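# Illustrative usage sketch for calc_daily_qty (hypothetical inputs): a final
# quantity of 100 shares with a single trade of +40 on 2018-01-10 back-fills to a
# position of 60 before the trade date and 100 from the trade date onwards.
#
#   trades = pd.Series([40], index=[pd.Timestamp('2018-01-10')])
#   FlexStatement.calc_daily_qty(100, trades,
#                                pd.Timestamp('2018-01-02'),
#                                pd.Timestamp('2018-01-31'))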
def positions(self, model: str) -> pd.DataFrame:
# FIXME: IEX does not have coverage for non-US or de-listed stocks
# FIXME: this function is pretty slow
all_equity_summary = self.equity_summary(model)
all_trades = self.trades(model)
all_open_positions = self.open_positions(model)
stmts = self.stmts_for_model(model)
positions = {}
for model_name in all_equity_summary.keys():
start_date = \
pd.to_datetime(stmts[model_name]['@fromDate'], utc=True)
end_date = \
pd.to_datetime(stmts[model_name]['@toDate'], utc=True)
equity_summary = all_equity_summary[model_name]
trades = all_trades[model_name]
open_positions = all_open_positions[model_name]
symbols = pd.concat(
[get_in([model_name, '@symbol'], all_open_positions,
default=pd.DataFrame()
# -*- coding: utf-8 -*-
import json
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pymongo import ASCENDING, DESCENDING
from src.data import conn
from src.data.setting import TRADE_BEGIN_DATE
from src.data.future.setting import NAME2CODE_MAP, COLUMNS_MAP
from src.data.future.utils import get_download_file_index, move_data_files, get_exist_files, \
split_symbol
from src.data.setting import RAW_HQ_DIR, INSTRUMENT_TYPE
from src.util import get_post_text, get_html_text
from log import LogHandler
# TIME_WAITING = 1
log = LogHandler('data.log')
# ----------------------------------download data from web-----------------
def is_data_empty(data):
"""
Check whether the data exists.
:param data: pd.DataFrame or str
:return: True if the data does not exist
"""
if isinstance(data, pd.DataFrame):
return data.empty
elif not isinstance(data, str):
return True
elif re.search('doctype', data, re.I):
return True
elif len(data) < 100:
return True
else:
return False
def download_cffex_hq_by_date(date: datetime, category=0):
"""
Fetch daily trading data from the China Financial Futures Exchange (CFFEX); available since datetime(2010, 4, 30).
http://www.cffex.com.cn/sj/hqsj/rtj/201903/13/20190313_1.csv
No options data; the option interface is reserved for future use.
:param date: datetime
:param category: quote type, 0 for futures or 1 for options
:return str
"""
assert date <= datetime.today()
assert category in [0, 1]
url_template = 'http://www.cffex.com.cn/fzjy/mrhq/{}/{}/{}_1.csv'
url = url_template.format(date.strftime('%Y%m'), date.strftime('%d'), date.strftime('%Y%m%d'))
return get_html_text(url)
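# Illustrative call (hypothetical date): fetch the CFFEX daily futures CSV as text,
# then check it with is_data_empty() before parsing.
#
#   text = download_cffex_hq_by_date(datetime(2019, 3, 13))
#   if not is_data_empty(text):
#       ...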
def download_czce_hq_by_date(date: datetime, category=0):
"""
Fetch daily trading data from the Zhengzhou Commodity Exchange (CZCE).
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2019/20190314/FutureDataDaily.txt
http://www.czce.com.cn/cn/DFSStaticFiles/Future/2019/20190314/FutureDataDaily.htm
Options available since datetime(2017, 4, 19):
http://www.czce.com.cn/cn/DFSStaticFiles/Option/2018/20180816/OptionDataDaily.htm
http://www.czce.com.cn/cn/DFSStaticFiles/Option/2017/20171109/OptionDataDaily.htm
datetime(2015, 10, 8)
http://www.czce.com.cn/cn/exchange/2015/datadaily/20150821.htm
http://www.czce.com.cn/cn/exchange/2015/datadaily/20150930.txt
datetime(2010, 8, 24)
http://www.czce.com.cn/cn/exchange/jyxx/hq/hq20100806.html
datetime(2005, 4, 29)
:param date: datetime
:param category: quote type, 0 for futures or 1 for options
:return pd.DataFrame
"""
assert date <= datetime.today()
assert category in [0, 1]
index = 0
ret = pd.DataFrame()
if date > datetime(2015, 10, 7):
template = ['http://www.czce.com.cn/cn/DFSStaticFiles/Future/{}/{}/FutureDataDaily.htm',
'http://www.czce.com.cn/cn/DFSStaticFiles/Option/{}/{}/OptionDataDaily.htm']
url_template = template[category]
url = url_template.format(date.year, date.strftime('%Y%m%d'))
elif date > datetime(2010, 8, 23):
url_template = 'http://www.czce.com.cn/cn/exchange/{}/datadaily/{}.htm'
url = url_template.format(date.year, date.strftime('%Y%m%d'))
index = 3
elif date > datetime(2005, 4, 28):
url_template = 'http://www.czce.com.cn/cn/exchange/jyxx/hq/hq{}.html'
url = url_template.format(date.strftime('%Y%m%d'))
index = 1
else:
return pd.DataFrame()
text = get_html_text(url)
if is_data_empty(text):
return ret
tables = pd.read_html(text, header=0)
import json
import logging
import math
import os
import ntpath
import random
import sys
import time
from itertools import product, chain
from collections import defaultdict, Iterable
import glob
import numpy as np
import pandas as pd
import torch
import yaml
import imgaug as ia
from PIL import Image
from attrdict import AttrDict
from pycocotools import mask as cocomask
from pycocotools.coco import COCO
from tqdm import tqdm
from scipy import ndimage as ndi
from .cocoeval import COCOeval
from .steps.base import BaseTransformer
def read_yaml(filepath):
with open(filepath) as f:
config = yaml.load(f)
return AttrDict(config)
def init_logger():
logger = logging.getLogger('mapping-challenge')
logger.setLevel(logging.INFO)
message_format = logging.Formatter(fmt='%(asctime)s %(name)s >>> %(message)s',
datefmt='%Y-%m-%d %H-%M-%S')
# console handler for validation info
ch_va = logging.StreamHandler(sys.stdout)
ch_va.setLevel(logging.INFO)
ch_va.setFormatter(fmt=message_format)
# add the handlers to the logger
logger.addHandler(ch_va)
return logger
def get_logger():
return logging.getLogger('mapping-challenge')
def decompose(labeled):
nr_true = labeled.max()
masks = []
for i in range(1, nr_true + 1):
msk = labeled.copy()
msk[msk != i] = 0.
msk[msk == i] = 255.
masks.append(msk)
if not masks:
return [labeled]
else:
return masks
def create_annotations(meta, predictions, logger, category_ids, category_layers, save=False, experiment_dir='./'):
"""
Args:
meta: pd.DataFrame with metadata
predictions: list of labeled masks or numpy array of size [n_images, im_height, im_width]
logger: logging object
category_ids: list with ids of categories,
e.g. [None, 100] means, that no annotations will be created from category 0 data, and annotations
from category 1 will be created with category_id=100
category_layers:
save: True if one wants to save the submission, False if one wants it returned
experiment_dir: directory of experiment to save annotations, relevant if save==True
Returns: submission if save==False else True
"""
annotations = []
logger.info('Creating annotations')
category_layers_inds = np.cumsum(category_layers)
for image_id, (prediction, image_scores) in zip(meta["ImageId"].values, predictions):
for category_ind, (category_instances, category_scores) in enumerate(zip(prediction, image_scores)):
category_nr = np.searchsorted(category_layers_inds, category_ind, side='right')
if category_ids[category_nr] is not None:
masks = decompose(category_instances)
for mask_nr, (mask, score) in enumerate(zip(masks, category_scores)):
annotation = {}
annotation["image_id"] = int(image_id)
annotation["category_id"] = category_ids[category_nr]
annotation["score"] = score
annotation["segmentation"] = rle_from_binary(mask.astype('uint8'))
annotation['segmentation']['counts'] = annotation['segmentation']['counts'].decode("UTF-8")
annotation["bbox"] = bounding_box_from_rle(rle_from_binary(mask.astype('uint8')))
annotations.append(annotation)
if save:
submission_filepath = os.path.join(experiment_dir, 'submission.json')
with open(submission_filepath, "w") as fp:
fp.write(str(json.dumps(annotations)))
logger.info("Submission saved to {}".format(submission_filepath))
logger.info('submission head \n\n{}'.format(annotations[0]))
return True
else:
return annotations
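# Illustrative usage sketch (hypothetical inputs): predictions is a list of
# (prediction, image_scores) pairs aligned with meta["ImageId"], e.g.
#
#   annotations = create_annotations(meta, predictions, get_logger(),
#                                    category_ids=[None, 100],
#                                    category_layers=[1, 1],
#                                    save=False)
#
# This yields COCO-style annotation dicts for category 1 only, because category 0
# has a None category_id and is skipped.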
def rle_from_binary(prediction):
prediction = np.asfortranarray(prediction)
return cocomask.encode(prediction)
def bounding_box_from_rle(rle):
return list(cocomask.toBbox(rle))
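# Example for the two RLE helpers above (hypothetical mask): rle_from_binary()
# produces a pycocotools RLE dict for a binary uint8 mask, and
# bounding_box_from_rle() returns its [x, y, width, height] box.
#
#   mask = np.zeros((4, 4), dtype='uint8'); mask[1:3, 1:3] = 1
#   bounding_box_from_rle(rle_from_binary(mask))  # -> [1.0, 1.0, 2.0, 2.0]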
def read_params(ctx, fallback_file):
if ctx.params.__class__.__name__ == 'OfflineContextParams':
neptune_config = read_yaml(fallback_file)
params = neptune_config.parameters
else:
params = ctx.params
return params
def generate_metadata(data_dir,
meta_dir,
masks_overlayed_prefix,
process_train_data=True,
process_validation_data=True,
process_test_data=True,
public_paths=False,
competition_stage=1,
):
if competition_stage != 1:
raise NotImplementedError('only stage_1 is supported for now')
def _generate_metadata(dataset):
assert dataset in ["train", "test", "val"], "Unknown dataset!"
if dataset == "test":
dataset = "test_images"
images_path = os.path.join(data_dir, dataset)
if dataset != "test_images":
images_path = os.path.join(images_path, "images")
if public_paths:
raise NotImplementedError('public neptune paths not implemented')
else:
masks_overlayed_dirs, mask_overlayed_suffix = [], []
for file_path in glob.glob('{}/*'.format(meta_dir)):
if ntpath.basename(file_path).startswith(masks_overlayed_prefix):
masks_overlayed_dirs.append(file_path)
mask_overlayed_suffix.append(ntpath.basename(file_path).replace(masks_overlayed_prefix, ''))
df_dict = defaultdict(lambda: [])
for image_file_path in tqdm(sorted(glob.glob('{}/*'.format(images_path)))):
image_id = ntpath.basename(image_file_path).split('.')[0]
is_train = 0
is_valid = 0
is_test = 0
if dataset == "test_images":
n_buildings = None
is_test = 1
df_dict['ImageId'].append(image_id)
df_dict['file_path_image'].append(image_file_path)
df_dict['is_train'].append(is_train)
df_dict['is_valid'].append(is_valid)
df_dict['is_test'].append(is_test)
df_dict['n_buildings'].append(n_buildings)
for mask_dir_suffix in mask_overlayed_suffix:
df_dict['file_path_mask' + mask_dir_suffix].append(None)
else:
n_buildings = None
if dataset == "val":
is_valid = 1
else:
is_train = 1
df_dict['ImageId'].append(image_id)
df_dict['file_path_image'].append(image_file_path)
df_dict['is_train'].append(is_train)
df_dict['is_valid'].append(is_valid)
df_dict['is_test'].append(is_test)
df_dict['n_buildings'].append(n_buildings)
for mask_dir, mask_dir_suffix in zip(masks_overlayed_dirs, mask_overlayed_suffix):
file_path_mask = os.path.join(mask_dir, dataset, "masks", '{}.png'.format(image_id))
df_dict['file_path_mask' + mask_dir_suffix].append(file_path_mask)
return pd.DataFrame.from_dict(df_dict)
metadata = pd.DataFrame()
#!/usr/bin/env python3
import requests
import json
import pandas as pd
import tweepy
import os
import config as cfg
from datetime import datetime, timedelta
from pytz import timezone
def main():
# get data
nys_data = get_nys_data()
nys = get_nys_appt(nys_data, cfg.config["nys_sites"])
alb = get_nys_appt(nys_data, cfg.config["alb_sites"])
cvs = get_cvs_data()
pc = get_pc_data()
wal = get_walgreens_data()
# book urls
nys_url = 'https://am-i-eligible.covid19vaccine.health.ny.gov/'
cvs_url = 'https://www.cvs.com/immunizations/covid-19-vaccine'
wal_url = 'https://www.walgreens.com/findcare/vaccination/covid-19/location-screening'
pc_url = 'https://www.pricechopper.com/covidvaccine/new-york/'
# img urls
nys_img = '<img alt="" src="https://favicons.githubusercontent.com/am-i-eligible.covid19vaccine.health.ny.gov" height="13">'
cvs_img = '<img alt="" src="https://favicons.githubusercontent.com/www.cvs.com" height="13">'
wal_img = '<img alt="" src="https://favicons.githubusercontent.com/www.walgreens.com" height="13">'
pc_img = '<img alt="" src="https://favicons.githubusercontent.com/www.pricechopper.com" height="13">'
tz = timezone('EST')
date = str(datetime.now(tz).strftime('%Y-%m-%d %H:%M:%S'))
sites = ['SUNY Albany','Albany Armory','Price Chopper','CVS','Walgreens']
appointments = [ nys, alb, pc, cvs, wal ]
df_long = pd.DataFrame({'date': date, 'appointments': appointments, 'sites': sites})
df_long.head()
# wide format
df_wide = df_long.pivot(index = 'date', columns = 'sites', values = 'appointments').reset_index()
df_wide.head()
try:
df_historical = pd.read_csv('data/site-data.csv')
##Pull the most recent data (first row of history)
last_data = df_historical.iloc[0]
##Maybe tweet new availability
if nys.startswith( 'Available' ) and not last_data['SUNY Albany'].startswith( 'Available' ):
tweet_it('Vaccination appointments are available at SUNY Albany. ' + nys_url)
if cvs.startswith( 'Available' ) and not last_data['CVS'].startswith( 'Available' ):
tweet_it('Vaccination appointments are available at CVS. ' + cvs[9:] + " " + cvs_url)
if wal.startswith( 'Available' ) and not last_data['Walgreens'].startswith( 'Available' ):
tweet_it('Vaccination appointments are available at Walgreens. ' + wal_url)
if pc.startswith( 'Available' ) and not last_data['Price Chopper'].startswith( 'Available' ):
tweet_it('Vaccination appointments are available at Price Chopper. ' + pc[9:] + " " + pc_url)
if alb.startswith( 'Available' ) and not last_data['Albany Armory'].startswith( 'Available' ):
tweet_it('Vaccination appointments are available at Albany Armory (**resident restricted). ' + nys_url)
##Maybe tweet new unavailability
if "Unavailable" == nys and last_data['SUNY Albany'].startswith( 'Available' ):
tweet_it('SUNY Albany vaccination appointments are now closed.')
if "Unavailable" == cvs and last_data['CVS'].startswith( 'Available' ):
tweet_it('CVS vaccination appointments are now closed.')
if "Unavailable" == wal and last_data['Walgreens'].startswith( 'Available' ):
tweet_it('Walgreens vaccination appointments are now closed.')
if "Unavailable" == pc and last_data['Price Chopper'].startswith( 'Available' ):
tweet_it('Price Chopper vaccination appointments are now closed.')
if "Unavailable" == alb and last_data['Albany Armory'].startswith( 'Available' ):
tweet_it('Albany Armory vaccination appointments are now closed.')
except pd.errors.EmptyDataError:
df_historical = | pd.DataFrame() | pandas.DataFrame |
# ~~~~~~~~~~~~ Author: <NAME> ~~~~~~~~~~~~~~~
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import os
class Plot_helper(object):
def __init__(self, MainDir):
""" Function used for initializing the Plot_helper object
Args:
MainDir: Main directory of the DLMP project
"""
self.MainDir = MainDir
self.result_dir = os.path.join(self.MainDir,'Result')
def processing(self, raw, name, toname):
""" Helper function used for rearranging raw data according to phases
Args:
raw: Raw data to be processed, Dataframe
name: column name of the data to be processed, str
toname: new column name header to be stored in the processed dataset, str
Return:
processed: processed data that have been rearranged by phases
"""
# tem = raw.index[0][:-2]
# bus_list = [tem]
# for idx in raw.index:
# if idx[:-2] == tem:
# pass
# else:
# bus_list.append(idx[:-2])
# tem = idx[:-2]
bus_list = set([idx[:-2] for idx in raw.index])
processed = | pd.DataFrame(np.nan, index=bus_list, columns=[toname+'_'+'1', toname+'_'+'2', toname+'_'+'3']) | pandas.DataFrame |
from __future__ import print_function
import os
import datetime
import sys
import pandas as pd
import numpy as np
import requests
import copy
# import pytz
import seaborn as sns
from urllib.parse import quote
import monetio.obs.obs_util as obs_util
"""
NAME: cems_api.py
PGRMMER: <NAME> ORG: ARL
This code was written at the NOAA Air Resources Laboratory
Python 3
#################################################################
The key and url for the epa api should be stored in a file called
.epaapirc in the $HOME directory.
The contents should be
key: apikey
url: https://api.epa.gov/FACT/1.0/
TO DO
-----
Date is in local time (not daylight savings)
Need to convert to UTC. This will require an extra package or api.
Classes:
----------
EpaApiObject - Base class
EmissionsCall
FacilitiesData
MonitoringPlan
Emissions
CEMS
Functions:
----------
addquarter
get_datelist
findquarter
sendrequest
getkey
"""
def test_end(endtime, current):
# if endtime None return True
if isinstance(endtime, pd._libs.tslibs.nattype.NaTType):
return True
elif not endtime:
return True
# if endtime greater than current return true
elif endtime >= current:
return True
# if endtime less than current time return true
elif endtime < current:
return False
else:
return True
def get_filename(fname, prompt):
"""
determines if file exists. If prompt is True then will prompt for
new filename if file does not exist.
"""
if fname:
done = False
iii = 0
while not done:
if iii > 2:
done = True
iii += 1
if os.path.isfile(fname):
done = True
elif prompt:
istr = "\n" + fname + " is not a valid name for Facilities Data \n"
istr += "Please enter a new filename \n"
istr += "enter None to load from the api \n"
istr += "enter x to exit program \n"
fname = input(istr)
# print('checking ' + fname)
if fname == "x":
sys.exit()
if fname.lower() == "none":
fname = None
done = True
else:
fname = None
done = True
return fname
# def get_timezone_offset(latitude, longitude):
# """
# uses geonames API
# must store username in the $HOME/.epaapirc file
# geousername: username
# """
# username = getkey()
# print(username)
# username = username["geousername"]
# url = "http://api.geonames.org/timezoneJSON?lat="
# request = url + str(latitude)
# request += "&lng="
# request += str(longitude)
# request += "&username="
# request += username
# try:
# data = requests.get(request)
# except BaseException:
# data = -99
#
# jobject = data.json()
# print(jobject)
# print(data)
# # raw offset should give standard time offset.
# if data == -99:
# return 0
# else:
# offset = jobject["rawOffset"]
# return offset
def getkey():
"""
key and url should be stored in $HOME/.epaapirc
"""
dhash = {}
homedir = os.environ["HOME"]
fname = "/.epaapirc"
if os.path.isfile(homedir + fname):
with open(homedir + fname) as fid:
lines = fid.readlines()
for temp in lines:
temp = temp.split(" ")
dhash[temp[0].strip().replace(":", "")] = temp[1].strip()
else:
dhash["key"] = None
dhash["url"] = None
dhash["geousername"] = None
return dhash
def sendrequest(rqq, key=None, url=None):
"""
Method for sending requests to the EPA API
Inputs :
--------
rqq : string
request string.
Returns:
--------
data : response object
"""
if not key or not url:
keyhash = getkey()
apiurl = keyhash["url"]
key = keyhash["key"]
if key:
# apiurl = "https://api.epa.gov/FACT/1.0/"
rqq = apiurl + rqq + "?api_key=" + key
print("Request: ", rqq)
data = requests.get(rqq)
print("Status Code", data.status_code)
if data.status_code == 429:
print("Too many requests Please Wait before trying again.")
sys.exit()
else:
print("WARNING: your api key for EPA data was not found")
print("Please obtain a key from")
print("https://www.epa.gov/airmarkets/field-audit-checklist_tool-fact-api")
print("The key should be placed in $HOME/.epaapirc")
print("Contents of the file should be as follows")
print("key: apikey")
print("url: https://api.epa.gov/FACT/1.0/")
sys.exit()
return data
def get_lookups():
"""
Request to get lookups - descriptions of various codes.
"""
getstr = "emissions/lookUps"
# rqq = self.apiurl + "emissions/" + getstr
# rqq += "?api_key=" + self.key
data = sendrequest(getstr)
jobject = data.json()
dstr = unpack_response(jobject)
return dstr
# According to lookups MODC values
# 01 primary monitoring system
# 02 backup monitoring system
# 03 alternative monitoring system
# 04 backup monitoring system
# 06 average hour before/hour after
# 07 average hourly
# 21 negative value replaced with 0.
# 08 90th percentile value in Lookback Period
# 09 95th percentile value in Lookback Period
# etc.
# it looks like values between 1-4 ok
# 6-7 probably ok
# higher values should be flagged.
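# A minimal helper sketch based on the MODC notes above: codes 1-4 are treated as
# measured values, 6-7 as acceptable substitutions, and anything else is flagged
# for review. The function name and exact accepted set are assumptions made for
# illustration; they are not part of the EPA API.
def modc_is_suspect(modc):
    """Return True if an MODC code should be flagged per the notes above."""
    try:
        code = int(modc)
    except (TypeError, ValueError):
        return True
    return code not in (1, 2, 3, 4, 6, 7)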
def quarter2date(year, quarter):
if quarter == 1:
dt = datetime.datetime(year, 1, 1)
elif quarter == 2:
dt = datetime.datetime(year, 4, 1)
elif quarter == 3:
dt = datetime.datetime(year, 7, 1)
elif quarter == 4:
        dt = datetime.datetime(year, 10, 1)  # Q4 starts in October
return dt
def addquarter(rdate):
"""
INPUT
rdate : datetime object
RETURNS
newdate : datetime object
requests for emissions are made per quarter.
Returns first date in the next quarter from the input date.
"""
quarter = findquarter(rdate)
quarter += 1
year = rdate.year
if quarter > 4:
quarter = 1
year += 1
month = 3 * quarter - 2
newdate = datetime.datetime(year, month, 1, 0)
return newdate
def get_datelist_sub(r1, r2):
rlist = []
qt1 = findquarter(r1)
yr1 = r1.year
qt2 = findquarter(r2)
yr2 = r2.year
done = False
iii = 0
while not done:
rlist.append(quarter2date(yr1, qt1))
if yr1 > yr2:
done = True
elif yr1 == yr2 and qt1 == qt2:
done = True
qt1 += 1
if qt1 > 4:
qt1 = 1
yr1 += 1
        iii += 1
if iii > 30:
break
return rlist
def get_datelist(rdate):
"""
INPUT
rdate : tuple of datetime objects
(start date, end date)
RETURNS:
rdatelist : list of datetimes covering range specified by rdate by quarter.
Return list of first date in each quarter from
startdate to end date.
"""
if isinstance(rdate, list):
rdatelist = get_datelist_sub(rdate[0], rdate[1])
else:
rdatelist = [rdate]
return rdatelist
def findquarter(idate):
if idate.month <= 3:
qtr = 1
elif idate.month <= 6:
qtr = 2
elif idate.month <= 9:
qtr = 3
elif idate.month <= 12:
qtr = 4
return qtr
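# A small sanity-check sketch (assumed usage, not called elsewhere in this module)
# exercising the quarter helpers above on a fixed date.
def _demo_quarter_helpers():
    sample = datetime.datetime(2020, 5, 15)
    assert findquarter(sample) == 2
    assert quarter2date(2020, 2) == datetime.datetime(2020, 4, 1)
    assert addquarter(sample) == datetime.datetime(2020, 7, 1)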
def keepcols(df, keeplist):
tcols = df.columns.values
klist = []
for ttt in keeplist:
if ttt in tcols:
# if ttt not in tcols:
# print("NOT IN ", ttt)
# print('Available', tcols)
# else:
klist.append(ttt)
tempdf = df[klist]
return tempdf
def get_so2(df):
"""
drop columns that are not in keep.
"""
keep = [
# "DateHour",
"time local",
# "time",
"OperatingTime",
# "HourLoad",
# "u so2_lbs",
"so2_lbs",
# "AdjustedFlow",
# "UnadjustedFlow",
# "FlowMODC",
"SO2MODC",
"unit",
"stackht",
"oris",
"latitude",
"longitude",
]
df = keepcols(df, keep)
if not df.empty:
df = df[df["oris"] != "None"]
return df
class EpaApiObject:
def __init__(self, fname=None, save=True, prompt=False, fdir=None):
"""
Base class for all classes that send request to EpaApi.
to avoid sending repeat requests to the api, the default option
is to save the data in a file - specified by fname.
fname : str
fdir : str
save : boolean
prompt : boolean
"""
# fname is name of file that data would be saved to.
self.status_code = None
self.df = | pd.DataFrame() | pandas.DataFrame |
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import torch
import pickle
import dgl
import pandas as pd
import numpy as np
from scipy import sparse
import constants
def array_norm(array,clip=100):
data=array
upper=np.percentile(data,clip)
data_clip=np.clip(data,0,upper)
mean=np.mean(data_clip)
std=np.std(data_clip)
data_norm=(data-mean)/std
return data_norm
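# Minimal usage sketch (assumed, not used elsewhere in this script): z-score a
# visit-count vector while capping outliers above the 99th percentile when
# estimating the mean/std.
def _demo_array_norm():
    visits = np.array([1.0, 2.0, 3.0, 4.0, 1000.0])
    return array_norm(visits, clip=99)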
def sparse_mat_list_norm(sparse_mat_list,clip=100):
data=list()
for i in range(len(sparse_mat_list)):
data.append(sparse_mat_list[i].data)
data=np.concatenate(data)
upper=np.percentile(data,clip)
data=np.clip(data,0,upper)
data_norm_factor=1/np.max(data)
data_norm_list=list()
for i in range(len(sparse_mat_list)):
data_norm_list.append(sparse_mat_list[i]*data_norm_factor)
return data_norm_list
def BuildGraph(poi_cbg):
poi_num,cbg_num=poi_cbg.shape
poi,cbg,weight=sparse.find(poi_cbg)
poi=torch.tensor(poi)
cbg=torch.tensor(cbg)
weight=torch.tensor(weight,dtype=torch.float32)
g=dgl.heterograph({('cbg','cbg_poi','poi'):(cbg,poi),
('poi','poi_cbg','cbg'):(poi,cbg)})
if (torch.max(cbg).item()!=cbg_num-1):
g.add_nodes(cbg_num-1-torch.max(cbg).item(), ntype='cbg')
if (torch.max(poi).item()!=poi_num-1):
g.add_nodes(poi_num-1-torch.max(poi).item(), ntype='poi')
g.edges['cbg_poi'].data['num']=weight
g.edges['poi_cbg'].data['num']=weight
g1=dgl.to_homogeneous(g, edata=['num'])
edge_weight=g1.edata['num']
g1.edata.pop('num')
return g1,edge_weight
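# Minimal usage sketch (assumed): build the bipartite POI-CBG graph from a tiny
# 2x3 visit matrix. Relies on the scipy/torch/dgl imports above.
def _demo_build_graph():
    demo_poi_cbg = sparse.csr_matrix(np.array([[1.0, 0.0, 2.0],
                                               [0.0, 3.0, 0.0]]))
    g, edge_weight = BuildGraph(demo_poi_cbg)
    return g.num_nodes(), edge_weight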
def load_data(MSA_name):
MSA_NAME_FULL = constants.MSA_NAME_FULL_DICT[MSA_name]
epic_data_root = '../data'
data=dict()
# Load POI-CBG visiting matrices
f = open(os.path.join(epic_data_root, MSA_name, '%s_2020-03-01_to_2020-05-02_processed.pkl'%MSA_NAME_FULL), 'rb')
poi_cbg_visits_list = pickle.load(f)
f.close()
data['poi_cbg_visits_list']=poi_cbg_visits_list
# Load precomputed parameters to adjust(clip) POI dwell times
d = pd.read_csv(os.path.join(epic_data_root,MSA_name, 'parameters_%s.csv' % MSA_name))
poi_areas = d['feet'].values
poi_dwell_times = d['median'].values
poi_dwell_time_correction_factors = (poi_dwell_times / (poi_dwell_times+60)) ** 2
data['poi_areas']=poi_areas
data['poi_times']=poi_dwell_times
data['poi_dwell_time_correction_factors']=poi_dwell_time_correction_factors
# Load CBG ids for the MSA
cbg_ids_msa = pd.read_csv(os.path.join(epic_data_root,MSA_name,'%s_cbg_ids.csv'%MSA_NAME_FULL))
cbg_ids_msa.rename(columns={"cbg_id":"census_block_group"}, inplace=True)
# Load SafeGraph data to obtain CBG sizes (i.e., populations)
filepath = os.path.join(epic_data_root,"safegraph_open_census_data/data/cbg_b01.csv")
cbg_agesex = | pd.read_csv(filepath) | pandas.read_csv |
import argparse
from seqeval.metrics import classification_report
from seqeval.metrics import accuracy_score
from collections import defaultdict # available in Python 2.5 and newer
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
def read_conllu(file, column):
fin = open(file)
sentences = []
sentence = []
for line in fin:
if line.startswith('#'):
continue
if line is None or line == '\n':
sentences.append(sentence)
sentence = []
else:
columns = line.rstrip().split('\t')
if not '.' in columns[0]:
sentence.append(line.rstrip().split('\t')[column])
if len(sentence) > 0:
sentences.append(sentence)
return sentences
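# Minimal sketch (assumed, for illustration only): read_conllu expects tab-separated
# CoNLL-U style lines, skips '#' comment lines and ids containing '.', and collects
# the requested column per sentence.
def _demo_read_conllu():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.conllu', delete=False) as fout:
        fout.write('# sent_id = 1\n')
        fout.write('1\tJohn\tJohn\tPROPN\tNNP\tB-PER\n')
        fout.write('2\truns\trun\tVERB\tVBZ\tO\n')
        fout.write('\n')
        path = fout.name
    return read_conllu(path, column=5)  # -> [['B-PER', 'O']]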
parser = argparse.ArgumentParser()
parser.add_argument("--gold_file", type=str)
parser.add_argument("--pred_file", type=str)
parser.add_argument("--out_plot", type=str)
parser.add_argument("--column", type=int, default=5)
args = parser.parse_args()
y_true = read_conllu(args.gold_file, args.column)
y_pred = read_conllu(args.pred_file, args.column)
flat_y_true = [item for sublist in y_true for item in sublist]
flat_y_pred = [item for sublist in y_pred for item in sublist]
assert len(flat_y_true) == len(flat_y_pred)
print(classification_report(y_true, y_pred, digits=4))
print(accuracy_score(y_true, y_pred))
# Creates a confusion matrix
label_count = defaultdict(int)
for label in flat_y_true:
label_count[label] += 1
labels = []
for l,c in label_count.items():
if c > 20:
labels.append(l)
cm = confusion_matrix(flat_y_true, flat_y_pred, labels=labels)
cm_df = | pd.DataFrame(cm, index=labels, columns=labels) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
    # Functions that yield mismatching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
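# For reference (illustration only): _make_func_use_binop1('+') builds and exec's
# source equivalent to
#
#   def test_impl(A, B):
#       return A + B
#
# while _make_func_use_method_arg1('add') produces `return A.add(B)` instead.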
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
| pd.testing.assert_series_equal(S1, S2) | pandas.testing.assert_series_equal |
""""
This does not work
"""
import pandas as pd
import numpy as np
import os
import math
def sigmoid(x):
return 1 / (1 + math.exp(-x))
inputdir='../blend/'
preds0=pd.read_csv(inputdir+'vw_nn.csv.gz')
preds1=pd.read_csv(inputdir+'Nolearn_score_0.800750.csv.gz')
preds2=pd.read_csv(inputdir+'Nolearn_score_0.802373.csv.gz')
preds3=pd.read_csv(inputdir+'XGBOOST_Best_score_0.827847.csv.gz')
preds4=pd.read_csv(inputdir+'XGBOOST_Best_score_0.826534.csv.gz')
preds5= | pd.read_csv(inputdir+'XGBOOST_Best_score_0.820948.csv.gz') | pandas.read_csv |
# Copyright IBM All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""
Common functions for generating Dash components
"""
from typing import Optional, NamedTuple, List, Dict
import pandas as pd
from dash import html
# import dash_html_components as html
import dash_pivottable
from dash import dash_table
import dash_bootstrap_components as dbc
##########################################################################
# Generic Schema NamedTuple classes
##########################################################################
class ForeignKeySchema(NamedTuple):
table_name: str
foreign_keys: List[str]
class ScenarioTableSchema(NamedTuple):
table_name: str
index_columns: List[str]
value_columns: List[str]
foreign_tables: List[ForeignKeySchema]
class PivotTableConfig(NamedTuple):
table_name: str
rows: List[str]
cols: List[str]
vals: List[str]
rendererName: str
aggregatorName: str
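# Example configuration sketch (the table and column names below are hypothetical,
# mirroring the commented-out defaults in get_pivot_table further down):
_example_pivot_config = PivotTableConfig(
    table_name='Production',
    rows=['lineName'],
    cols=['timePeriodSeq'],
    vals=['line_capacity_utilization'],
    rendererName='Table',
    aggregatorName='Sum',
)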
##########################################################################
# VisualizationPage classes
##########################################################################
# class VisualizationPageConfig(NamedTuple):
# """Specification of visualization pages/tabs.
# """
# page_name: str # As used as the menu/tab name in the UI
# page_id: str # The Tab.value when using Tabs
# module_name: str # name of the Python module
# url: str # Part of the url string
# input_table_names: List[str] # Names of input tables used in page
# output_table_names:List[str] # Names of output tables used in page
##########################################################################
# Functions
##########################################################################
def table_type(df_column):
# Note - this only works with Pandas >= 1.0.0
# if sys.version_info < (3, 0): # Pandas 1.0.0 does not support Python 2
# return 'any'
if isinstance(df_column.dtype, pd.DatetimeTZDtype):
        return 'datetime'
elif (isinstance(df_column.dtype, pd.StringDtype) or
isinstance(df_column.dtype, pd.BooleanDtype) or
isinstance(df_column.dtype, pd.CategoricalDtype) or
isinstance(df_column.dtype, pd.PeriodDtype)):
return 'text'
elif (isinstance(df_column.dtype, pd.SparseDtype) or
isinstance(df_column.dtype, pd.IntervalDtype) or
isinstance(df_column.dtype, pd.Int8Dtype) or
isinstance(df_column.dtype, pd.Int16Dtype) or
isinstance(df_column.dtype, pd.Int32Dtype) or
isinstance(df_column.dtype, pd.Int64Dtype)):
return 'numeric'
else:
return 'any'
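# Minimal usage sketch (assumed): map each column of a DataFrame to the DataTable
# column type expected by Dash, as get_data_table below does per column.
def _demo_table_type():
    df = pd.DataFrame({'name': pd.array(['a', 'b'], dtype='string'),
                       'qty': pd.array([1, 2], dtype='Int64')})
    return {col: table_type(df[col]) for col in df.columns}  # {'name': 'text', 'qty': 'numeric'}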
def get_data_table(df, table_schema: Optional[ScenarioTableSchema] = None, editable: bool = False, data_table_id=None) -> dash_table.DataTable:
"""
Generates a DataTable for a DataFrame. For use in 'Prepare Data' and 'Explore Solution' pages.
:param df:
:param table_schema:
:return:
"""
if data_table_id is None:
data_table_id = 'my_data_table'
index_columns = []
if table_schema is not None:
df = df.set_index(table_schema.index_columns).reset_index() # ensures all index columns are first
index_columns = table_schema.index_columns
return dash_table.DataTable(
id=data_table_id,
data=df.to_dict('records'),
columns=[
{'name': i, 'id': i, 'type': table_type(df[i])}
for i in df.columns
],
fixed_rows={'headers': True},
editable=editable,
# fixed_columns={'headers': False, 'data': 0}, # Does NOT create a horizontal scroll bar
filter_action="native",
sort_action="native",
sort_mode="multi",
style_cell={
'textOverflow': 'ellipsis', # See https://dash.plotly.com/datatable/width to control column-name width
'maxWidth': 0, # Needs to be here for the 'ellipsis' option to work
'overflow' : 'hidden',
'font_family': 'sans-serif',
'font_size': '12px',
'textAlign': 'left'},
style_table={
'maxHeight': '400px',
'overflowY': 'scroll'
},
style_header={
'if': {
'column_id': index_columns
},
# 'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
style_data_conditional=([
{
'if': {
'column_id': index_columns
},
'fontWeight': 'bold',
# 'backgroundColor': '#0074D9',
# 'color': 'white'
}
]),
)
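# Usage sketch (the table and column names here are hypothetical): render a small
# DataFrame with its primary-key columns pinned first via a ScenarioTableSchema.
def _demo_get_data_table():
    df = pd.DataFrame({'value': [10, 20], 'plant': ['p1', 'p2'], 'product': ['a', 'b']})
    schema = ScenarioTableSchema(table_name='Demand',
                                 index_columns=['plant', 'product'],
                                 value_columns=['value'],
                                 foreign_tables=[])
    return get_data_table(df, table_schema=schema, editable=False, data_table_id='demo_table')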
def get_editable_data_table(df, table_schema: Optional[ScenarioTableSchema]=None) -> dash_table.DataTable:
"""
Generates an editable DataTable for a DataFrame. For use in 'Prepare Data' page.
:param df:
:param table_schema:
:return:
"""
index_columns = []
if table_schema is not None:
df = df.set_index(table_schema.index_columns).reset_index() # ensures all index columns are first
index_columns = table_schema.index_columns
return dash_table.DataTable(
data=df.to_dict('records'),
columns=[
{'name': i, 'id': i, 'type': table_type(df[i])}
for i in df.columns
],
fixed_rows={'headers': True},
editable=True,
# fixed_columns={'headers': False, 'data': 0}, # Does NOT create a horizontal scroll bar
filter_action="native",
sort_action="native",
sort_mode="multi",
style_cell={
'textOverflow': 'ellipsis', # See https://dash.plotly.com/datatable/width to control column-name width
'maxWidth': 0, # Needs to be here for the 'ellipsis' option to work
'overflow' : 'hidden',
'font_family': 'sans-serif',
'font_size': '12px',
'textAlign': 'left'},
style_table={
'maxHeight': '400px',
'overflowY': 'scroll'
},
style_header={
'if': {
'column_id': index_columns
},
# 'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
style_data_conditional=([
{
'if': {
'column_id': index_columns
},
'fontWeight': 'bold',
# 'backgroundColor': '#0074D9',
# 'color': 'white'
}
]),
)
def get_pivot_table(df, scenario_name, table_name, pivot_config) -> dash_pivottable.PivotTable:
"""
Generates a PivotTable for a DataFrame. For use in 'Prepare Data' and 'Explore Solution' pages.
:param df:
:param scenario_name:
:param table_name:
:return:
"""
if pivot_config is None:
pivot = dash_pivottable.PivotTable(
id=f"pivot-{scenario_name}-{table_name}",
data=df.to_dict('records'), # What is difference between rows and records?
# cols=['timePeriodSeq'],
colOrder="key_a_to_z",
# rows=['lineName'],
rowOrder="key_a_to_z",
rendererName="Table",
aggregatorName="Sum",
# vals=["line_capacity_utilization"],
# valueFilter={'Day of Week': {'Thursday': False}}
)
else:
# print(pivot_config)
pivot = dash_pivottable.PivotTable(
id=f"pivot-{scenario_name}-{table_name}",
data=df.to_dict('records'), # What is difference between rows and records?
cols = pivot_config.cols,
colOrder="key_a_to_z",
rows = pivot_config.rows,
rowOrder="key_a_to_z",
rendererName=pivot_config.rendererName,
aggregatorName=pivot_config.aggregatorName,
vals=pivot_config.vals,
# valueFilter={'Day of Week': {'Thursday': False}}
)
return pivot
def get_data_table_card_children(df, table_name:str, table_schema: Optional[ScenarioTableSchema] = None,
editable: bool = False, data_table_id:str=None):
return [
dbc.CardHeader(
table_name
# title=table_name,
# fullscreen=True
),
get_data_table(df, table_schema, editable, data_table_id)
]
def get_pivot_table_card_children(df, scenario_name, table_name, pivot_config: Optional[PivotTableConfig]=None):
return [
dbc.CardHeader(
table_name
# title=table_name,
# fullscreen=True
),
get_pivot_table(df, scenario_name, table_name, pivot_config)
]
#####################################
import functools
import plotly.express as px
import plotly.graph_objects as go
import traceback
def plotly_figure_exception_handler(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
"""
A function wrapper/decorator for catching all exceptions on methods that generate a Plotly Figure.
Returns a default Figure in case of an exception
"""
@functools.wraps(f)
def inner(self, *args, **kwargs):
# type: (*Any, **Any) -> Any
# TODO: only handle exception in 'deployed' Dash app, so that we can use the regular Dash exception handling in the UI for debugging.
# If in 'deployed' mode:
# return f(self, *args, **kwargs)
try:
return f(self, *args, **kwargs)
except Exception as ex:
# print(ex)
print(f"Exception handled by utils.dash_common_utils.plotly_figure_exception_handler decorator:")
traceback.print_exc()
# fig = px.scatter(x=[0, 1, 2, 3, 4], y=[0, 1, 4, 9, 16])
fig = go.Figure(data=[go.Table(header=dict(values=[f'Exception in {f.__name__}']),
cells=dict(values=[[traceback.format_exc()]]))
])
return fig
return inner
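# Usage sketch (assumed): the decorator above is written for instance methods (note
# the explicit `self`), so a typical application looks like this hypothetical class;
# the column names passed to plotly are placeholders.
class _DemoPlots:
    @plotly_figure_exception_handler
    def utilization_figure(self, df):
        # Any exception raised here is converted into a table Figure by the decorator.
        return px.bar(df, x='timePeriodSeq', y='utilization')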
################################
# def diff_dashtable(data, data_previous, row_id_name=None) -> List[Dict]:
# """Generate a diff of Dash DataTable data.
#
# From: https://community.plotly.com/t/detecting-changed-cell-in-editable-datatable/26219/4
# Modified from: https://community.plotly.com/t/detecting-changed-cell-in-editable-datatable/26219/2
#
# Parameters
# ----------
# data: DataTable property (https://dash.plot.ly/datatable/reference)
# The contents of the table (list of dicts)
# data_previous: DataTable property
# The previous state of `data` (list of dicts).
#
# Returns
# -------
# A list of dictionaries in form of [{row_id_name:, column_name:, current_value:,
# previous_value:}]
# """
# df, df_previous = pd.DataFrame(data=data), pd.DataFrame(data_previous)
#
# if row_id_name is not None:
# # If using something other than the index for row id's, set it here
# for _df in [df, df_previous]:
#
# # Why do this? Guess just to be sure?
# assert row_id_name in _df.columns
#
# _df = _df.set_index(row_id_name)
# else:
# row_id_name = "index"
#
# # Pandas/Numpy says NaN != NaN, so we cannot simply compare the dataframes. Instead we can either replace the
# # NaNs with some unique value (which is fastest for very small arrays, but doesn't scale well) or we can do
# # (from https://stackoverflow.com/a/19322739/5394584):
# # Mask of elements that have changed, as a dataframe. Each element indicates True if df!=df_prev
# df_mask = ~((df == df_previous) | ((df != df) & (df_previous != df_previous)))
#
# # ...and keep only rows that include a changed value
# df_mask = df_mask.loc[df_mask.any(axis=1)]
#
# changes = []
#
# # This feels like a place I could speed this up if needed
# for idx, row in df_mask.iterrows():
# row_id = row.name
#
# # Act only on columns that had a change
# row = row[row.eq(True)]
#
# for change in row.iteritems():
#
# changes.append(
# {
# row_id_name: row_id,
# "column_name": change[0],
# "current_value": df.at[row_id, change[0]],
# "previous_value": df_previous.at[row_id, change[0]],
# }
# )
#
# return changes
def diff_dashtable_mi(data, data_previous, index_columns: List[str] = None,
table_name: str = None, scenario_name: str = None) -> List[Dict]:
"""Generate a diff of Dash DataTable data.
Allow for multi-index tables.
Based on idea in: https://community.plotly.com/t/detecting-changed-cell-in-editable-datatable/26219/4
Modified from: https://community.plotly.com/t/detecting-changed-cell-in-editable-datatable/26219/2
Parameters
----------
data: DataTable property (https://dash.plot.ly/datatable/reference)
The contents of the table (list of dicts)
data_previous: DataTable property
The previous state of `data` (list of dicts).
Returns
-------
A list of dictionaries in form of [{row_id_name:, column_name:, current_value:,
previous_value:}]
:param data: data from DataTable
:param data_previous: data_previous from DataTable
:param index_columns: names of the index/primary-key columns
:param table_name: name of table
:param scenario_name: name of scenario
:returns changes: A list of dictionaries in form of [{row_idx:, column_name:, current_value:,
previous_value:, row_index:, table_name:, scenario_name:}]
"""
df, df_previous = | pd.DataFrame(data=data) | pandas.DataFrame |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python Client API unit tests.
Unit tests for core methods in the Data Commons Python Client API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pandas.util.testing import assert_series_equal, assert_frame_equal
from unittest import mock
import datacommons as dc
import datacommons.utils as utils
import pandas as pd
import json
import unittest
def post_request_mock(*args, **kwargs):
""" A mock POST requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data, status_code):
self.json_data = json_data
self.status_code = status_code
def json(self):
return self.json_data
# Get the request json
req = kwargs['json']
headers = kwargs['headers']
# If the API key does not match, then return 403 Forbidden
if 'x-api-key' not in headers or headers['x-api-key'] != 'TEST-API-KEY':
return MockResponse({}, 403)
# Mock responses for post requests to get_property_labels.
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_labels']:
if req['dcids'] == ['geoId/0649670']:
# Response for sending a single dcid to get_property_labels
out_arcs = ['containedInPlace', 'name', 'geoId', 'typeOf']
res_json = json.dumps({
'geoId/0649670': {
'inLabels': [],
'outLabels': out_arcs
}
})
return MockResponse({"payload": res_json}, 200)
elif req['dcids'] == ['State', 'County', 'City']:
# Response for sending multiple dcids to get_property_labels
in_arcs = ['typeOf']
out_arcs = ['name', 'provenance', 'subClassOf', 'typeOf', 'url']
res_json = json.dumps({
'City': {'inLabels': in_arcs, 'outLabels': out_arcs},
'County': {'inLabels': in_arcs, 'outLabels': out_arcs},
'State': {'inLabels': in_arcs, 'outLabels': out_arcs}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == ['dc/MadDcid']:
# Response for sending a dcid that doesn't exist to get_property_labels
res_json = json.dumps({
'dc/MadDcid': {
'inLabels': [],
'outLabels': []
}
})
return MockResponse({'payload': res_json}, 200)
elif req['dcids'] == []:
# Response for sending no dcids to get_property_labels
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_property_values
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_property_values']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'containedInPlace'\
and req['value_type'] == 'Town':
# Response for sending a request for getting Towns containedInPlace of
# Santa Clara County and Montgomery County.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': 'Los Gatos',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
{
'dcid': 'geoId/0643294',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
}
],
'out': []
},
'geoId/24031': {
'in': [
{
'dcid': 'geoId/2462850',
'name': 'Poolesville',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'name':
# Response for sending a request for the name of multiple dcids.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': [
{
'value': 'Santa Clara County',
'provenanceId': 'dc/sm3m2w3',
},
]
},
'geoId/24031': {
'in': [],
'out': [
{
'value': 'Montgomery County',
'provenanceId': 'dc/sm3m2w3',
},
]
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'geoId/24031']\
and req['property'] == 'madProperty':
# Response for sending a request with a property that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [],
'out': []
},
'geoId/24031': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']\
and req['property'] == 'containedInPlace':
# Response for sending a request with a single dcid that does not exist.
res_json = json.dumps({
'geoId/06085': {
'in': [
{
'dcid': 'geoId/0644112',
'name': '<NAME>',
'provenanceId': 'dc/sm3m2w3',
'types': [
'City',
'Town'
]
},
],
'out': []
},
'dc/MadDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': {
'in': [],
'out': []
},
'dc/MadderDcid': {
'in': [],
'out': []
}
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == [] and req['property'] == 'containedInPlace':
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Mock responses for post requests to get_triples
if args[0] == utils._API_ROOT + utils._API_ENDPOINTS['get_triples']:
if req['dcids'] == ['geoId/06085', 'geoId/24031']:
# Response for sending a request with two valid dcids.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'geoId/24031': [
{
"subjectId": "geoId/24031",
"predicate": "name",
"objectValue": "Montgomery County"
},
{
"subjectId": "geoId/2467675",
"subjectName": "Rockville",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/24031",
"objectName": "Montgomery County"
},
{
"subjectId": "geoId/24031",
"predicate": "containedInPlace",
"objectId": "geoId/24",
"objectName": "Maryland"
},
]
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['geoId/06085', 'dc/MadDcid']:
# Response for sending a request where one dcid does not exist.
res_json = json.dumps({
'geoId/06085': [
{
"subjectId": "geoId/06085",
"predicate": "name",
"objectValue": "Santa Clara County"
},
{
"subjectId": "geoId/0649670",
"subjectName": "Mountain View",
"subjectTypes": [
"City"
],
"predicate": "containedInPlace",
"objectId": "geoId/06085",
"objectName": "Santa Clara County"
},
{
"subjectId": "geoId/06085",
"predicate": "containedInPlace",
"objectId": "geoId/06",
"objectName": "California"
},
],
'dc/MadDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']:
# Response for sending a request where both dcids do not exist.
res_json = json.dumps({
'dc/MadDcid': [],
'dc/MadderDcid': []
})
return MockResponse({'payload': res_json}, 200)
if req['dcids'] == []:
# Response for sending a request where no dcids are given.
res_json = json.dumps({})
return MockResponse({'payload': res_json}, 200)
# Otherwise, return an empty response and a 404.
return MockResponse({}, 404)
class TestGetPropertyLabels(unittest.TestCase):
""" Unit tests for get_property_labels. """
@mock.patch('requests.post', side_effect=post_request_mock)
def test_single_dcid(self, post_mock):
""" Calling get_property_labels with a single dcid returns a valid
result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['geoId/0649670'])
self.assertDictEqual(out_props,
{'geoId/0649670': ["containedInPlace", "name", "geoId", "typeOf"]})
# Test with out=False
in_props = dc.get_property_labels(['geoId/0649670'], out=False)
self.assertDictEqual(in_props, {'geoId/0649670': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_labels returns valid results with multiple
dcids.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['State', 'County', 'City']
expected_in = ["typeOf"]
expected_out = ["name", "provenance", "subClassOf", "typeOf", "url"]
# Test for outgoing property labels
out_props = dc.get_property_labels(dcids)
self.assertDictEqual(out_props, {
'State': expected_out,
'County': expected_out,
'City': expected_out,
})
# Test for incoming property labels
in_props = dc.get_property_labels(dcids, out=False)
self.assertDictEqual(in_props, {
'State': expected_in,
'County': expected_in,
'City': expected_in,
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_labels with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels(['dc/MadDcid'])
self.assertDictEqual(out_props, {'dc/MadDcid': []})
# Test for incoming property labels
in_props = dc.get_property_labels(['dc/MadDcid'], out=False)
self.assertDictEqual(in_props, {'dc/MadDcid': []})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_labels with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Test for outgoing property labels
out_props = dc.get_property_labels([])
self.assertDictEqual(out_props, {})
# Test for incoming property labels
in_props = dc.get_property_labels([], out=False)
self.assertDictEqual(in_props, {})
class TestGetPropertyValues(unittest.TestCase):
""" Unit tests for get_property_values. """
# --------------------------- STANDARD UNIT TESTS ---------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_multiple_dcids(self, post_mock):
""" Calling get_property_values with multiple dcids returns valid
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
dcids = ['geoId/06085', 'geoId/24031']
# Get the containedInPlace Towns for Santa Clara and Montgomery County.
towns = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
self.assertDictEqual(towns, {
'geoId/06085': ['geoId/0643294', 'geoId/0644112'],
'geoId/24031': ['geoId/2462850']
})
# Get the name of Santa Clara and Montgomery County.
names = dc.get_property_values(dcids, 'name')
self.assertDictEqual(names, {
'geoId/06085': ['Santa Clara County'],
'geoId/24031': ['Montgomery County']
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_dcids(self, post_mock):
""" Calling get_property_values with dcids that do not exist returns empty
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
bad_dcids_1 = ['geoId/06085', 'dc/MadDcid']
bad_dcids_2 = ['dc/MadDcid', 'dc/MadderDcid']
# Get entities containedInPlace of Santa Clara County and a dcid that does
# not exist.
contained_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
self.assertDictEqual(contained_1, {
'geoId/06085': ['geoId/0644112'],
'dc/MadDcid': []
})
# Get entities containedInPlace for two dcids that do not exist.
contained_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace')
self.assertDictEqual(contained_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_bad_property(self, post_mock):
""" Calling get_property_values with a property that does not exist returns
empty results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
    # Get property values for a property that does not exist.
prop_vals = dc.get_property_values(
['geoId/06085', 'geoId/24031'], 'madProperty')
self.assertDictEqual(prop_vals, {
'geoId/06085': [],
'geoId/24031': []
})
@mock.patch('requests.post', side_effect=post_request_mock)
def test_no_dcids(self, post_mock):
""" Calling get_property_values with no dcids returns empty results. """
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Get property values with an empty list of dcids.
prop_vals = dc.get_property_values([], 'containedInPlace')
self.assertDictEqual(prop_vals, {})
# ---------------------------- PANDAS UNIT TESTS ----------------------------
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series(self, post_mock):
""" Calling get_property_values with a Pandas Series returns the correct
results.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series.
dcids = pd.Series(['geoId/06085', 'geoId/24031'])
expected = pd.Series([
['geoId/0643294', 'geoId/0644112'],
['geoId/2462850']
])
# Call get_property_values with the series as input
actual = dc.get_property_values(
dcids, 'containedInPlace', out=False, value_type='Town')
assert_series_equal(actual, expected)
@mock.patch('requests.post', side_effect=post_request_mock)
def test_series_bad_dcids(self, post_mock):
""" Calling get_property_values with a Pandas Series and dcids that does not
exist resturns an empty result.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# The given and expected series
bad_dcids_1 = pd.Series(['geoId/06085', 'dc/MadDcid'])
bad_dcids_2 = pd.Series(['dc/MadDcid', 'dc/MadderDcid'])
expected_1 = pd.Series([['geoId/0644112'], []])
expected_2 = pd.Series([[], []])
# Call get_property_values with series as input
actual_1 = dc.get_property_values(bad_dcids_1, 'containedInPlace', out=False)
actual_2 = dc.get_property_values(bad_dcids_2, 'containedInPlace', out=False)
# Assert the results are correct
| assert_series_equal(actual_1, expected_1) | pandas.util.testing.assert_series_equal |
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import altair as alt
from requests import get
import re
import os
from bs4 import BeautifulSoup
from urllib.request import Request, urlopen
import datetime
import time
import matplotlib.pyplot as plt
import statsmodels.api as sm
from geopy.geocoders import Nominatim
from geopy.distance import geodesic
geolocator = Nominatim(user_agent='myuseragent')
import lxml
import plotly.express as px
from PIL import Image
#with open("styles/style.css") as f:
# st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
st.set_page_config(
page_title="O/U Hockey Analytics",
page_icon=":ice_hockey_stick_and_puck:"
)
#Dummy data to get the header to display correctly
st.markdown("""<Head>
<Title> Test Title</Title><link rel="shortcut icon" href="favicon.ico" type="image/x-icon"> </Head>""",unsafe_allow_html=True)
#Title/Header
st.markdown("""<h1 style="text-align:center;color:white;font-weight:bolder;font-size:70px;font-family:helvetica; background:
-webkit-linear-gradient(#a73305,#000000,#093ff0); -webkit-background-clip:
text;-webkit-text-fill-color: transparent;">NHL<br>Wager<br>Analytics</h1>""",unsafe_allow_html=True)
# Load data
data_load_state = st.text('Checking and Fetching Data...')
#####################################
#### Data Gathering and Cleaning ####
#####################################
master_df = pd.read_csv('master_df.csv')
master_df = master_df.dropna(thresh=10)
start = pd.to_datetime(master_df.Date[-1:]).dt.date.values[0]+datetime.timedelta(days=1)
today = datetime.date.today()
yesterday = today-datetime.timedelta(days = 1)
#Function to convert a date range into a list of yyyymmdd integers
def covert_dates(date1, date2):
covert_list = []
days = pd.date_range(date1, date2, freq='d')
for i in range(len(days)):
covert_list.append(int(days[i].strftime('%Y%m%d')))
return covert_list
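# Minimal sketch of covert_dates (assumed dates, not from the app's data):
# covert_dates(datetime.date(2021, 1, 30), datetime.date(2021, 2, 1))
#   -> [20210130, 20210131, 20210201]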
#Function to fetch missing data
@st.cache
def get_data(date1, date2):
new_df = pd.DataFrame()
for day in covert_dates(date1, date2):
site = f"https://sportsdatabase.com/nhl/query?output=default&sdql=date%3D{day}&submit=++S+D+Q+L+%21++"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page)
tables = soup.find('table', attrs={'id':'DT_Table'})
page_df = pd.read_html(str(tables))[0]
new_df = pd.concat([new_df, page_df])
time.sleep(1)
return new_df
#Check if the data needs updating
if start <= today:
new_data = get_data(start, today)
master_df = pd.concat([master_df, new_data])
#Save updated data as csv
#master_df.to_csv("master_df.csv", index=False)
def clean_data(df):
df.Date =pd.to_datetime(df.Date)
df= df.sort_values(by=['Team', 'Date']).reset_index()
df.insert(2, "Date_Prev", df.Date.shift(1))
df.insert(2, "Days_Rest", (df.Date_Prev-df.Date)*-1)
df = df.drop(['index','Season', 'P1', 'P2', 'P3'], axis=1)
return df
#Function to identify a team change, used to break streak counts
def trips(home_or_away, TeamChange, Site):
list =[]
x = 0
for i, j in zip(TeamChange, Site):
if i == False:
x = x
else:
x = 0
if j == home_or_away:
x += 1
else:
x = 0
list.append(x)
return list
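# Worked sketch of trips() with assumed inputs: it counts consecutive home games,
# resetting whenever the team changes or the site flips to away.
# trips("home", TeamChange=[False, False, True, False],
#       Site=["home", "home", "home", "away"])
#   -> [1, 2, 1, 0]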
#Function to calculate the distance the road team is from home
def distance_calc(df):
df.insert(4,"Team_City", df.Team.map(team_dict['City']))
df.insert(6,"Opp_City", df.Opp.map(team_dict['City']))
df.insert(9,"Team_point", df.Team.map(team_dict['Citypoint']))
df.insert(10,"Opp_point", df.Opp.map(team_dict['Citypoint']))
df['Distance'] = df.apply(lambda x: geodesic(x['Team_point'],x['Opp_point']).km, axis=1)
df['Team_distance'] = df.apply(lambda x: 0 if x.Site == "home" else x.Distance, axis=1)
df['Opp_distance'] = df.apply(lambda x: 0 if x.Site == "away" else x.Distance, axis=1)
df = df.drop(['Team_point','Distance','Opp_point'], axis=1)
return df
#Function to count the current streak of home or away games
def road_trips(df):
df.insert(4, "TeamChange", df["Team"].shift(1, fill_value=df["Team"].head(1)) != df["Team"])
df.insert(10, "Home_Stand", trips("home", df.TeamChange, df.Site))
df.insert(11, "Road_Trip", trips("away", df.TeamChange, df.Site))
df.Days_Rest = df.Days_Rest.dt.days
df.Days_Rest = df.Days_Rest.fillna(5)
df.Days_Rest = df.Days_Rest.astype(int)-1
df.loc[df.Days_Rest < 0, 'Days_Rest'] = 5
df = df.drop('TeamChange', axis=1)
return df
#Function to pair games into a single record -- for O/U analysis
def opp_func (df):
df.insert(2,"Opp_Days_Rest", eda_df.Oppkey.map(opp_days_rest))
df.insert(10,"Opp_home_stand", eda_df.Oppkey.map(opp_home_stand))
df.insert(11,"Opp_road_trip", eda_df.Oppkey.map(opp_road_trip))
return df
#Function to calculate the unit return (based on American odds) of each game and team
def unit_value(Line, Result):
if Line < 0 and Result == 'W':
return 1
elif Line < 0 and Result == 'L':
return Line/100
elif Line > 0 and Result == 'W':
return Line/100
elif Line > 0 and Result == 'L':
return -1
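# Worked examples of unit_value() with assumed American-odds lines:
# unit_value(-150, 'W') -> 1      (favourite wins: one unit of profit)
# unit_value(-150, 'L') -> -1.5   (risked 1.5 units to win 1)
# unit_value(120, 'W')  -> 1.2    (underdog wins 1.2 units)
# unit_value(120, 'L')  -> -1     (lost the single unit staked)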
nhltable= pd.read_csv('nhltable.csv')
team_dict = nhltable.set_index('Team').to_dict()
eda_df = clean_data(master_df)
eda_df = distance_calc(eda_df)
eda_df = road_trips(eda_df)
#Adding Division
eda_df = pd.merge(eda_df, nhltable[['Team', 'Division']], on='Team', how="left" )
#Create keys for pairing
Teamkey = []
Oppkey = []
for i in range(len(eda_df.Date)):
Teamkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Team[i])
Oppkey.append(str(covert_dates(eda_df.Date[i], eda_df.Date[i])[0])+eda_df.Opp[i])
eda_df['Oppkey'] = Oppkey
opp_days_rest = dict(zip(Teamkey, eda_df.Days_Rest))
opp_home_stand = dict(zip(Teamkey, eda_df.Home_Stand))
opp_road_trip = dict(zip(Teamkey, eda_df.Road_Trip))
opp_func(eda_df)
eda_df.Final = eda_df.Final.fillna('0-0')
eda_df = eda_df.fillna(0)
eda_df = pd.concat([eda_df, pd.get_dummies(eda_df.OUr)], axis=1)
goals_df = eda_df['Final'].str.split('-', expand=True).rename(columns={0:'Team_Goals', 1:'Opp_Goals'}).astype(int)
eda_df = | pd.concat([eda_df, goals_df], axis=1) | pandas.concat |
import pytest
def test_concat_with_duplicate_columns():
import captivity
import pandas as pd
with pytest.raises(captivity.CaptivityException):
pd.concat(
[pd.DataFrame({"a": [1], "b": [2]}), pd.DataFrame({"c": [0], "b": [3]}),],
axis=1,
)
def test_concat_mismatching_columns():
import captivity
import pandas as pd
with pytest.raises(captivity.CaptivityException):
pd.concat(
[pd.DataFrame({"a": [1], "b": [2]}), | pd.DataFrame({"c": [0], "b": [3]}) | pandas.DataFrame |
"""
Author: <NAME>
Created: 14/08/2020 11:04 AM
"""
import os
import numpy as np
import pandas as pd
from basgra_python import run_basgra_nz, _trans_manual_harv, get_month_day_to_nonleap_doy
from input_output_keys import matrix_weather_keys_pet
from check_basgra_python.support_for_tests import establish_org_input, get_org_correct_values, get_lincoln_broadfield, \
test_dir, establish_peyman_input, _clean_harvest, base_auto_harvest_data, base_manual_harvest_data
from supporting_functions.plotting import plot_multiple_results # used in test development and debugging
verbose = False
drop_keys = [ # newly added keys that must be dropped initially to manage tests, datasets are subsequently re-created
'WAFC',
'IRR_TARG',
'IRR_TRIG',
'IRRIG_DEM',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP',
'MXPAW',
'PAW',
'RESEEDED',
]
view_keys = [
'WAL',
'WCL',
'DM',
'YIELD',
'BASAL',
'ROOTD',
'IRRIG_DEM',
'HARVFR',
'RYE_YIELD',
'WEED_YIELD',
'DM_RYE_RM',
'DM_WEED_RM',
'DMH_RYE',
'DMH_WEED',
'DMH',
'WAWP', # # mm # Water in non-frozen root zone at wilting point
'MXPAW', # mm # maximum Profile available water
'PAW', # mm Profile available water at the time step
]
def test_trans_manual_harv(update_data=False):
test_nm = 'test_trans_manual_harv'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
np.random.seed(1)
days_harvest.loc[:, 'harv_trig'] = np.random.rand(len(days_harvest))
np.random.seed(2)
days_harvest.loc[:, 'harv_targ'] = np.random.rand(len(days_harvest))
np.random.seed(3)
days_harvest.loc[:, 'weed_dm_frac'] = np.random.rand(len(days_harvest))
out = _trans_manual_harv(days_harvest, matrix_weather)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out, dropable=False)
def _output_checks(out, correct_out, dropable=True):
"""
base checker
:param out: basgra data from current test
:param correct_out: expected basgra data
:param dropable: boolean, if True, can drop output keys, allows _output_checks to be used for not basgra data and
for new outputs to be dropped when comparing results.
:return:
"""
if dropable:
# should normally be empty, but is here to allow easy checking of old tests against versions with a new output
drop_keys_int = [
]
out2 = out.drop(columns=drop_keys_int)
else:
out2 = out.copy(True)
# check shapes
assert out2.shape == correct_out.shape, 'something is wrong with the output shapes'
# check datatypes
    assert issubclass(out.values.dtype.type, np.floating), 'outputs of the model should all be floats'
out2 = out2.values
correct_out2 = correct_out.values
out2[np.isnan(out2)] = -9999.99999
correct_out2[np.isnan(correct_out2)] = -9999.99999
# check values match for sample run
isclose = np.isclose(out2, correct_out2)
asmess = '{} values do not match between the output and correct output with rtol=1e-05, atol=1e-08'.format(
(~isclose).sum())
assert isclose.all(), asmess
print(' model passed test\n')
def test_org_basgra_nz(update_data=False):
print('testing original basgra_nz')
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
# test against my saved version (simply to have all columns
data_path = os.path.join(test_dir, 'test_org_basgra.csv')
if update_data:
out.to_csv(data_path)
print(' testing against full dataset')
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
    # test against the original data provided by Simon Woodward
out.drop(columns=drop_keys, inplace=True) # remove all of the newly added keys
    print(" testing against Simon Woodward's original data")
correct_out2 = get_org_correct_values()
_output_checks(out, correct_out2)
def test_irrigation_trigger(update_data=False):
print('testing irrigation trigger')
# note this is linked to test_leap, so any inputs changes there should be mapped here
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_trigger_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irrigation_fraction(update_data=False):
print('testing irrigation fraction')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .60 # irrigation of 60% of what is needed to get to field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_irrigation_fraction_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_water_short(update_data=False):
print('testing water shortage')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.8
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_water_short_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_short_season(update_data=False):
print('testing short season')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 1
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_short_season_output.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_variable_irr_trig_targ(update_data=False):
print('testing time variable irrigation triggers and targets')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 10
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[matrix_weather.index > '2013-08-01', 'irr_trig'] = 0.7
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather.loc[(matrix_weather.index < '2012-08-01'), 'irr_targ'] = 0.8
matrix_weather.loc[(matrix_weather.index > '2015-08-01'), 'irr_targ'] = 0.8
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1
doy_irr = list(range(305, 367)) + list(range(1, 61))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, 'test_variable_irr_trig_targ.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_irr_paw(update_data=False):
test_nm = 'test_irr_paw'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 5
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 0.9
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
params['irr_frm_paw'] = 1
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_pet_calculation(update_data=False):
    # note this test was not as thoroughly investigated as it was not needed for my work stream
print('testing pet calculation')
params, matrix_weather, days_harvest, doy_irr = establish_peyman_input()
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, dll_path='default',
supply_pet=False)
data_path = os.path.join(test_dir, 'test_pet_calculation.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# Manual Harvest tests
def test_fixed_harvest_man(update_data=False):
test_nm = 'test_fixed_harvest_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 10
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 2000
days_harvest.loc[idx, 'harv_targ'] = 100
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_harv_trig_man(update_data=False):
    # test manual harvesting dates with a set trigger, weed fraction set to zero
test_nm = 'test_harv_trig_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_man(update_data=False):
# test manual harvesting trig set to zero +- target with weed fraction above 0
test_nm = 'test_weed_fraction_man'
print('testing: ' + test_nm)
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['fixed_removal'] = 0
params['opt_harvfrin'] = 1
days_harvest = base_manual_harvest_data()
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.5
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 2200
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2017-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 1000
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
# automatic harvesting tests
def test_auto_harv_trig(update_data=False):
test_nm = 'test_auto_harv_trig'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_auto_harv_fixed(update_data=False):
test_nm = 'test_auto_harv_fixed'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = base_auto_harvest_data(matrix_weather)
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fraction_auto(update_data=False):
# test auto harvesting trig set +- target with weed fraction above 0
test_nm = 'test_weed_fraction_auto'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
params['opt_harvfrin'] = 1
days_harvest = base_auto_harvest_data(matrix_weather)
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 2000
days_harvest.loc[idx, 'weed_dm_frac'] = 1.25
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 2500
days_harvest.loc[idx, 'harv_targ'] = 1500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.75
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_weed_fixed_harv_auto(update_data=False):
# test auto fixed harvesting trig set +- target with weed fraction above 0
test_nm = 'test_weed_fixed_harv_auto'
print('testing: ' + test_nm)
# test auto harvesting dates with a set trigger, weed fraction set to zero
params, matrix_weather, days_harvest, doy_irr = establish_org_input()
days_harvest = base_auto_harvest_data(matrix_weather)
params['fixed_removal'] = 1
params['opt_harvfrin'] = 1
idx = days_harvest.date < '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 1
days_harvest.loc[idx, 'harv_trig'] = 3000
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 0.5
idx = days_harvest.date >= '2014-01-01'
days_harvest.loc[idx, 'frac_harv'] = 0.75
days_harvest.loc[idx, 'harv_trig'] = 1500
days_harvest.loc[idx, 'harv_targ'] = 500
days_harvest.loc[idx, 'weed_dm_frac'] = 1
days_harvest.drop(columns=['date'], inplace=True)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, auto_harvest=True)
data_path = os.path.join(test_dir, '{}_data.csv'.format(test_nm))
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_reseed(update_data=False):
print('testing reseeding')
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield()
matrix_weather.loc[:, 'max_irr'] = 1
matrix_weather.loc[matrix_weather.index > '2015-08-01', 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = .90 # irrigation to 90% of field capacity
# these values are set to make observable changes in the results and are not reasonable values.
params['reseed_harv_delay'] = 120
params['reseed_LAI'] = 3
params['reseed_TILG2'] = 10
params['reseed_TILG1'] = 40
params['reseed_TILV'] = 5000
params['reseed_CLV'] = 100
params['reseed_CRES'] = 25
params['reseed_CST'] = 10
params['reseed_CSTUB'] = 0.5
doy_irr = list(range(305, 367)) + list(range(1, 91))
temp = pd.DataFrame(columns=days_harvest.keys())
for i, y in enumerate(days_harvest.year.unique()):
if y == 2011:
continue
temp.loc[i, 'year'] = y
temp.loc[i, 'doy'] = 152
temp.loc[i, 'frac_harv'] = 0
temp.loc[i, 'harv_trig'] = -1
temp.loc[i, 'harv_targ'] = 0
temp.loc[i, 'weed_dm_frac'] = 0
temp.loc[i, 'reseed_trig'] = 0.75
temp.loc[i, 'reseed_basal'] = 0.88
days_harvest = pd.concat((days_harvest, temp)).sort_values(['year', 'doy'])
days_harvest.loc[:, 'year'] = days_harvest.loc[:, 'year'].astype(int)
days_harvest.loc[:, 'doy'] = days_harvest.loc[:, 'doy'].astype(int)
days_harvest = _clean_harvest(days_harvest, matrix_weather)
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose)
to_plot = [ # used to check the test
'RESEEDED',
'PHEN',
'BASAL',
'YIELD',
'DM_RYE_RM',
'LAI',
'TILG2',
'TILG1',
'TILV',
'CLV',
'CRES',
'CST',
'CSTUB',
]
data_path = os.path.join(test_dir, 'test_reseed.csv')
if update_data:
out.to_csv(data_path)
correct_out = pd.read_csv(data_path, index_col=0)
_output_checks(out, correct_out)
def test_leap(update_data=False):
print('testing leap year')
passed_test = []
# note this is linked to test irrigation trigger, so any inputs changes there should be mapped here
params, matrix_weather, days_harvest, doy_irr = establish_org_input('lincoln')
matrix_weather = get_lincoln_broadfield() # this has a leap year in 2012 and 2016
matrix_weather.loc[:, 'max_irr'] = 15
matrix_weather.loc[:, 'irr_trig'] = 0.5
matrix_weather.loc[:, 'irr_targ'] = 1
matrix_weather = matrix_weather.loc[:, matrix_weather_keys_pet]
params['IRRIGF'] = 1 # irrigation to 100% of field capacity
doy_irr = list(range(305, 367)) + list(range(1, 91))
days_harvest = _clean_harvest(days_harvest, matrix_weather)
try:
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, run_365_calendar=True)
passed_test.append(False)
except AssertionError as val:
passed_test.append(True)
matrix_weather = matrix_weather.loc[~((matrix_weather.index.day == 29) & (matrix_weather.index.month == 2))]
try:
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, run_365_calendar=True)
passed_test.append(False)
except AssertionError as val:
passed_test.append(True)
mapper = get_month_day_to_nonleap_doy()
matrix_weather.loc[:, 'doy'] = [mapper[(m, d)] for m, d in
zip(matrix_weather.index.month, matrix_weather.index.day)]
out = run_basgra_nz(params, matrix_weather, days_harvest, doy_irr, verbose=verbose, run_365_calendar=True)
external_data_path = os.path.join(test_dir, 'test_irrigation_trigger_output.csv')
# note this is linked to test irrigation trigger
correct_out = | pd.read_csv(external_data_path) | pandas.read_csv |
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
def xtrain_and_test(df_all):
'''
    Get the training data and the test data
'''
df_label = pd.read_csv('../data/public/train.csv')
df_test_label = | pd.read_csv('../data/public/evaluation_public.csv') | pandas.read_csv |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import pandas as pd
import sys
import os
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import twitter_samples
from sklearn.model_selection import train_test_split
# directory to sentiment data
dir_name = '../../database_real/sentiment_data/'
# initialise sentiment analyser
analyser = SentimentIntensityAnalyzer()
# input: @signals, signals dataframe
# output: @filtered_signals, filtered signals dataframe
def SentimentFilter(ticker, signals):
sentiment_scores = starter_vader(ticker)
sentiment_scores['dates'] = pd.to_datetime(sentiment_scores['dates'])
# check if sentiment label contrasting with buy/sell signals
merged_df = signals.merge(sentiment_scores, how='left', left_on='Date', right_on='dates')
#print(merged_df.head())
# create new column for filtered signals
merged_df['filtered_signal'] = merged_df['signal']
buy_signal = (merged_df['signal'] == 1.0)
sell_signal = (merged_df['signal'] == -1.0)
pos_label = (merged_df['vader_label'] == 2)
neg_label = (merged_df['vader_label'] == 0)
# when there is a buy signal but a -ve label
merged_df[(buy_signal) & (neg_label)]['filtered_signal'] = 0.0
# when there is a sell signal but a +ve label
merged_df[(sell_signal) & (pos_label)]['filtered_signal'] = 0.0
# generate positions with filtered signals
merged_df['filtered_positions'] = merged_df['filtered_signal'].diff()
merged_df = merged_df.drop(['dates', 'compound_vader_score', 'hsi_average', 'signal', 'positions'], axis=1)
#print(merged_df)
filtered_signals = pd.merge(signals, merged_df, how="left", on="Date")
filtered_signals = filtered_signals.drop(columns=['positions', 'signal'])
filtered_signals = filtered_signals.set_index('Date')
filtered_signals = filtered_signals[~filtered_signals.index.duplicated()] # remove duplicate rows
#print(filtered_signals)
filtered_signals = filtered_signals.rename({'filtered_positions': 'positions', 'vader_label': 'vader_label', 'filtered_signal': 'signal'}, axis=1)
return filtered_signals
# output dataframe with vader sentiment label
def starter_vader(ticker_full):
ticker = ticker_full[0:4]
# get the full path of ticker
path = os.path.join(dir_name, 'data-news/data-aastock/' +
'data-' + ticker.zfill(5) + '-aastock.csv')
df = | pd.read_csv(path, names=['dates', 'news']) | pandas.read_csv |
import glob
import numpy as np
import pandas as pd
from statsmodels.stats.multicomp import pairwise_tukeyhsd
# from statsmodels.stats.multicomp import MultiComparison
from statsmodels.stats.libqsturng import psturng
from scipy.interpolate import UnivariateSpline, interp1d
def get_segments_mean(a, n):
'''
    Calculate mean values of every n items in a list (not a moving average)
    Input Parameters:
    -----------------
    a: a list or array of values
    n: length of segments; len(a) has to be divisible by n
Returns:
--------
res: a list of mean values of every n items in the given list
'''
assert len(a)%n == 0
res = []
for i in range(int(len(a)/n)):
a_temp = a[i*n : (i+1)*n]
temp = np.mean(a_temp)
res.append(temp)
return res
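# Minimal sketch (assumed values):
# get_segments_mean([1, 2, 3, 4, 5, 6], 2) -> [1.5, 3.5, 5.5]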
def header(f, N=10):
'''Print out the first N lines of a file
'''
with open(f) as myfile:
head = [next(myfile).strip() for x in range(N)]
return '\n'.join(head)
def tukeyTest(data, groups, alpha=0.05):
'''Perform pairwise Tukey test for data by groups
'''
# pairwise comparisons using Tukey's test, calculating p-values
res = pairwise_tukeyhsd(data, groups, alpha)
print('Summary of test:\n', res)
# print(dir(results))# prints out all attributes of an object
pVal = psturng(np.abs(res.meandiffs / res.std_pairs), len(res.groupsunique), res.df_total)
print('p values of all pair-wise tests:\n', pVal)
return res
def getInteriorAreaDF(codingFile, interiorAreaFile):
'''Read in data from corresponding filename coding and
    interior area files, merge the data by scrambled filenames,
and returns the merged data frame.
'''
dfCode = pd.read_csv(codingFile, header=0, sep='\t')
dfCode.columns = ['file_name', 'scrambled_file_name']
dfInteriorArea0 = pd.read_csv(interiorAreaFile, header=0, sep='\t')
dfInteriorArea0.columns = ['scrambled_file_name', 'interior_area']
dfInteriorArea = dfCode.merge(dfInteriorArea0, on = 'scrambled_file_name')
return dfInteriorArea
def getTotalAreaDF(codingFile, totalAreaFile):
'''Read in data from corresponding filename coding and
total area files, merge the data by scrambled filenames,
and returns the merged data frame.
'''
dfCode = pd.read_csv(codingFile, header=0, sep='\t')
dfCode.columns = ['file_name', 'scrambled_file_name']
dfTotalArea0 = pd.read_csv(totalAreaFile, header=0, sep='\t')
dfTotalArea0.columns = ['scrambled_file_name', 'interior_area']
dfTotalArea = dfCode.merge(dfTotalArea0, on = 'scrambled_file_name')
if len(dfTotalArea)==0:
# In case the total area was extracted from files with regular file names
dfTotalArea0.columns = ['file_name', 'interior_area']
dfTotalArea = dfCode.merge(dfTotalArea0, on = 'file_name')
return dfTotalArea
def getAreaDF(codingFile, interiorAreaFile, totalAreaFile, totalAreaColName=None):
'''Read in data from corresponding filename coding, interior and
total area text files, merge the data by scrambled filenames,
and returns the merged data frame.
'''
if totalAreaColName is None:
# Specify whether the total area was obtained on files with
# original file names or scrambled file names
totalAreaColName = 'scrambled_file_name'
dfCode = pd.read_csv(codingFile, header=0, sep='\t')
dfCode.columns = ['file_name', 'scrambled_file_name']
dfInteriorArea0 = pd.read_csv(interiorAreaFile, header=0, sep='\t')
dfInteriorArea0.columns = ['scrambled_file_name', 'interior_area']
dfInteriorArea = dfCode.merge(dfInteriorArea0, on = 'scrambled_file_name')
dfTotalArea0 = | pd.read_csv(totalAreaFile, header=0, sep='\t') | pandas.read_csv |
import os
import argparse
import numpy as np
import pandas as pd
from time import time
from scipy.stats import norm
from scipy.spatial.distance import euclidean
from editing_dist_n_lcs_dp import edit_distance
from editing_dist_n_lcs_dp import lcs
#global variables
# BREAK_POINTS = []
# LOOKUP_TABLE = []
# TODO BUILD CLASS
# TODO find optimal VOCAB_SIZE & PAA_SIZE OR WINDOW_SIZE
# TODO compare multiple series
# TODO find motifs (cycles)
def matrix_to_df(cols, matrix):
"""
Convert matrix of time series to pd.DataFrame
"""
df = pd.DataFrame()
for i in range(len(cols)):
df[cols[i]] = matrix[i]
return df
def znorm(ts):
"""
Standardize data
"""
return (ts - np.mean(ts)) / np.std(ts)
def ts2paa(ts, paa_size):
"""
PAA algorithm implementation
The conde is inpired on the R SAX package code. For non-equidivisible PAA interval a weighted sum is applied,
The R package the weighted sum imlementationh has O(n * paa_size) complexity, instead this function has O(n) complexity.
"""
# convert ts to a single value
if paa_size == 1:
return np.array(np.mean(ts))
# use all ts' values
elif paa_size == ts.shape[0]:
return ts
# series' length is divisible by paa split
elif ts.shape[0] % paa_size == 0:
ts_split = np.reshape(ts, (paa_size, ts.shape[0]//paa_size))
return np.mean(ts_split, 1)
# ts' length is not divisible by paa split
# O(ts.shape[0]) complexity instead of O(ts.shape[0] * paa_size)
else:
ts_paa = np.zeros(paa_size)
carry = 0
n_vals = 0
paa_id = 0
weight = paa_size
for i in range(ts.shape[0]):
# update number of computed values
n_vals += paa_size
# set value's weight
weight = paa_size
# compute sum
ts_paa[paa_id] += weight * ts[i] + carry
# set carry
carry = 0
            # verify integrity => update `weight` and compute `carry`
# update sum
if n_vals > ts.shape[0]:
# update weight to remove excess sum
weight = n_vals - ts.shape[0]
# remove excess
ts_paa[paa_id] -= weight * ts[i]
#compute paa value
ts_paa[paa_id] = ts_paa[paa_id] / ts.shape[0]
# update paa_id and aux. values
paa_id += 1
n_vals = weight
carry = weight * ts[i]
return ts_paa
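# Worked sketch of the non-divisible branch (assumed values): with 5 points and
# paa_size=2 the middle point is shared between the two segments with weights 2:1,
# i.e. (2*1 + 2*2 + 1*3)/5 and (1*3 + 2*4 + 2*5)/5.
# ts2paa(np.array([1., 2., 3., 4., 5.]), 2) -> array([1.8, 4.2])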
def get_breakpoints(vocab_size):
"""
    Divide the area under N(0, 1) into `vocab_size` equal-probability regions
    Returns an np.array of breakpoints (one cut per symbol)
    Uses the inverse cumulative distribution function
"""
probs = np.arange(0, vocab_size, 1) / vocab_size
# cumulative prob. function
return norm.ppf(probs)
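# Sketch of get_breakpoints output (vocab_size=4): the quartile cuts of the
# standard normal distribution.
# get_breakpoints(4) -> array([-inf, -0.6745, 0., 0.6745])   # values rounded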
# @deprecated
# use numpy instead (np.searchsorted(.))
def bin_search(val, arr):
"""
Adapted binary search (left)
if `val` is <= than `m` => compare with m-1, otherwise compare with m+1
Find symbol representation
Return index of symbol
"""
l = 0
r = arr.shape[0] - 1
while l <= r:
m = (l + r + 1) // 2
if arr[m] <= val:
# base case: m is right-most index
if m + 1 == arr.shape[0]:
return m
# compare `val` with right neighbour
elif val <= arr[m + 1]:
return m
l = m + 1
else:
#base case: `val` is <= than 2nd value index
if m <= 1:
return 0
# compare `val` with left neighbour
elif val > arr[m - 1]:
return m - 1
r = m - 1
return m
def val2symbol(ts_paa, vocab_size):
"""
Convert continuous time series values into discrete values,
using `vocab_size` discrete values
"""
vocab = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z'], dtype=str)
#vocab = vocab[:vocab_size]
# compute breakpoints under a normal distribution ~ N(0, 1)
breakpoints = get_breakpoints(vocab_size)
# get ids for symbol conversion
symbol_ids = np.searchsorted(breakpoints, ts_paa) - 1
# convert ts to string
ts_symbol = vocab[symbol_ids]
return breakpoints, ts_symbol
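# Sketch of the symbol mapping above (assumed z-normalized PAA values, vocab_size=4):
# a PAA value of 0.3 falls between breakpoints 0 and 0.6745, so
# np.searchsorted(breakpoints, 0.3) - 1 == 2 and the emitted symbol is 'c'.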
def sax(ts, out_size, vocab_size, paa=True):
"""
Apply SAX algorithm to time series, i.e. convert continuous values series into
discrete values aggregated series
:ts - time series of continuous values, numpy.array
:out_size - the final output size of ts
    :vocab_size - number of symbols to use (# levels), the size of the vocabulary
:paa - boolean variable, out_size is PAA if paa is True, out_size is Window size otherwise
"""
if paa:
paa_size = out_size
else:
paa_size = get_paa_size_from_window_size(ts.shape[0], out_size)
# Normalize series
ts_norm = znorm(ts)
# Convert normalized series to paa
ts_paa = ts2paa(ts_norm, paa_size)
# Convert paa series into symbols
breakpoints, ts_sax = val2symbol(ts_paa, vocab_size)
# Lookup table containing distance between symbols
dist_lookup_table = compute_dist_lookup_table(breakpoints)
return breakpoints, dist_lookup_table, ts_norm, ts_paa, ts_sax
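# Usage sketch on an assumed toy series (4 PAA segments, 4 symbols):
# ts = np.array([2., 3., 7., 9., 4., 1., 0., 5.])
# _, lookup, _, ts_paa, ts_sax = sax(ts, out_size=4, vocab_size=4)
# With these settings ts_sax comes out as array(['b', 'd', 'b', 'b'], dtype='<U1'):
# three segments z-normalize to about -0.48 ('b') and one to about 1.45 ('d').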
def symbol2index(ts_sax):
"""
Converts symbol string to index values of symbols
ts_sax: series as symbols, i.e. sax representation of a series
"""
# lookup table for symbols' indeces
s2id = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6, 'h': 7,
'i': 8, 'j': 9, 'k': 10, 'l': 11, 'm': 12, 'n': 13, 'o': 14, 'p': 15,
'q': 16, 'r': 17, 's': 18, 't': 19, 'u': 20, 'v': 21, 'w': 22, 'x': 23,
'y': 24, 'z': 25, 'A': 26, 'B': 27, 'C': 28, 'D': 29, 'E': 30, 'F': 31,
'G': 32, 'H': 33, 'I': 34, 'J': 35, 'K': 36, 'L': 37, 'M': 38, 'N': 39,
'O': 40, 'P': 41, 'Q': 42, 'R': 43, 'S': 44, 'T': 45, 'U': 46, 'V': 47,
'W': 48, 'X': 49, 'Y': 50, 'Z': 51}
# init. id series
ts_id = np.empty(ts_sax.shape[0], dtype=int)
# convert symbols to ids
for i in range(ts_sax.shape[0]):
ts_id[i] = s2id[ts_sax[i]]
return ts_id
def get_dists(ts1_sax, ts2_sax, lookup_table):
"""
Compute distance between each symbol of two words (series) using a lookup table
ts1_sax and ts2_sax are two sax representations (strings) built under the same conditions
"""
# Verify integrity
if ts1_sax.shape[0] != ts2_sax.shape[0]:
return -1
# convert symbol series into series of indexes (symbol indexes)
ts1_sax_id = symbol2index(ts1_sax)
ts2_sax_id = symbol2index(ts2_sax)
# array of distances between symbols
dists = np.zeros(ts1_sax.shape[0])
for i in range(ts1_sax_id.shape[0]):
dists[i] = lookup_table[ts1_sax_id[i], ts2_sax_id[i]]
return dists
def compute_mindist(n, lookup_table, ts1_sax, ts2_sax):
"""
Minimum distance between the original time series of two words
`n` is the original series' length
"""
aux = np.sqrt(n / ts1_sax.shape[0])
dists = get_dists(ts1_sax, ts2_sax, lookup_table)
dists_squares = np.square(dists)
dists_sum_squares = np.sum(dists_squares)
return aux * np.sqrt(dists_sum_squares)
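# Sketch of MINDIST (assumed SAX words of length 4 built from a series of n=8 points):
# compute_mindist(8, lookup_table, ts1_sax, ts2_sax)
#   == sqrt(8/4) * sqrt(sum of squared per-symbol lookup distances)
# Because contiguous symbols have lookup distance 0, two words that differ only in
# adjacent symbols have MINDIST 0.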
def get_tightness_of_lower_bound(lookup_table, ts1, ts2, ts1_sax, ts2_sax):
"""
Compute the tightness of the lower bound
Used to find the parameters settings
"""
# compute euclidean distance between original series
or_dist = euclidean(ts1, ts2)
# compute MINDIST for sax series
mindist = compute_mindist(ts1.shape[0],lookup_table, ts1_sax, ts2_sax)
return mindist / or_dist
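# Sketch of how MINDIST and its tightness are evaluated for two series discretised
# under the same settings. euclidean() is assumed to be imported elsewhere in this
# module (e.g. from scipy.spatial.distance), as get_tightness_of_lower_bound above
# already requires; the toy values are illustrative only.
def _mindist_usage_example():
    ts1 = np.array([1.0, 1.2, 1.1, 2.5, 3.0, 3.1, 2.8, 1.5])
    ts2 = np.array([3.0, 2.9, 2.7, 1.4, 1.0, 1.1, 1.3, 2.6])
    _, lookup_table, ts1_norm, _, ts1_sax = sax(ts1, out_size=4, vocab_size=4)
    _, _, ts2_norm, _, ts2_sax = sax(ts2, out_size=4, vocab_size=4)
    mindist = compute_mindist(ts1.shape[0], lookup_table, ts1_sax, ts2_sax)
    tlb = get_tightness_of_lower_bound(lookup_table, ts1_norm, ts2_norm, ts1_sax, ts2_sax)
    return mindist, tlb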
def compute_dist_lookup_table(breakpoints):
"""
    The lookup table is computed as described in [X]:
        d(r, c) = 0                                                     if |r - c| <= 1
        d(r, c) = breakpoints[max(r, c)] - breakpoints[min(r, c) + 1]   otherwise
    Contiguous symbols have distance 0, so their entries are left at zero
"""
# init. matrix
lookup_table_dist = np.zeros((breakpoints.shape[0], breakpoints.shape[0]))
# compute distances
for bi in range(breakpoints.shape[0]):
# increment by 2, since contiguous values have distance 0
for bj in range(bi + 2, breakpoints.shape[0]):
# since breakpoints[0] = - np.inf and symbol is conditioned by <=
# bi is set to next value
# compute distance
dist = breakpoints[bj] - breakpoints[bi + 1]
# set distance
lookup_table_dist[bi, bj] = dist
# mirror
lookup_table_dist[bj, bi] = dist
return lookup_table_dist
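# Worked example of the rule above for a 4-symbol alphabet, following the
# breakpoints[0] = -np.inf convention noted in the loop: the N(0, 1) breakpoints
# for vocab_size=4 are roughly (-0.674, 0, 0.674).
def _lookup_table_example():
    bp = np.array([-np.inf, -0.674, 0.0, 0.674])
    table = compute_dist_lookup_table(bp)
    # table[0, 2] == bp[2] - bp[1] == 0.674   (symbols 'a' and 'c')
    # table[0, 3] == bp[3] - bp[1] == 1.348   (symbols 'a' and 'd')
    # table[1, 3] == bp[3] - bp[2] == 0.674   (symbols 'b' and 'd')
    return table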
def get_paa_size_from_window_size(n, window_size):
"""
    Compute the PAA size that corresponds to a given sliding-window size.
    Use this when a window size is specified instead of a symbol-series length.
"""
if n % window_size > 0:
return n // window_size + 1
return n // window_size
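# Quick check of the ceiling behaviour: a 128-point series with a window of 16 gives
# 8 segments, while a window of 10 gives 13 (the last segment is shorter).
def _paa_size_example():
    assert get_paa_size_from_window_size(128, 16) == 8
    assert get_paa_size_from_window_size(128, 10) == 13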
###############################################################################################
###############################################################################################
def main(args):
    # CONSTANTS
MIN_VOCAB_SIZE = 1
MAX_VOCAB_SIZE = 52
MIN_PAA_SIZE = 1
######################
    # Finding VOCAB_SIZE & PAA_SIZE. This is highly data dependent. The best values are those
    # which minimize the tightness of the lower bound
# Objective: Minimize(MINDIST(Â, Ê) / D(A, B)), i.e. Tightness of Lower Bound
    # Read data (skips header)
    df = pd.read_csv(args.data_path)
    data = df.values
cols = list(df.columns)
#switch columns with rows (=>row is a time series)
data = data.T
#read arguments
# n = len of series
VOCAB_SIZE = args.vocab_size
PAA_SIZE = args.paa_size
WINDOW_SIZE = args.window_size
breakpoints_l = []
lookup_table_l = []
ts_norm_l = []
ts_paa_l = []
ts_sax_l = []
st = time()
print("Computing SAX...")
for ts in data:
        # get number of obs.
        n = ts.shape[0]
#get PAA_SIZE or WINDOW_SIZE
if WINDOW_SIZE > 0:
PAA_SIZE = get_paa_size_from_window_size(n, WINDOW_SIZE)
# compute sax
breakpoints, lookup_table, ts_norm, ts_paa, ts_sax = sax(ts, PAA_SIZE, VOCAB_SIZE)
#add to list
breakpoints_l.append(breakpoints)
lookup_table_l.append(lookup_table)
ts_norm_l.append(ts_norm)
ts_paa_l.append(ts_paa)
ts_sax_l.append(ts_sax)
n_series = data.shape[0]
    # compute TLB (tightness of the lower bound)
tbl_df = | pd.DataFrame() | pandas.DataFrame |
'''A double-bar plot of April's maximum and minimum temperatures for each day'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import glob
Min = [100]*30  # an obviously big number so that any real minimum will be smaller
Max = [-100]*30
for fname in glob.glob("./input/Montreal*"): # For loop for .csv files in given input folder
D = | pd.read_csv(fname, header=0) | pandas.read_csv |
"""Download population projections from https://github.com/nismod/population/blob/master/README.md
Info
-----
https://github.com/virgesmith/UKCensusAPI
https://www.nomisweb.co.uk/myaccount/webservice.asp
https://github.com/nismod/population
Steps
------
1. obtain a nomis key
2. in cmd line: set NOMIS_API_KEY=XXX
3. ./setup.py install
4. run python script from command line
https://www.ons.gov.uk/peoplepopulationandcommunity/populationandmigration/populationprojections/bulletins/nationalpopulationprojections/2015-10-29
Potential variants
-------------------
hhh: High population,
hpp: High fertility,
lll: Low population,
lpp: Low fertility,
php: High life expectancy,
pjp: Moderately high life expectancy,
pkp: Moderately low life expectancy,
plp: Low life expectancy,
pph: High migration,
ppl: Low migration,
ppp: Principal,
ppq: 0% future EU migration (non-ONS),
ppr: 50% future EU migration (non-ONS),
pps: 150% future EU migration (non-ONS),
ppz: Zero net migration
Select for paper I
-------------------
Principal projection ppp
Low migration ppl
High migration pph
"""
import pandas as pd
import population.nppdata as NPPData
import population.snppdata as SNPPData
import population.utils as utils
extrapolate_specific_scenario = True # 2016-2050
base_year_data = True # 2015
if base_year_data:
"""Base year data (no variants, same for all variants)
"""
npp = NPPData.NPPData()
snpp = SNPPData.SNPPData()
year = 2015
# start with an empty data frame
result_ppp = pd.DataFrame()
# loop over all the UK LAD (or LAD-equivalents)
for lad in snpp.data.GEOGRAPHY_CODE.unique():
#region_ppp = snpp.create_variant('ppp', npp, lad, year)
        region_ppp = snpp.extrapolagg(["GENDER", "C_AGE"], npp, lad, year)  # principal
# aggregate the calculated variants by age and gender
result_ppp = result_ppp.append(region_ppp, ignore_index=True)
# write out results
result_ppp.to_csv("C:/Users/cenv0553/mistral_population/__RESULTS/all_variants_2015.csv", index=False)
print("Finished writting 2015 data")
if extrapolate_specific_scenario:
'''
Get extrapolated data for full time range for different ONS scenarios from 2016 - 2050
# https://github.com/nismod/population/blob/master/doc/example_variant_ex.py
'''
# initialise the population modules
npp = NPPData.NPPData()
snpp = SNPPData.SNPPData()
# 50 years, roughly half is extrapolated
# Must start with 2015.
years = range(2016, 2051)
# start with an empty data frame
result_ppp = pd.DataFrame()
result_ppl = | pd.DataFrame() | pandas.DataFrame |
import argparse
import pandas as pd
import numpy as np
import param
import os
def preprocess_sam(r1_sam, r2_sam):
"""
preprocess sam files
"""
#if not os.path.isfile(r1_sam) or not os.path.isfile(r2_sam):
# print("file doesn't exist")
# exit(0)
dir_name = os.path.dirname(r1_sam)
r1_basename = os.path.basename(r1_sam)
r2_basename = os.path.basename(r2_sam)
sorted_r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_sorted.sam"))
sort_r1 = param.SAMTOOLS + "sort -n -o " + sorted_r1 + " " + r1_sam
sorted_r2 = os.path.join(dir_name, r2_basename.replace(".sam","_sorted.sam"))
sort_r2 = param.SAMTOOLS + "sort -n -o " + sorted_r2 + " " + r2_sam
# remove headers
r1 = os.path.join(dir_name, r1_basename.replace(".sam", "_noh.sam"))
r2 = os.path.join(dir_name, r2_basename.replace(".sam", "_noh.sam"))
os.system(sort_r1)
os.system(sort_r2)
#os.system("rm "+r1_sam)
#os.system("rm "+r2_sam)
os.system("grep -v \"^@\" "+sorted_r1+" > "+r1)
os.system("grep -v \"^@\" "+sorted_r2+" > "+r2)
r1_csv = os.path.join(dir_name, r1.replace(".sam", ".csv"))
r2_csv = os.path.join(dir_name, r2.replace(".sam", ".csv"))
os.system("cut -f 1-5 "+r1+" > "+ r1_csv)
os.system("cut -f 1-5 "+r2+" > "+ r2_csv)
os.system("rm "+r1)
os.system("rm "+r2)
return r1_csv, r2_csv
def read_count_hap(r1_csv, r2_csv, DB_genes):
empty_matrix = pd.DataFrame(0, index = DB_genes, columns = DB_genes)
    f1 = open(r1_csv, "r")
    f2 = open(r2_csv, "r")
i = True
lines = 0
pairs = {}
fail = 0
count = 0
while 1:
r1_line = f1.readline() # uptag
r2_line = f2.readline() # dntag
if r1_line == "" or r2_line == "":
i = False
print("End of file")
break
r1_line = r1_line.strip().split("\t")
r2_line = r2_line.strip().split("\t")
if r1_line[0] != r2_line[0]:
i = False
print("# READ ID DOES NOT MATCH #")
break
if int(r1_line[4]) < param.cut_off or int(r2_line[4]) < param.cut_off: # check quality
fail += 1
continue
if r1_line[2] == "*" or r2_line[2] =="*":
fail +=1
continue
r1_name = r1_line[2].split(";")
r2_name = r2_line[2].split(";")
if r1_name[-1] != r2_name[-1]:
count+=1
pairs[(r2_name[1], r1_name[1])] = pairs.get((r2_name[1], r1_name[1]), 0) + 1
matrix = (pd.Series(pairs)
.unstack(fill_value=0)
.T
.reindex(index=empty_matrix.index, columns=empty_matrix.columns, fill_value=0))
f1.close()
f2.close()
diag = pd.Series(np.diag(matrix), index=[matrix.index, matrix.columns])
print(diag)
return diag
def read_DB(hDB):
"""
get a list of db gene from hDB summary
"""
summary = | pd.read_table(hDB, sep="\t") | pandas.read_table |
__all__ = ['ZeroBasedSkill']
import attr
import pandas as pd
from sklearn.utils.validation import check_is_fitted
from .. import annotations
from ..annotations import Annotation, manage_docstring
from ..base import BaseClassificationAggregator
from .majority_vote import MajorityVote
from ..utils import get_accuracy, named_series_attrib
@attr.attrs(auto_attribs=True)
class ZeroBasedSkill(BaseClassificationAggregator):
"""The Zero-Based Skill aggregation model
Performs weighted majority voting on tasks. After processing a pool of tasks,
re-estimates performers' skills according to the correctness of their answers.
    Repeats this process until the labels stop changing or the maximum number of iterations is reached.
    Every performer in the dataset passed to 'predict' must also be present in the dataset
    that was passed to 'fit'.
"""
n_iter: int = 100
lr_init: float = 1.0
lr_steps_to_reduce: int = 20
lr_reduce_factor: float = 0.5
eps: float = 1e-5
# Available after fit
skills_: annotations.OPTIONAL_SKILLS = named_series_attrib(name='skill')
# Available after predict or predict_proba
# labels_
probas_: annotations.OPTIONAL_PROBAS = attr.ib(init=False)
def _init_skills(self, data: annotations.LABELED_DATA) -> annotations.SKILLS:
skill_value = 1 / data.label.unique().size + self.eps
skill_index = pd.Index(data.performer.unique(), name='performer')
return | pd.Series(skill_value, index=skill_index) | pandas.Series |
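# Hedged usage sketch for the aggregator above: a tiny annotation table in the
# task/performer/label layout used by this package, run through fit + predict as the
# docstring describes. The 'task' column name and the fit/predict chaining are
# assumptions here (only 'performer' and 'label' appear in the code shown above).
#
#   import pandas as pd
#   answers = pd.DataFrame({
#       'task':      ['t1', 't1', 't1', 't2', 't2', 't2'],
#       'performer': ['w1', 'w2', 'w3', 'w1', 'w2', 'w3'],
#       'label':     ['cat', 'cat', 'dog', 'dog', 'dog', 'dog'],
#   })
#   zbs = ZeroBasedSkill(n_iter=50)
#   labels = zbs.fit(answers).predict(answers)   # one aggregated label per task
#   skills = zbs.skills_                         # per-performer skill estimates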
#!/usr/bin/env python -W ignore::DeprecationWarning
import os
import ast
import pathlib
import pandas as pd
import numpy as np
import random
import itertools
from tqdm import tqdm
from skimage import measure
from scipy import stats
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
logging.captureWarnings(True)
import inputfuncs
import ccprocess
import libcc
import segmfuncs
import parcelfuncs
import webbrowser
from threading import Timer
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash_daq as daq
from dash.dependencies import Input, Output, State, ALL, MATCH
from dash.exceptions import PreventUpdate
from dash_extensions import Download
from dash_extensions.snippets import send_data_frame
import plotly.io as pio
import plotly.figure_factory as ff
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
class Error(Exception):
pass
theme = 'plotly'
print(' ')
# GENERAL DEFINITIONS -------------------------------------------------------------------------
dict_segmentation_functions = {'ROQS': segmfuncs.segm_roqs,
'Watershed': segmfuncs.segm_watershed,
'Imported Masks': segmfuncs.segm_mask}
dict_parcellation_functions = {'Witelson': parcelfuncs.parc_witelson,
'Hofer': parcelfuncs.parc_hofer,
'Chao': parcelfuncs.parc_chao,
'Cover': parcelfuncs.parc_cover,
'Freesurfer': parcelfuncs.parc_freesurfer}
dict_3d_segmentation_functions = {'Watershed3d': segmfuncs.segm_watershed_3d}
scalar_list = ['FA', 'MD', 'RD', 'AD']
colors_list = px.colors.qualitative.Plotly
dict_parcellation_methods = {'Witelson': 'witelson', 'Hofer & Frahm': 'hofer', 'Chao et al':'chao', 'Cover et al': 'cover', 'Freesurfer':'freesurfer'}
dict_segmentation_methods = {'ROQS': 'roqs', 'Watershed': 'watershed'}
dict_3d_segmentation_methods = {'Watershed3d':'watershed3d'}
# DATA IMPORTING -----------------------------------------------------------------------------
# Arg parser
opts = inputfuncs.get_parser().parse_args()
if opts.staple is True:
dict_segmentation_functions['STAPLE'] = segmfuncs.segm_staple
dict_segmentation_methods['STAPLE'] = 'staple'
df_categories = pd.DataFrame()
df_numerical = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
data = pd.read_csv('train.csv')
data['datatime'] = | pd.to_datetime(data.date) | pandas.to_datetime |
import pandas as pd
from flask import Flask, jsonify, request
from tensorflow.keras.models import load_model
import pickle
import numpy as np
UP_Wheat = load_model('UP_Wheat')
october = pickle.load(open('UP_Wheat/october.pkl','rb'))
november = pickle.load(open('UP_Wheat/november.pkl','rb'))
december = pickle.load(open('UP_Wheat/december.pkl','rb'))
january = pickle.load(open('UP_Wheat/january.pkl','rb'))
february = pickle.load(open('UP_Wheat/february.pkl','rb'))
UP_Rice = load_model('UP_Rice')
june = pickle.load(open('UP_Rice/june.pkl','rb'))
july = pickle.load(open('UP_Rice/july.pkl','rb'))
august = pickle.load(open('UP_Rice/august.pkl','rb'))
september = pickle.load(open('UP_Rice/september.pkl','rb'))
october = pickle.load(open('UP_Rice/october.pkl','rb'))
november = pickle.load(open('UP_Rice/november.pkl','rb'))
UP_Sugarcane = load_model('UP_Sugarcane')
june = pickle.load(open('UP_Sugarcane/june.pkl','rb'))
july = pickle.load(open('UP_Sugarcane/july.pkl','rb'))
august = pickle.load(open('UP_Sugarcane/august.pkl','rb'))
september = pickle.load(open('UP_Sugarcane/september.pkl','rb'))
october = pickle.load(open('UP_Sugarcane/october.pkl','rb'))
MH_Arhar = load_model('MH_Arhar')
june = pickle.load(open('MH_Arhar/june.pkl','rb'))
july = pickle.load(open('MH_Arhar/july.pkl','rb'))
august = pickle.load(open('MH_Arhar/august.pkl','rb'))
september = pickle.load(open('MH_Arhar/september.pkl','rb'))
october = pickle.load(open('MH_Arhar/october.pkl','rb'))
november = pickle.load(open('MH_Arhar/november.pkl','rb'))
MH_Cotton = load_model('MH_Cotton')
june = pickle.load(open('MH_Cotton/june.pkl','rb'))
july = pickle.load(open('MH_Cotton/july.pkl','rb'))
august = pickle.load(open('MH_Cotton/august.pkl','rb'))
september = pickle.load(open('MH_Cotton/september.pkl','rb'))
october = pickle.load(open('MH_Cotton/october.pkl','rb'))
november = pickle.load(open('MH_Cotton/november.pkl','rb'))
MH_Rice = load_model('MH_Rice')
june = pickle.load(open('MH_Rice/june.pkl','rb'))
july = pickle.load(open('MH_Rice/july.pkl','rb'))
august = pickle.load(open('MH_Rice/august.pkl','rb'))
september = pickle.load(open('MH_Rice/september.pkl','rb'))
october = pickle.load(open('MH_Rice/october.pkl','rb'))
november = pickle.load(open('MH_Rice/november.pkl','rb'))
MH_Soyabean = load_model('MH_Soyabean')
june = pickle.load(open('MH_Soyabean/june.pkl','rb'))
july = pickle.load(open('MH_Soyabean/july.pkl','rb'))
august = pickle.load(open('MH_Soyabean/august.pkl','rb'))
september = pickle.load(open('MH_Soyabean/september.pkl','rb'))
october = pickle.load(open('MH_Soyabean/october.pkl','rb'))
november = pickle.load(open('MH_Soyabean/november.pkl','rb'))
HR_Wheat = load_model('HR_Wheat')
october = pickle.load(open('HR_Wheat/october.pkl','rb'))
november = pickle.load(open('HR_Wheat/november.pkl','rb'))
december = pickle.load(open('HR_Wheat/december.pkl','rb'))
january = pickle.load(open('HR_Wheat/january.pkl','rb'))
february = pickle.load(open('HR_Wheat/february.pkl','rb'))
HR_Rice = load_model('HR_Rice')
june = pickle.load(open('HR_Rice/june.pkl','rb'))
july = pickle.load(open('HR_Rice/july.pkl','rb'))
august = pickle.load(open('HR_Rice/august.pkl','rb'))
september = pickle.load(open('HR_Rice/september.pkl','rb'))
october = pickle.load(open('HR_Rice/october.pkl','rb'))
november = pickle.load(open('HR_Rice/november.pkl','rb'))
Bihar_Maize = load_model('Bihar_Maize')
june = pickle.load(open('Bihar_Maize/june.pkl','rb'))
july = pickle.load(open('Bihar_Maize/july.pkl','rb'))
august = pickle.load(open('Bihar_Maize/august.pkl','rb'))
september = pickle.load(open('Bihar_Maize/september.pkl','rb'))
october = pickle.load(open('Bihar_Maize/october.pkl','rb'))
Bihar_Rice = load_model('Bihar_Rice')
june = pickle.load(open('Bihar_Rice/june.pkl','rb'))
july = pickle.load(open('Bihar_Rice/july.pkl','rb'))
august = pickle.load(open('Bihar_Rice/august.pkl','rb'))
september = pickle.load(open('Bihar_Rice/september.pkl','rb'))
october = pickle.load(open('Bihar_Rice/october.pkl','rb'))
Bihar_Wheat = load_model('Bihar_Wheat')
november = pickle.load(open('Bihar_Wheat/november.pkl','rb'))
december = pickle.load(open('Bihar_Wheat/december.pkl','rb'))
january = pickle.load(open('Bihar_Wheat/january.pkl','rb'))
february = pickle.load(open('Bihar_Wheat/february.pkl','rb'))
PB_Rice = load_model('PB_Rice')
june = pickle.load(open('PB_Rice/june.pkl','rb'))
july = pickle.load(open('PB_Rice/july.pkl','rb'))
august = pickle.load(open('PB_Rice/august.pkl','rb'))
september = pickle.load(open('PB_Rice/september.pkl','rb'))
october = pickle.load(open('PB_Rice/october.pkl','rb'))
november = pickle.load(open('PB_Rice/november.pkl','rb'))
PB_Maize = load_model('PB_Maize')
june = pickle.load(open('PB_Maize/june.pkl','rb'))
july = pickle.load(open('PB_Maize/july.pkl','rb'))
august = pickle.load(open('PB_Maize/august.pkl','rb'))
september = pickle.load(open('PB_Maize/september.pkl','rb'))
october = pickle.load(open('PB_Maize/october.pkl','rb'))
november = pickle.load(open('PB_Maize/november.pkl','rb'))
PB_Wheat = load_model('PB_Wheat')
november = pickle.load(open('PB_Wheat/november.pkl','rb'))
december = pickle.load(open('PB_Wheat/december.pkl','rb'))
january = pickle.load(open('PB_Wheat/january.pkl','rb'))
february = pickle.load(open('PB_Wheat/february.pkl','rb'))
# app
app = Flask(__name__)
# routes
@app.route('/up/wheat/', methods=['POST'])
def predict_1():
# get data
data = request.get_json(force=True)
# convert data into dataframe
data.update((x, [y]) for x, y in data.items())
data_df = pd.DataFrame.from_dict(data)
# predictions
result1 = october.predict(data_df)
result2 = november.predict(data_df)
result3 = december.predict(data_df)
result4 = january.predict(data_df)
result5 = february.predict(data_df)
dataset = | pd.DataFrame({'october': result1, 'november': result2, 'december': result3, 'january': result4, 'february': result5}) | pandas.DataFrame |
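# Hedged client-side sketch for the '/up/wheat/' route above: POST a flat JSON object
# of model features. The real feature names depend on how the month models were
# trained and are not shown here, so the keys below are placeholders only.
#
#   import requests
#   payload = {'rainfall': 82.5, 'temperature': 24.1, 'area': 3.2}
#   resp = requests.post('http://localhost:5000/up/wheat/', json=payload)
#   print(resp.json())   # month-wise predictions built from the DataFrame assembled above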
##### file path
# input
path_df_D = "tianchi_fresh_comp_train_user.csv"
path_df_part_1 = "df_part_1.csv"
path_df_part_2 = "df_part_2.csv"
path_df_part_3 = "df_part_3.csv"
path_df_part_1_tar = "df_part_1_tar.csv"
path_df_part_2_tar = "df_part_2_tar.csv"
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# output
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
import pandas as pd
import numpy as np
##========================================================##
##======================== Part 3 ========================##
##========================================================##
###########################################
'''Step 1.1 feature data set U of df_part_3
(1)
u_b1_count_in_6
u_b2_count_in_6
u_b3_count_in_6
u_b4_count_in_6
u_b_count_in_6
(2)
u_b1_count_in_3
u_b2_count_in_3
u_b3_count_in_3
u_b4_count_in_3
u_b_count_in_3
    (3)
u_b1_count_in_1
u_b2_count_in_1
u_b3_count_in_1
u_b4_count_in_1
u_b_count_in_1
    (4)
u_b4_rate (in_6)
u_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# u_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_6 = pd.get_dummies(df_part_3_u_b_count_in_6['behavior_type']).join(
df_part_3_u_b_count_in_6[['user_id', 'cumcount']])
df_part_3_u_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_6['u_b1_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_1'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b2_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_2'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b3_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_3'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6['u_b4_count_in_6'] = df_part_3_u_b_count_in_6['behavior_type_4'] * (
df_part_3_u_b_count_in_6['cumcount'] + 1)
df_part_3_u_b_count_in_6 = df_part_3_u_b_count_in_6.groupby('user_id').agg({'u_b1_count_in_6': np.sum,
'u_b2_count_in_6': np.sum,
'u_b3_count_in_6': np.sum,
'u_b4_count_in_6': np.sum})
df_part_3_u_b_count_in_6.reset_index(inplace=True)
df_part_3_u_b_count_in_6['u_b_count_in_6'] = df_part_3_u_b_count_in_6[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'behavior_type'], 'last')[
    ['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_3 = pd.get_dummies(df_part_3_u_b_count_in_3['behavior_type']).join(
df_part_3_u_b_count_in_3[['user_id', 'cumcount']])
df_part_3_u_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_3['u_b1_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_1'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b2_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_2'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b3_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_3'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3['u_b4_count_in_3'] = df_part_3_u_b_count_in_3['behavior_type_4'] * (
df_part_3_u_b_count_in_3['cumcount'] + 1)
df_part_3_u_b_count_in_3 = df_part_3_u_b_count_in_3.groupby('user_id').agg({'u_b1_count_in_3': np.sum,
'u_b2_count_in_3': np.sum,
'u_b3_count_in_3': np.sum,
'u_b4_count_in_3': np.sum})
df_part_3_u_b_count_in_3.reset_index(inplace=True)
df_part_3_u_b_count_in_3['u_b_count_in_3'] = df_part_3_u_b_count_in_3[['u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3']].apply(lambda x: x.sum(),
axis=1)
# u_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'behavior_type']).cumcount()
df_part_3_u_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'behavior_type'], 'last')[
['user_id', 'behavior_type', 'cumcount']]
df_part_3_u_b_count_in_1 = pd.get_dummies(df_part_3_u_b_count_in_1['behavior_type']).join(
df_part_3_u_b_count_in_1[['user_id', 'cumcount']])
df_part_3_u_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_u_b_count_in_1['u_b1_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_1'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b2_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_2'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b3_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_3'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1['u_b4_count_in_1'] = df_part_3_u_b_count_in_1['behavior_type_4'] * (
df_part_3_u_b_count_in_1['cumcount'] + 1)
df_part_3_u_b_count_in_1 = df_part_3_u_b_count_in_1.groupby('user_id').agg({'u_b1_count_in_1': np.sum,
'u_b2_count_in_1': np.sum,
'u_b3_count_in_1': np.sum,
'u_b4_count_in_1': np.sum})
df_part_3_u_b_count_in_1.reset_index(inplace=True)
df_part_3_u_b_count_in_1['u_b_count_in_1'] = df_part_3_u_b_count_in_1[['u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1']].apply(lambda x: x.sum(),
axis=1)
# merge the result of count_in_6, count_in_3, count_in_1
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count_in_6,
df_part_3_u_b_count_in_3, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_count_in_1, on=['user_id'], how='left').fillna(0)
df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']] = df_part_3_u_b_count[['u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1']].astype(int)
# u_b4_rate
df_part_3_u_b_count['u_b4_rate'] = df_part_3_u_b_count['u_b4_count_in_6'] / df_part_3_u_b_count['u_b_count_in_6']
# u_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['user_id', 'time'])
df_part_3_u_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['user_id'], 'first')[
['user_id', 'time']]
df_part_3_u_b4_time.columns = ['user_id', 'b4_first_time']
df_part_3_u_b_time = df_part_3.drop_duplicates(['user_id'], 'first')[['user_id', 'time']]
df_part_3_u_b_time.columns = ['user_id', 'b_first_time']
df_part_3_u_b_b4_time = pd.merge(df_part_3_u_b_time, df_part_3_u_b4_time, on=['user_id'])
df_part_3_u_b_b4_time['u_b4_diff_time'] = df_part_3_u_b_b4_time['b4_first_time'] - df_part_3_u_b_b4_time['b_first_time']
df_part_3_u_b_b4_time = df_part_3_u_b_b4_time[['user_id', 'u_b4_diff_time']]
df_part_3_u_b_b4_time['u_b4_diff_hours'] = df_part_3_u_b_b4_time['u_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
# generating feature set U
f_U_part_3 = pd.merge(df_part_3_u_b_count,
df_part_3_u_b_b4_time,
on=['user_id'], how='left')[['user_id',
'u_b1_count_in_6',
'u_b2_count_in_6',
'u_b3_count_in_6',
'u_b4_count_in_6',
'u_b_count_in_6',
'u_b1_count_in_3',
'u_b2_count_in_3',
'u_b3_count_in_3',
'u_b4_count_in_3',
'u_b_count_in_3',
'u_b1_count_in_1',
'u_b2_count_in_1',
'u_b3_count_in_1',
'u_b4_count_in_1',
'u_b_count_in_1',
'u_b4_rate',
'u_b4_diff_hours']]
# write to csv file
f_U_part_3 = f_U_part_3.round({'u_b4_rate': 3})
f_U_part_3.to_csv(path_df_part_3_U, index=False)
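# Toy illustration of the counting pattern used throughout this file: cumcount plus
# drop_duplicates(keep='last') yields per-group occurrence counts, which get_dummies
# then spreads into one column per behavior type. The values below are made up.
def _counting_pattern_demo():
    toy = pd.DataFrame({'user_id': [1, 1, 1, 2, 2],
                        'behavior_type': [1, 1, 4, 1, 4]})
    toy['cumcount'] = toy.groupby(['user_id', 'behavior_type']).cumcount()
    last = toy.drop_duplicates(['user_id', 'behavior_type'], keep='last')
    # cumcount + 1 is the number of rows of that behavior type for that user:
    # user 1 -> two type-1 events and one type-4 event; user 2 -> one of each
    return last.assign(count=last['cumcount'] + 1)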
###########################################
'''Step 1.2 feature data set I of df_part_3
(1)
i_u_count_in_6
i_u_count_in_3
i_u_count_in_1
(2)
i_b1_count_in_6
i_b2_count_in_6
i_b3_count_in_6
i_b4_count_in_6
i_b_count_in_6
i_b1_count_in_3
i_b2_count_in_3
i_b3_count_in_3
i_b4_count_in_3
i_b_count_in_3
i_b1_count_in_1
i_b2_count_in_1
i_b3_count_in_1
i_b4_count_in_1
i_b_count_in_1
(3)
i_b4_rate (in_6)
i_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# i_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_id', 'user_id'])
df_part_3_in_6['i_u_count_in_6'] = df_part_3_in_6.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_6']]
# i_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_3['i_u_count_in_3'] = df_part_3_in_3.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_3']]
# i_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(['item_id', 'user_id'])
df_part_3_in_1['i_u_count_in_1'] = df_part_3_in_1.groupby('item_id').cumcount() + 1
df_part_3_i_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id'], 'last')[['item_id', 'i_u_count_in_1']]
# merge for generation of i_u_count
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count_in_6,
df_part_3_i_u_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count = pd.merge(df_part_3_i_u_count,
df_part_3_i_u_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']] = df_part_3_i_u_count[['i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1']].astype(int)
# i_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_6 = df_part_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_6 = pd.get_dummies(df_part_3_i_b_count_in_6['behavior_type']).join(
df_part_3_i_b_count_in_6[['item_id', 'cumcount']])
df_part_3_i_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_6['i_b1_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_1'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b2_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_2'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b3_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_3'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6['i_b4_count_in_6'] = df_part_3_i_b_count_in_6['behavior_type_4'] * (
df_part_3_i_b_count_in_6['cumcount'] + 1)
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6[['item_id',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6']]
df_part_3_i_b_count_in_6 = df_part_3_i_b_count_in_6.groupby('item_id').agg({'i_b1_count_in_6': np.sum,
'i_b2_count_in_6': np.sum,
'i_b3_count_in_6': np.sum,
'i_b4_count_in_6': np.sum})
df_part_3_i_b_count_in_6.reset_index(inplace=True)
df_part_3_i_b_count_in_6['i_b_count_in_6'] = df_part_3_i_b_count_in_6['i_b1_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b2_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b3_count_in_6'] + \
df_part_3_i_b_count_in_6['i_b4_count_in_6']
# i_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_id', 'behavior_type'], 'last')[
    ['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_3 = pd.get_dummies(df_part_3_i_b_count_in_3['behavior_type']).join(
df_part_3_i_b_count_in_3[['item_id', 'cumcount']])
df_part_3_i_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_3['i_b1_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_1'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b2_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_2'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b3_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_3'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3['i_b4_count_in_3'] = df_part_3_i_b_count_in_3['behavior_type_4'] * (
df_part_3_i_b_count_in_3['cumcount'] + 1)
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3[['item_id',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3']]
df_part_3_i_b_count_in_3 = df_part_3_i_b_count_in_3.groupby('item_id').agg({'i_b1_count_in_3': np.sum,
'i_b2_count_in_3': np.sum,
'i_b3_count_in_3': np.sum,
'i_b4_count_in_3': np.sum})
df_part_3_i_b_count_in_3.reset_index(inplace=True)
df_part_3_i_b_count_in_3['i_b_count_in_3'] = df_part_3_i_b_count_in_3['i_b1_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b2_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b3_count_in_3'] + \
df_part_3_i_b_count_in_3['i_b4_count_in_3']
# i_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_id', 'behavior_type']).cumcount()
df_part_3_i_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_id', 'behavior_type'], 'last')[
['item_id', 'behavior_type', 'cumcount']]
df_part_3_i_b_count_in_1 = pd.get_dummies(df_part_3_i_b_count_in_1['behavior_type']).join(
df_part_3_i_b_count_in_1[['item_id', 'cumcount']])
df_part_3_i_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_i_b_count_in_1['i_b1_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_1'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b2_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_2'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b3_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_3'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1['i_b4_count_in_1'] = df_part_3_i_b_count_in_1['behavior_type_4'] * (
df_part_3_i_b_count_in_1['cumcount'] + 1)
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1[['item_id',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1']]
df_part_3_i_b_count_in_1 = df_part_3_i_b_count_in_1.groupby('item_id').agg({'i_b1_count_in_1': np.sum,
'i_b2_count_in_1': np.sum,
'i_b3_count_in_1': np.sum,
'i_b4_count_in_1': np.sum})
df_part_3_i_b_count_in_1.reset_index(inplace=True)
df_part_3_i_b_count_in_1['i_b_count_in_1'] = df_part_3_i_b_count_in_1['i_b1_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b2_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b3_count_in_1'] + \
df_part_3_i_b_count_in_1['i_b4_count_in_1']
# merge for generation of i_b_count
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count_in_6,
df_part_3_i_b_count_in_3,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_count_in_1,
on=['item_id'], how='left').fillna(0)
df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']] = df_part_3_i_b_count[['i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1']].astype(int)
# i_b4_rate
df_part_3_i_b_count['i_b4_rate'] = df_part_3_i_b_count['i_b4_count_in_6'] / df_part_3_i_b_count['i_b_count_in_6']
# i_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['item_id', 'time'])
df_part_3_i_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_id'], 'first')[
['item_id', 'time']]
df_part_3_i_b4_time.columns = ['item_id', 'b4_first_time']
df_part_3_i_b_time = df_part_3.drop_duplicates(['item_id'], 'first')[['item_id', 'time']]
df_part_3_i_b_time.columns = ['item_id', 'b_first_time']
df_part_3_i_b_b4_time = pd.merge(df_part_3_i_b_time, df_part_3_i_b4_time, on=['item_id'])
df_part_3_i_b_b4_time['i_b4_diff_time'] = df_part_3_i_b_b4_time['b4_first_time'] - df_part_3_i_b_b4_time['b_first_time']
df_part_3_i_b_b4_time['i_b4_diff_hours'] = df_part_3_i_b_b4_time['i_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_i_b_b4_time = df_part_3_i_b_b4_time[['item_id', 'i_b4_diff_hours']]
# generating feature set I
f_I_part_3 = pd.merge(df_part_3_i_b_count,
df_part_3_i_b_b4_time,
on=['item_id'], how='left')
f_I_part_3 = pd.merge(f_I_part_3,
df_part_3_i_u_count,
on=['item_id'], how='left')[['item_id',
'i_u_count_in_6',
'i_u_count_in_3',
'i_u_count_in_1',
'i_b1_count_in_6',
'i_b2_count_in_6',
'i_b3_count_in_6',
'i_b4_count_in_6',
'i_b_count_in_6',
'i_b1_count_in_3',
'i_b2_count_in_3',
'i_b3_count_in_3',
'i_b4_count_in_3',
'i_b_count_in_3',
'i_b1_count_in_1',
'i_b2_count_in_1',
'i_b3_count_in_1',
'i_b4_count_in_1',
'i_b_count_in_1',
'i_b4_rate',
'i_b4_diff_hours']]
# write to csv file
f_I_part_3 = f_I_part_3.round({'i_b4_rate': 3})
f_I_part_3.to_csv(path_df_part_3_I, index=False)
###########################################
'''Step 1.3 feature data set C of df_part_3
(1)
c_u_count_in_6
c_u_count_in_3
c_u_count_in_1
(2)
c_b1_count_in_6
c_b2_count_in_6
c_b3_count_in_6
c_b4_count_in_6
c_b_count_in_6
c_b1_count_in_3
c_b2_count_in_3
c_b3_count_in_3
c_b4_count_in_3
c_b_count_in_3
c_b1_count_in_1
c_b2_count_in_1
c_b3_count_in_1
c_b4_count_in_1
c_b_count_in_1
(3)
c_b4_rate (in_6)
c_b4_diff_hours (in_6)
'''
# loading data
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# c_u_count_in_6
df_part_3_in_6 = df_part_3.drop_duplicates(['item_category', 'user_id'])
df_part_3_in_6['c_u_count_in_6'] = df_part_3_in_6.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_6 = df_part_3_in_6.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_6']]
# c_u_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_3['c_u_count_in_3'] = df_part_3_in_3.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_3']]
# c_u_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')].drop_duplicates(
['item_category', 'user_id'])
df_part_3_in_1['c_u_count_in_1'] = df_part_3_in_1.groupby('item_category').cumcount() + 1
df_part_3_c_u_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category'], 'last')[
['item_category', 'c_u_count_in_1']]
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count_in_6, df_part_3_c_u_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_3_c_u_count = pd.merge(df_part_3_c_u_count, df_part_3_c_u_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']] = df_part_3_c_u_count[['c_u_count_in_6',
'c_u_count_in_3',
'c_u_count_in_1']].astype(int)
# c_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_6 = df_part_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_6 = pd.get_dummies(df_part_3_c_b_count_in_6['behavior_type']).join(
df_part_3_c_b_count_in_6[['item_category', 'cumcount']])
df_part_3_c_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_6['c_b1_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_1'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b2_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_2'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b3_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_3'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6['c_b4_count_in_6'] = df_part_3_c_b_count_in_6['behavior_type_4'] * (
df_part_3_c_b_count_in_6['cumcount'] + 1)
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6[['item_category',
'c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6']]
df_part_3_c_b_count_in_6 = df_part_3_c_b_count_in_6.groupby('item_category').agg({'c_b1_count_in_6': np.sum,
'c_b2_count_in_6': np.sum,
'c_b3_count_in_6': np.sum,
'c_b4_count_in_6': np.sum})
df_part_3_c_b_count_in_6.reset_index(inplace=True)
df_part_3_c_b_count_in_6['c_b_count_in_6'] = df_part_3_c_b_count_in_6['c_b1_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b2_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b3_count_in_6'] + \
df_part_3_c_b_count_in_6['c_b4_count_in_6']
# c_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_3 = df_part_3_in_3.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_3 = pd.get_dummies(df_part_3_c_b_count_in_3['behavior_type']).join(
df_part_3_c_b_count_in_3[['item_category', 'cumcount']])
df_part_3_c_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_3['c_b1_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_1'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b2_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_2'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b3_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_3'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3['c_b4_count_in_3'] = df_part_3_c_b_count_in_3['behavior_type_4'] * (
df_part_3_c_b_count_in_3['cumcount'] + 1)
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3[['item_category',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3']]
df_part_3_c_b_count_in_3 = df_part_3_c_b_count_in_3.groupby('item_category').agg({'c_b1_count_in_3': np.sum,
'c_b2_count_in_3': np.sum,
'c_b3_count_in_3': np.sum,
'c_b4_count_in_3': np.sum})
df_part_3_c_b_count_in_3.reset_index(inplace=True)
df_part_3_c_b_count_in_3['c_b_count_in_3'] = df_part_3_c_b_count_in_3['c_b1_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b2_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b3_count_in_3'] + \
df_part_3_c_b_count_in_3['c_b4_count_in_3']
# c_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['item_category', 'behavior_type']).cumcount()
df_part_3_c_b_count_in_1 = df_part_3_in_1.drop_duplicates(['item_category', 'behavior_type'], 'last')[
['item_category', 'behavior_type', 'cumcount']]
df_part_3_c_b_count_in_1 = pd.get_dummies(df_part_3_c_b_count_in_1['behavior_type']).join(
df_part_3_c_b_count_in_1[['item_category', 'cumcount']])
df_part_3_c_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_c_b_count_in_1['c_b1_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_1'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b2_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_2'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b3_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_3'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1['c_b4_count_in_1'] = df_part_3_c_b_count_in_1['behavior_type_4'] * (
df_part_3_c_b_count_in_1['cumcount'] + 1)
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1[['item_category',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1']]
df_part_3_c_b_count_in_1 = df_part_3_c_b_count_in_1.groupby('item_category').agg({'c_b1_count_in_1': np.sum,
'c_b2_count_in_1': np.sum,
'c_b3_count_in_1': np.sum,
'c_b4_count_in_1': np.sum})
df_part_3_c_b_count_in_1.reset_index(inplace=True)
df_part_3_c_b_count_in_1['c_b_count_in_1'] = df_part_3_c_b_count_in_1['c_b1_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b2_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b3_count_in_1'] + \
df_part_3_c_b_count_in_1['c_b4_count_in_1']
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count_in_6, df_part_3_c_b_count_in_3, on=['item_category'],
how='left').fillna(0)
df_part_3_c_b_count = pd.merge(df_part_3_c_b_count, df_part_3_c_b_count_in_1, on=['item_category'], how='left').fillna(
0)
df_part_3_c_b_count[['c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6',
'c_b_count_in_6',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3',
'c_b_count_in_3',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1',
'c_b_count_in_1']] = df_part_3_c_b_count[['c_b1_count_in_6',
'c_b2_count_in_6',
'c_b3_count_in_6',
'c_b4_count_in_6',
'c_b_count_in_6',
'c_b1_count_in_3',
'c_b2_count_in_3',
'c_b3_count_in_3',
'c_b4_count_in_3',
'c_b_count_in_3',
'c_b1_count_in_1',
'c_b2_count_in_1',
'c_b3_count_in_1',
'c_b4_count_in_1',
'c_b_count_in_1']].astype(int)
# c_b4_rate
df_part_3_c_b_count['c_b4_rate'] = df_part_3_c_b_count['c_b4_count_in_6'] / df_part_3_c_b_count['c_b_count_in_6']
# c_b4_diff_time
df_part_3 = df_part_3.sort_values(by=['item_category', 'time'])
df_part_3_c_b4_time = df_part_3[df_part_3['behavior_type'] == 4].drop_duplicates(['item_category'], 'first')[
['item_category', 'time']]
df_part_3_c_b4_time.columns = ['item_category', 'b4_first_time']
df_part_3_c_b_time = df_part_3.drop_duplicates(['item_category'], 'first')[['item_category', 'time']]
df_part_3_c_b_time.columns = ['item_category', 'b_first_time']
df_part_3_c_b_b4_time = pd.merge(df_part_3_c_b_time, df_part_3_c_b4_time, on=['item_category'])
df_part_3_c_b_b4_time['c_b4_diff_time'] = df_part_3_c_b_b4_time['b4_first_time'] - df_part_3_c_b_b4_time['b_first_time']
df_part_3_c_b_b4_time['c_b4_diff_hours'] = df_part_3_c_b_b4_time['c_b4_diff_time'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_c_b_b4_time = df_part_3_c_b_b4_time[['item_category',
'c_b4_diff_hours']]
# generating feature set C
f_C_part_3 = pd.merge(df_part_3_c_u_count, df_part_3_c_b_count, on=['item_category'], how='left')
f_C_part_3 = pd.merge(f_C_part_3, df_part_3_c_b_b4_time, on=['item_category'], how='left')
f_C_part_3 = f_C_part_3.round({'c_b4_rate': 3})
# write to csv file
f_C_part_3.to_csv(path_df_part_3_C, index=False)
############################################
'''Step 1.4 feature data set IC of df_part_3
ic_u_rank_in_c (in_6)
ic_b_rank_in_c (in_6)
ic_b4_rank_in_c (in_6)
'''
# get df_part_3_i_ub_count
path_df = open(path_df_part_3_I, 'r')
try:
df_part_3_I = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
df_part_3_i_ub_count = df_part_3_I[['item_id', 'i_u_count_in_6', 'i_b_count_in_6', 'i_b4_count_in_6']]
del (df_part_3_I)
# get df_part_3_uic for merge i & c
path_df = open(path_df_part_3_uic, 'r')
try:
df_part_3_uic = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
df_part_3_ic_u_b_count = pd.merge(df_part_3_uic, df_part_3_i_ub_count, on=['item_id'], how='left').fillna(0)
df_part_3_ic_u_b_count = df_part_3_ic_u_b_count.drop_duplicates(['item_id', 'item_category'])
# ic_u_rank_in_c
df_part_3_ic_u_b_count['ic_u_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_u_count_in_6'].rank(
method='min', ascending=False).astype('int')
# ic_b_rank_in_c
df_part_3_ic_u_b_count['ic_b_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_b_count_in_6'].rank(
method='min', ascending=False).astype('int')
# ic_b4_rank_in_c
df_part_3_ic_u_b_count['ic_b4_rank_in_c'] = df_part_3_ic_u_b_count.groupby('item_category')['i_b4_count_in_6'].rank(
method='min', ascending=False).astype('int')
f_IC_part_3 = df_part_3_ic_u_b_count[['item_id',
'item_category',
'ic_u_rank_in_c',
'ic_b_rank_in_c',
'ic_b4_rank_in_c']]
# write to csv file
f_IC_part_3.to_csv(path_df_part_3_IC, index=False)
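# Toy illustration of the within-category ranking used above: rank(method='min',
# ascending=False) gives rank 1 to the largest count inside each category, with ties
# sharing the smaller rank. The values below are made up.
def _rank_in_category_demo():
    toy = pd.DataFrame({'item_category': ['c1', 'c1', 'c1', 'c2'],
                        'i_u_count_in_6': [10, 30, 30, 5]})
    toy['rank_in_c'] = toy.groupby('item_category')['i_u_count_in_6'].rank(
        method='min', ascending=False).astype('int')
    # c1 -> ranks [3, 1, 1]; c2 -> rank [1]
    return toy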
############################################
'''Step 1.5 feature data set UI of df_part_3
(1)
ui_b1_count_in_6
ui_b2_count_in_6
ui_b3_count_in_6
ui_b4_count_in_6
ui_b_count_in_6
ui_b1_count_in_3
ui_b2_count_in_3
ui_b3_count_in_3
ui_b4_count_in_3
ui_b_count_in_3
ui_b1_count_in_1
ui_b2_count_in_1
ui_b3_count_in_1
ui_b4_count_in_1
ui_b_count_in_1
(2)
ui_b_count_rank_in_u (in_6)
ui_b_count_rank_in_uc (in_6)
(3)
ui_b1_last_hours (in_6)
ui_b2_last_hours (in_6)
ui_b3_last_hours (in_6)
ui_b4_last_hours (in_6)
'''
path_df = open(path_df_part_3, 'r')
try:
df_part_3 = pd.read_csv(path_df, index_col=False, parse_dates=[0])
df_part_3.columns = ['time', 'user_id', 'item_id', 'behavior_type', 'item_category']
finally:
path_df.close()
# ui_b_count_in_6
df_part_3['cumcount'] = df_part_3.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
df_part_3_ui_b_count_in_6 = df_part_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
['user_id', 'item_id', 'behavior_type', 'cumcount']]
df_part_3_ui_b_count_in_6 = pd.get_dummies(df_part_3_ui_b_count_in_6['behavior_type']).join(
df_part_3_ui_b_count_in_6[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_6.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_ui_b_count_in_6['ui_b1_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_1'] * (
df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b2_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_2'] * (
df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b3_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_3'] * (
df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6['ui_b4_count_in_6'] = df_part_3_ui_b_count_in_6['behavior_type_4'] * (
df_part_3_ui_b_count_in_6['cumcount'] + 1)
df_part_3_ui_b_count_in_6 = df_part_3_ui_b_count_in_6[['user_id',
'item_id',
'ui_b1_count_in_6',
'ui_b2_count_in_6',
'ui_b3_count_in_6',
'ui_b4_count_in_6']]
df_part_3_ui_b_count_in_6 = df_part_3_ui_b_count_in_6.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_6': np.sum,
'ui_b2_count_in_6': np.sum,
'ui_b3_count_in_6': np.sum,
'ui_b4_count_in_6': np.sum})
df_part_3_ui_b_count_in_6.reset_index(inplace=True)
df_part_3_ui_b_count_in_6['ui_b_count_in_6'] = df_part_3_ui_b_count_in_6['ui_b1_count_in_6'] + \
df_part_3_ui_b_count_in_6['ui_b2_count_in_6'] + \
df_part_3_ui_b_count_in_6['ui_b3_count_in_6'] + \
df_part_3_ui_b_count_in_6['ui_b4_count_in_6']
# ui_b_count_in_3
df_part_3_in_3 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-16')]
df_part_3_in_3['cumcount'] = df_part_3_in_3.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
df_part_3_ui_b_count_in_3 = df_part_3_in_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
    ['user_id', 'item_id', 'behavior_type', 'cumcount']]
df_part_3_ui_b_count_in_3 = pd.get_dummies(df_part_3_ui_b_count_in_3['behavior_type']).join(
df_part_3_ui_b_count_in_3[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_3.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_ui_b_count_in_3['ui_b1_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_1'] * (
df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b2_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_2'] * (
df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b3_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_3'] * (
df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3['ui_b4_count_in_3'] = df_part_3_ui_b_count_in_3['behavior_type_4'] * (
df_part_3_ui_b_count_in_3['cumcount'] + 1)
df_part_3_ui_b_count_in_3 = df_part_3_ui_b_count_in_3[['user_id',
'item_id',
'ui_b1_count_in_3',
'ui_b2_count_in_3',
'ui_b3_count_in_3',
'ui_b4_count_in_3']]
df_part_3_ui_b_count_in_3 = df_part_3_ui_b_count_in_3.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_3': np.sum,
'ui_b2_count_in_3': np.sum,
'ui_b3_count_in_3': np.sum,
'ui_b4_count_in_3': np.sum})
df_part_3_ui_b_count_in_3.reset_index(inplace=True)
df_part_3_ui_b_count_in_3['ui_b_count_in_3'] = df_part_3_ui_b_count_in_3['ui_b1_count_in_3'] + \
df_part_3_ui_b_count_in_3['ui_b2_count_in_3'] + \
df_part_3_ui_b_count_in_3['ui_b3_count_in_3'] + \
df_part_3_ui_b_count_in_3['ui_b4_count_in_3']
# ui_b_count_in_1
df_part_3_in_1 = df_part_3[df_part_3['time'] >= np.datetime64('2014-12-18')]
df_part_3_in_1['cumcount'] = df_part_3_in_1.groupby(['user_id', 'item_id', 'behavior_type']).cumcount()
df_part_3_ui_b_count_in_1 = df_part_3_in_1.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
['user_id', 'item_id', 'behavior_type', 'cumcount']]
df_part_3_ui_b_count_in_1 = pd.get_dummies(df_part_3_ui_b_count_in_1['behavior_type']).join(
df_part_3_ui_b_count_in_1[['user_id', 'item_id', 'cumcount']])
df_part_3_ui_b_count_in_1.rename(columns={1: 'behavior_type_1',
2: 'behavior_type_2',
3: 'behavior_type_3',
4: 'behavior_type_4'}, inplace=True)
df_part_3_ui_b_count_in_1['ui_b1_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_1'] * (
df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b2_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_2'] * (
df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b3_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_3'] * (
df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1['ui_b4_count_in_1'] = df_part_3_ui_b_count_in_1['behavior_type_4'] * (
df_part_3_ui_b_count_in_1['cumcount'] + 1)
df_part_3_ui_b_count_in_1 = df_part_3_ui_b_count_in_1[['user_id',
'item_id',
'ui_b1_count_in_1',
'ui_b2_count_in_1',
'ui_b3_count_in_1',
'ui_b4_count_in_1']]
df_part_3_ui_b_count_in_1 = df_part_3_ui_b_count_in_1.groupby(['user_id', 'item_id']).agg({'ui_b1_count_in_1': np.sum,
'ui_b2_count_in_1': np.sum,
'ui_b3_count_in_1': np.sum,
'ui_b4_count_in_1': np.sum})
df_part_3_ui_b_count_in_1.reset_index(inplace=True)
df_part_3_ui_b_count_in_1['ui_b_count_in_1'] = df_part_3_ui_b_count_in_1['ui_b1_count_in_1'] + \
df_part_3_ui_b_count_in_1['ui_b2_count_in_1'] + \
df_part_3_ui_b_count_in_1['ui_b3_count_in_1'] + \
df_part_3_ui_b_count_in_1['ui_b4_count_in_1']
df_part_3_ui_b_count = pd.merge(df_part_3_ui_b_count_in_6, df_part_3_ui_b_count_in_3, on=['user_id', 'item_id'],
how='left').fillna(0)
df_part_3_ui_b_count = pd.merge(df_part_3_ui_b_count, df_part_3_ui_b_count_in_1, on=['user_id', 'item_id'],
how='left').fillna(0)
df_part_3_ui_b_count[['ui_b1_count_in_6',
'ui_b2_count_in_6',
'ui_b3_count_in_6',
'ui_b4_count_in_6',
'ui_b_count_in_6',
'ui_b1_count_in_3',
'ui_b2_count_in_3',
'ui_b3_count_in_3',
'ui_b4_count_in_3',
'ui_b_count_in_3',
'ui_b1_count_in_1',
'ui_b2_count_in_1',
'ui_b3_count_in_1',
'ui_b4_count_in_1',
'ui_b_count_in_1']] = df_part_3_ui_b_count[['ui_b1_count_in_6',
'ui_b2_count_in_6',
'ui_b3_count_in_6',
'ui_b4_count_in_6',
'ui_b_count_in_6',
'ui_b1_count_in_3',
'ui_b2_count_in_3',
'ui_b3_count_in_3',
'ui_b4_count_in_3',
'ui_b_count_in_3',
'ui_b1_count_in_1',
'ui_b2_count_in_1',
'ui_b3_count_in_1',
'ui_b4_count_in_1',
'ui_b_count_in_1']].astype(int)
# ui_b_count_rank_in_u
df_part_3_ui_b_count['ui_b_count_rank_in_u'] = df_part_3_ui_b_count.groupby(['user_id'])['ui_b_count_in_6'].rank(
method='min', ascending=False).astype('int')
# ui_b_count_rank_in_uc
path_df = open(path_df_part_3_uic, 'r')
try:
df_part_3_uic = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
df_part_3_ui_b_count = pd.merge(df_part_3_uic, df_part_3_ui_b_count, on=['user_id', 'item_id'], how='left')
df_part_3_ui_b_count['ui_b_count_rank_in_uc'] = df_part_3_ui_b_count.groupby(['user_id', 'item_category'])[
'ui_b_count_rank_in_u'].rank(method='min', ascending=True).astype('int')
# ui_b_last_time
df_part_3.sort_values(by=['user_id', 'item_id', 'behavior_type', 'time'], inplace=True)
df_part_3_ui_b_last_time = df_part_3.drop_duplicates(['user_id', 'item_id', 'behavior_type'], 'last')[
['user_id', 'item_id', 'behavior_type', 'time']]
df_part_3_ui_b_last_time['ui_b1_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 1][
'time']
df_part_3_ui_b_last_time['ui_b2_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 2][
'time']
df_part_3_ui_b_last_time['ui_b3_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 3][
'time']
df_part_3_ui_b_last_time['ui_b4_last_time'] = df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['behavior_type'] == 4][
'time']
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b1_last_time'].notnull(), 'ui_b1_last_hours'] = (
pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b1_last_time'])
df_part_3_ui_b_last_time['ui_b1_last_hours'] = \
df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b1_last_hours'].notnull()]['ui_b1_last_hours'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b2_last_time'].notnull(), 'ui_b2_last_hours'] = (
pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b2_last_time'])
df_part_3_ui_b_last_time['ui_b2_last_hours'] = \
df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b2_last_hours'].notnull()]['ui_b2_last_hours'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b3_last_time'].notnull(), 'ui_b3_last_hours'] = (
pd.to_datetime('2014-12-19') - df_part_3_ui_b_last_time['ui_b3_last_time'])
df_part_3_ui_b_last_time['ui_b3_last_hours'] = \
df_part_3_ui_b_last_time[df_part_3_ui_b_last_time['ui_b3_last_hours'].notnull()]['ui_b3_last_hours'].apply(
lambda x: x.days * 24 + x.seconds // 3600)
df_part_3_ui_b_last_time.loc[df_part_3_ui_b_last_time['ui_b4_last_time'].notnull(), 'ui_b4_last_hours'] = (
| pd.to_datetime('2014-12-19') | pandas.to_datetime |
import pandas as pd
import networkx as nx
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
# functions
def degree(G,f):
"""
Adds a column to the dataframe f with the degree of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
degree_dic = nx.degree_centrality(G)
degree_df = pd.DataFrame(data = {'name': list(degree_dic.keys()), 'degree': list(degree_dic.values()) })
f = pd.merge(f, degree_df, on='name')
return f
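# Hypothetical usage sketch for the helpers in this module (G_demo/f_demo are illustrative
# names only): build a node dataframe whose 'name' column matches the graph nodes, then
# chain the feature functions.
#   G_demo = nx.karate_club_graph()
#   f_demo = pd.DataFrame({'name': list(G_demo.nodes())})
#   f_demo = degree(G_demo, f_demo) # adds a 'degree' column
#   f_demo = clustering(G_demo, f_demo) # adds a 'clustering' column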
def centrality(G,f):
"""
Adds a column to the dataframe f with the centrality of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
centrality_dic = nx.degree_centrality(G)
centrality_df = pd.DataFrame(data = {'name': list(centrality_dic.keys()), 'centrality': list(centrality_dic.values()) })
f = pd.merge(f, centrality_df, on='name')
return f
def betweenness(G,f):
"""
Adds a column to the dataframe f with the betweenness of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
betweenness_dic = nx.betweenness_centrality(G)
betweenness_df = pd.DataFrame(data = {'name': list(betweenness_dic.keys()), 'betweenness': list(betweenness_dic.values()) })
f = pd.merge(f, betweenness_df, on='name')
return f
def pagerank(G,f):
"""
Adds a column to the dataframe f with the pagerank of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
pagerank_dic = nx.pagerank(G)
pagerank_df = pd.DataFrame(data = {'name': list(pagerank_dic.keys()), 'pagerank': list(pagerank_dic.values()) })
f = pd.merge(f, pagerank_df, on='name')
return f
def clustering(G,f):
"""
Adds a column to the dataframe f with the clustering coefficient of each node.
G: a networkx graph.
f: a pandas dataframe.
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
clustering_dic = nx.clustering(G)
clustering_df = pd.DataFrame(data = {'name': list(clustering_dic.keys()), 'clustering': list(clustering_dic.values()) })
f = pd.merge(f, clustering_df, on='name')
return f
def communities_greedy_modularity(G,f):
"""
Adds a column to the dataframe f with the community of each node.
The communities are detected using greedy modularity.
G: a networkx graph.
f: a pandas dataframe.
It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_dic = nx.algorithms.community.greedy_modularity_communities(G)
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_greedy_modularity': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = pd.merge(f, communities_df, on='name')
return f
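# Sketch of the flattening used above: greedy_modularity_communities returns a list of
# frozensets of nodes, and the nested comprehension pairs every node with the index of
# its community (order inside a community is arbitrary), e.g.
#   comms = [frozenset({'a', 'b'}), frozenset({'c'})]
#   [(node, j) for j in range(len(comms)) for node in comms[j]]
#   # -> [('a', 0), ('b', 0), ('c', 1)]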
def communities_label_propagation(G,f):
"""
Adds a column to the dataframe f with the community of each node.
The communities are detected using label propagation.
G: a networkx graph.
f: a pandas dataframe.
It works with networkx version: '2.4rc1.dev_20190610203526'
"""
if not(set(f.name) == set(G.nodes()) and len(f.name) == len(G.nodes())):
raise ValueError('The number of nodes and the length of the dataframe should be the same.')
communities_gen = nx.algorithms.community.label_propagation_communities(G)
communities_dic = [community for community in communities_gen]
communities_df = pd.DataFrame(data = {'name': [i for j in range(len(communities_dic)) for i in list(communities_dic[j])], 'communities_label_propagation': [j for j in range(len(communities_dic)) for i in list(communities_dic[j])] })
f = | pd.merge(f, communities_df, on='name') | pandas.merge |
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from config import LEGENDS, HEATMAP_LIST, GLUCOSE_LIST, GLUCOSE_LIST_AUC, NORMAL_LIST
from pandas.plotting import parallel_coordinates
def curveplots(df, parameter=None):
"""
Plot the OGTT curves.
parameter should be one of the strings
"Insuline", "C peptide" or "Glucose".
"""
sns.set_style("whitegrid")
# Set x, y and the curve label according to the parameter requested.
if parameter == "Insuline":
curve = "Insulin in mU/L"
y = ['OGTT Insulin -10', 'OGTT Insulin -5', 'OGTT Insulin 0',
'OGTT Insulin 30', 'OGTT Insulin 60', 'OGTT Insulin 90',
'OGTT Insulin 120']
x = [-10, -5, 0, 30, 60, 90, 120]
elif parameter == "C peptide":
curve = 'C peptide in mU/L'
y = ['OGTT C-pep -10', 'OGTT C-pep -5', 'OGTT C-pep 0',
'OGTT C-pep 30', 'OGTT C-pep 60', 'OGTT C-pep 90',
'OGTT C-pep 120']
x = [-10, -5, 0, 30, 60, 90, 120]
elif parameter == "Glucose":
curve = "Glucose in mg/dl"
y = ['OGTT Glucose 0', 'OGTT Glucose 30', 'OGTT Glucose 60',
'OGTT Glucose 90', 'OGTT Glucose 120']
x = [0, 30, 60, 90, 120]
# Create a Dataframe to be passed to the graph function.
df2 = pd.DataFrame()
for counter, elements in enumerate(y, 0):
dummyframe = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
#----------------#
#--- run_sens ---#
#----------------#
#--- This script was developed to run a local sensitivity analysis for
#--- JULES-crop for the specific sites flagged with run_jules=TRUE in the
#--- sensitivity_run_setup.csv file. The file sensitivity_par_range.csv
#--- controls the parameters range for sensitivity.
#--- If a list of run_id is provided in the script argument the run_jules
#--- flags are set to the provided list. This is useful to run multiple
#--- run_ids is parallel jobs. Example:
#--- The below call will run only for run_ids MZ0002 and MZ0099
$ python run_sens.py MZ0002,MZ0099
#--- <NAME> (<EMAIL>)
#--- Feb, 2020.
"""
#------------------------#
#--- Running Settings ---#
#------------------------#
#--- Every line of dash_nm that has the column 'run_sens' set to True will be used as a baseline in the sensitivity analysis
dash_nm = 'sensitivity_run_setup.csv' # Filename of Dashboard CSV
meta_nm = 'meta_var.csv' # Filename of Meta-info CSV
calc_perf = True # Flag to Calculate model performance
clean_res = True # Flag to Get clean results
save_res = True # Flag to save results in 'results' folder
save_all = False # Flag to save all simulations files in 'results' folder
res_CSV = True # Flag to save simulation results as CSV files
ftime_idx = True # Flag to compute time indexers in simulation results (e.g. date, year, doy)
verb = True # Flag for verbose
exec_fn = 'jules.exe' # JULES executable filename
exec_path = 'jules_run_sens' # folder where simulations will be run
#----------------------------#
#--- Sensitivity Settings ---#
#----------------------------#
pace = 0.2 # Pace within sensitivity range (e.g. 0.2 is 20% variation from min to max range)
sens_par_nm = 'sensitivity_par_range.csv' # Filename of sensitivity parameters range (CSV)
wd_sim_db = 'sim_db_sens' # Name of baseline sim_db folder within results_sens
time_idx = ['year','doy','dap','das','date'] # Time indexers
dim_idx = ['soil','pft','cpft'] # Dimensions indexers
#--- Keys to merge sensitivity & baseline [note that das is not present as we also assess the initial date for simulation start]
merge_col = ['year','doy','dap','date'] + dim_idx + ['sim_code']
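#--- Illustrative sketch (made-up bounds) of how `pace` is used further below to build a
#--- sensitivity range: step from min to max in increments of (max - min) * pace and make
#--- sure the upper bound itself is included.
#   lo, hi = 0.5, 1.5
#   rng = np.arange(lo, hi, (hi - lo) * pace)
#   if hi not in rng: rng = np.append(rng, hi)
#   # with pace = 0.2 -> approximately [0.5, 0.7, 0.9, 1.1, 1.3, 1.5]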
#--- Model outputs to analyze sensitivity
sens_vars = ['cropdvi',
'croplai',
'cropcanht',
'cropharvc',
'cropreservec',
'cropstemc',
'croprootc',
'cropleafc',
'resp_r',
'resp_l',
'resp_p',
'gpp',
'npp',
'soil_wet',
't_soil',
'smcl',
'le']
#----------------------#
#--- Load libraries ---#
#----------------------#
import os#; os.chdir('C:/Murilo/py-jules')
import shutil
import pandas as pd
import numpy as np
#import gen_nml_defs as gn
import util as u
from py_jules_run import py_jules_run
from get_netcdf import get_res
from get_model_perf import check_dependencies
#----------------------#
#--- Read dashboard ---#
#----------------------#
#--- get run wd
wd = os.getcwd().replace('\\','/')
#--- Open CSVs
dash = u.df_csv(wd+'/'+dash_nm)
meta = u.df_csv(wd+'/'+meta_nm)
sens = u.df_csv(wd+'/'+sens_par_nm)
#--- Get scripts arguments
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
#--- Filter sites based on arguments
arg_run_id = np.array(str(sys.argv[1]).split(','))
#--- Update Flags
dash['run_jules'] = False
dash.loc[np.isin(dash['run_id'],arg_run_id),'run_jules'] = True
#--- Filter sites flagged to run
dash_run = dash[:][dash['run_jules']]
sens_run = sens[:][sens['run_sens' ]]
#--- Run for all treatments
for run_id in dash_run['run_id']:
#--- Get run data
base_nml_fn = dash_run['sim_base'][dash_run['run_id'] == run_id].values[0]
driv_id = dash_run['driv_id'][dash_run['run_id'] == run_id].values[0]
soil_id = dash_run['soil_id'][dash_run['run_id'] == run_id].values[0]
crop_id = dash_run['crop_id'][dash_run['run_id'] == run_id].values[0]
crop_nm = dash_run['crop_nm'][dash_run['run_id'] == run_id].values[0]
#--- running path
wd_run = wd + '/'+exec_path+'/' + run_id
#--------------------#
#--- Run Baseline ---#
#--------------------#
res = py_jules_run(run_id,
base_nml_fn,
driv_id,
soil_id,
crop_id,
crop_nm,
wd,
'sim_db',
wd_run+'/sens_run',
exec_fn,
verb = verb,
res_CSV = res_CSV,
time_idx = ftime_idx,
clean_res= clean_res,
upd_base_nml= None,
copy_sim_db = True) # turned on to be used on sensitivity analysis
if type(res) == type(None):
print('ERROR: Error in Baseline Simulations for ID: '+str(run_id)+'\n -- SIMULATION ABORTED FOR THIS ID -- ')
continue
#--- Get targeted results from simulations outputs
res_df_b = get_res(sens_vars,
res,
time_idx = time_idx,
dim_idx = dim_idx)
#--- Rename columns
res_df_b = res_df_b.rename(columns = {'sim_value':'sim_base'})
#--- Save Baseline Results
#gn.save_sens_res(wd, run_id,save_all = True)
#--- Store baseline sim_db for this run_id
src = wd_run+'/sens_run/sim_db'
dst = wd_run+'/sim_db_sens'
if os.path.exists(dst): shutil.rmtree(dst)
shutil.copytree(src,dst)
#--- Store baseline base_nml for this run_id
src = wd_run+'/sens_run/nml_setup_'+run_id+'.csv'
dst = wd_run
shutil.copy(src,dst)
#--- Sensitivity for all parameters
for i in sens_run.index:
#--- Get sensitivity run class
var_class = sens_run['class'][sens_run.index == i].values[0]
#--- get sensitivity run indexers and info
var = sens_run['variable'][sens_run.index == i].values[0]
nml = sens_run['namelist'][sens_run.index == i].values[0]
arr = sens_run['array_id'][sens_run.index == i].values[0]
nnl = sens_run['n_nl'][sens_run.index == i].values[0]
dpc = sens_run['dependency'][sens_run.index == i].values[0]
typ = sens_run['type'][sens_run.index == i].values[0]
if var_class == 'parameter':
#-------------------------------------#
#--- Sensitivity on parameter type ---#
#-------------------------------------#
#--- Use updated base_nml
base_nml_fn = wd_run+'/nml_setup_'+run_id+'.csv'
#--- Read base nml
base_nml = pd.DataFrame(pd.read_csv(base_nml_fn))
#--- Find on base_nml
f = ((base_nml['variable'] == var) &
(base_nml['namelist'] == nml) &
(base_nml['array_id'] == arr) &
(base_nml['n_nl'] == nnl))
#--- check if the parameter exists in the base simulation
if not any(f):
print('Warning: Parameter '+str(var)+' not found in base simulation file: nml_setup_'+str(run_id)+'.csv.\n - PARAMETER IGNORED - ')
sens_stat = 1
#--- get range thresholds for sensitivity analysis
val_min = sens_run['min'][sens_run.index == i].values[0]
val_max = sens_run['max'][sens_run.index == i].values[0]
val_pace= pace
if typ == 'date':
from datetime import datetime
#--- date-type parameter
val = base_nml['val'][f].values[0]
val_date = datetime.strptime(val, "'%Y-%m-%d %H:%M:%S'")
range_sens = np.arange(val_min, val_max,(val_max - val_min) * val_pace)
if not val_max in range_sens: range_sens = np.append(range_sens,val_max)
#--- round days
range_sens = np.around(range_sens, decimals=0)
#--- Apply timedeltas to baseline date
range_sens = pd.to_timedelta(range_sens, unit = 'd') + | pd.to_datetime(val_date) | pandas.to_datetime |
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
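# A hypothetical sketch of how a concrete subclass could bind these hooks to a parser
# engine (illustrative class name; the real subclasses wire read_csv/read_table similarly):
#   class CParserTestSketch(ParserTests, tm.TestCase):
#       def read_csv(self, *args, **kwds):
#           kwds['engine'] = 'c'
#           return read_csv(*args, **kwds)
#       def read_table(self, *args, **kwds):
#           kwds['engine'] = 'c'
#           return read_table(*args, **kwds)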
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
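# Hypothetical usage of the helper above (illustrative only, not an actual test):
#   df = self.construct_dataframe(10000)
#   with tm.ensure_clean('__mt__.csv') as path:
#       df.to_csv(path)
#       result = self.generate_multithread_dataframe(path, 10000, num_tasks=4)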
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index= | range(nv) | pandas.compat.range |
import datetime
from collections import OrderedDict
import warnings
import numpy as np
from numpy import array, nan
import pandas as pd
import pytest
from numpy.testing import assert_almost_equal, assert_allclose
from conftest import assert_frame_equal, assert_series_equal
from pvlib import irradiance
from conftest import requires_ephem, requires_numba
# fixtures create realistic test input data
# test input data generated at Location(32.2, -111, 'US/Arizona', 700)
# test input data is hard coded to avoid dependencies on other parts of pvlib
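# for reference, a sketch of how inputs like these could be regenerated (deliberately not
# used here so the tests stay independent of the rest of pvlib):
#   from pvlib.location import Location
#   loc = Location(32.2, -111, 'US/Arizona', 700)
#   solpos = loc.get_solarposition(times())  # apparent_zenith, azimuth, ...
#   clearsky = loc.get_clearsky(times())     # ghi, dni, dhi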
@pytest.fixture
def times():
# must include night values
return pd.date_range(start='20140624', freq='6H', periods=4,
tz='US/Arizona')
@pytest.fixture
def irrad_data(times):
return pd.DataFrame(np.array(
[[ 0. , 0. , 0. ],
[ 79.73860422, 316.1949056 , 40.46149818],
[1042.48031487, 939.95469881, 118.45831879],
[ 257.20751138, 646.22886049, 62.03376265]]),
columns=['ghi', 'dni', 'dhi'], index=times)
@pytest.fixture
def ephem_data(times):
return pd.DataFrame(np.array(
[[124.0390863 , 124.0390863 , -34.0390863 , -34.0390863 ,
352.69550699, -2.36677158],
[ 82.85457044, 82.97705621, 7.14542956, 7.02294379,
66.71410338, -2.42072165],
[ 10.56413562, 10.56725766, 79.43586438, 79.43274234,
144.76567754, -2.47457321],
[ 72.41687122, 72.46903556, 17.58312878, 17.53096444,
287.04104128, -2.52831909]]),
columns=['apparent_zenith', 'zenith', 'apparent_elevation',
'elevation', 'azimuth', 'equation_of_time'],
index=times)
@pytest.fixture
def dni_et(times):
return np.array(
[1321.1655834833093, 1321.1655834833093, 1321.1655834833093,
1321.1655834833093])
@pytest.fixture
def relative_airmass(times):
return pd.Series([np.nan, 7.58831596, 1.01688136, 3.27930443], times)
# setup for et rad test. put it here for readability
timestamp = pd.Timestamp('20161026')
dt_index = pd.DatetimeIndex([timestamp])
doy = timestamp.dayofyear
dt_date = timestamp.date()
dt_datetime = datetime.datetime.combine(dt_date, datetime.time(0))
dt_np64 = np.datetime64(dt_datetime)
value = 1383.636203
@pytest.mark.parametrize('testval, expected', [
(doy, value),
(np.float64(doy), value),
(dt_date, value),
(dt_datetime, value),
(dt_np64, value),
(np.array([doy]), np.array([value])),
(pd.Series([doy]), np.array([value])),
(dt_index, pd.Series([value], index=dt_index)),
(timestamp, value)
])
@pytest.mark.parametrize('method', [
'asce', 'spencer', 'nrel', pytest.param('pyephem', marks=requires_ephem)])
def test_get_extra_radiation(testval, expected, method):
out = irradiance.get_extra_radiation(testval, method=method)
assert_allclose(out, expected, atol=10)
def test_get_extra_radiation_epoch_year():
out = irradiance.get_extra_radiation(doy, method='nrel', epoch_year=2012)
assert_allclose(out, 1382.4926804890767, atol=0.1)
@requires_numba
def test_get_extra_radiation_nrel_numba(times):
with warnings.catch_warnings():
# don't warn on method reload or num threads
warnings.simplefilter("ignore")
result = irradiance.get_extra_radiation(
times, method='nrel', how='numba', numthreads=4)
# and reset to no-numba state
irradiance.get_extra_radiation(times, method='nrel')
assert_allclose(result,
[1322.332316, 1322.296282, 1322.261205, 1322.227091])
def test_get_extra_radiation_invalid():
with pytest.raises(ValueError):
irradiance.get_extra_radiation(300, method='invalid')
def test_grounddiffuse_simple_float():
result = irradiance.get_ground_diffuse(40, 900)
assert_allclose(result, 26.32000014911496)
def test_grounddiffuse_simple_series(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(40, irrad_data['ghi'])
assert ground_irrad.name == 'diffuse_ground'
def test_grounddiffuse_albedo_0(irrad_data):
ground_irrad = irradiance.get_ground_diffuse(
40, irrad_data['ghi'], albedo=0)
assert 0 == ground_irrad.all()
def test_grounddiffuse_albedo_invalid_surface(irrad_data):
with pytest.raises(KeyError):
irradiance.get_ground_diffuse(
40, irrad_data['ghi'], surface_type='invalid')
def test_grounddiffuse_albedo_surface(irrad_data):
result = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
assert_allclose(result, [0, 3.731058, 48.778813, 12.035025], atol=1e-4)
def test_isotropic_float():
result = irradiance.isotropic(40, 100)
assert_allclose(result, 88.30222215594891)
def test_isotropic_series(irrad_data):
result = irradiance.isotropic(40, irrad_data['dhi'])
assert_allclose(result, [0, 35.728402, 104.601328, 54.777191], atol=1e-4)
def test_klucher_series_float():
# klucher inputs
surface_tilt, surface_azimuth = 40.0, 180.0
dhi, ghi = 100.0, 900.0
solar_zenith, solar_azimuth = 20.0, 180.0
# expect same result for floats and pd.Series
expected = irradiance.klucher(
surface_tilt, surface_azimuth,
pd.Series(dhi), pd.Series(ghi),
pd.Series(solar_zenith), pd.Series(solar_azimuth)
) # 94.99429931664851
result = irradiance.klucher(
surface_tilt, surface_azimuth, dhi, ghi, solar_zenith, solar_azimuth
)
assert_allclose(result, expected[0])
def test_klucher_series(irrad_data, ephem_data):
result = irradiance.klucher(40, 180, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'],
ephem_data['azimuth'])
# pvlib matlab 1.4 does not contain the max(cos_tt, 0) correction
# so, these values are different
assert_allclose(result, [0., 36.789794, 109.209347, 56.965916], atol=1e-4)
# expect same result for np.array and pd.Series
expected = irradiance.klucher(
40, 180, irrad_data['dhi'].values, irrad_data['ghi'].values,
ephem_data['apparent_zenith'].values, ephem_data['azimuth'].values
)
assert_allclose(result, expected, atol=1e-4)
def test_haydavies(irrad_data, ephem_data, dni_et):
result = irradiance.haydavies(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [0, 27.1775, 102.9949, 33.1909], atol=1e-4)
def test_reindl(irrad_data, ephem_data, dni_et):
result = irradiance.reindl(
40, 180, irrad_data['dhi'], irrad_data['dni'], irrad_data['ghi'],
dni_et, ephem_data['apparent_zenith'], ephem_data['azimuth'])
# values from matlab 1.4 code
assert_allclose(result, [np.nan, 27.9412, 104.1317, 34.1663], atol=1e-4)
def test_king(irrad_data, ephem_data):
result = irradiance.king(40, irrad_data['dhi'], irrad_data['ghi'],
ephem_data['apparent_zenith'])
assert_allclose(result, [0, 44.629352, 115.182626, 79.719855], atol=1e-4)
def test_perez(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass)
expected = pd.Series(np.array(
[ 0. , 31.46046871, np.nan, 45.45539877]),
index=irrad_data.index)
assert_series_equal(out, expected, check_less_precise=2)
def test_perez_components(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'], dni,
dni_et, ephem_data['apparent_zenith'],
ephem_data['azimuth'], relative_airmass,
return_components=True)
expected = pd.DataFrame(np.array(
[[ 0. , 31.46046871, np.nan, 45.45539877],
[ 0. , 26.84138589, np.nan, 31.72696071],
[ 0. , 0. , np.nan, 4.47966439],
[ 0. , 4.62212181, np.nan, 9.25316454]]).T,
columns=['sky_diffuse', 'isotropic', 'circumsolar', 'horizon'],
index=irrad_data.index
)
expected_for_sum = expected['sky_diffuse'].copy()
expected_for_sum.iloc[2] = 0
sum_components = out.iloc[:, 1:].sum(axis=1)
sum_components.name = 'sky_diffuse'
assert_frame_equal(out, expected, check_less_precise=2)
assert_series_equal(sum_components, expected_for_sum, check_less_precise=2)
def test_perez_arrays(irrad_data, ephem_data, dni_et, relative_airmass):
dni = irrad_data['dni'].copy()
dni.iloc[2] = np.nan
out = irradiance.perez(40, 180, irrad_data['dhi'].values, dni.values,
dni_et, ephem_data['apparent_zenith'].values,
ephem_data['azimuth'].values,
relative_airmass.values)
expected = np.array(
[ 0. , 31.46046871, np.nan, 45.45539877])
assert_allclose(out, expected, atol=1e-2)
assert isinstance(out, np.ndarray)
def test_perez_scalar():
# copied values from fixtures
out = irradiance.perez(40, 180, 118.45831879, 939.95469881,
1321.1655834833093, 10.56413562, 144.76567754,
1.01688136)
# this will fail. out is an ndarray with ndim == 0. fix in future version.
# assert np.isscalar(out)
assert_allclose(out, 109.084332)
@pytest.mark.parametrize('model', ['isotropic', 'klucher', 'haydavies',
'reindl', 'king', 'perez'])
def test_sky_diffuse_zenith_close_to_90(model):
# GH 432
sky_diffuse = irradiance.get_sky_diffuse(
30, 180, 89.999, 230,
dni=10, ghi=51, dhi=50, dni_extra=1360, airmass=12, model=model)
assert sky_diffuse < 100
def test_get_sky_diffuse_invalid():
with pytest.raises(ValueError):
irradiance.get_sky_diffuse(
30, 180, 0, 180, 1000, 1100, 100, dni_extra=1360, airmass=1,
model='invalid')
def test_liujordan():
expected = pd.DataFrame(np.array(
[[863.859736967, 653.123094076, 220.65905025]]),
columns=['ghi', 'dni', 'dhi'],
index=[0])
out = irradiance.liujordan(
pd.Series([10]), pd.Series([0.5]), pd.Series([1.1]), dni_extra=1400)
assert_frame_equal(out, expected)
def test_get_total_irradiance(irrad_data, ephem_data, dni_et, relative_airmass):
models = ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez']
for model in models:
total = irradiance.get_total_irradiance(
32, 180,
ephem_data['apparent_zenith'], ephem_data['azimuth'],
dni=irrad_data['dni'], ghi=irrad_data['ghi'],
dhi=irrad_data['dhi'],
dni_extra=dni_et, airmass=relative_airmass,
model=model,
surface_type='urban')
assert total.columns.tolist() == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
@pytest.mark.parametrize('model', ['isotropic', 'klucher',
'haydavies', 'reindl', 'king', 'perez'])
def test_get_total_irradiance_scalars(model):
total = irradiance.get_total_irradiance(
32, 180,
10, 180,
dni=1000, ghi=1100,
dhi=100,
dni_extra=1400, airmass=1,
model=model,
surface_type='urban')
assert list(total.keys()) == ['poa_global', 'poa_direct',
'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse']
# test that none of the values are nan
assert np.isnan(np.array(list(total.values()))).sum() == 0
def test_poa_components(irrad_data, ephem_data, dni_et, relative_airmass):
aoi = irradiance.aoi(40, 180, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
gr_sand = irradiance.get_ground_diffuse(40, irrad_data['ghi'],
surface_type='sand')
diff_perez = irradiance.perez(
40, 180, irrad_data['dhi'], irrad_data['dni'], dni_et,
ephem_data['apparent_zenith'], ephem_data['azimuth'], relative_airmass)
out = irradiance.poa_components(
aoi, irrad_data['dni'], diff_perez, gr_sand)
expected = pd.DataFrame(np.array(
[[ 0. , -0. , 0. , 0. ,
0. ],
[ 35.19456561, 0. , 35.19456561, 31.4635077 ,
3.73105791],
[956.18253696, 798.31939281, 157.86314414, 109.08433162,
48.77881252],
[ 90.99624896, 33.50143401, 57.49481495, 45.45978964,
12.03502531]]),
columns=['poa_global', 'poa_direct', 'poa_diffuse', 'poa_sky_diffuse',
'poa_ground_diffuse'],
index=irrad_data.index)
assert_frame_equal(out, expected)
@pytest.mark.parametrize('pressure,expected', [
(93193, [[830.46567, 0.79742, 0.93505],
[676.09497, 0.63776, 3.02102]]),
(None, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]]),
(101325, [[868.72425, 0.79742, 1.01664],
[680.66679, 0.63776, 3.28463]])
])
def test_disc_value(pressure, expected):
# see GH 449 for pressure=None vs. 101325.
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2014-06-24T1200', '2014-06-24T1800'],
tz='America/Phoenix')
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
out = irradiance.disc(ghi, zenith, times, pressure=pressure)
expected_values = np.array(expected)
expected = pd.DataFrame(expected_values, columns=columns, index=times)
# check the pandas dataframe. check_less_precise is weird
assert_frame_equal(out, expected, check_less_precise=True)
# use np.assert_allclose to check values more clearly
assert_allclose(out.values, expected_values, atol=1e-5)
def test_disc_overirradiance():
columns = ['dni', 'kt', 'airmass']
ghi = np.array([3000])
solar_zenith = np.full_like(ghi, 0)
times = pd.date_range(start='2016-07-19 12:00:00', freq='1s',
periods=len(ghi), tz='America/Phoenix')
out = irradiance.disc(ghi=ghi, solar_zenith=solar_zenith,
datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[8.72544336e+02, 1.00000000e+00, 9.99493933e-01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_disc_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'kt', 'airmass']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith and/or max_airmass keep these results reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100)
expected = pd.DataFrame(np.array(
[[0., 1.16046346e-02, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# still get reasonable values because of max_airmass=12 limit
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[277.50185968, 1.0, 12.0]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# max_zenith keeps this result reasonable
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_airmass=100)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 1.0, 36.39544757]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow zenith to be close to 90 and airmass to be infinite
# and we get crazy values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[6.68577449e+03, 1.16046346e-02, 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# allow min cos zenith to be 0, zenith to be close to 90,
# and airmass to be very big and we get even higher DNI values
out = irradiance.disc(ghi=1.0, solar_zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100, max_airmass=100)
expected = pd.DataFrame(np.array(
[[7.21238390e+03, 1., 3.63954476e+01]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_dirint_value():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure)
assert_almost_equal(dirint_data.values,
np.array([868.8, 699.7]), 1)
def test_dirint_nans():
times = pd.date_range(start='2014-06-24T12-0700', periods=5, freq='6H')
ghi = pd.Series([np.nan, 1038.62, 1038.62, 1038.62, 1038.62], index=times)
zenith = pd.Series([10.567, np.nan, 10.567, 10.567, 10.567], index=times)
pressure = pd.Series([93193., 93193., np.nan, 93193., 93193.], index=times)
temp_dew = pd.Series([10, 10, 10, np.nan, 10], index=times)
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=temp_dew)
assert_almost_equal(dirint_data.values,
np.array([np.nan, np.nan, np.nan, np.nan, 893.1]), 1)
def test_dirint_tdew():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
temp_dew=10)
assert_almost_equal(dirint_data.values,
np.array([882.1, 672.6]), 1)
def test_dirint_no_delta_kt():
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([1038.62, 254.53], index=times)
zenith = pd.Series([10.567, 72.469], index=times)
pressure = 93193.
dirint_data = irradiance.dirint(ghi, zenith, times, pressure=pressure,
use_delta_kt_prime=False)
assert_almost_equal(dirint_data.values,
np.array([861.9, 670.4]), 1)
def test_dirint_coeffs():
coeffs = irradiance._get_dirint_coeffs()
assert coeffs[0, 0, 0, 0] == 0.385230
assert coeffs[0, 1, 2, 1] == 0.229970
assert coeffs[3, 2, 6, 3] == 1.032260
def test_dirint_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirint(ghi, solar_zenith, times)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected)
out = irradiance.dirint(ghi, solar_zenith, times, max_zenith=90)
expected = pd.Series([0.0, 0.0], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=90)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
out = irradiance.dirint(ghi, solar_zenith, times, min_cos_zenith=0,
max_zenith=100)
expected = pd.Series([0.0, 144.264507], index=times, name='dni')
assert_series_equal(out, expected, check_less_precise=True)
def test_gti_dirint():
times = pd.DatetimeIndex(
['2014-06-24T06-0700', '2014-06-24T09-0700', '2014-06-24T12-0700'])
poa_global = np.array([20, 300, 1000])
aoi = np.array([100, 70, 10])
zenith = np.array([80, 45, 20])
azimuth = np.array([90, 135, 180])
surface_tilt = 30
surface_azimuth = 180
# test defaults
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth)
expected_col_order = ['ghi', 'dni', 'dhi']
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 288.22574368, 60.59964218, 245.37532576],
[ 931.04078010, 695.94965324, 277.06172442]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test ignore calculate_gt_90
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
calculate_gt_90=False)
expected_no_90 = expected.copy()
expected_no_90.iloc[0, :] = np.nan
assert_frame_equal(output, expected_no_90)
# test pressure input
pressure = 93193.
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
pressure=pressure)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 289.81109139, 60.52460392, 247.01373353],
[ 932.46756378, 648.05001357, 323.49974813]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test albedo input
albedo = 0.05
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
albedo=albedo)
expected = pd.DataFrame(array(
[[ 21.3592591, 0. , 21.3592591 ],
[ 292.5162373, 64.42628826, 246.95997198],
[ 941.6753031, 727.16311901, 258.36548605]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
# test temp_dew input
temp_dew = np.array([70, 80, 20])
output = irradiance.gti_dirint(
poa_global, aoi, zenith, azimuth, times, surface_tilt, surface_azimuth,
temp_dew=temp_dew)
expected = pd.DataFrame(array(
[[ 21.05796198, 0. , 21.05796198],
[ 292.40468994, 36.79559287, 266.3862767 ],
[ 931.79627208, 689.81549269, 283.5817439]]),
columns=expected_col_order, index=times)
assert_frame_equal(output, expected)
def test_erbs():
index = pd.DatetimeIndex(['20190101']*3 + ['20190620'])
ghi = pd.Series([0, 50, 1000, 1000], index=index)
zenith = pd.Series([120, 85, 10, 10], index=index)
expected = pd.DataFrame(np.array(
[[0.00000000e+00, 0.00000000e+00, 0.00000000e+00],
[9.67192672e+01, 4.15703604e+01, 4.05723511e-01],
[7.94205651e+02, 2.17860117e+02, 7.18132729e-01],
[8.42001578e+02, 1.70790318e+02, 7.68214312e-01]]),
columns=['dni', 'dhi', 'kt'], index=index)
out = irradiance.erbs(ghi, zenith, index)
assert_frame_equal(np.round(out, 0), np.round(expected, 0))
def test_erbs_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
columns = ['dni', 'dhi', 'kt']
times = pd.DatetimeIndex(['2016-07-19 06:11:00'], tz='America/Phoenix')
# max_zenith keeps these results reasonable
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, min_cos_zenith=0)
expected = pd.DataFrame(np.array(
[[0., 1., 1.]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 4-5 9s will produce bad behavior without max_zenith limit
out = irradiance.erbs(ghi=1.0, zenith=89.99999,
datetime_or_doy=times, max_zenith=100)
expected = pd.DataFrame(np.array(
[[6.00115286e+03, 9.98952601e-01, 1.16377640e-02]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# 1-2 9s will produce bad behavior without either limit
out = irradiance.erbs(ghi=1.0, zenith=89.99, datetime_or_doy=times,
min_cos_zenith=0, max_zenith=100)
expected = pd.DataFrame(np.array(
[[4.78419761e+03, 1.65000000e-01, 1.00000000e+00]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
# check default behavior under hardest condition
out = irradiance.erbs(ghi=1.0, zenith=90, datetime_or_doy=times)
expected = pd.DataFrame(np.array(
[[0., 1., 0.01163776]]),
columns=columns, index=times)
assert_frame_equal(out, expected)
def test_erbs_all_scalar():
ghi = 1000
zenith = 10
doy = 180
expected = OrderedDict()
expected['dni'] = 8.42358014e+02
expected['dhi'] = 1.70439297e+02
expected['kt'] = 7.68919470e-01
out = irradiance.erbs(ghi, zenith, doy)
for k, v in out.items():
assert_allclose(v, expected[k], 5)
def test_dirindex(times):
ghi = pd.Series([0, 0, 1038.62, 254.53], index=times)
ghi_clearsky = pd.Series(
np.array([0., 79.73860422, 1042.48031487, 257.20751138]),
index=times
)
dni_clearsky = pd.Series(
np.array([0., 316.1949056, 939.95469881, 646.22886049]),
index=times
)
zenith = pd.Series(
np.array([124.0390863, 82.85457044, 10.56413562, 72.41687122]),
index=times
)
pressure = 93193.
tdew = 10.
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky,
zenith, times, pressure=pressure,
temp_dew=tdew)
dirint_close_values = irradiance.dirint(ghi, zenith, times,
pressure=pressure,
use_delta_kt_prime=True,
temp_dew=tdew).values
expected_out = np.array([np.nan, 0., 748.31562753, 630.72592644])
tolerance = 1e-8
assert np.allclose(out, expected_out, rtol=tolerance, atol=0,
equal_nan=True)
tol_dirint = 0.2
assert np.allclose(out.values, dirint_close_values, rtol=tol_dirint, atol=0,
equal_nan=True)
def test_dirindex_min_cos_zenith_max_zenith():
# map out behavior under difficult conditions with various
# limiting kwargs settings
# times don't have any physical relevance
times = pd.DatetimeIndex(['2014-06-24T12-0700', '2014-06-24T18-0700'])
ghi = pd.Series([0, 1], index=times)
ghi_clearsky = pd.Series([0, 1], index=times)
dni_clearsky = pd.Series([0, 5], index=times)
solar_zenith = pd.Series([90, 89.99], index=times)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, min_cos_zenith=0)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, max_zenith=90)
expected = pd.Series([nan, nan], index=times)
assert_series_equal(out, expected)
out = irradiance.dirindex(ghi, ghi_clearsky, dni_clearsky, solar_zenith,
times, min_cos_zenith=0, max_zenith=100)
expected = pd.Series([nan, 5.], index=times)
assert_series_equal(out, expected)
def test_dni():
ghi = pd.Series([90, 100, 100, 100, 100])
dhi = pd.Series([100, 90, 50, 50, 50])
zenith = | pd.Series([80, 100, 85, 70, 85]) | pandas.Series |
"""判断趋势示例"""
import datetime
import talib as ta
import pandas as pd
from core.back_test import BackTest
class MyBackTest(BackTest):
def sizer(self):
pass
def strategy(self):
date_now = self.data["trade_date"].iloc[-1]
sma_data_20 = ta.MA(self.data["close"], timeperiod=20, matype=0)
sma_data_20_20 = sma_data_20.iloc[-21:]
sma_data_20_20_point_raise_flag = sma_data_20_20.rolling(2).apply(lambda x: True if x[1] > x[0] else False)
        raise_percent = sma_data_20_20_point_raise_flag.value_counts()[True] / (len(sma_data_20_20_point_raise_flag) - 1)  # -1 because the first value produced by the rolling window is None
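        # Worked example (hypothetical values, not real data): for an SMA window
        # of [1, 2, 2, 3], rolling(2) yields [NaN, True, False, True], so
        # value_counts()[True] == 2, len(...) - 1 == 3, and raise_percent == 2 / 3.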
self.args["trend_result"].append([date_now, sma_data_20_20_point_raise_flag.value_counts()[True]])
def after_execute(self):
trend_result = pd.DataFrame(self.args["trend_result"], columns=["trade_date", "raise_num"])
print(trend_result["raise_num"].value_counts())
if __name__ == "__main__":
"""
{
"month": { # 2-3个月的趋势
"hs300": {
"稳定平静": [],
"稳定波动": [],
"上涨平静": [],
"上涨波动": [
[datetime.datetime(2016, 2, 1), datetime.datetime(2018, 1, 20)],
[datetime.datetime(2019, 1, 1), datetime.datetime(2019, 4, 15)]
],
"下跌平静": [],
"下跌波动": [
[datetime.datetime(2018, 1, 20), datetime.datetime(2019, 1, 1)],
                    [datetime.datetime(2019, 4, 15), datetime.datetime(2019, 6, 17)]
]
},
"600030": {
"稳定平静": [],
"稳定波动": [
[datetime.datetime(2016, 2, 1), datetime.datetime(2017, 5, 1)],
[datetime.datetime(2018, 10, 20), datetime.datetime(2018, 12, 20)],
],
"上涨平静": [
[datetime.datetime(2018, 12, 20), datetime.datetime(2019, 3, 7)]
],
"上涨波动": [
[datetime.datetime(2017, 5, 1), datetime.datetime(2018, 1, 24)]
],
"下跌平静": [],
"下跌波动": [
                    [datetime.datetime(2018, 1, 24), datetime.datetime(2018, 10, 20)],
[datetime.datetime(2019, 3, 7), datetime.datetime(2019, 6, 17)]
]
}
},
"week": { # 1-2个星期的趋势
"hs300": {
"稳定平静": [],
"稳定波动": [],
"上涨平静": [],
"上涨波动": [],
"下跌平静": [],
"下跌波动": []
},
"600030": {
"稳定平静": [],
"稳定波动": [],
"上涨平静": [],
"上涨波动": [],
"下跌平静": [],
"下跌波动": []
}
}
}
"""
point_data_hs300 = | pd.read_csv("./point_data_used_by_trend_hs300.csv", index_col=[0], parse_dates=[2]) | pandas.read_csv |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import pytz
from freezegun import freeze_time
from pandas import Timestamp
from pandas._testing import assert_frame_equal
from wetterdienst.exceptions import StartDateEndDateError
from wetterdienst.metadata.period import Period
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.provider.dwd.observation import (
DwdObservationDataset,
DwdObservationPeriod,
DwdObservationResolution,
)
from wetterdienst.provider.dwd.observation.api import DwdObservationRequest
from wetterdienst.provider.dwd.observation.metadata.parameter import (
DwdObservationParameter,
)
from wetterdienst.settings import Settings
def test_dwd_observation_data_api():
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationParameter.DAILY.PRECIPITATION_HEIGHT],
resolution=Resolution.DAILY,
period=[Period.HISTORICAL, Period.RECENT],
start_date=None,
end_date=None,
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
@pytest.mark.remote
def test_dwd_observation_data_dataset():
"""Request a parameter set"""
expected = DwdObservationRequest(
parameter=["kl"],
resolution="daily",
period=["recent", "historical"],
).filter_by_station_id(station_id=(1,))
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert given == expected
expected = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
).filter_by_station_id(
station_id=(1,),
)
given = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL, DwdObservationPeriod.RECENT],
start_date=None,
end_date=None,
).filter_by_station_id(
station_id=(1,),
)
assert expected == given
assert expected.parameter == [
(
DwdObservationDataset.CLIMATE_SUMMARY,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
def test_dwd_observation_data_parameter():
"""Test parameter given as single value without dataset"""
request = DwdObservationRequest(
parameter=["precipitation_height"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.CLIMATE_SUMMARY.PRECIPITATION_HEIGHT,
DwdObservationDataset.CLIMATE_SUMMARY,
)
]
request = DwdObservationRequest(
parameter=["climate_summary"],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
def test_dwd_observation_data_parameter_dataset_pairs():
"""Test parameters given as parameter - dataset pair"""
request = DwdObservationRequest(
parameter=[("climate_summary", "climate_summary")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [(DwdObservationDataset.CLIMATE_SUMMARY, DwdObservationDataset.CLIMATE_SUMMARY)]
request = DwdObservationRequest(
parameter=[("precipitation_height", "precipitation_more")],
resolution="daily",
period=["recent", "historical"],
)
assert request.parameter == [
(
DwdObservationParameter.DAILY.PRECIPITATION_MORE.PRECIPITATION_HEIGHT,
DwdObservationDataset.PRECIPITATION_MORE,
)
]
@pytest.mark.remote
def test_dwd_observation_data_fails():
# station id
assert (
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
period=[DwdObservationPeriod.HISTORICAL],
resolution=DwdObservationResolution.DAILY,
)
.filter_by_station_id(
station_id=["test"],
)
.df.empty
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=["abc"],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_dwd_observation_data_dates():
# time input
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[DwdObservationPeriod.HISTORICAL],
end_date="1971-01-01",
).filter_by_station_id(
station_id=[1],
)
assert request == DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
period=[
DwdObservationPeriod.HISTORICAL,
],
start_date=datetime(1971, 1, 1),
end_date=datetime(1971, 1, 1),
).filter_by_station_id(
station_id=[1],
)
with pytest.raises(StartDateEndDateError):
DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date="1951-01-01",
)
def test_request_period_historical():
# Historical period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
)
assert request.period == [
Period.HISTORICAL,
]
def test_request_period_historical_recent():
# Historical and recent period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(days=400),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
]
def test_request_period_historical_recent_now():
# Historical, recent and now period expected
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1971-01-01",
end_date=pd.Timestamp(datetime.utcnow()),
)
assert request.period == [
Period.HISTORICAL,
Period.RECENT,
Period.NOW,
]
@freeze_time(datetime(2022, 1, 29, 1, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_recent_now():
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.RECENT, Period.NOW]
@freeze_time(datetime(2022, 1, 29, 2, 30, tzinfo=pytz.timezone(Timezone.GERMANY.value)))
def test_request_period_now():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert request.period == [Period.NOW]
@freeze_time("2021-03-28T18:38:00+02:00")
def test_request_period_now_fixeddate():
# Now period
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) - pd.Timedelta(hours=2),
)
assert Period.NOW in request.period
def test_request_period_empty():
# No period (for example in future)
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date=pd.Timestamp(datetime.utcnow()) + pd.Timedelta(days=720),
)
assert request.period == []
@pytest.mark.remote
def test_dwd_observation_data_result_missing_data():
"""Test for DataFrame having empty values for dates where the station should not
have values"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-27", # few days before official start
end_date="1934-01-04", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
# Leave only one column to potentially contain NaN which is VALUE
df = request.values.all().df.drop("quality", axis=1)
df_1933 = df[df["date"].dt.year == 1933]
df_1934 = df[df["date"].dt.year == 1934]
assert not df_1933.empty and df_1933.dropna().empty
assert not df_1934.empty and not df_1934.dropna().empty
request = DwdObservationRequest(
parameter=DwdObservationParameter.HOURLY.TEMPERATURE_AIR_MEAN_200,
resolution=DwdObservationResolution.HOURLY,
start_date="2020-06-09 12:00:00", # no data at this time (reason unknown)
end_date="2020-06-09 12:00:00",
).filter_by_station_id(
station_id=["03348"],
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["03348"]),
"dataset": pd.Categorical(["temperature_air"]),
"parameter": pd.Categorical(["temperature_air_mean_200"]),
"date": [datetime(2020, 6, 9, 12, 0, 0, tzinfo=pytz.UTC)],
"value": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
"quality": pd.Series([pd.NA], dtype=pd.Float64Dtype()).astype(float),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular():
"""Test for actual values (tabular)"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 8.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 6.4], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 1008.60], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 0.5], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 0.7], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tabular_metric():
"""Test for actual values (tabular) in metric units"""
Settings.tidy = False
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=[1048],
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"date",
"qn_3",
"fx",
"fm",
"qn_4",
"rsk",
"rskf",
"sdk",
"shk_tag",
"nm",
"vpm",
"pm",
"tmk",
"upm",
"txk",
"tnk",
"tgk",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 2),
"dataset": pd.Categorical(["climate_summary"] * 2),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"qn_3": pd.Series([pd.NA, pd.NA], dtype=pd.Int64Dtype()),
"fx": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"fm": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"qn_4": pd.Series([pd.NA, 1], dtype=pd.Int64Dtype()),
"rsk": pd.to_numeric([pd.NA, 0.2], errors="coerce"),
"rskf": pd.to_numeric([pd.NA, 8], errors="coerce"),
"sdk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
"shk_tag": pd.Series([pd.NA, 0], dtype=pd.Int64Dtype()),
"nm": pd.to_numeric([pd.NA, 100.0], errors="coerce"),
"vpm": pd.to_numeric([pd.NA, 640.0], errors="coerce"),
"pm": pd.to_numeric([pd.NA, 100860.0], errors="coerce"),
"tmk": pd.to_numeric([pd.NA, 273.65], errors="coerce"),
"upm": pd.to_numeric([pd.NA, 97.00], errors="coerce"),
"txk": pd.to_numeric([pd.NA, 273.84999999999997], errors="coerce"),
"tnk": pd.to_numeric([pd.NA, 273.34999999999997], errors="coerce"),
"tgk": pd.to_numeric([pd.NA, pd.NA], errors="coerce"),
}
),
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_result_tidy_metric():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationDataset.CLIMATE_SUMMARY],
resolution=DwdObservationResolution.DAILY,
start_date="1933-12-31", # few days before official start
end_date="1934-01-01", # few days after official start,
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert list(df.columns.values) == [
"station_id",
"dataset",
"parameter",
"date",
"value",
"quality",
]
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 28),
"dataset": pd.Categorical(["climate_summary"] * 28),
"parameter": pd.Categorical(
[
"fx",
"fx",
"fm",
"fm",
"rsk",
"rsk",
"rskf",
"rskf",
"sdk",
"sdk",
"shk_tag",
"shk_tag",
"nm",
"nm",
"vpm",
"vpm",
"pm",
"pm",
"tmk",
"tmk",
"upm",
"upm",
"txk",
"txk",
"tnk",
"tnk",
"tgk",
"tgk",
]
),
"date": [
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
datetime(1933, 12, 31, tzinfo=pytz.UTC),
datetime(1934, 1, 1, tzinfo=pytz.UTC),
],
"value": pd.to_numeric(
[
# FX
pd.NA,
pd.NA,
# FM
pd.NA,
pd.NA,
# RSK
pd.NA,
0.2,
# RSKF
pd.NA,
8,
# SDK
pd.NA,
pd.NA,
# SHK_TAG
pd.NA,
0,
# NM
pd.NA,
100.0,
# VPM
pd.NA,
640.0,
# PM
pd.NA,
100860.0,
# TMK
pd.NA,
273.65,
# UPM
pd.NA,
97.00,
# TXK
pd.NA,
273.84999999999997,
# TNK
pd.NA,
273.34999999999997,
# TGK
pd.NA,
pd.NA,
],
errors="coerce",
).astype(float),
"quality": pd.Series(
[
# FX
np.NaN,
np.NaN,
# FM
np.NaN,
np.NaN,
# RSK
np.NaN,
1,
# RSKF
np.NaN,
1,
# SDK
np.NaN,
np.NaN,
# SHK_TAG
np.NaN,
1,
# NM
np.NaN,
1,
# VPM
np.NaN,
1,
# PM
np.NaN,
1,
# TMK
np.NaN,
1,
# UPM
np.NaN,
1,
# TXK
np.NaN,
1,
# TNK
np.NaN,
1,
# TGK
np.NaN,
np.NaN,
],
dtype=float,
),
},
),
# Needed since pandas 1.2?
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_10_minutes_result_tidy():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = False
Settings.si_units = False
request = DwdObservationRequest(
parameter=[DwdObservationParameter.MINUTE_10.TEMPERATURE_AIR.PRESSURE_AIR_SITE],
resolution=DwdObservationResolution.MINUTE_10,
start_date="1999-12-31 22:00",
end_date="1999-12-31 23:00",
).filter_by_station_id(
station_id=(1048,),
)
df = request.values.all().df
assert_frame_equal(
df,
pd.DataFrame(
{
"station_id": pd.Categorical(["01048"] * 7),
"dataset": pd.Categorical(["temperature_air"] * 7),
"parameter": pd.Categorical(["pp_10"] * 7),
"date": [
datetime(1999, 12, 31, 22, 00, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 10, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 20, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 30, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 40, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 22, 50, tzinfo=pytz.UTC),
datetime(1999, 12, 31, 23, 00, tzinfo=pytz.UTC),
],
"value": pd.to_numeric(
[
996.1,
996.2,
996.2,
996.2,
996.3,
996.4,
pd.NA,
],
errors="coerce",
).astype(float),
"quality": pd.to_numeric([1, 1, 1, 1, 1, 1, pd.NA], errors="coerce").astype(float),
},
),
# Needed since pandas 1.2?
check_categorical=False,
)
@pytest.mark.remote
def test_dwd_observation_data_monthly_tidy():
"""Test for actual values (tidy) in metric units"""
Settings.tidy = True
Settings.humanize = True
Settings.si_units = True
request = DwdObservationRequest(
parameter=[DwdObservationParameter.MONTHLY.PRECIPITATION_HEIGHT],
resolution=DwdObservationResolution.MONTHLY,
start_date="2020-01-01",
end_date="2020-12-31",
).filter_by_station_id("00433")
values = request.values.all().df
expected_df = pd.DataFrame(
{
"station_id": pd.Categorical(["00433"] * 12),
"dataset": pd.Categorical(["climate_summary"] * 12),
"parameter": pd.Categorical(["precipitation_height"] * 12),
"date": [
Timestamp("2020-01-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-02-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-03-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-04-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-05-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-06-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-07-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-08-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-09-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-10-01 00:00:00+0000", tz="UTC"),
Timestamp("2020-11-01 00:00:00+0000", tz="UTC"),
| Timestamp("2020-12-01 00:00:00+0000", tz="UTC") | pandas.Timestamp |
from .models import *
import pandas as pd
import numpy as np
from copy import deepcopy
from scipy.stats import mode
TIME_UNITS = 'time'
SAMP_UNITS = 'samples'
def extract_event_ranges(samples, events_dataframe, start_offset=0,
end_offset=0, round_indices=True, borrow_attributes=[]):
""" Extracts ranges from samples based on event timing.
This method works, but has been replaced by extract_events (below).
Parameters
----------
samples (Samples object)
The Samples object from which you'd like to extract ranges.
events_dataframe (DataFrame object containing event timing info)
Indices should be onset times, duration should be in a column named
'duration'. Note that if you have an Events object evs, and it has,
say, a set of events named "EBLINK", then you can pass evs.EBLINK
for this argument.
start_offset (number - same type as your samples index, probably ms)
Each index of the events_dataframe is an event onset time, and we add
the start_offset to each of these times to find the beginnings of our
target ranges, then search the sample times to find the sample indices
of these range onset times. If there isn't an exact match, we pick the
last sample time before the range onset time.
end_offset (number - same type as your samples index, probably ms)
Like start_offset, but for the offsets of target ranges instead of the
onsets. Note, the sample containing the range offset time will be
*included* in the extracted range.
borrow_attributes (list of strings)
A list of column names in the events_dataframe whose values you would
like to copy to the respective ranges. For each item in the list, a
column will be created in the ranges dataframe - if the column does
        not exist in the events dataframe, the values in each
        corresponding range will be set to float('nan').
round_indices (bool)
Deprecated.
"""
from warnings import warn
warn("extract_event_ranges is deprecated, use extract_events instead.")
if start_offset >= end_offset:
raise ValueError("start_offset must be < end_offset")
# get the list of start and stop times - note that we no longer pay
# attention to the stop times (see below)
e_starts = events_dataframe.index.to_series()
r_times = pd.DataFrame(e_starts + end_offset)
r_times.index += start_offset
r_times.columns = ['last_onset']
# sanity check - make sure no events start before the data, or end afterwards
if any(r_times.index < samples.index[0]):
raise ValueError(
"at least one event range starts before the first sample")
if any(r_times.index > samples.index[-1]):
raise ValueError("at least one event range ends after the last sample")
# get the indices for the first event (minus the first index)
ev_idxs = np.logical_and(samples.index <= r_times.last_onset.iloc[0],
samples.index > r_times.index[0])
# this method just uses the length of the first event as a template for
# all future events
r_len = len(np.where(ev_idxs)[0]) + 1
# we're going to make a df with a hierarchical index.
samples['orig_idx'] = samples.index
midx = pd.MultiIndex.from_product([list(range(len(e_starts))), list(range(r_len))],
names=['event', 'onset'])
# get all of the samples!
# idxs = []
df = pd.DataFrame()
idx = 0
for stime, etime in r_times.itertuples():
# get the start time... add the number of indices that you want...
s_idx = np.where(samples.index > stime)[0][0] - 1
e_idx = s_idx + r_len - 1
stime = samples.index[s_idx]
etime = samples.index[e_idx]
new_df = samples.loc[stime:etime]
if borrow_attributes:
for ba in borrow_attributes:
new_df[ba] = events_dataframe.iloc[idx].get(ba, float('nan'))
df = pd.concat([df, new_df])
idx += 1
df.index = midx
return df
def extract_events(samples, events, offset=0, duration=0,
units='samples', borrow_attributes=[], return_count=False):
""" Extracts ranges from samples based on event timing and sample count.
Note that we will exclude any ranges which would cross discontinuities in
the dataset. If there are no events to return, we will return None
Parameters
==========
samples (Samples object)
The Samples object from which you'd like to extract ranges.
events (DataFrame object containing event timing info)
Indices should be onset times, duration should be in a column named
'duration'. Note that if you have an Events object evs, and it has,
say, a set of events named "EBLINK", then you can pass evs.EBLINK
for this argument.
offset (number)
How to position extraction range start relative to event start.
Interpretation depends upon 'units'. Default 0.
duration (number)
How long a range to extract. Interpretation depends upon 'units'.
Default 0. Note that if this and offset are both 0, you'll get None in
return.
units (string constant)
Can be 'time' or 'samples'. Default is 'samples'. Determines which index
will be used to interpret the offset and duration parameters. If units
is 'time', then we will extract ranges offset from each event's start
time by 'offset' ms, and 'duration' ms long (or as close as we can get
given your sampling frequency). Actually, we use the sample count of
the first event as a template for all events, so this method can be a
little slippery. For finer control over the size of the returned
dataset, you can set 'units' to 'samples'. Then, we will extract ranges
offset from each event's start time by 'offset' *samples*, and
'duration' samples long. It's then up to you to calculate how long the
sample is in time, based on your sampling rate.
borrow_attributes (list of strings)
A list of column names in the 'events' whose values you would like to
copy to the respective ranges. For each item in the list, a column
will be created in the ranges dataframe - if the column does not exist
        in the events dataframe, the values in each corresponding range
will be set to float('nan').
return_count (bool)
If true, will return the number of events extracted
"""
# dummy check
if offset == 0 and duration == 0:
if return_count:
return None, 0
return None
# negative duration should raise an exception
if duration <= 0:
raise ValueError("Duration must be >0")
# get the list of start time indices
e_starts = events.index.to_series()
# find the indices of discontinuities
idiff = np.diff(samples.index)
diffmode = mode(idiff[np.where(idiff > 0)])[0][0]
disc_idxs = np.where(idiff > diffmode)[0] + 1
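    # diffmode is the typical (modal) positive step between sample timestamps;
    # any step larger than that marks the first sample after a recording gap.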
if units == TIME_UNITS:
# we want all of the extracted chunks to be the same length. but we're
# dealing with time, so who knows if time and samples are well aligned
# in all cases. so, we're going to get the sample index bounds for the
# first event, then re-use the length of the first event (# samples) for
# all other events.
# first, find the first samples of all of the events (taking the offset
# into account). searchsorted returns the insertion point needed to
# maintain sort order, so the first time index of an event is the
# leftmost insertion point for each event's start time.
r_times = e_starts + offset
r_idxs = np.searchsorted(samples.index, r_times.iloc[:], 'left')
if any(r_times < samples.index[0]):
raise ValueError(
"at least one event range starts before the first sample")
# exclude events that cross discontinuities
e_idxs = np.searchsorted(samples.index, r_times.iloc[:] + duration, 'left')
ok_idxs = [i for i in range(len(r_idxs)) if not
                   any([all((r_idxs[i] <= d, e_idxs[i] >= d)) for d in disc_idxs])]
        if (len(r_idxs) - len(ok_idxs)) > 0:
print("excluding %d events for crossing discontinuities" % (len(r_idxs) - len(ok_idxs)))
# return None if there's nothing to do
if len(ok_idxs) == 0:
if return_count:
return None, 0
return None
# trim the events data
events = events.iloc[ok_idxs]
e_starts = e_starts.iloc[ok_idxs]
r_idxs = r_idxs[ok_idxs]
e_idxs = e_idxs[ok_idxs]
# find the duration of the first event.
r_dur = e_idxs[0] - r_idxs[0]
elif units == SAMP_UNITS:
# just find the indexes of the event starts, and offset by sample count
r_idxs = np.searchsorted(samples.index, e_starts.iloc[:], 'left') + offset
r_dur = duration
# exclude events that cross discontinuities
e_idxs = r_idxs + duration
ok_idxs = [i for i in range(len(r_idxs)) if not
                   any([all((r_idxs[i] <= d, e_idxs[i] >= d)) for d in disc_idxs])]
        if (len(r_idxs) - len(ok_idxs)) > 0:
print("excluding %d events for crossing discontinuities" % (len(r_idxs) - len(ok_idxs)))
# return None if there's nothing to do
if len(ok_idxs) == 0:
if return_count:
return None, 0
return None
# trim the events data
events = events.iloc[ok_idxs]
e_starts = e_starts.iloc[ok_idxs]
r_idxs = r_idxs[ok_idxs]
e_idxs = e_idxs[ok_idxs]
else:
raise ValueError("'%s' is not a valid unit!" % units)
# sanity check - make sure no events start before the data, or end afterwards
if any(r_idxs < 0):
raise ValueError(
"at least one event range starts before the first sample")
if any(e_idxs >= len(samples)):
raise ValueError(
"at least one event range ends after the last sample")
# make a hierarchical index
samples['orig_idx'] = samples.index
midx = pd.MultiIndex.from_product([list(range(len(e_starts))), list(range(r_dur))],
names=['event', 'onset'])
# get the samples
df = | pd.DataFrame() | pandas.DataFrame |
"""Performs attention intervention on Winobias samples and saves results to JSON file."""
import json
import fire
from pandas import DataFrame
from transformers import (
GPT2Tokenizer, TransfoXLTokenizer, XLNetTokenizer,
BertTokenizer, DistilBertTokenizer, RobertaTokenizer
)
import winobias
from attention_utils import perform_interventions, get_odds_ratio
from experiment import Model
def get_interventions_winobias(gpt2_version, do_filter, split, model, tokenizer,
device='cuda', filter_quantile=0.25):
if split == 'dev':
examples = winobias.load_dev_examples()
elif split == 'test':
examples = winobias.load_test_examples()
else:
raise ValueError(f"Invalid split: {split}")
json_data = {'model_version': gpt2_version,
'do_filter': do_filter,
'split': split,
'num_examples_loaded': len(examples)}
if do_filter:
interventions = [ex.to_intervention(tokenizer) for ex in examples]
df = DataFrame({'odds_ratio': [get_odds_ratio(intervention, model) for intervention in interventions]})
df_expected = df[df.odds_ratio > 1]
threshold = df_expected.odds_ratio.quantile(filter_quantile)
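        # Keep only examples whose odds ratio exceeds the `filter_quantile`
        # quantile of the already-aligned (odds ratio > 1) examples.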
filtered_examples = []
assert len(examples) == len(df)
for i in range(len(examples)):
ex = examples[i]
odds_ratio = df.iloc[i].odds_ratio
if odds_ratio > threshold:
filtered_examples.append(ex)
print(f'Num examples with odds ratio > 1: {len(df_expected)} / {len(examples)}')
print(
f'Num examples with odds ratio > {threshold:.4f} ({filter_quantile} quantile): {len(filtered_examples)} / {len(examples)}')
json_data['num_examples_aligned'] = len(df_expected)
json_data['filter_quantile'] = filter_quantile
json_data['threshold'] = threshold
examples = filtered_examples
json_data['num_examples_analyzed'] = len(examples)
interventions = [ex.to_intervention(tokenizer) for ex in examples]
return interventions, json_data
def intervene_attention(gpt2_version, do_filter, split, device='cuda',
filter_quantile=0.25, random_weights=False,
masking_approach=1):
model = Model(output_attentions=True, gpt2_version=gpt2_version,
device=device, random_weights=random_weights,
masking_approach=masking_approach)
tokenizer = (GPT2Tokenizer if model.is_gpt2 else
TransfoXLTokenizer if model.is_txl else
XLNetTokenizer if model.is_xlnet else
BertTokenizer if model.is_bert else
DistilBertTokenizer if model.is_distilbert else
RobertaTokenizer).from_pretrained(gpt2_version)
interventions, json_data = get_interventions_winobias(gpt2_version, do_filter, split, model, tokenizer,
device, filter_quantile)
results = perform_interventions(interventions, model)
json_data['mean_total_effect'] = | DataFrame(results) | pandas.DataFrame |
import pytest
import pandas as pd
from data_dashboard.features import NumericalFeature, CategoricalFeature, Features
from data_dashboard.descriptor import FeatureDescriptor
@pytest.mark.parametrize(
("column_name",),
(
("AgeGroup",),
("bool",),
("Product",),
("Sex",),
("Target",)
)
)
def test_categorical_feature_create_raw_mapping(
data_classification_balanced, expected_raw_mapping, column_name
):
"""Testing if ._create_raw_mapping function correctly extracts unique values in the Series and maps them."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = pd.concat([X, y], axis=1)
series = df[column_name]
feature = CategoricalFeature(series, column_name, "test", False)
actual_mapping = feature._create_raw_mapping()
expected_mapping = expected_raw_mapping[column_name]
assert actual_mapping == expected_mapping
@pytest.mark.parametrize(
("column_name",),
(
("AgeGroup",),
("bool",),
("Product",),
("Sex",),
("Target",)
)
)
def test_categorical_feature_create_mapped_series(
data_classification_balanced, expected_raw_mapping, column_name
):
"""Testing if Series values are correctly replaced with a "raw" mapping."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = pd.concat([X, y], axis=1)
expected_series = df[column_name].copy().replace(expected_raw_mapping[column_name])
series = df[column_name]
feature = CategoricalFeature(series, column_name, "test", False)
actual_series = feature._create_mapped_series()
assert actual_series.equals(expected_series)
@pytest.mark.parametrize(
("column_name",),
(
("AgeGroup",),
("bool",),
("Product",),
("Sex",),
("Target",)
)
)
def test_categorical_features_create_descriptive_mapping(
data_classification_balanced, expected_mapping, column_name, feature_descriptor
):
"""Testing if ._create_descriptive_mapping() correctly creates mapping between raw mapping and descriptions."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = pd.concat([X, y], axis=1)
series = df[column_name]
feature = CategoricalFeature(series, column_name, "test", False, mapping=feature_descriptor.mapping(column_name))
actual_mapping = feature._create_descriptive_mapping()
assert actual_mapping == expected_mapping[column_name]
@pytest.mark.parametrize(
("column_name",),
(
("AgeGroup",),
("Target",)
)
)
def test_categorical_features_create_descriptive_mapping_changed_keys(
data_classification_balanced, feature_descriptor_broken, expected_mapping, column_name
):
"""Testing if ._create_descriptive_mapping() creates correct output when the keys are incorrect:
descriptions provided as str, yet the data itself is int/float."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = pd.concat([X, y], axis=1)
series = df[column_name]
feature = CategoricalFeature(series, column_name, "test", False,
mapping=feature_descriptor_broken.mapping(column_name))
actual_mapping = feature._create_descriptive_mapping()
assert actual_mapping == expected_mapping[column_name]
@pytest.mark.parametrize(
("column_name",),
(
("Height",),
("Price",),
)
)
def test_numerical_features_no_mapping(
data_classification_balanced, column_name
):
"""Testing if .mapping() from NumericalFeature returns None."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = pd.concat([X, y], axis=1)
series = df[column_name]
feature = NumericalFeature(series, column_name, "test", False)
assert feature.mapping() is None
@pytest.mark.parametrize(
("column_name", "expected_type"),
(
("AgeGroup", "categorical"),
("bool", "categorical"),
("Date", "date"),
("Height", "numerical"),
("Price", "numerical"),
("Product", "categorical"),
("Sex", "categorical"),
("Target", "categorical")
)
)
def test_features_impute_column_type(data_classification_balanced, column_name, expected_type):
"""Testing if imputing column type works correctly."""
X = data_classification_balanced[0]
y = data_classification_balanced[1]
df = | pd.concat([X, y], axis=1) | pandas.concat |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(mssql_url: str) -> None:
query = (
"SELECT dbo.increment(test_int) AS test_int FROM test_table ORDER BY test_int"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(mssql_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(mssql_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_without_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_without_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([1, 2, 0], dtype="int64"),
"test_nullint": pd.Series([3, None, 5], dtype="Int64"),
"test_str": pd.Series(["str1", "str2", "a"], dtype="object"),
"test_float": pd.Series([None, 2.2, 3.1], dtype="float64"),
"test_bool": pd.Series([True, False, None], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_without_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_with_partition(mssql_url: str) -> None:
query = "SELECT top 3 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(3),
data={
"test_int": pd.Series([0, 1, 2], dtype="int64"),
"test_nullint": pd.Series([5, 3, None], dtype="Int64"),
"test_str": pd.Series(["a", "str1", "str2"], dtype="object"),
"test_float": pd.Series([3.1, None, 2.20], dtype="float64"),
"test_bool": pd.Series([None, True, False], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_limit_large_with_partition(mssql_url: str) -> None:
query = "SELECT top 10 * FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_without_partition_range(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(["a", "c"], dtype="object"),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series([None, None], dtype="boolean"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_selection(mssql_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_nullint": pd.Series([5, 3, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[None, True, False, False, None, True], dtype="boolean"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_projection(mssql_url: str) -> None:
query = "SELECT test_int, test_float, test_str FROM test_table"
df = read_sql(
mssql_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([0, 1, 2, 3, 4, 1314], dtype="int64"),
"test_float": pd.Series([3.1, None, 2.2, 3, 7.8, -10], dtype="float64"),
"test_str": pd.Series(
["a", "str1", "str2", "b", "c", None], dtype="object"
),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_mssql_with_partition_and_spja(mssql_url: str) -> None:
query = """
SELECT test_bool, AVG(test_float) AS avg, SUM(test_int) AS sum
FROM test_table AS a, test_str AS b
WHERE a.test_int = b.id AND test_nullint IS NOT NULL
GROUP BY test_bool
ORDER BY sum
"""
df = read_sql(mssql_url, query, partition_on="sum", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([True, False, None], dtype="boolean"),
"avg": pd.Series([None, 3, 5.45], dtype="float64"),
"sum": pd.Series([1, 3, 4], dtype="Int64"),
},
)
df = df.sort_values("sum").reset_index(drop=True)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < -100"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": pd.Series([], dtype="int64"),
"test_nullint": pd.Series([], dtype="Int64"),
"test_str": pd.Series([], dtype="object"),
"test_float": pd.Series([], dtype="float64"),
"test_bool": pd.Series([], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_empty_result_on_some_partition(mssql_url: str) -> None:
query = "SELECT * FROM test_table where test_int < 1"
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=3)
expected = pd.DataFrame(
data={
"test_int": | pd.Series([0], dtype="int64") | pandas.Series |
"""Model the behavioral data."""
# %%
# Imports
import itertools
import json
import sys
import warnings
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pingouin
import scipy.stats
import seaborn as sns
from scipy.optimize import Bounds, minimize
from tqdm.auto import tqdm
from config import ANALYSIS_DIR_LOCAL, DATA_DIR_LOCAL, NUMBERS, STREAMS, SUBJS
from utils import (
eq2,
get_sourcedata,
parse_overwrite,
prep_model_inputs,
psychometric_model,
)
# %%
# Settings
numbers_rescaled = np.interp(NUMBERS, (NUMBERS.min(), NUMBERS.max()), (-1, +1))
# Use a method that can work with bounds. "L-BFGS-B" is scipy default.
# "Nelder-Mead", "L-BFGS-B", "Powell" work, Nelder-Mead seems to work best.
minimize_method = "Nelder-Mead"
minimize_method_opts = {
"Nelder-Mead": dict(maxiter=1000),
"L-BFGS-B": dict(
maxiter=1000, eps=1e-6
), # https://stats.stackexchange.com/a/167199/148275
"Powell": dict(
maxiter=1000,
),
}[minimize_method]
param_names = ["bias", "kappa", "leakage", "noise"]
# parameter bounds (in order of param_names)
lower = np.array([-0.5, 0, 0, 0.01], dtype=float)
upper = np.array([0.5, 5, 0, 3], dtype=float)
bounds = Bounds(lower, upper)
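# Note: the leakage bounds above are [0, 0], so leakage stays fixed at 0 and
# only bias, kappa, and noise are actually free during fitting.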
analysis_dir = ANALYSIS_DIR_LOCAL
data_dir = DATA_DIR_LOCAL
overwrite = False
do_plot = True
do_fit_singlefx = False
# for plotting
SUBJ_LINE_SETTINGS = dict(color="black", alpha=0.1, linewidth=0.75)
# %%
# When not in an IPython session, get command line inputs
# https://docs.python.org/3/library/sys.html#sys.ps1
if not hasattr(sys, "ps1"):
defaults = dict(
analysis_dir=analysis_dir,
data_dir=data_dir,
overwrite=overwrite,
do_plot=do_plot,
)
defaults = parse_overwrite(defaults)
analysis_dir = defaults["analysis_dir"]
data_dir = defaults["data_dir"]
overwrite = defaults["overwrite"]
do_plot = defaults["do_plot"]
# %%
# Prepare file paths
fname_estimates = analysis_dir / "derived_data" / f"estim_params_{minimize_method}.tsv"
fname_estimates.parent.mkdir(parents=True, exist_ok=True)
fname_x0s = analysis_dir / "derived_data" / f"x0s_{minimize_method}.npy"
fname_neurometrics = analysis_dir / "derived_data" / "neurometrics_params.tsv"
fname_neurometrics_erp = analysis_dir / "derived_data" / "erp_adm.tsv"
# %%
# Simulate accuracies over parameter ranges
# fixed model parameter values
bias = 0
kappa = 1
leakage = 0
noise = 0.1
return_val = "G"
# vary one parameter over these ranges while holding all others fixed
simulation = {
param: ({"subject": [], "stream": [], "accuracy": [], param: []}, xs, kwargs)
for param, xs, kwargs in zip(
("bias", "kappa", "leakage", "noise"),
(
[-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1],
[0.1, 0.5, 1, 2, 4],
[0, 0.2, 0.4, 0.6, 0.8, 1],
[0.01, 0.1, 1, 2, 4, 8],
),
(
{"kappa": kappa, "leakage": leakage, "noise": noise},
{"bias": bias, "leakage": leakage, "noise": noise},
{"bias": bias, "kappa": kappa, "noise": noise},
{"bias": bias, "kappa": kappa, "leakage": leakage},
),
)
}
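# Each entry of `simulation` maps a parameter name to (data, xs, kwargs): `data`
# collects the simulated accuracies, `xs` is the grid of values swept for that
# parameter, and `kwargs` holds the remaining parameters fixed as defined above.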
# NOTE: We run over subjects and streams, but results are almost identical as when
# running over data from a single subject: The only variance comes from slightly
# different underlying datasets. But this has a small influence given the uniform
# distribution and large number of trials in these datasets.
for param, (data, xs_key, kwargs) in tqdm(simulation.items()):
for x in xs_key:
kwargs.update({param: x})
parameters = np.array(
[kwargs["bias"], kwargs["kappa"], kwargs["leakage"], kwargs["noise"]]
)
for sub in SUBJS:
for stream in STREAMS:
_, tsv = get_sourcedata(sub, stream, data_dir)
df = pd.read_csv(tsv, sep="\t")
df.insert(0, "subject", sub)
X, categories, y, y_true, ambiguous = prep_model_inputs(df)
# Run model
loss, CP = psychometric_model(
parameters=parameters,
X=X,
categories=categories,
y=y,
return_val=return_val,
)
# Calculate accuracy on non-ambiguous, objectively correct choices
acc = 1 - np.mean(np.abs(y_true[~ambiguous] - CP[~ambiguous]))
# Save data
data["subject"].append(sub)
data["stream"].append(stream)
data["accuracy"].append(acc)
data[param].append(x)
# %%
# Plot accuracy simulation results
if do_plot:
dfs = {}
with sns.plotting_context("talk"):
fig, axs = plt.subplots(2, 2, figsize=(15, 10), sharey=True)
for i, param in enumerate(simulation):
ax = axs.flat[i]
# Get data and turn into df
data, _, kwargs = simulation[param]
kwargs.pop(param, None)
df = pd.DataFrame.from_dict(data)
dfs[param] = df
# plot
sns.pointplot(
x=param,
y="accuracy",
hue="stream",
data=df,
ci=68,
ax=ax,
scale=0.5,
dodge=True,
)
ax.set_title(json.dumps(kwargs)[1:-1])
if i > 0:
ax.get_legend().remove()
fig.suptitle("Model run on participant data (N=30)", y=1.01)
sns.despine(fig)
fig.tight_layout()
# %%
# Simulate change in accuracy depending on noise and kappa parameters
# We can take data from any subj or stream, results will be nearly the same
sub = 32
stream = "dual"
_, tsv = get_sourcedata(sub, stream, data_dir)
df = pd.read_csv(tsv, sep="\t")
df.insert(0, "subject", sub)
X, categories, y, y_true, ambiguous = prep_model_inputs(df)
# Leave bias and leakage fixed at standard values
bias = 0
leakage = 0
return_val = "G"
# Vary kappa and noise
n = 101
kappas = np.linspace(0, 2.5, n)
noises = np.linspace(0.01, 2, n)[::-1]
idx_kappa_one = (np.abs(kappas - 1.0)).argmin()
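# Index of the kappa grid point closest to 1; used below as the reference
# column when computing the change in accuracy (Δ accuracy).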
# Apply different kinds of "gain normalization" to simulate limited-capacity agents
# (limited amount of "gain", e.g., firing speed of neurons, glucose in brain, ...)
gnorm_types = ["none", "experiment-wise", "trial-wise"]
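# "experiment-wise": one shared gain pool is computed from the full set of rescaled numbers;
# "trial-wise": the gain pool is recomputed per trial from that trial's evidence (X * categories);
# "none": no gain normalization is applied (gnorm=False below).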
# Collect data
acc_grid = np.full((n, n, len(gnorm_types)), np.nan)
for ignorm_type, gnorm_type in enumerate(tqdm(gnorm_types)):
for ikappa, kappa in enumerate(kappas):
# Setup gain normalization for this kappa parameterization
gain = None
gnorm = True
if gnorm_type == "experiment-wise":
feature_space = np.atleast_2d(numbers_rescaled)
elif gnorm_type == "trial-wise":
feature_space = X * categories
else:
assert gnorm_type == "none"
gnorm = False
if gnorm:
gain = eq2(feature_space=feature_space, kappa=kappa, bias=bias)
# Calculate accuracy for each noise level
kwargs = dict(
X=X,
categories=categories,
y=y,
return_val=return_val,
gain=gain,
gnorm=gnorm,
)
for inoise, noise in enumerate(noises):
parameters = np.array([bias, kappa, leakage, noise])
_, CP = psychometric_model(parameters=parameters, **kwargs)
acc = 1 - np.mean(np.abs(y_true[~ambiguous] - CP[~ambiguous]))
acc_grid[inoise, ikappa, ignorm_type] = acc
# %%
# Plot "change in accuracy" simulations
if do_plot:
with sns.plotting_context("talk"):
fig, axs = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
for ignorm_type, gnorm_type in enumerate(gnorm_types):
ax = axs.flat[ignorm_type]
grid_norm = (
acc_grid[..., ignorm_type].T - acc_grid[..., idx_kappa_one, ignorm_type]
).T
# Trace maximum values using np.nan (inserts white cells)
grid_norm[np.arange(n), np.argmax(grid_norm, axis=1)] = np.nan
im = ax.imshow(grid_norm, origin="upper", interpolation="nearest")
ax.axvline(idx_kappa_one, ls="--", c="w")
fig.colorbar(im, ax=ax, label="Δ accuracy", shrink=0.625)
# Set ticklabels
ax.xaxis.set_major_locator(plt.MaxNLocator(6))
ax.yaxis.set_major_locator(plt.MaxNLocator(6))
xticklabels = (
[""]
+ [f"{i:.2f}" for i in kappas[(ax.get_xticks()[1:-1]).astype(int)]]
+ [""]
)
yticklabels = (
[""]
+ [f"{i:.1f}" for i in noises[(ax.get_yticks()[1:-1]).astype(int)]]
+ [""]
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
category=UserWarning,
message="FixedFormatter .* FixedLocator",
)
ax.set(
xticklabels=xticklabels,
yticklabels=yticklabels,
)
ax.set(
xlabel="kappa (k)",
ylabel="noise (s)",
title=f'Gain normalization:\n"{gnorm_type}"',
)
ax.set_ylabel(ax.get_ylabel(), labelpad=10)
fig.tight_layout()
# %%
# Fit model parameters for each subj and stream
# Initial guesses for parameter values: `x0`
bias0 = 0
kappa0 = 1
leakage0 = 0
noise0 = 0.1
x0 = np.array([bias0, kappa0, leakage0, noise0])
data = {
"subject": [],
"stream": [],
"success": [],
"loss": [],
"bias": [],
"kappa": [],
"leakage": [],
"noise": [],
}
for sub in tqdm(SUBJS):
for stream in STREAMS:
_, tsv = get_sourcedata(sub, stream, data_dir)
df = pd.read_csv(tsv, sep="\t")
df.insert(0, "subject", sub)
X, categories, y, y_true, ambiguous = prep_model_inputs(df)
# Add non-changing arguments to function
kwargs = dict(
X=X,
categories=categories,
y=y,
return_val="G_noCP",
gain=None,
gnorm=False,
)
fun = partial(psychometric_model, **kwargs)
# estimate
res = minimize(
fun=fun,
x0=x0,
method=minimize_method,
bounds=bounds,
options=minimize_method_opts,
)
data["subject"].append(sub)
data["stream"].append(stream)
data["success"].append(res.success)
data["loss"].append(res.fun)
data["bias"].append(res.x[0])
data["kappa"].append(res.x[1])
data["leakage"].append(res.x[2])
data["noise"].append(res.x[3])
df_fixed = pd.DataFrame.from_dict(data)
# Sanity check: no failures during fitting
assert not np.any(~df_fixed["success"])
df_fixed.drop(["success"], axis=1, inplace=True)
# This is data with "fixed" start values
df_fixed["bias0"] = bias0
df_fixed["kappa0"] = kappa0
df_fixed["leakage0"] = leakage0
df_fixed["noise0"] = noise0
df_fixed["x0_type"] = "fixed"
df_fixed["method"] = minimize_method
# %%
# Plot estimation results
def plot_estim_res(df, plot_single_subj, param_names):
"""Help to plot estimates."""
hlines = dict(bias=0, kappa=1, leakage=0, noise=0)
with sns.plotting_context("talk"):
fig, axs = plt.subplots(1, len(param_names), figsize=(10, 5))
for iparam, param in enumerate(param_names):
ax = axs.flat[iparam]
sns.pointplot(
x="stream", y=param, data=df, order=STREAMS, ci=68, ax=ax, color="black"
)
if plot_single_subj:
sns.swarmplot(
x="stream",
y=param,
data=df,
order=STREAMS,
ax=ax,
alpha=0.5,
size=2,
)
# https://stackoverflow.com/a/63171175/5201771
set1 = df[df["stream"] == STREAMS[0]][param]
set2 = df[df["stream"] == STREAMS[1]][param]
locs1 = ax.get_children()[1].get_offsets()
locs2 = ax.get_children()[2].get_offsets()
sort_idxs1 = np.argsort(set1)
sort_idxs2 = np.argsort(set2)
locs2_sorted = locs2[sort_idxs2.argsort()][sort_idxs1]
for i in range(locs1.shape[0]):
x = [locs1[i, 0], locs2_sorted[i, 0]]
y = [locs1[i, 1], locs2_sorted[i, 1]]
ax.plot(x, y, **SUBJ_LINE_SETTINGS)
hline = hlines.get(param, None)
if hline is not None:
ax.axhline(hline, c="black", ls="--", lw=0.5)
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
ax.set_xlabel("")
sns.despine(fig)
fig.tight_layout()
return fig, axs
if do_plot:
fig, axs = plot_estim_res(df_fixed, plot_single_subj=True, param_names=param_names)
fig.suptitle("Parameter estimates based on fixed initial values", y=1.05)
# %%
# Run large set of (reasonable) initial guesses per subj to find best ones
# NOTE: Depending on how many initial guesses to try, this will take a long time to run
# ... could be sped up significantly through parallelization.
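# Optional parallelization sketch (illustrative only, not used below; assumes joblib is installed).
# Each (subject, stream, x0) fit in the loop further down is independent, so the grid of initial
# guesses could be mapped over worker processes.
def _fit_one_start(fun, x0):
    """Hypothetical helper: run one fit and return (success, loss, estimated parameters)."""
    res = minimize(
        fun=fun, x0=x0, method=minimize_method, bounds=bounds, options=minimize_method_opts
    )
    return res.success, res.fun, res.x
# Example usage (commented out; the serial loop below remains the reference implementation):
# from joblib import Parallel, delayed
# fits = Parallel(n_jobs=-1)(delayed(_fit_one_start)(fun, x0) for x0 in x0s)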
# Draw random initial values for the parameters from "reasonable" ranges
bias0s = np.arange(-4, 5) / 10
kappa0s = np.arange(0.2, 2.2, 0.2)
leakage0s = [0] # np.arange(-0.25, 1, 0.25)
noise0s = np.arange(0.1, 1.1, 0.1)
if not fname_x0s.exists() or overwrite:
x0s = list(itertools.product(bias0s, kappa0s, leakage0s, noise0s))
# Estimate parameters based on initial values for each dataset
# we save columns: sub,stream_idx,ix0,res.success,res.fun,x0,res.x
# for `sub*streams*x0s` rows
# script takes about 125ms per fit, so (125*nrows)/1000 seconds overall
nrows = len(SUBJS) * len(STREAMS) * len(x0s)
secs = (125 * nrows) / 1000
print(f"Will run for about {secs} seconds ({secs/60/60:.2f}) hours.")
x0_estimates = np.full(
(len(x0s) * len(SUBJS) * len(STREAMS), 5 + len(param_names) * 2), np.nan
)
rowcount = 0
for sub in tqdm(SUBJS):
for stream in STREAMS:
# get input values
_, tsv = get_sourcedata(sub, stream, data_dir)
df = pd.read_csv(tsv, sep="\t")
df.insert(0, "subject", sub)
X, categories, y, y_true, ambiguous = prep_model_inputs(df)
# Add non-changing arguments to function
kwargs = dict(
X=X,
categories=categories,
y=y,
return_val="G_noCP",
gain=None,
gnorm=False,
)
fun = partial(psychometric_model, **kwargs)
# Run different initial guesses
for ix0, x0 in enumerate(x0s):
res = minimize(
fun=fun,
x0=x0,
method=minimize_method,
bounds=bounds,
options=minimize_method_opts,
)
x0_estimates[rowcount, ...] = np.array(
[sub, STREAMS.index(stream), ix0, res.success, res.fun, *x0, *res.x]
)
rowcount += 1
# Save as npy
np.save(fname_x0s, x0_estimates)
else:
# load if already saved
print(f"Initial guesses x0 npy file already exists: {fname_x0s}\n\nLoading ...")
x0_estimates = np.load(fname_x0s)
# turn into DataFrame and sanitize columns
df_x0s = pd.DataFrame(
x0_estimates,
columns=[
"subject",
"stream_idx",
"ix0",
"success",
"loss",
*[i + "0" for i in param_names],
*param_names,
],
)
df_x0s = df_x0s.astype({"subject": int, "stream_idx": int, "ix0": int, "success": bool})
df_x0s["stream"] = df_x0s["stream_idx"].map(dict(zip(range(2), STREAMS)))
# drop failed estimations
nfail = np.sum(~df_x0s["success"].to_numpy())
nstartvals = len(df_x0s)
print(f"{(nfail/nstartvals)*100:.2f}% of fitting procedures failed.")
print("...selecting only successful fits")
df_x0s = df_x0s[df_x0s["success"].to_numpy()]
# Get the best fitting start values and estimates per subj and stream
df_specific = df_x0s.loc[df_x0s.groupby(["subject", "stream"])["loss"].idxmin()]
assert len(df_specific) == len(SUBJS) * len(STREAMS)
df_specific = df_specific[
["subject", "stream", "loss", *param_names, *[i + "0" for i in param_names]]
].reset_index(drop=True)
df_specific["x0_type"] = "specific"
df_specific["method"] = minimize_method
# %%
# Plot info on initial guesses
# plot distribution of "losses" per stream and subject,
# depending on start values
with sns.plotting_context("poster"):
g = sns.catplot(
kind="violin",
data=df_x0s,
x="stream",
y="loss",
col="subject",
col_wrap=6,
)
# %%
# plot distribution of best fitting initial start values
if do_plot:
fig, axs = plot_estim_res(
df_specific, plot_single_subj=True, param_names=[i + "0" for i in param_names]
)
_ = fig.suptitle(
"Best fitting initial values over subjects\n"
f"y-limits indicate ranges from which\n{df_x0s['ix0'].max()} "
"initial values were tried out per subj and stream",
y=1.15,
)
# %%
# plot distribution of estimated params based on best fitting initial start values
if do_plot:
fig, axs = plot_estim_res(
df_specific, plot_single_subj=True, param_names=param_names
)
_ = fig.suptitle("Parameter estimates based on best fitting initial values", y=1.05)
# %%
# Work on stats for estimated params ("specific")
print("Means and standard errors:\n------------------------------------")
for param in param_names:
for stream in STREAMS:
vals = df_specific[df_specific["stream"] == stream][param].to_numpy()
m = np.mean(vals)
se = scipy.stats.sem(vals)
print(f"{param},{stream} --> {m:.2f} +- {se:.2f}")
# 1-samp tests vs "mu"
print("\n\n1-samp ttests vs mu\n------------------------------------------")
use_one_sided = False
stats_params = []
_to_test = [("bias", 0), ("kappa", 1), ("leakage", 0)]
if bounds.lb[2] == 0 and bounds.ub[2] == 0:
# don't test leakage if we fixed it at 0
_ = _to_test.pop()
for param, mu in _to_test:
for stream in STREAMS:
x = df_specific[df_specific["stream"] == stream][param].to_numpy()
alt = "two-sided"
if param == "kappa" and use_one_sided:
alt = "greater" if stream == "dual" else "less"
        t, p = scipy.stats.wilcoxon(x - mu, alternative=alt)  # returns (statistic, p-value)
        print(param, stream, np.round(t, 3), np.round(p, 3))
pstats = pingouin.ttest(x, y=mu, alternative=alt)
pstats["stream"] = stream
pstats["parameter"] = param
pstats["mu"] = mu
stats_params.append(pstats)
stats_params = pd.concat(stats_params).reset_index(drop=True)
print(
"\n",
stats_params[
["T", "dof", "alternative", "p-val", "cohen-d", "stream", "parameter", "mu"]
].round(3),
)
# paired test for noise
print("\n\npaired ttests noise\n------------------------------------------")
x = df_specific[df_specific["stream"] == "single"]["noise"].to_numpy()
y = df_specific[df_specific["stream"] == "dual"]["noise"].to_numpy()
stats_paired = pingouin.ttest(x, y, paired=True)
print("\n", stats_paired.round(3))
# %%
# Concatenate fixed and specific estimates and save
df_estimates = pd.concat([df_fixed, df_specific]).reset_index(drop=True)
assert len(df_estimates) == len(SUBJS) * len(STREAMS) * 2
# Save the data
df_estimates.to_csv(fname_estimates, sep="\t", na_rep="n/a", index=False)
# %%
# Correlation between noise and kappa per stream
with sns.plotting_context("talk"):
g = sns.lmplot(
x="noise",
y="kappa",
col_order=STREAMS,
data=df_estimates,
col="stream",
row="x0_type",
)
statsouts = []
for meta, grp in df_estimates.groupby(["x0_type", "stream"]):
out = pingouin.corr(
grp["noise"], grp["kappa"], method="pearson", alternative="two-sided"
)
out["x0_type"] = meta[0]
out["stream"] = meta[1]
statsouts.append(out)
statsout = pd.concat(statsouts).reset_index(drop=True)
statsout.head()
# %%
# Compare mean loss between estimates based on fixed vs. specific start values
with sns.plotting_context("talk"):
g = sns.catplot(
kind="point", x="x0_type", y="loss", col="stream", data=df_estimates, ci=68
)
df_estimates.groupby(["stream", "x0_type"])["loss"].describe()
# %%
# Correlate parameters within subjects (single vs dual)
_data = {"x0_type": [], "parameter": [], "single": [], "dual": []}
outs = []
for x0_type in ["fixed", "specific"]:
for param in param_names:
xy_list = []
for stream in STREAMS:
xy_list += [
df_estimates[
(df_estimates["stream"] == stream)
& (df_estimates["x0_type"] == x0_type)
][param].to_numpy()
]
x, y = xy_list
out = pingouin.corr(x, y)
out["x0_type"] = x0_type
out["param"] = param
outs.append(out)
# save for plotting
_data["x0_type"] += [x0_type] * len(SUBJS)
_data["parameter"] += [param] * len(SUBJS)
_data["single"] += x.tolist()
_data["dual"] += y.tolist()
df_corrs = pd.concat(outs).reset_index(drop=True)
# plots
if do_plot:
_data = pd.DataFrame.from_dict(_data)
with sns.plotting_context("talk"):
g = sns.lmplot(
x="single",
y="dual",
col="parameter",
row="x0_type",
data=_data,
facet_kws=dict(sharex=False, sharey=False),
)
print("Within-subject correlations: Single vs Dual")
df_corrs
# %%
# Correlate behavioral modelling and neurometrics "kappa" and "bias" parameters
# do for RSA neurometrics and ERP neurometrics
for neurom_type in ["rsa", "erp"]:
if neurom_type == "rsa":
fname = fname_neurometrics
else:
assert neurom_type == "erp"
fname = fname_neurometrics_erp
if not fname.exists():
print(f"neurometrics params not found ... skipping.\n\n({fname})")
continue
else:
print(f"\n{neurom_type}\n---------\n\n")
df_neurom = pd.read_csv(fname, sep="\t")
# preprocess ERP data
if neurom_type == "erp":
df_neurom = df_neurom.drop_duplicates(["subject", "stream"]).reset_index(
drop=True
)[["subject", "stream", "bias", "kappa"]]
if f"kappa_neuro_{neurom_type}" not in df_estimates.columns:
df_estimates = df_estimates.merge(
df_neurom,
on=["subject", "stream"],
suffixes=(None, f"_neuro_{neurom_type}"),
)
_df = df_estimates[
[
"subject",
"stream",
"bias",
"kappa",
f"bias_neuro_{neurom_type}",
f"kappa_neuro_{neurom_type}",
"x0_type",
]
]
_df = _df.melt(id_vars=["subject", "stream", "x0_type"], var_name="parameter")
x0_type = "specific"
with sns.plotting_context("talk"):
fig, axs = plt.subplots(2, 2, figsize=(15, 10))
for istream, stream in enumerate(STREAMS):
for iparam, param in enumerate(["bias", "kappa"]):
ax = axs[istream, iparam]
x = _df[
(_df["stream"] == stream)
& (_df["x0_type"] == x0_type)
& (_df["parameter"] == param)
]["value"]
y = _df[
(_df["stream"] == stream)
& (_df["x0_type"] == x0_type)
& (_df["parameter"] == f"{param}_neuro_{neurom_type}")
]["value"]
assert len(x) == len(y)
assert len(x) == len(SUBJS)
ax.scatter(x, y)
m, b = np.polyfit(x, y, 1)
ax.plot(x, m * x + b, color="r")
ax.set_title(f"{stream}")
ax.set(xlabel=param, ylabel=param + f"_neuro_{neurom_type}")
print(f"{stream}: {param} ~ {param}_neuro_{neurom_type}")
# means
print(f"means: {x.mean():.2f}, {y.mean():.2f}")
# correlation
r, p = scipy.stats.pearsonr(x, y)
print(f"corr: r={r:.3f}, p={p:.3f}")
# paired t-test
_stat = pingouin.ttest(x=x, y=y, paired=True)
print(
f"paired ttest: t({_stat['dof'][0]})={_stat['T'][0]:.3f}, "
f"p={_stat['p-val'][0]:.3f}, d={_stat['cohen-d'][0]:.3f}\n"
)
sns.despine(fig)
fig.tight_layout()
# %%
# Fit all data as if from single subject
_bias0s = (0, -0.1, 0.1)
_kappa0s = (0.5, 1, 2)
_leakage0s = (0, 0.2)
_noise0s = (0.01, 0.1, 0.2)
_x0s = []
for bias0, kappa0, leakage0, noise0 in itertools.product(
_bias0s, _kappa0s, _leakage0s, _noise0s
):
_x0s.append(np.array([bias0, kappa0, leakage0, noise0]))
# Collect all data as if from "single subject" (fixed effects)
df_single_sub = []
for sub in SUBJS:
for istream, stream in enumerate(STREAMS):
_, tsv = get_sourcedata(sub, stream, data_dir)
df = | pd.read_csv(tsv, sep="\t") | pandas.read_csv |
import requests
import json
from datetime import datetime
import os
import sys
import pandas as pd
import numpy as np
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(SCRIPT_DIR))
from global_variables import config as g
ROOT_DIR = g.ROOT_DIR
processed_data_dir = g.processed_data_dir
raw_data_dir = g.raw_data_dir
def prepare_data_for_prediction(df_forecast:pd.DataFrame, data_name:str):
df_forecast.index = pd.to_datetime(df_forecast['Time'], format='%d-%m-%Y %H:%M')
df_forecast['Seconds'] = df_forecast.index.map(pd.Timestamp.timestamp)
day = 60*60*24
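    # Cyclical time-of-day encoding: sin/cos of the timestamp (period = one day) map the
    # 24 h cycle onto a circle, so 23:59 and 00:00 end up close together for the model.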
df_forecast['Day sin'] = np.sin(df_forecast['Seconds'] * (2 * np.pi / day))
df_forecast['Day cos'] = np.cos(df_forecast['Seconds'] * (2 * np.pi / day))
df_forecast = df_forecast.drop('Seconds', axis=1)
# df_forecast = df_forecast.drop('Time', axis=1)
for i in range (0, len(df_forecast.columns)):
df_forecast = normalize_column(df_forecast, i)
df_forecast.to_csv(processed_data_dir + data_name +"_predict_data.csv", index=False)
def normalize_column(df_forecast:pd.DataFrame, col:int = 1, a:int=0, b:int=1):
df = pd.read_csv(processed_data_dir + "merged.csv")
col_name = df_forecast.columns[col]
max = df[col_name].max()
min = df[col_name].min()
df_forecast[col_name] = (df_forecast[col_name] - min)/(max - min)
df_forecast[col_name] = (b-a)*df_forecast[col_name]+a
return df_forecast
api_key = "8af40bfbe568da6eecfc0b905b468c42"
lat = "55.1449" #Bornholm Latitude
lon = "14.9170" #Bornholm Longitude
url = "https://api.openweathermap.org/data/2.5/onecall?lat=%s&lon=%s&appid=%s&units=metric" % (lat, lon, api_key)
response = requests.get(url)
forecast = json.loads(response.text)
time = []
temperature = []
uvi = []
wind = []
power = []
for i in range(0, len(forecast["hourly"])):
ts = forecast["hourly"][i]["dt"]
date_time = datetime.utcfromtimestamp(ts).strftime('%d-%m-%Y %H:%M')
time.append(date_time)
temperature.append(forecast["hourly"][i]["temp"])
uvi.append(forecast["hourly"][i]["uvi"]*100)
wind.append(forecast["hourly"][i]["wind_speed"])
power.append(0)
df_pv_forecast = pd.DataFrame(data={"Time":time, "Temperature":temperature, "PV power":power, "Solar radiation":uvi})
df_wp_forecast = pd.DataFrame(data={"Time":time, "Temperature":temperature, "Wind power":power, "Wind speed":wind,})
df_pv_forecast.index = | pd.to_datetime(df_pv_forecast['Time'], format='%d-%m-%Y %H:%M') | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 17 14:47:03 2019
@author: olivergiesecke
"""
###############################################################################
### Import packages
import pandas as pd
import re
import os
from io import StringIO
import numpy as np
import matplotlib.pyplot as plt
###############################################################################
### Open the csv
data=pd.read_csv("../output/statements_text_extraction_cleaned.csv")
# Data stored in data['statement'] and data['policy_treatment']
### Clean Data
data=data[data['policy_treatment'].notna()][['statement','policy_treatment']]
print(len(data))
# 129 entries
## Create categorical variable for policy treatment
data['treatment_id'] = data['policy_treatment'].factorize()[0]
category_id_df = data[['policy_treatment', 'treatment_id']].drop_duplicates().sort_values('treatment_id')
category_to_id = dict(category_id_df.values)
id_to_category = dict(category_id_df[['treatment_id','policy_treatment']].values)
# Some sum stats
fig = plt.figure(figsize=(8,6))
data.groupby('policy_treatment')['statement'].count().plot.bar(ylim=0)
plt.show()
pd.pivot_table( data,index='policy_treatment',aggfunc=np.count_nonzero )
### Preprocessing of the text
## Get TF-IDF with the following options
# sublinear_tf=True: logarithmic form for term frequency.
# min_df is the minimum number of documents a word must be present in to be kept.
# norm=l2, to ensure all our feature vectors have a Euclidean norm of 1.
# ngram_range is set to (1, 2) to indicate that we want to consider both unigrams and bigrams.
# stop_words is set to "english" to remove all common pronouns ("a", "the", ...) to reduce the number of noisy features.
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(sublinear_tf=True, min_df=5,norm='l2', encoding='latin-1', ngram_range=(1, 2),stop_words='english')
features = tfidf.fit_transform(data['statement']).toarray()
labels = data['treatment_id']
# yields 129 sentences with 2586 tokens
features.shape
# =============================================================================
# from sklearn.feature_extraction.text import CountVectorizer
# tf=CountVectorizer( min_df=3, encoding='latin-1', ngram_range=(1, 2), stop_words='english')
# features = tf.fit_transform(data['statement']).toarray()
# labels = data['treatment_id']
# features.shape
# =============================================================================
## Get most frequent unigrams and bigrams with each outcome
from sklearn.feature_selection import chi2
import numpy as np
N = 5
for Product, category_id in sorted(category_to_id.items()):
features_chi2 = chi2(features, labels == category_id)
indices = np.argsort(features_chi2[0])
feature_names = np.array(tfidf.get_feature_names())[indices]
unigrams = [v for v in feature_names if len(v.split(' ')) == 1]
bigrams = [v for v in feature_names if len(v.split(' ')) == 2]
print("# '{}':".format(Product))
print(" . Most correlated unigrams:\n. {}".format('\n. '.join(unigrams[-N:])))
print(" . Most correlated bigrams:\n. {}".format('\n. '.join(bigrams[-N:])))
# Get preditions
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
X_train, X_test, y_train, y_test = train_test_split(data['statement'], data['policy_treatment'], random_state = 0)
count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(X_train)
tfidf_transformer = TfidfTransformer()
X_train_tfidf = tfidf_transformer.fit_transform(X_train_counts)
clf = MultinomialNB().fit(X_train_tfidf, y_train)
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
models = [
RandomForestClassifier(n_estimators=200, max_depth=3, random_state=0),
LinearSVC(),
MultinomialNB(),
LogisticRegression(random_state=0),
]
CV = 5
cv_df = pd.DataFrame(index=range(CV * len(models)))
entries = []
for model in models:
model_name = model.__class__.__name__
accuracies = cross_val_score(model, features, labels, scoring='accuracy', cv=CV)
for fold_idx, accuracy in enumerate(accuracies):
entries.append((model_name, fold_idx, accuracy))
cv_df = pd.DataFrame(entries, columns=['model_name', 'fold_idx', 'accuracy'])
import seaborn as sns
sns.boxplot(x='model_name', y='accuracy', data=cv_df)
sns.stripplot(x='model_name', y='accuracy', data=cv_df,
size=8, jitter=True, edgecolor="gray", linewidth=2)
plt.show()
cv_df.groupby('model_name').accuracy.mean()
model = RandomForestClassifier()
X_train, X_test, y_train, y_test, indices_train, indices_test = train_test_split(features, labels, data.index, test_size=0.33, random_state=0)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
from sklearn.metrics import confusion_matrix
conf_mat = confusion_matrix(y_test, y_pred)
fig, ax = plt.subplots(figsize=(10,10))
sns.heatmap(conf_mat, annot=True, fmt='d',
xticklabels=category_id_df['policy_treatment'], yticklabels=category_id_df['policy_treatment'].values)
plt.ylabel('Actual')
plt.xlabel('Predicted')
plt.show()
for predicted in category_id_df['treatment_id']:
for actual in category_id_df['treatment_id']:
if predicted != actual and conf_mat[actual, predicted] >= 1:
print("'{}' predicted as '{}' : {} examples.".format(id_to_category[actual], id_to_category[predicted], conf_mat[actual, predicted]))
print(data.loc[indices_test[(y_test == actual) & (y_pred == predicted)]][['policy_treatment', 'statement']].values)
from sklearn import metrics
print(metrics.classification_report(y_test, y_pred, target_names=data['policy_treatment'].unique()))
### Import data by calling function in other file
from bluebook_alternative_extraction_and_classification import getdata_bluebook
#df_output=getdata_bluebook()
df_output['year']=pd.to_numeric(df_output['meeting_date'].str[:4])
df_output['date']= | pd.to_datetime(df_output['meeting_date']) | pandas.to_datetime |
"""
Module to perform recursive feature elimination
Author: <NAME>
Email: <EMAIL>
"""
import os
import pandas as pd
import joblib
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.feature_selection import RFECV
from sklearn import metrics
from lightgbm.sklearn import LGBMClassifier, LGBMRegressor
from xgboost import XGBClassifier, XGBRegressor
class recursive_feature_elimination():
"""
Class to perform recursive feature elimination
args:
(1) path_to_file (type:str) - location of the data file with features
(2) path_to_save (type:str) - location to save new data files
(3) path_to_features (type:str) - location of the features to use (e.g. those with multicollinearity reduced)
(4) problem (type:str) - whether it is a 'classification' or 'regression' problem
return:
(1) list of features obtained by applying RFE
"""
def __init__(self, path_to_file, path_to_save, path_to_features, problem, *args, **kwargs):
self.path_to_save = path_to_save
self.sample_train = joblib.load(path_to_file)
self.refined_features = joblib.load(path_to_features)
# Last column taken as the target variable or classes
self.features = self.sample_train.columns.values[:-1]
self.target = self.sample_train.columns.values[-1]
self.problem = problem
def base_model(self, boosting_method):
"""
Select the baseline model
Note:
For classification, multi-class models are defined as shown below
This can be changed into a binary problem by changing the 'objective' to 'binary' for LGBMClassifier, or to 'binary:logistic' or 'binary:logitraw' for XGBClassifier (see description in links below)
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMClassifier.html
https://xgboost.readthedocs.io/en/latest/parameter.html
https://lightgbm.readthedocs.io/en/latest/pythonapi/lightgbm.LGBMRegressor.html
args:
(1) boosting_method (type:str) - either 'lightGBM' or 'XGBoost'
return:
(1)baseline model
"""
if self.problem == 'classification':
if boosting_method == 'lightGBM':
self.estimator = LGBMClassifier(
boosting_type='gbdt',
objective='multiclass',
importance_type='gain',
max_depth=-1
)
elif boosting_method == 'XGBoost':
self.estimator = XGBClassifier(
objective='multi:softprob',
booster='gbtree',
importance_type='total_gain'
)
elif self.problem == 'regression':
if boosting_method == 'lightGBM':
self.estimator = LGBMRegressor(
boosting_type ='gbdt',
importance_type='gain',
max_depth=-1
)
elif boosting_method == 'XGBoost':
            self.estimator = XGBRegressor(
objective='reg:squarederror',
booster='gbtree',
random_state=42,
importance_type='total_gain'
)
return self.estimator
def perform(self):
"""
Perform RFE
"""
# Define metric to use
if self.problem == 'classification':
self.scoring = 'f1_weighted'
elif self.problem == 'regression':
self.scoring = 'neg_root_mean_squared_error'
# Define step and cv to apply to RFECV
self.min_features_to_select = 1
step = 1
cv = 10
self.selector = RFECV(
self.estimator,
min_features_to_select = self.min_features_to_select,
scoring = self.scoring,
step = step,
cv = cv,
verbose = 1
)
# Fit to training data
self.selector = self.selector.fit(
self.sample_train[self.refined_features],
self.sample_train[self.target].values.ravel()
)
# Create panda.Dataframe with newly selected features
RFECV_importance = self.selector.ranking_
RFECV_features = | pd.DataFrame({'features': self.refined_features, 'importance_score':RFECV_importance}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import re
import numpy as np
import pytest
from pandas.core.dtypes.common import (
is_bool_dtype, is_categorical, is_categorical_dtype,
is_datetime64_any_dtype, is_datetime64_dtype, is_datetime64_ns_dtype,
is_datetime64tz_dtype, is_datetimetz, is_dtype_equal, is_interval_dtype,
is_period, is_period_dtype, is_string_dtype)
from pandas.core.dtypes.dtypes import (
CategoricalDtype, DatetimeTZDtype, IntervalDtype, PeriodDtype, registry)
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, IntervalIndex, Series, date_range)
from pandas.core.sparse.api import SparseDtype
import pandas.util.testing as tm
@pytest.fixture(params=[True, False, None])
def ordered(request):
return request.param
class Base(object):
def setup_method(self, method):
self.dtype = self.create()
def test_hash(self):
hash(self.dtype)
def test_equality_invalid(self):
assert not self.dtype == 'foo'
assert not is_dtype_equal(self.dtype, np.int64)
def test_numpy_informed(self):
pytest.raises(TypeError, np.dtype, self.dtype)
assert not self.dtype == np.str_
assert not np.str_ == self.dtype
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert not len(self.dtype._cache)
assert result == self.dtype
class TestCategoricalDtype(Base):
def create(self):
return CategoricalDtype()
def test_pickle(self):
# make sure our cache is NOT pickled
# clear the cache
type(self.dtype).reset_cache()
assert not len(self.dtype._cache)
# force back to the cache
result = tm.round_trip_pickle(self.dtype)
assert result == self.dtype
def test_hash_vs_equality(self):
dtype = self.dtype
dtype2 = CategoricalDtype()
assert dtype == dtype2
assert dtype2 == dtype
assert hash(dtype) == hash(dtype2)
def test_equality(self):
assert is_dtype_equal(self.dtype, 'category')
assert is_dtype_equal(self.dtype, CategoricalDtype())
assert not is_dtype_equal(self.dtype, 'foo')
def test_construction_from_string(self):
result = CategoricalDtype.construct_from_string('category')
assert is_dtype_equal(self.dtype, result)
pytest.raises(
TypeError, lambda: CategoricalDtype.construct_from_string('foo'))
def test_constructor_invalid(self):
msg = "Parameter 'categories' must be list-like"
with pytest.raises(TypeError, match=msg):
CategoricalDtype("category")
dtype1 = CategoricalDtype(['a', 'b'], ordered=True)
dtype2 = CategoricalDtype(['x', 'y'], ordered=False)
c = Categorical([0, 1], dtype=dtype1, fastpath=True)
@pytest.mark.parametrize('values, categories, ordered, dtype, expected',
[
[None, None, None, None,
CategoricalDtype()],
[None, ['a', 'b'], True, None, dtype1],
[c, None, None, dtype2, dtype2],
[c, ['x', 'y'], False, None, dtype2],
])
def test_from_values_or_dtype(
self, values, categories, ordered, dtype, expected):
result = CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
assert result == expected
@pytest.mark.parametrize('values, categories, ordered, dtype', [
[None, ['a', 'b'], True, dtype2],
[None, ['a', 'b'], None, dtype2],
[None, None, True, dtype2],
])
def test_from_values_or_dtype_raises(self, values, categories,
ordered, dtype):
msg = "Cannot specify `categories` or `ordered` together with `dtype`."
with pytest.raises(ValueError, match=msg):
CategoricalDtype._from_values_or_dtype(values, categories,
ordered, dtype)
def test_is_dtype(self):
assert CategoricalDtype.is_dtype(self.dtype)
assert CategoricalDtype.is_dtype('category')
assert CategoricalDtype.is_dtype(CategoricalDtype())
assert not CategoricalDtype.is_dtype('foo')
assert not CategoricalDtype.is_dtype(np.float64)
def test_basic(self):
assert is_categorical_dtype(self.dtype)
factor = Categorical(['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c'])
s = Series(factor, name='A')
# dtypes
assert is_categorical_dtype(s.dtype)
assert is_categorical_dtype(s)
assert not is_categorical_dtype(np.dtype('float64'))
assert is_categorical(s.dtype)
assert is_categorical(s)
assert not is_categorical(np.dtype('float64'))
assert not is_categorical(1.0)
def test_tuple_categories(self):
categories = [(1, 'a'), (2, 'b'), (3, 'c')]
result = CategoricalDtype(categories)
assert all(result.categories == categories)
@pytest.mark.parametrize("categories, expected", [
([True, False], True),
([True, False, None], True),
([True, False, "a", "b'"], False),
([0, 1], False),
])
def test_is_boolean(self, categories, expected):
cat = Categorical(categories)
assert cat.dtype._is_boolean is expected
assert is_bool_dtype(cat) is expected
assert is_bool_dtype(cat.dtype) is expected
class TestDatetimeTZDtype(Base):
def create(self):
return DatetimeTZDtype('ns', 'US/Eastern')
def test_alias_to_unit_raises(self):
# 23990
with tm.assert_produces_warning(FutureWarning):
DatetimeTZDtype('datetime64[ns, US/Central]')
def test_alias_to_unit_bad_alias_raises(self):
# 23990
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('this is a bad string')
with pytest.raises(TypeError, match=''):
DatetimeTZDtype('datetime64[ns, US/NotATZ]')
def test_hash_vs_equality(self):
# make sure that we satisfy is semantics
dtype = self.dtype
dtype2 = DatetimeTZDtype('ns', 'US/Eastern')
dtype3 = DatetimeTZDtype(dtype2)
assert dtype == dtype2
assert dtype2 == dtype
assert dtype3 == dtype
assert hash(dtype) == hash(dtype2)
assert hash(dtype) == hash(dtype3)
dtype4 = DatetimeTZDtype("ns", "US/Central")
assert dtype2 != dtype4
assert hash(dtype2) != hash(dtype4)
def test_construction(self):
pytest.raises(ValueError,
lambda: DatetimeTZDtype('ms', 'US/Eastern'))
def test_subclass(self):
a = DatetimeTZDtype.construct_from_string('datetime64[ns, US/Eastern]')
b = DatetimeTZDtype.construct_from_string('datetime64[ns, CET]')
assert issubclass(type(a), type(a))
assert issubclass(type(a), type(b))
def test_compat(self):
assert is_datetime64tz_dtype(self.dtype)
assert is_datetime64tz_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_any_dtype(self.dtype)
assert is_datetime64_any_dtype('datetime64[ns, US/Eastern]')
assert is_datetime64_ns_dtype(self.dtype)
assert is_datetime64_ns_dtype('datetime64[ns, US/Eastern]')
assert not is_datetime64_dtype(self.dtype)
assert not | is_datetime64_dtype('datetime64[ns, US/Eastern]') | pandas.core.dtypes.common.is_datetime64_dtype |
#!/usr/bin/python
# extract gtf-like annotations and intersect gene names
import argparse
import os
import re
import subprocess as sp
from time import time
import warnings
import matplotlib.pyplot as plt
# from numba import njit, prange, set_num_threads
import pandas as pd
from tqdm import tqdm
from upsetplot import from_contents, UpSet
def _argument_parser():
timestamp = str(int(time()))
parser = argparse.ArgumentParser(description=
"""
Compare and contrast the attributes of multiple differentially
expressed gene lists. No input validation is performed!
Usage: python compare_toptables.py </path/to/original.tsv>
</path/to/data.tsv> ... -o [/path/to/out.pdf]
"""
)
parser.add_argument("reference_path", type=str,
help="Provide path to reference toptables file. \
Must have gene name or identifier as column or index!")
parser.add_argument("infile_paths", type=str, nargs="+",
help="Provide path to other toptables files. \
Must have gtf-like attribute annotation fields!")
parser.add_argument("-p", "--plot_outfile", type=str,
default=".".join(["upset", timestamp, "pdf"]),
help="Provide path to output image file [eps/pdf/png].")
parser.add_argument("-s", "--sets_outfile", type=str,
default=".".join(["upset", timestamp, "tsv"]),
help="Provide path to output set membership files.")
# parser.add_argument("-n", "--ncpus", type=int, default=1,
# help="Specify number of cpus for parallelising \
# operations. (default 1)")
parser.add_argument("-t", "--threshold", type=float, default=0.05,
help="Filter on adjusted pval threshold.")
return parser.parse_args()
def check_parallel():
args = _argument_parser()
infile_paths = args.infile_paths
if args.ncpus > 1:
do_parallel = True
else:
do_parallel = False
return args.ncpus, do_parallel
# ncpus, do_parallel = check_parallel()
# set_num_threads(ncpus)
def load_filter_regions(infile_path: str, filter_col: str="adj.P.Val",
filter_val: float=0.05):
"""
    Load and filter regions of interest only; by default keeps rows with adjusted p-values <= 0.05
Arguments:
        (REQUIRED) infile_path: path to the TSV file containing the regions of interest
(OPTIONAL) filter_col: column to filter on
(OPTIONAL) filter_val: value in column to filter on
"""
regions = pd.read_csv(infile_path, sep="\t")
return regions[regions[filter_col] <= filter_val]
def extract_genes(data: pd.DataFrame, colname: str=None) -> list:
"""
Take dataframe as input, extract feature names
Arguments:
(REQUIRED) data: path to input dataframe
(OPTIONAL) colname: column name, if absent uses index
"""
if not colname:
return | pd.Series(data.index) | pandas.Series |
import os, functools
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import skfuzzy as fuzz
from kneed import KneeLocator
from sklearn.decomposition import PCA
from GEN_Utils import FileHandling
from loguru import logger
logger.info("Import ok")
def multiple_PCAs(test_dict):
"""test_dict: dict mapping str(data_type): (df, sample_cols)"""
pcas = {}
for data_type, (data, sample_cols) in test_dict.items():
for_PCA = data[sample_cols].copy().fillna(0)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(for_PCA.values)
principalDf = pd.DataFrame(data = principalComponents, columns = ['PC1', 'PC2'])
principalDf.index = data['Sequence']
pcas[data_type] = principalDf
logger.info(f'{data_type}: {len(principalDf)}')
# visualise the PCA
fig = plt.figure(figsize = (8,8))
ax = fig.add_subplot(1,1,1)
ax.set_xlabel('Principal Component 1', fontsize = 15)
ax.set_ylabel('Principal Component 2', fontsize = 15)
ax.set_title(data_type, fontsize = 20)
ax.scatter(principalDf['PC1'] , principalDf['PC2'], s = 50)
# plt.savefig(f'{output_folder}{data_type}_PCA.png')
return pcas
def fuzzy_clustering(data_type, data, sample_cols, max_clusters=10):
alldata = data[sample_cols].fillna(0).T.values
fpcs = []
cluster_membership = {}
cluster_score = {}
cluster_parameters = {}
for ncenters in range(2, max_clusters):
cntr, u, u0, d, jm, p, fpc = fuzz.cluster.cmeans(
alldata, ncenters, 4, error=0.005, maxiter=1000, init=None)
cluster_parameters[ncenters] = [cntr, u, u0, d, jm, p, fpc]
cluster_membership[ncenters] = np.argmax(u, axis=0)
cluster_score[ncenters] = np.max(u, axis=0)
# Store fpc values for later
fpcs.append(fpc)
# clean cluster parameters
parameters = pd.DataFrame(cluster_parameters)
parameters.index = ['cntr', 'u', 'u0', 'd', 'jm', 'p', 'fpc']
# clean cluster data
membership = pd.DataFrame(cluster_membership)
membership.index = data.index.tolist()
membership.columns = [f'member_{col}' for col in membership.columns.tolist()]
score = pd.DataFrame(cluster_score)
score.index = data.index.tolist()
score.columns = [f'score_{col}' for col in score.columns.tolist()]
# Generate merged cluster info
cluster_membership = pd.merge(score, membership, left_index=True, right_index=True)
clustered = pd.merge(cluster_membership, data, left_index=True, right_index=True)
fig2, ax2 = plt.subplots()
ax2.plot(np.arange(2, max_clusters), fpcs)
ax2.set_xlabel("Number of centers")
ax2.set_ylabel("Fuzzy partition coefficient")
plt.title(data_type)
plt.show()
return [cluster_membership, clustered, fpcs, parameters]
def fuzzy_prediction(data_type, data, sample_cols, cntr):
alldata = data[sample_cols].fillna(0).T.values
u, u0, d, jm, p, fpc = fuzz.cluster.cmeans_predict(alldata, cntr, 4, error=0.005, maxiter=1000)
# clean cluster parameters
parameters = pd.DataFrame([cntr, u, u0, d, jm, p, fpc])
parameters.index = ['cntr', 'u', 'u0', 'd', 'jm', 'p', 'fpc']
# clean cluster data
membership = pd.DataFrame(np.argmax(u, axis=0))
membership.index = data.index.tolist()
membership.columns = ['member_predicted']
score = pd.DataFrame(np.max(u, axis=0))
score.index = data.index.tolist()
score.columns = ['score_predicted']
# Generate merged cluster info
cluster_membership = pd.merge(score, membership, left_index=True, right_index=True)
return pd.merge(cluster_membership, data, left_index=True, right_index=True)
def multiple_fuzzy_clustering(test_dict, max_clusters=20):
"""test_dict: dict mapping str(data_type): (df, sample_cols)"""
clustering = {}
for data_type, (data, sample_cols) in test_dict.items():
clustering[data_type] = fuzzy_clustering(data_type, data, sample_cols, max_clusters)
return clustering
def clustered_pca(clustering, pcas, visualise, save=False, output_path=None, palette=False):
"""visualise: dict mapping str(data_type): (cluster_number, sample_cols)"""
if not palette:
palette = 'Set1'
for data_type, (clusters, cols) in visualise.items():
pca_data = pcas[data_type].reset_index()
clustered = clustering[data_type][0]
clustered = clustered[[col for col in clustered.columns.tolist() if f'_{clusters}' in col]]
for_plotting = | pd.merge(clustered, pca_data, left_index=True, right_index=True) | pandas.merge |
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import scipy
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
df = | pd.read_csv('german_data-numeric',delim_whitespace=True,header=None) | pandas.read_csv |
'''
Code written by <NAME> (August 2019)
(415)-845-2118
DESCRIPTION: Takes dataframe with list of epitopes
and UniProt Protein Names (Obtained from SwissProt);
Runs query on db2db to obtain matching gene name
for each entry in the dataframe.
'''
###IMPORT AND CLEAN-UP UNIPROT PROTEIN NAMES FOR QUERY
import pandas as pd
import urllib.request, json
import sys
import os
#Import pipeline directory path and set working directoy
run_path=str(sys.argv[1])
os.chdir(run_path)
#Reads in dataframe with Epitope and UniProt Protein Name
df = pd.read_csv(r'results/uniprot_names.tsv', header=None, delimiter="\t")
#Appends column names to dataframe
df.columns=['Epitope', 'UniProt_Name']
#Makes Protein Names into list of unique names (removes duplicate search terms to speed up query)
uniprot=list(df.UniProt_Name.unique())
'''
Removes all unmatched (NaN) values from list
Converts non-alphanumeric characters in UniProt Name
to their hexcode equivalent so it is URL compatible
'''
cleanedList = [urllib.parse.quote(x) for x in uniprot if str(x) != 'nan']
'''
Can only query a certain max amount of entries at a time
Divides list of protein names into sublists for iteration
'''
uniprotList=[]
while (len(cleanedList)>50):
subList=cleanedList[0:50]
cleanedList=cleanedList[50:]
uniprotList.append(list(subList))
else:
uniprotList.append(list(cleanedList))
#Creates empty pandas dataframe for query results to be loaded into
sub_df=pd.DataFrame()
###db2db QUERY (Iterates through each SubList)
for uniprotSubList in uniprotList:
#Formats UniProt Names list so it is compliant with db2db API Query
uniprotNames=','.join(uniprotSubList)
#Parameters for db2db Query (Change if necessary)
method='db2db'
format_type='row'
input_type='uniprotproteinname'
inputValues=uniprotNames
outputs='genesymbol'
taxonId='9606'
json_url = "https://biodbnet-abcc.ncifcrf.gov/webServices/rest.php/biodbnetRestApi.json?method{method}&format={format_type}&input={input_type}&inputValues={inputValues}&outputs={outputs}&taxonId={taxonId}".format(method=method, format_type=format_type, input_type=input_type, inputValues=inputValues, outputs=outputs, taxonId=taxonId)
#Results imported as JSON
with urllib.request.urlopen(json_url) as url:
data = json.loads(url.read().decode())
###POST-PROCESSING AND CLEAN-UP OF QUERY RESULTS
#Converts JSON to Pandas Dataframe
json_df = | pd.io.json.json_normalize(data) | pandas.io.json.json_normalize |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 23:24:11 2021
@author: rayin
"""
import os, sys
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
import re
import random
from collections import Counter
from pprint import pprint
os.chdir("/Users/rayin/Google Drive/Harvard/5_data/UDN/work")
case_gene_update = pd.read_csv("data/processed/variant_clean.csv", index_col=0)
aa_variant = list(case_gene_update['\\12_Candidate variants\\09 Protein\\'])
#pd.DataFrame(aa_variant).to_csv('aa_variant.csv')
#aa_variant_update = pd.read_csv("data/processed/aa_variant_update.csv", index_col=0)
#aa_variant_update = list(aa_variant_update['\\12_Candidate variants\\09 Protein\\'])
amino_acid = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K', 'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W', 'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', 'TER': 'X'}
aa_3 = []
aa_1 = []
for i in amino_acid.keys():
aa_3.append(i)
aa_1.append(amino_acid[i])
for i in range(0, len(aa_variant)):
for j in range(len(aa_3)):
if isinstance(aa_variant[i], float):
break
aa_variant[i] = str(aa_variant[i].upper())
if aa_3[j] in aa_variant[i]:
aa_variant[i] = aa_variant[i].replace(aa_3[j], aa_1[j])
#extracting aa properties from aaindex
#https://www.genome.jp/aaindex/
aa = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I', 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']
#RADA880108
polarity = [-0.06, -0.84, -0.48, -0.80, 1.36, -0.73, -0.77, -0.41, 0.49, 1.31, 1.21, -1.18, 1.27, 1.27, 0.0, -0.50, -0.27, 0.88, 0.33, 1.09]
aa_polarity = pd.concat([pd.Series(aa), pd.Series(polarity)], axis=1)
aa_polarity = aa_polarity.rename(columns={0:'amino_acid', 1: 'polarity_value'})
#KLEP840101
net_charge = [0, 1, 0, -1, 0, 0, -1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
aa_net_charge = pd.concat([pd.Series(aa), pd.Series(net_charge)], axis=1)
aa_net_charge = aa_net_charge.rename(columns={0:'amino_acid', 1: 'net_charge_value'})
#CIDH920103
hydrophobicity = [0.36, -0.52, -0.90, -1.09, 0.70, -1.05, -0.83, -0.82, 0.16, 2.17, 1.18, -0.56, 1.21, 1.01, -0.06, -0.60, -1.20, 1.31, 1.05, 1.21]
aa_hydrophobicity = pd.concat([pd.Series(aa), pd.Series(hydrophobicity)], axis=1)
aa_hydrophobicity = aa_hydrophobicity.rename(columns={0:'amino_acid', 1: 'hydrophobicity_value'})
#FAUJ880103 -- Normalized van der Waals volume
normalized_vdw = [1.00, 6.13, 2.95, 2.78, 2.43, 3.95, 3.78, 0.00, 4.66, 4.00, 4.00, 4.77, 4.43, 5.89, 2.72, 1.60, 2.60, 8.08, 6.47, 3.00]
aa_normalized_vdw = pd.concat([pd.Series(aa), pd.Series(normalized_vdw)], axis=1)
aa_normalized_vdw = aa_normalized_vdw.rename(columns={0:'amino_acid', 1: 'normalized_vdw_value'})
#CHAM820101
polarizability = [0.046, 0.291, 0.134, 0.105, 0.128, 0.180, 0.151, 0.000, 0.230, 0.186, 0.186, 0.219, 0.221, 0.290, 0.131, 0.062, 0.108, 0.409, 0.298, 0.140]
aa_polarizability = pd.concat([pd.Series(aa), pd.Series(polarizability)], axis=1)
aa_polarizability = aa_polarizability.rename(columns={0:'amino_acid', 1: 'polarizability_value'})
#JOND750102
pK_COOH = [2.34, 1.18, 2.02, 2.01, 1.65, 2.17, 2.19, 2.34, 1.82, 2.36, 2.36, 2.18, 2.28, 1.83, 1.99, 2.21, 2.10, 2.38, 2.20, 2.32]
aa_pK_COOH = pd.concat([pd.Series(aa), pd.Series(pK_COOH)], axis=1)
aa_pK_COOH = aa_pK_COOH.rename(columns={0:'amino_acid', 1: 'pK_COOH_value'})
#FASG760104
pK_NH2 = [9.69, 8.99, 8.80, 9.60, 8.35, 9.13, 9.67, 9.78, 9.17, 9.68, 9.60, 9.18, 9.21, 9.18, 10.64, 9.21, 9.10, 9.44, 9.11, 9.62]
aa_pK_NH2 = pd.concat([pd.Series(aa), pd.Series(pK_NH2)], axis=1)
aa_pK_NH2 = aa_pK_NH2.rename(columns={0:'amino_acid', 1: 'pK_NH2_value'})
#ROBB790101 Hydration free energy
hydration = [-1.0, 0.3, -0.7, -1.2, 2.1, -0.1, -0.7, 0.3, 1.1, 4.0, 2.0, -0.9, 1.8, 2.8, 0.4, -1.2, -0.5, 3.0, 2.1, 1.4]
aa_hydration = pd.concat([pd.Series(aa), pd.Series(hydration)], axis=1)
aa_hydration = aa_hydration.rename(columns={0:'amino_acid', 1: 'hydration_value'})
#FASG760101
molecular_weight = [89.09, 174.20, 132.12, 133.10, 121.15, 146.15, 147.13, 75.07, 155.16, 131.17, 131.17, 146.19, 149.21, 165.19,
115.13, 105.09, 119.12, 204.24, 181.19, 117.15]
aa_molecular_weight = pd.concat([pd.Series(aa), pd.Series(molecular_weight)], axis=1)
aa_molecular_weight = aa_molecular_weight.rename(columns={0:'amino_acid', 1: 'molecular_weight_value'})
#FASG760103
optical_rotation = [1.80, 12.50, -5.60, 5.05, -16.50, 6.30, 12.00, 0.00, -38.50, 12.40, -11.00, 14.60, -10.00, -34.50, -86.20,
-7.50, -28.00, -33.70, -10.00, 5.63]
aa_optical_rotation = pd.concat([pd.Series(aa), pd.Series(optical_rotation)], axis=1)
aa_optical_rotation = aa_optical_rotation.rename(columns={0:'amino_acid', 1: 'optical_rotation_value'})
#secondary structure #LEVJ860101
#https://pybiomed.readthedocs.io/en/latest/_modules/CTD.html#CalculateCompositionSolventAccessibility
#SecondaryStr = {'1': 'EALMQKRH', '2': 'VIYCWFT', '3': 'GNPSD'}
# '1'stand for Helix; '2'stand for Strand, '3' stand for coil
secondary_structure = [1, 1, 3, 3, 2, 1, 1, 3, 1, 2, 1, 1, 1, 2, 3, 3, 2, 2, 2, 2]
aa_secondary_structure = pd.concat([pd.Series(aa), pd.Series(secondary_structure)], axis=1)
aa_secondary_structure = aa_secondary_structure.rename(columns={0:'amino_acid', 1: 'secondary_structure_value'})
#_SolventAccessibility = {'-1': 'ALFCGIVW', '1': 'RKQEND', '0': 'MPSTHY'}
# '-1'stand for Buried; '1'stand for Exposed, '0' stand for Intermediate
solvent_accessibility = [-1, 1, 1, 1, -1, 1, 1, -1, 0, -1, -1, 1, 0, -1, 0, 0, 0, -1, 0, -1]
aa_solvent_accessibility = pd.concat([ | pd.Series(aa) | pandas.Series |
from concurrent.futures import ProcessPoolExecutor, as_completed
from itertools import combinations
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
from networkx.algorithms.centrality import edge_betweenness_centrality
from numpy import log
from scipy.special import betaln
from .dendrogram import extract_all_nodes
from ALLCools.plot.dendro import *
def linkage_to_graph(linkage):
"""Turn the linkage matrix into a graph, an epimutation will just be remove one edge from the graph"""
_linkage = linkage.astype(int)
n_leaf = _linkage.shape[0] + 1
edges = []
for i in range(_linkage.shape[0]):
cur_node = i + n_leaf
left, right, *_ = _linkage.iloc[i]
edges.append([left, cur_node])
edges.append([right, cur_node])
g = nx.Graph()
g.add_edges_from(edges)
return g
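# Toy example (assumed 3-leaf linkage, not project data): row 0 merges leaves 0 and 1 into
# internal node 3, row 1 merges node 3 with leaf 2 into node 4, giving edges
# (0, 3), (1, 3), (3, 4), (2, 4) -- internal nodes are numbered upward from n_leaf.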
def cut_by_highest_betweenness_centrality(g):
# order graph node by betweenness_centrality
highest_centrality_edge = pd.Series(edge_betweenness_centrality(g)).sort_values(ascending=False).index[0]
_g = g.copy()
_g.remove_edge(*highest_centrality_edge)
left_tree, right_tree = nx.connected_component_subgraphs(_g)
return left_tree, right_tree, highest_centrality_edge
def log_proba_beta_binomial(x, n, a, b):
"""log likelihood for the beta-binomial dist, ignore part not related to a and b."""
like = betaln((a + x), (b + n - x)) - betaln(a, b)
# when a or b has 0, like will have nan
return like.fillna(0)
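# Full beta-binomial log-pmf: ln P(x | n, a, b) = ln C(n, x) + ln B(x + a, n - x + b) - ln B(a, b).
# The ln C(n, x) term does not depend on (a, b), so it is dropped above; comparisons of
# likelihoods across candidate mutation patterns are unaffected.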
def parse_one_pattern(tree_g, edges_to_remove, mc_df, cov_df):
"""
for a particular epimutation combination (edges_to_remove),
calculate the a and b for beta-binomial dist in each leaf node group.
after removing the edges (epimutations),
the leaf node group are leaf nodes in each of the disconnected sub graph.
"""
group_mc_df = mc_df.copy()
group_un_mc_df = cov_df - group_mc_df
sub_g = tree_g.copy()
if len(edges_to_remove) > 0: # this is the case of adding empty edge by left-right combine
sub_g.remove_edges_from(edges_to_remove)
# get disconnected sub-graphs
sub_tree = nx.connected_component_subgraphs(sub_g)
# for each sub-graph, add up the mc and un-mc of all leaf nodes for group a, b in beta-binomial dist
for _tree in sub_tree:
judge = group_mc_df.columns.isin(_tree.nodes)
if judge.sum() == 0:
# if sub-graph do not have leaf nodes, skip this sub-graph
continue
group_mc_df.loc[:, judge] = group_mc_df.loc[:, judge].sum(
axis=1).values[:, None]
group_un_mc_df.loc[:, judge] = group_un_mc_df.loc[:, judge].sum(
axis=1).values[:, None]
# group_mc_df is a, group_un_mc_df is b for beta-binomial dist
# each group of leaf nodes share same a, b
return group_mc_df, group_un_mc_df
def mutation_likelihood(n_mutation, p_mutation, n_edges):
lp0 = n_mutation * log(p_mutation) + \
(n_edges - n_mutation) * log(1 - p_mutation)
return lp0
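# Bernoulli prior over branches: a specific set of k mutated edges has probability
# p_mutation**k * (1 - p_mutation)**(n_edges - k), whose log is exactly lp0 above.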
def _max_likelihood_tree_worker(tree_g, mc_df, cov_df, max_mutation=2, p_mutation=0.1, sub_tree_cutoff=12):
top_n = 1
n_edges = len(tree_g.edges)
max_mutation = min(n_edges, max_mutation)
record_names = mc_df.index
if n_edges > sub_tree_cutoff:
# cut the tree into left and right in the edge that has biggest betweenness_centrality
# calculate best patterns for left and right separately, and then joint consider the overall pattern
left_tree, right_tree, removed_edge = cut_by_highest_betweenness_centrality(tree_g)
left_best_patterns, _ = _max_likelihood_tree_worker(
left_tree,
mc_df=mc_df.loc[:, mc_df.columns.isin(left_tree.nodes)],
cov_df=cov_df.loc[:, cov_df.columns.isin(left_tree.nodes)],
max_mutation=max_mutation, p_mutation=p_mutation, sub_tree_cutoff=sub_tree_cutoff)
right_best_patterns, _ = _max_likelihood_tree_worker(
right_tree,
mc_df=mc_df.loc[:, mc_df.columns.isin(right_tree.nodes)],
cov_df=cov_df.loc[:, cov_df.columns.isin(right_tree.nodes)],
max_mutation=max_mutation, p_mutation=p_mutation, sub_tree_cutoff=sub_tree_cutoff)
# for each DMR, go through all possible combination of best left and right pattern,
# when not exceed max_mutation, also consider whether should we add the removed edge or not
best_pattern_final = {}
likelihood_final = {}
for record_name in record_names:
_this_mc_df = mc_df.loc[[record_name]]
_this_cov_df = cov_df.loc[[record_name]]
left_patterns = list(left_best_patterns[record_name]) + [()] # add empty choice
right_patterns = list(right_best_patterns[record_name]) + [()] # add empty choice
middle_patterns = [[removed_edge], []]
# list all possible combined patterns
pattern_dict = {}
for left_i, left_pattern in enumerate(left_patterns):
for right_i, right_pattern in enumerate(right_patterns):
for middle_pattern in middle_patterns:
joint_pattern = (list(left_pattern) if len(left_pattern) != 0 else []) + (
list(right_pattern) if len(right_pattern) != 0 else []) + (
list(middle_pattern) if len(middle_pattern) != 0 else [])
_n_mutation = len(joint_pattern)
if _n_mutation > max_mutation:
continue
_this_group_mc_df, _this_group_un_mc_df = parse_one_pattern(
tree_g, joint_pattern, _this_mc_df, _this_cov_df)
# calculate tree likelihood on current pattern for all DMR
dmr_tree_likelihood = log_proba_beta_binomial(
_this_mc_df, _this_cov_df, _this_group_mc_df, _this_group_un_mc_df).values.sum()
# add mutation prior to tree likelihood, save to records
lp0 = mutation_likelihood(_n_mutation, p_mutation, n_edges)
try:
pattern_dict[_n_mutation][tuple(joint_pattern)] = dmr_tree_likelihood + lp0
except KeyError:
pattern_dict[_n_mutation] = {tuple(joint_pattern): dmr_tree_likelihood + lp0}
_this_final_pattern = []
_this_final_likelihood = []
for _n_mutation, _n_mutation_patterns in pattern_dict.items():
if _n_mutation != 0:
_s = pd.Series(_n_mutation_patterns).sort_values(ascending=False)[:top_n]
_this_final_pattern += _s.index.tolist()
_this_final_likelihood += _s.tolist()
else:
# empty pattern
_this_final_pattern += [()]
_this_final_likelihood += list(_n_mutation_patterns.values())
best_pattern_final[record_name] = np.array(_this_final_pattern)
likelihood_final[record_name] = np.array(_this_final_likelihood)
return | pd.Series(best_pattern_final) | pandas.Series |
#!/usr/bin/python3
# Module with dataframe operations.
# -
# append to a dataframe a.append(pd.DataFrame({'close':99.99},index=[datetime.datetime.now()])
import pandas as pd
from scipy import signal
import numpy
from numpy import NaN
import matplotlib.pyplot as plt
import datetime
from scipy.stats import linregress
# Creates DataFrame line
def CreateHorizontalLine(indexes, startValue, endValue, allIndexes=False):
data = pd.DataFrame()
# Only start and begin
if (allIndexes == False):
data = data.append(pd.DataFrame(
{'value': startValue}, index=[indexes[0]]))
data = data.append(pd.DataFrame(
{'value': endValue}, index=[indexes[-1]]))
# All data
else:
N = len(indexes)
alpha = (endValue - startValue) / N
for i in range(len(indexes)):
data = data.append(pd.DataFrame(
{'value': alpha * i + startValue},
index=[indexes[i]]))
return data
# Creates DataFrame line
def CreateVerticalLine(index, startValue, endValue):
data = pd.DataFrame()
data = data.append(pd.DataFrame({'value': startValue}, index=[index]))
data = data.append(pd.DataFrame({'value': endValue}, index=[index]))
return data
# Creates DataFrame rect
def CreateRect(index1, value1, index2, value2):
data = pd.DataFrame()
data = data.append(pd.DataFrame({'value': value1}, index=[index1]))
data = data.append(pd.DataFrame({'value': value2}, index=[index1]))
data = data.append(pd.DataFrame({'value': value2}, index=[index2]))
data = data.append(pd.DataFrame({'value': value1}, index=[index2]))
data = data.append(pd.DataFrame({'value': value1}, index=[index1]))
return data
# Creation of moving average with specific window and shift
def CreateMovingAverage(data, window, shiftPeriods=0):
average = data.rolling(window=int(window), min_periods=1).mean()
average.shift(periods=shiftPeriods)
return average
# Creation of moving std with specific window and shift
def CreateMovingStd(data, window, shiftPeriods=0):
average = data.rolling(window=int(window), min_periods=1).std()
average.shift(periods=shiftPeriods)
return average
# Create data subset by value
def CreateSubsetByValues(inputData, valueMin, valueMax):
subset = pd.DataFrame()
for i in range(len(inputData.values)):
if ((inputData.values[i] >= valueMin) and (inputData.values[i] <= valueMax)):
subset = subset.append(pd.DataFrame({'value': inputData.values[i]},
index=[inputData.index[i]]))
return subset
# Create data subset by date
def GetSubsetByDates(inputData, start_date, end_date, fillna=True):
subset = pd.DataFrame()
for i in range(len(inputData.values)):
if ((inputData.index[i] >= start_date) and (inputData.index[i] <= end_date)):
subset = subset.append(pd.DataFrame({'close': inputData.values[i]},
index=[inputData.index[i]]))
return subset
# Reindex weekly data
def SetReindex(data, start_date, end_date, fillna=True):
# Getting all weekdays between 01/01/2000 and 12/31/2016
all_weekdays = pd.date_range(start=start_date, end=end_date, freq='B')
# How do we align the existing prices in adj_close with our new set of dates?
# All we need to do is reindex close using all_weekdays as the new index
data = data.reindex(all_weekdays)
# Reindexing will insert missing values (NaN) for the dates that were not present
# in the original set. To cope with this, we can fill the missing values by replacing them
# with the latest available price for each instrument.
if (fillna == True):
data = data.fillna(method='ffill')
data = data.dropna()
return data
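# Illustrative usage sketch (dates and prices below are made up): align sparse
# daily closes to a full business-day calendar and forward-fill the gaps.
#
#   prices = pd.Series([100.0, 101.5, 99.8],
#                      index=pd.to_datetime(['2020-01-02', '2020-01-03', '2020-01-07']))
#   filled = SetReindex(prices, '2020-01-02', '2020-01-08', fillna=True)
#   # -> Jan 6 is filled with 101.5 and Jan 8 with 99.8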
# Calculate diff
def Diffrentiate(dataset):
diff = numpy.diff(dataset).tolist()
diff.append(0)  # pad with one trailing zero so the length matches the input
return diff
# Find zeroes and zero cuts
def FindZeroes(data):
zeroes = pd.DataFrame()
signs = numpy.sign(data.values)
for i in range(1, len(signs)):
if (signs[i] != signs[i - 1]):
zeroes = zeroes.append(pd.DataFrame(
{'close': data.values[i]}, index=[data.index[i]]))
return zeroes
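# Illustrative usage sketch (`macd` is a placeholder DataFrame): collect the
# samples where an oscillator column changes sign, i.e. crosses zero.
#
#   crossings = FindZeroes(macd['histogram'])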
# Find both signals intersections
def FindIntersections(x, y):
# For dataframes
if type(y) is pd.DataFrame:
diffrence = x.subtract(y)
# for int or float values
else:
diffrence = x - y
fromBottom = pd.DataFrame()
'''
AAA lllllll lllllll iiii
A:::A l:::::l l:::::l i::::i
A:::::A l:::::l l:::::l iiii
A:::::::A l:::::l l:::::l
A:::::::::A l::::l l::::l iiiiiii eeeeeeeeeeee
A:::::A:::::A l::::l l::::l i:::::i ee::::::::::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::eeeee:::::ee
A:::::A A:::::A l::::l l::::l i::::i e::::::e e:::::e
A:::::A A:::::A l::::l l::::l i::::i e:::::::eeeee::::::e
A:::::AAAAAAAAA:::::A l::::l l::::l i::::i e:::::::::::::::::e
A:::::::::::::::::::::A l::::l l::::l i::::i e::::::eeeeeeeeeee
A:::::AAAAAAAAAAAAA:::::A l::::l l::::l i::::i e:::::::e
A:::::A A:::::A l::::::ll::::::li::::::ie::::::::e
A:::::A A:::::A l::::::ll::::::li::::::i e::::::::eeeeeeee
A:::::A A:::::A l::::::ll::::::li::::::i ee:::::::::::::e
AAAAAAA AAAAAAAlllllllllllllllliiiiiiii eeeeeeeeeeeeee
| \/ | | | | |
| . . | ___ __| | ___| |___
| |\/| |/ _ \ / _` |/ _ \ / __|
| | | | (_) | (_| | __/ \__ \
\_| |_/\___/ \__,_|\___|_|___/
Make model predictions using this load.py script. This loads in all models in this
directory and makes predictions on a target folder. Note that files in this target
directory will be featurized with the default features as specified by the settings.json.
Usage: python3 load.py [target directory] [sampletype] [target model directory]
Example: python3 load.py /Users/jim/desktop/allie/load_dir audio /Users/jim/desktop/gender_tpot_classifier
Alt Usage: python3 load.py
--> this just loads all the models and makes predictions in the ./load_dir
'''
import os, json, pickle, time, sys, shutil
import pandas as pd
import numpy as np
def prev_dir(directory):
g=directory.split('/')
dir_=''
for i in range(len(g)):
if i != len(g)-1:
if i==0:
dir_=dir_+g[i]
else:
dir_=dir_+'/'+g[i]
# print(dir_)
return dir_
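# Example (hypothetical path): prev_dir('/Users/jim/desktop/allie/load_dir')
# returns '/Users/jim/desktop/allie', i.e. the path with its last component dropped.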
def most_common(lst):
'''
get most common item in a list
'''
return max(set(lst), key=lst.count)
def model_schema():
models={'audio': dict(),
'text': dict(),
'image': dict(),
'video': dict(),
'csv': dict()
}
return models
def classifyfolder(listdir):
filetypes=list()
for i in range(len(listdir)):
if listdir[i].endswith(('.mp3', '.wav')):
filetypes.append('audio')
elif listdir[i].endswith(('.png', '.jpg')):
filetypes.append('image')
elif listdir[i].endswith(('.txt')):
filetypes.append('text')
elif listdir[i].endswith(('.mp4', '.avi')):
filetypes.append('video')
elif listdir[i].endswith(('.csv')):
filetypes.append('csv')
filetypes=list(set(filetypes))
return filetypes
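# Example (hypothetical listing): classifyfolder(['a.wav', 'b.wav', 'notes.txt'])
# returns the unique sample types found, e.g. ['audio', 'text'] (order not
# guaranteed because a set is used internally).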
def get_features(models, actual_model_dir, sampletype):
models=models['%s_models'%(sampletype)]
features=list()
for i in range(len(models)):
os.chdir(actual_model_dir+'/'+models[i])
temp_settings=json.load(open('settings.json'))
features=features+temp_settings['default_%s_features'%(sampletype)]
# get only the necessary features for all models
default_features=list(set(features))
return default_features
def featurize(features_dir, load_dir, model_dir, filetypes, models):
# contextually load the proper features based on the model information
actual_model_dir=prev_dir(features_dir)+'/models/'+model_dir
# get default features
sampletype=model_dir.split('_')[0]
default_features=get_features(models, actual_model_dir, sampletype)
# now change to proper directory for featurization
if model_dir=='audio_models' and 'audio' in filetypes:
os.chdir(features_dir+'/audio_features')
elif model_dir=='text_models' and 'text' in filetypes:
models=models['text_models']
os.chdir(features_dir+'/text_features')
elif model_dir=='image_models' and 'image' in filetypes:
models=models['image_models']
os.chdir(features_dir+'/image_features')
elif model_dir=='video_models' and 'video' in filetypes:
models=models['video_models']
os.chdir(features_dir+'/video_features')
elif model_dir=='csv_models' and 'csv' in filetypes:
models=models['csv_models']
os.chdir(features_dir+'/csv_features')
# call featurization API via default features
for i in range(len(default_features)):
print(os.getcwd())
os.system('python3 featurize.py %s %s'%(load_dir, default_features[i]))
def find_files(model_dir):
print(model_dir)
jsonfiles=list()
csvfiles=list()
if model_dir == 'audio_models':
listdir=os.listdir()
print(listdir)
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.wav') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'text_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.txt') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'image_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.png') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir == 'video_models':
listdir=os.listdir()
for i in range(len(listdir)):
jsonfile=listdir[i][0:-4]+'.json'
if listdir[i].endswith('.mp4') and jsonfile in listdir:
jsonfiles.append(jsonfile)
elif model_dir =='csv_models':
# csv files are a little different here
listdir=os.listdir()
for i in range(len(listdir)):
csvfile='featurized_'+listdir[i]
if listdir[i].endswith('.csv') and csvfile in listdir:
csvfiles.append(csvfile)
else:
jsonfiles=[]
print(jsonfiles)
return jsonfiles, csvfiles
def make_predictions(sampletype, transformer, clf, modeltype, jsonfiles, csvfiles, default_features, classes, modeldata, model_dir):
'''
get the metrics associated with a classification and regression problem
and output a .JSON file with the training session.
'''
sampletype=sampletype.split('_')[0]
if sampletype != 'csv':
for k in range(len(jsonfiles)):
try:
g=json.load(open(jsonfiles[k]))
print(sampletype)
print(g)
features=list()
print(default_features)
for j in range(len(default_features)):
print(sampletype)
features=features+g['features'][sampletype][default_features[j]]['features']
labels=g['features'][sampletype][default_features[0]]['labels']
print(transformer)
print(features)
if transformer != '':
features=np.array(transformer.transform(np.array(features).reshape(1, -1))).reshape(1, -1)
else:
features=np.array(features).reshape(1,-1)
print(features)
metrics_=dict()
print(modeltype)
if modeltype not in ['autogluon', 'autokeras', 'autopytorch', 'alphapy', 'atm', 'keras', 'devol', 'ludwig', 'safe', 'neuraxle']:
y_pred=clf.predict(features)
elif modeltype=='alphapy':
# go to the right folder
curdir=os.getcwd()
print(os.listdir())
os.chdir(common_name+'_alphapy_session')
alphapy_dir=os.getcwd()
os.chdir('input')
os.rename('test.csv', 'predict.csv')
os.chdir(alphapy_dir)
os.system('alphapy --predict')
os.chdir('output')
listdir=os.listdir()
for m in range(len(listdir)):
if listdir[m].startswith('predictions'):
csvfile=listdir[m]
y_pred=pd.read_csv(csvfile)['prediction']
os.chdir(curdir)
elif modeltype == 'autogluon':
curdir=os.getcwd()
os.chdir(model_dir+'/model/')
from autogluon import TabularPrediction as task
print(os.getcwd())
if transformer != '':
new_features=dict()
for i in range(len(features[0])):
new_features['feature_%s'%(str(i))]=[features[0][i]]
print(new_features)
df=pd.DataFrame(new_features)
else:
df=pd.DataFrame(features, columns=labels)
y_pred=clf.predict(df)
os.chdir(curdir)
elif modeltype == 'autokeras':
curdir=os.getcwd()
os.chdir(model_dir+'/model')
print(os.getcwd())
y_pred=clf.predict(features).flatten()
os.chdir(curdir)
elif modeltype == 'autopytorch':
y_pred=clf.predict(features).flatten()
elif modeltype == 'atm':
curdir=os.getcwd()
os.chdir('atm_temp')
data = pd.read_csv('test.csv')
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ====================================================================
# @authors: <NAME>, <NAME>
# @since: 07/21/2018
# @summary: Functions for plotting radiance curves and errors.
# ====================================================================
import os
import csv
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from scipy.interpolate import Rbf
import numpy as np
import pandas as pd
# local
import utility
import models
def Plot(args):
# configure matplotlib
# 'figure.figsize': (15, 5),
params = {'mathtext.default': 'regular',
'legend.fontsize': 'x-large',
'axes.labelsize': 'x-large',
'axes.titlesize': 'xx-large',
'xtick.labelsize': 'large',
'ytick.labelsize': 'x-large'}
plt.rcParams.update(params)
#plt.rcParams.update(plt.rcParamsDefault)
# plot bar charts for all results
PlotFinalResults(args)
# quick out if no specific captures to plot
if len(args.captures) <= 0:
return
# load dataset and predictions
dftruth = pd.read_csv(args.datasetpath)
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2022, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import collections
import unittest
import warnings
import pandas as pd
import numpy as np
from qiime2 import Artifact
from qiime2.metadata import (Metadata, CategoricalMetadataColumn,
NumericMetadataColumn)
from qiime2.core.testing.util import get_dummy_plugin, ReallyEqualMixin
class TestInvalidMetadataConstruction(unittest.TestCase):
def test_non_dataframe(self):
with self.assertRaisesRegex(
TypeError, 'Metadata constructor.*DataFrame.*not.*Series'):
Metadata(pd.Series([1, 2, 3], name='col',
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_ids(self):
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({}, index=pd.Index([], name='id')))
with self.assertRaisesRegex(ValueError, 'Metadata.*at least one ID'):
Metadata(pd.DataFrame({'column': []},
index=pd.Index([], name='id')))
def test_invalid_id_header(self):
# default index name
with self.assertRaisesRegex(ValueError, r'Index\.name.*None'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'])))
with self.assertRaisesRegex(ValueError, r'Index\.name.*my-id-header'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'c'], name='my-id-header')))
def test_non_str_id(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata ID.*type.*float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', np.nan, 'c'], name='id')))
def test_non_str_column_name(self):
with self.assertRaisesRegex(
TypeError, 'non-string metadata column name.*type.*'
'float.*nan'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
np.nan: [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_empty_id(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata ID.*at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', '', 'c'], name='id')))
def test_empty_column_name(self):
with self.assertRaisesRegex(
ValueError, 'empty metadata column name.*'
'at least one character'):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'': [4, 5, 6]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_pound_sign_id(self):
with self.assertRaisesRegex(
ValueError, "metadata ID.*begins with a pound sign.*'#b'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', '#b', 'c'], name='id')))
def test_id_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata ID 'sample-id'.*conflicts.*reserved.*"
"ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'sample-id', 'c'], name='id')))
def test_column_name_conflicts_with_id_header(self):
with self.assertRaisesRegex(
ValueError, "metadata column name 'featureid'.*conflicts.*"
"reserved.*ID header"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3],
'featureid': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_duplicate_ids(self):
with self.assertRaisesRegex(ValueError, "Metadata IDs.*unique.*'a'"):
Metadata(pd.DataFrame(
{'col': [1, 2, 3]},
index=pd.Index(['a', 'b', 'a'], name='id')))
def test_duplicate_column_names(self):
data = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
with self.assertRaisesRegex(ValueError,
"Metadata column names.*unique.*'col1'"):
Metadata(pd.DataFrame(data, columns=['col1', 'col2', 'col1'],
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_unsupported_column_dtype(self):
with self.assertRaisesRegex(
TypeError, "Metadata column 'col2'.*unsupported.*dtype.*bool"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': [True, False, True]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_unsupported_type(self):
with self.assertRaisesRegex(
TypeError, "CategoricalMetadataColumn.*strings or missing "
r"values.*42\.5.*float.*'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 42.5]},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_categorical_column_empty_str(self):
with self.assertRaisesRegex(
ValueError, "CategoricalMetadataColumn.*empty strings.*"
"column 'col2'"):
Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', '', 'bar']},
index=pd.Index(['a', 'b', 'c'], name='id')))
def test_numeric_column_infinity(self):
with self.assertRaisesRegex(
ValueError, "NumericMetadataColumn.*positive or negative "
"infinity.*column 'col2'"):
Metadata(pd.DataFrame(
{'col1': ['foo', 'bar', 'baz'],
'col2': [42, float('+inf'), 4.3]},
index=pd.Index(['a', 'b', 'c'], name='id')))
class TestMetadataConstructionAndProperties(unittest.TestCase):
def assertEqualColumns(self, obs_columns, exp):
obs = [(name, props.type) for name, props in obs_columns.items()]
self.assertEqual(obs, exp)
def test_minimal(self):
md = Metadata(pd.DataFrame({}, index=pd.Index(['a'], name='id')))
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('a',))
self.assertEqualColumns(md.columns, [])
def test_single_id(self):
index = pd.Index(['id1'], name='id')
df = pd.DataFrame({'col1': [1.0], 'col2': ['a'], 'col3': ['foo']},
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 1)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1',))
self.assertEqualColumns(md.columns,
[('col1', 'numeric'), ('col2', 'categorical'),
('col3', 'categorical')])
def test_no_columns(self):
index = pd.Index(['id1', 'id2', 'foo'], name='id')
df = pd.DataFrame({}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 0)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'foo'))
self.assertEqualColumns(md.columns, [])
def test_single_column(self):
index = pd.Index(['id1', 'a', 'my-id'], name='id')
df = pd.DataFrame({'column': ['foo', 'bar', 'baz']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'a', 'my-id'))
self.assertEqualColumns(md.columns, [('column', 'categorical')])
def test_retains_column_order(self):
# Supply DataFrame constructor with explicit column ordering instead of
# a dict.
index = pd.Index(['id1', 'id2', 'id3'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar'],
[3.0, 'c', '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns,
[('z', 'numeric'), ('a', 'categorical'),
('ch', 'categorical')])
def test_supported_id_headers(self):
case_insensitive = {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
}
exact_match = {
'#SampleID', '#Sample ID', '#OTUID', '#OTU ID', 'sample_name'
}
# Build a set of supported headers, including exact matches and headers
# with different casing.
headers = set()
for header in case_insensitive:
headers.add(header)
headers.add(header.upper())
headers.add(header.title())
for header in exact_match:
headers.add(header)
count = 0
for header in headers:
index = pd.Index(['id1', 'id2'], name=header)
df = pd.DataFrame({'column': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_header, header)
count += 1
# Since this test case is a little complicated, make sure that the
# expected number of comparisons are happening.
self.assertEqual(count, 26)
def test_recommended_ids(self):
index = pd.Index(['c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'],
name='id')
df = pd.DataFrame({'col1': ['foo', 'bar']}, index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 2)
self.assertEqual(md.column_count, 1)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids,
('c6ca034a-223f-40b4-a0e0-45942912a5ea', 'My.ID'))
self.assertEqualColumns(md.columns, [('col1', 'categorical')])
def test_non_standard_characters(self):
index = pd.Index(['©id##1', '((id))2', "'id_3<>'", '"id#4"',
'i d\r\t\n5'], name='id')
columns = ['↩c@l1™', 'col(#2)', "#col'3", '"<col_4>"', 'col\t \r\n5']
data = [
['ƒoo', '(foo)', '#f o #o', 'fo\ro', np.nan],
["''2''", 'b#r', 'ba\nr', np.nan, np.nan],
['b"ar', 'c\td', '4\r\n2', np.nan, np.nan],
['b__a_z', '<42>', '>42', np.nan, np.nan],
['baz', np.nan, '42']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 5)
self.assertEqual(md.column_count, 5)
self.assertEqual(md.id_header, 'id')
self.assertEqual(
md.ids, ('©id##1', '((id))2', "'id_3<>'", '"id#4"', 'i d\r\t\n5'))
self.assertEqualColumns(md.columns, [('↩c@l1™', 'categorical'),
('col(#2)', 'categorical'),
("#col'3", 'categorical'),
('"<col_4>"', 'categorical'),
('col\t \r\n5', 'numeric')])
def test_missing_data(self):
index = pd.Index(['None', 'nan', 'NA', 'foo'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [1.0, np.nan, np.nan, np.nan]),
('NA', [np.nan, np.nan, np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
self.assertEqual(md.id_count, 4)
self.assertEqual(md.column_count, 4)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('None', 'nan', 'NA', 'foo'))
self.assertEqualColumns(md.columns, [('col1', 'numeric'),
('NA', 'numeric'),
('col3', 'categorical'),
('col4', 'categorical')])
def test_does_not_cast_ids_or_column_names(self):
index = pd.Index(['0.000001', '0.004000', '0.000000'], dtype=object,
name='id')
columns = ['42.0', '1000', '-4.2']
data = [
[2.0, 'b', 2.5],
[1.0, 'b', 4.2],
[3.0, 'c', -9.999]
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 3)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('0.000001', '0.004000', '0.000000'))
self.assertEqualColumns(md.columns, [('42.0', 'numeric'),
('1000', 'categorical'),
('-4.2', 'numeric')])
def test_mixed_column_types(self):
md = Metadata(
pd.DataFrame({'col0': [1.0, 2.0, 3.0],
'col1': ['a', 'b', 'c'],
'col2': ['foo', 'bar', '42'],
'col3': ['1.0', '2.5', '-4.002'],
'col4': [1, 2, 3],
'col5': [1, 2, 3.5],
'col6': [1e-4, -0.0002, np.nan],
'col7': ['cat', np.nan, 'dog'],
'col8': ['a', 'a', 'a'],
'col9': [0, 0, 0]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(md.id_count, 3)
self.assertEqual(md.column_count, 10)
self.assertEqual(md.id_header, 'id')
self.assertEqual(md.ids, ('id1', 'id2', 'id3'))
self.assertEqualColumns(md.columns, [('col0', 'numeric'),
('col1', 'categorical'),
('col2', 'categorical'),
('col3', 'categorical'),
('col4', 'numeric'),
('col5', 'numeric'),
('col6', 'numeric'),
('col7', 'categorical'),
('col8', 'categorical'),
('col9', 'numeric')])
def test_case_insensitive_duplicate_ids(self):
index = pd.Index(['a', 'b', 'A'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3']}, index=index)
metadata = Metadata(df)
self.assertEqual(metadata.ids, ('a', 'b', 'A'))
def test_case_insensitive_duplicate_column_names(self):
index = pd.Index(['a', 'b', 'c'], name='id')
df = pd.DataFrame({'column': ['1', '2', '3'],
'Column': ['4', '5', '6']}, index=index)
metadata = Metadata(df)
self.assertEqual(set(metadata.columns), {'column', 'Column'})
def test_categorical_column_leading_trailing_whitespace_value(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', ' bar ', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3],
'col2': ['foo', 'bar', 'baz']},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_id(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', ' b ', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
def test_leading_trailing_whitespace_column_name(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], ' col2 ': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, 2, 3], 'col2': [4, 5, 6]},
index=pd.Index(['a', 'b', 'c'], name='id')))
self.assertEqual(md1, md2)
class TestSourceArtifacts(unittest.TestCase):
def setUp(self):
self.md = Metadata(pd.DataFrame(
{'col': [1, 2, 3]}, index=pd.Index(['a', 'b', 'c'], name='id')))
def test_no_source_artifacts(self):
self.assertEqual(self.md.artifacts, ())
def test_add_zero_artifacts(self):
self.md._add_artifacts([])
self.assertEqual(self.md.artifacts, ())
def test_add_artifacts(self):
# First two artifacts have the same data but different UUIDs.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
self.md._add_artifacts([artifact1])
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact3 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact2, artifact3])
self.assertEqual(self.md.artifacts, (artifact1, artifact2, artifact3))
def test_add_non_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
with self.assertRaisesRegex(TypeError, "Artifact object.*42"):
self.md._add_artifacts([artifact, 42])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, ())
def test_add_duplicate_artifact(self):
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
artifact2 = Artifact.import_data('IntSequence1', [1, 2, 3, 4])
self.md._add_artifacts([artifact1, artifact2])
with self.assertRaisesRegex(
ValueError, "Duplicate source artifacts.*artifact: Mapping"):
self.md._add_artifacts([artifact1])
# Test that the object hasn't been mutated.
self.assertEqual(self.md.artifacts, (artifact1, artifact2))
class TestRepr(unittest.TestCase):
def test_singular(self):
md = Metadata(pd.DataFrame({'col1': [42]},
index=pd.Index(['a'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 1 column', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
def test_plural(self):
md = Metadata(pd.DataFrame({'col1': [42, 42], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='id')))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('2 IDs x 2 columns', obs)
self.assertIn("col1: ColumnProperties(type='numeric')", obs)
self.assertIn("col2: ColumnProperties(type='categorical')", obs)
def test_column_name_padding(self):
data = [[0, 42, 'foo']]
index = pd.Index(['my-id'], name='id')
columns = ['col1', 'longer-column-name', 'c']
md = Metadata(pd.DataFrame(data, index=index, columns=columns))
obs = repr(md)
self.assertIn('Metadata', obs)
self.assertIn('1 ID x 3 columns', obs)
self.assertIn(
"col1: ColumnProperties(type='numeric')", obs)
self.assertIn(
"longer-column-name: ColumnProperties(type='numeric')", obs)
self.assertIn(
"c: ColumnProperties(type='categorical')", obs)
class TestEqualityOperators(unittest.TestCase, ReallyEqualMixin):
def setUp(self):
get_dummy_plugin()
def test_type_mismatch(self):
md = Metadata(
pd.DataFrame({'col1': [1.0, 2.0, 3.0],
'col2': ['a', 'b', 'c'],
'col3': ['foo', 'bar', '42']},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
mdc = md.get_column('col1')
self.assertIsInstance(md, Metadata)
self.assertIsInstance(mdc, NumericMetadataColumn)
self.assertReallyNotEqual(md, mdc)
def test_id_header_mismatch(self):
data = {'col1': ['foo', 'bar'], 'col2': [42, 43]}
md1 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame(
data, index=pd.Index(['id1', 'id2'], name='ID')))
self.assertReallyNotEqual(md1, md2)
def test_source_mismatch(self):
# Metadata created from an artifact vs not shouldn't compare equal,
# even if the data is the same.
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md_from_artifact = artifact.view(Metadata)
md_no_artifact = Metadata(md_from_artifact.to_dataframe())
pd.testing.assert_frame_equal(md_from_artifact.to_dataframe(),
md_no_artifact.to_dataframe())
self.assertReallyNotEqual(md_from_artifact, md_no_artifact)
def test_artifact_mismatch(self):
# Metadata created from different artifacts shouldn't compare equal,
# even if the data is the same.
artifact1 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
artifact2 = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact1.view(Metadata)
md2 = artifact2.view(Metadata)
pd.testing.assert_frame_equal(md1.to_dataframe(), md2.to_dataframe())
self.assertReallyNotEqual(md1, md2)
def test_id_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['1'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_name_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'c': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_type_mismatch(self):
md1 = Metadata(pd.DataFrame({'col1': ['42', '43']},
index=pd.Index(['id1', 'id2'], name='id')))
md2 = Metadata(pd.DataFrame({'col1': [42, 43]},
index=pd.Index(['id1', 'id2'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_column_order_mismatch(self):
index = pd.Index(['id1', 'id2'], name='id')
md1 = Metadata(pd.DataFrame([[42, 'foo'], [43, 'bar']], index=index,
columns=['z', 'a']))
md2 = Metadata(pd.DataFrame([['foo', 42], ['bar', 43]], index=index,
columns=['a', 'z']))
self.assertReallyNotEqual(md1, md2)
def test_data_mismatch(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '2'},
index=pd.Index(['0'], name='id')))
self.assertReallyNotEqual(md1, md2)
def test_equality_without_artifact(self):
md1 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
md2 = Metadata(pd.DataFrame({'a': '1', 'b': '3'},
index=pd.Index(['0'], name='id')))
self.assertReallyEqual(md1, md2)
def test_equality_with_artifact(self):
artifact = Artifact.import_data('Mapping', {'a': '1', 'b': '2'})
md1 = artifact.view(Metadata)
md2 = artifact.view(Metadata)
self.assertReallyEqual(md1, md2)
def test_equality_with_missing_data(self):
md1 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'col1': [1, np.nan, 4.2],
'col2': [np.nan, 'foo', np.nan]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertReallyEqual(md1, md2)
class TestToDataframe(unittest.TestCase):
def test_minimal(self):
df = pd.DataFrame({}, index=pd.Index(['id1'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='#SampleID'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.index.name, '#SampleID')
def test_dataframe_copy(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertIsNot(obs, df)
def test_retains_column_order(self):
index = pd.Index(['id1', 'id2'], name='id')
columns = ['z', 'a', 'ch']
data = [
[1.0, 'a', 'foo'],
[2.0, 'b', 'bar']
]
df = pd.DataFrame(data, index=index, columns=columns)
md = Metadata(df)
obs = md.to_dataframe()
pd.testing.assert_frame_equal(obs, df)
self.assertEqual(obs.columns.tolist(), ['z', 'a', 'ch'])
def test_missing_data(self):
# Different missing data representations should be normalized to np.nan
index = pd.Index(['None', 'nan', 'NA', 'id1'], name='id')
df = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, float('nan'), 3]),
('NA', [np.nan, 'foo', float('nan'), None]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame(collections.OrderedDict([
('col1', [42.5, np.nan, np.nan, 3.0]),
('NA', [np.nan, 'foo', np.nan, np.nan]),
('col3', ['null', 'N/A', np.nan, 'NA']),
('col4', np.array([np.nan, np.nan, np.nan, np.nan],
dtype=object))]),
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'NA': object, 'col3': object,
'col4': object})
self.assertTrue(np.isnan(obs['col1']['NA']))
self.assertTrue(np.isnan(obs['NA']['NA']))
self.assertTrue(np.isnan(obs['NA']['id1']))
def test_dtype_int_normalized_to_dtype_float(self):
index = pd.Index(['id1', 'id2', 'id3'], name='id')
df = pd.DataFrame({'col1': [42, -43, 0],
'col2': [42.0, -43.0, 0.0],
'col3': [42, np.nan, 0]},
index=index)
self.assertEqual(df.dtypes.to_dict(),
{'col1': np.int64, 'col2': np.float64,
'col3': np.float64})
md = Metadata(df)
obs = md.to_dataframe()
exp = pd.DataFrame({'col1': [42.0, -43.0, 0.0],
'col2': [42.0, -43.0, 0.0],
'col3': [42.0, np.nan, 0.0]},
index=index)
pd.testing.assert_frame_equal(obs, exp)
self.assertEqual(obs.dtypes.to_dict(),
{'col1': np.float64, 'col2': np.float64,
'col3': np.float64})
class TestGetColumn(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_column_name_not_found(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
with self.assertRaisesRegex(ValueError,
"'col3'.*not a column.*'col1', 'col2'"):
md.get_column('col3')
def test_artifacts_are_propagated(self):
A = Artifact.import_data('Mapping', {'a': '1', 'b': '3'})
md = A.view(Metadata)
obs = md.get_column('b')
exp = CategoricalMetadataColumn(
pd.Series(['3'], name='b', index=pd.Index(['0'], name='id')))
exp._add_artifacts([A])
self.assertEqual(obs, exp)
self.assertEqual(obs.artifacts, (A,))
def test_categorical_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col2')
exp = CategoricalMetadataColumn(
pd.Series(['foo', 'bar'], name='col2',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_numeric_column(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['id1', 'id2'], name='id'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_id_header_preserved(self):
df = pd.DataFrame({'col1': [42, 2.5], 'col2': ['foo', 'bar']},
index=pd.Index(['a', 'b'], name='#OTU ID'))
md = Metadata(df)
obs = md.get_column('col1')
exp = NumericMetadataColumn(
pd.Series([42, 2.5], name='col1',
index=pd.Index(['a', 'b'], name='#OTU ID')))
self.assertEqual(obs, exp)
self.assertEqual(obs.id_header, '#OTU ID')
class TestGetIDs(unittest.TestCase):
def test_default(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids()
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
def test_incomplete_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "Subject='subject-1' AND SampleType="
with self.assertRaises(ValueError):
metadata.get_ids(where)
where = "Subject="
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_invalid_where(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='sampleid'))
metadata = Metadata(df)
where = "not-a-column-name='subject-1'"
with self.assertRaises(ValueError):
metadata.get_ids(where)
def test_empty_result(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
def test_simple_expression(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
where = "Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-3'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1', 'S3'}
self.assertEqual(actual, expected)
where = "SampleType='tongue'"
actual = metadata.get_ids(where)
expected = {'S2'}
self.assertEqual(actual, expected)
def test_more_complex_expressions(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
where = "Subject='subject-1' OR Subject='subject-2'"
actual = metadata.get_ids(where)
expected = {'S1', 'S2', 'S3'}
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND Subject='subject-2'"
actual = metadata.get_ids(where)
expected = set()
self.assertEqual(actual, expected)
where = "Subject='subject-1' AND SampleType='gut'"
actual = metadata.get_ids(where)
expected = {'S1'}
self.assertEqual(actual, expected)
def test_query_by_id(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'SampleType': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
actual = metadata.get_ids(where="id='S2' OR id='S1'")
expected = {'S1', 'S2'}
self.assertEqual(actual, expected)
def test_query_by_alternate_id_header(self):
metadata = Metadata(pd.DataFrame(
{}, index=pd.Index(['id1', 'id2', 'id3'], name='#OTU ID')))
obs = metadata.get_ids(where="\"#OTU ID\" IN ('id2', 'id3')")
exp = {'id2', 'id3'}
self.assertEqual(obs, exp)
def test_no_columns(self):
metadata = Metadata(
pd.DataFrame({}, index=pd.Index(['a', 'b', 'my-id'], name='id')))
obs = metadata.get_ids()
exp = {'a', 'b', 'my-id'}
self.assertEqual(obs, exp)
def test_query_mixed_column_types(self):
df = pd.DataFrame({'Name': ['Foo', 'Bar', 'Baz', 'Baaz'],
# numbers that would sort incorrectly as strings
'Age': [9, 10, 11, 101],
'Age_Str': ['9', '10', '11', '101'],
'Weight': [80.5, 85.3, np.nan, 120.0]},
index=pd.Index(['S1', 'S2', 'S3', 'S4'], name='id'))
metadata = Metadata(df)
# string pattern matching
obs = metadata.get_ids(where="Name LIKE 'Ba_'")
exp = {'S2', 'S3'}
self.assertEqual(obs, exp)
# string comparison
obs = metadata.get_ids(where="Age_Str >= 11")
exp = {'S1', 'S3'}
self.assertEqual(obs, exp)
# numeric comparison
obs = metadata.get_ids(where="Age >= 11")
exp = {'S3', 'S4'}
self.assertEqual(obs, exp)
# numeric comparison with missing data
obs = metadata.get_ids(where="Weight < 100")
exp = {'S1', 'S2'}
self.assertEqual(obs, exp)
def test_column_with_space_in_name(self):
df = pd.DataFrame({'Subject': ['subject-1', 'subject-1', 'subject-2'],
'Sample Type': ['gut', 'tongue', 'gut']},
index=pd.Index(['S1', 'S2', 'S3'], name='id'))
metadata = Metadata(df)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
metadata.get_ids()
# The list of captured warnings should be empty
self.assertFalse(w)
class TestMerge(unittest.TestCase):
def setUp(self):
get_dummy_plugin()
def test_merging_nothing(self):
md = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
with self.assertRaisesRegex(ValueError,
'At least one Metadata.*nothing to merge'):
md.merge()
def test_merging_two(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_three(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_merging_unaligned_indices(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [9, 8, 7], 'd': [12, 11, 10]},
index=pd.Index(['id3', 'id2', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 15, 14], 'f': [16, 18, 17]},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6],
'c': [7, 8, 9], 'd': [10, 11, 12],
'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_inner_join(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2, 3], 'b': [4, 5, 6]},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame(
{'c': [7, 8, 9], 'd': [10, 11, 12]},
index=pd.Index(['id2', 'X', 'Y'], name='id')))
md3 = Metadata(pd.DataFrame(
{'e': [13, 14, 15], 'f': [16, 17, 18]},
index=pd.Index(['X', 'id3', 'id2'], name='id')))
# Single shared ID.
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
{'a': [2], 'b': [5], 'c': [7], 'd': [10], 'e': [15], 'f': [18]},
index=pd.Index(['id2'], name='id')))
self.assertEqual(obs, exp)
# Multiple shared IDs.
obs = md1.merge(md3)
exp = Metadata(pd.DataFrame(
{'a': [2, 3], 'b': [5, 6], 'e': [15, 14], 'f': [18, 17]},
index=pd.Index(['id2', 'id3'], name='id')))
self.assertEqual(obs, exp)
def test_index_and_column_merge_order(self):
md1 = Metadata(pd.DataFrame(
[[1], [2], [3], [4]],
index=pd.Index(['id1', 'id2', 'id3', 'id4'], name='id'),
columns=['a']))
md2 = Metadata(pd.DataFrame(
[[5], [6], [7]], index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b']))
md3 = Metadata(pd.DataFrame(
[[8], [9], [10]], index=pd.Index(['id1', 'id4', 'id3'], name='id'),
columns=['c']))
obs = md1.merge(md2, md3)
exp = Metadata(pd.DataFrame(
[[1, 7, 8], [3, 6, 10], [4, 5, 9]],
index=pd.Index(['id1', 'id3', 'id4'], name='id'),
columns=['a', 'b', 'c']))
self.assertEqual(obs, exp)
# Merging in different order produces different ID/column order.
obs = md2.merge(md1, md3)
exp = Metadata(pd.DataFrame(
[[5, 4, 9], [6, 3, 10], [7, 1, 8]],
index=pd.Index(['id4', 'id3', 'id1'], name='id'),
columns=['b', 'a', 'c']))
self.assertEqual(obs, exp)
def test_id_column_only(self):
md1 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id2', 'id3'], name='id')))
md2 = Metadata(pd.DataFrame({},
index=pd.Index(['id2', 'X', 'id1'], name='id')))
md3 = Metadata(pd.DataFrame({},
index=pd.Index(['id1', 'id3', 'id2'], name='id')))
obs = md1.merge(md2, md3)
exp = Metadata(
pd.DataFrame({}, index=pd.Index(['id1', 'id2'], name='id')))
self.assertEqual(obs, exp)
def test_merged_id_column_name(self):
md1 = Metadata(pd.DataFrame(
{'a': [1, 2]},
index=pd.Index(['id1', 'id2'], name='sample ID')))
md2 = Metadata(pd.DataFrame(
{'b': [3, 4]},
index=pd.Index(['id1', 'id2'], name='feature ID')))
import datetime
import json
import pandas as pd
from dateutil import relativedelta
from rest_framework.generics import ListCreateAPIView, get_object_or_404
from rest_framework.response import Response
from rest_framework.views import APIView
from analytics.events.utils.dataframe_builders import ProductivityLogEventsDataframeBuilder, \
SupplementEventsDataframeBuilder, SleepActivityDataframeBuilder
from apis.betterself.v1.constants import DAILY_FREQUENCY, MONTHLY_FREQUENCY
from apis.betterself.v1.events.filters import SupplementLogFilter, UserActivityFilter, UserActivityLogFilter, \
DailyProductivityLogFilter
from apis.betterself.v1.events.serializers import SupplementLogCreateUpdateSerializer, \
SupplementLogReadOnlySerializer, ProductivityLogReadSerializer, ProductivityLogCreateSerializer, \
UserActivitySerializer, UserActivityLogCreateSerializer, UserActivityLogReadSerializer, \
UserActivityUpdateSerializer, ProductivityLogRequestParametersSerializer, \
SupplementLogRequestParametersSerializer, SupplementReminderReadSerializer, SupplementReminderCreateSerializer, \
SupplementStackLogSerializer
from apis.betterself.v1.utils.views import ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin
from betterself.utils.date_utils import get_current_userdate
from betterself.utils.pandas_utils import force_start_end_date_to_series, force_start_end_data_to_dataframe, \
update_dataframe_to_be_none_instead_of_nan_for_api_responses
from config.pagination import ModifiedPageNumberPagination
from events.models import SupplementLog, DailyProductivityLog, UserActivity, UserActivityLog, SupplementReminder, \
SleepLog
from supplements.models import Supplement, UserSupplementStack
class SupplementEventView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
model = SupplementLog
read_serializer_class = SupplementLogReadOnlySerializer
write_serializer_class = SupplementLogCreateUpdateSerializer
update_serializer_class = SupplementLogCreateUpdateSerializer
filter_class = SupplementLogFilter
pagination_class = ModifiedPageNumberPagination
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).select_related('supplement')
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
class ProductivityLogView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin):
model = DailyProductivityLog
pagination_class = ModifiedPageNumberPagination
read_serializer_class = ProductivityLogReadSerializer
write_serializer_class = ProductivityLogCreateSerializer
filter_class = DailyProductivityLogFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class ProductivityLogAggregatesView(APIView):
# TODO - Refactor all of this after Twilio integration!
def get(self, request):
user = request.user
serializer = ProductivityLogRequestParametersSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
query_params = serializer.validated_data
query_start_date = query_params['start_date']
query_cumulative_window = query_params['cumulative_window']
complete_date_range_in_daily_frequency = query_params['complete_date_range_in_daily_frequency']
# if this is a cumulative window, we want to look back even further when filtering
log_filter_date = query_start_date - relativedelta.relativedelta(days=query_cumulative_window)
productivity_logs = DailyProductivityLog.objects.filter(user=user, date__gte=log_filter_date)
# data is consumed by front-end, so don't rename columns
dataframe_builder = ProductivityLogEventsDataframeBuilder(productivity_logs, rename_columns=False)
results = dataframe_builder.get_flat_daily_dataframe()
# TODO - feels like we should always just do this from the builder level to be on the safe side ...
results.sort_index(ascending=True, inplace=True)
# sum up the history by how many days as the window specifies
results = results.rolling(window=query_cumulative_window, min_periods=1).sum()
# because rolling windows need to look back further to sum, this timeseries has extra dates
results = results[query_start_date:]
if complete_date_range_in_daily_frequency:
results = force_start_end_data_to_dataframe(user, results, query_start_date, datetime.date.today())
data_formatted = json.loads(results.to_json(date_format='iso', orient='index', double_precision=2))
return Response(data_formatted)
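# Rough pandas sketch of the cumulative-window logic above (illustrative only;
# the values are made up and the real data comes from DailyProductivityLog):
#
#   daily = pd.Series([1, 2, 3, 4], index=pd.date_range('2020-01-01', periods=4))
#   rolled = daily.rolling(window=3, min_periods=1).sum()   # -> 1, 3, 6, 9
#
# The first (window - 1) days need history from before the requested start date,
# which is why the queryset filters from `log_filter_date` and the result is
# re-trimmed with `results[query_start_date:]`.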
class UserActivityView(ListCreateAPIView, UUIDDeleteMixin, UUIDUpdateMixin):
model = UserActivity
serializer_class = UserActivitySerializer
filter_class = UserActivityFilter
pagination_class = ModifiedPageNumberPagination
update_serializer_class = UserActivityUpdateSerializer
def get_queryset(self):
return self.model.objects.filter(user=self.request.user)
class UserActivityEventView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin, UUIDUpdateMixin):
model = UserActivityLog
pagination_class = ModifiedPageNumberPagination
read_serializer_class = UserActivityLogReadSerializer
write_serializer_class = UserActivityLogCreateSerializer
update_serializer_class = UserActivityLogCreateSerializer
filter_class = UserActivityLogFilter
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).select_related('user_activity')
class SupplementLogListView(APIView):
# TODO - Refactor all of this after Twilio integration!
def get(self, request, supplement_uuid):
supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=request.user)
user = request.user
serializer = SupplementLogRequestParametersSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
params = serializer.validated_data
start_date = params['start_date']
end_date = get_current_userdate(user)
supplement_events = SupplementLog.objects.filter(user=user, supplement=supplement, time__date__gte=start_date)
builder = SupplementEventsDataframeBuilder(supplement_events)
if params['frequency'] == 'daily':
# most of the time the dataframe contains a lot of supplements, here we are only picking one
try:
series = builder.get_flat_daily_dataframe()[supplement.name]
except KeyError:
# key error for no data if the supplement was never taken during this time
series = pd.Series()
if params['complete_date_range_in_daily_frequency']:
series = force_start_end_date_to_series(user, series, start_date, end_date)
else:
df = builder.build_dataframe()
series = df['Quantity']
json_data = series.to_json(date_format='iso')
data = json.loads(json_data)
return Response(data)
class SupplementReminderView(ListCreateAPIView, ReadOrWriteSerializerChooser, UUIDDeleteMixin):
model = SupplementReminder
write_serializer_class = SupplementReminderCreateSerializer
read_serializer_class = SupplementReminderReadSerializer
def get_queryset(self):
return self.model.objects.filter(user=self.request.user).select_related('supplement')
def get_serializer_class(self):
return self._get_read_or_write_serializer_class()
class AggregatedSupplementLogView(APIView):
# TODO - Refactor all of this after Twilio integration! Wow, this view sucks
""" Returns a list of dates that Supplement was taken along with the productivity and sleep of that date"""
def get(self, request, supplement_uuid):
# TODO - Refactor this garbage, you can add some smart redis caching level to this
supplement = get_object_or_404(Supplement, uuid=supplement_uuid, user=request.user)
user = request.user
serializer = SupplementLogRequestParametersSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
params = serializer.validated_data
start_date = params['start_date']
end_date = get_current_userdate(user)
supplement_events = SupplementLog.objects.filter(
user=user, supplement=supplement, time__date__gte=start_date, time__date__lte=end_date)
# no point if nothing exists
if not supplement_events.exists():
return Response([])
# lots of crappy templating here, sorry.
supplement_builder = SupplementEventsDataframeBuilder(supplement_events)
# TODO - Really feels like you should build a helper on the builder to do this since you do it so often
supplement_series = supplement_builder.build_dataframe()['Quantity'].sort_index()
# because the dataframe will also get things like "source" etc, and we only care about
# quantity, take that series and then recast it as a numeric
supplement_series = pd.to_numeric(supplement_series)
import numpy as np
import pandas as pd
from collections import defaultdict
import re
import csv
from bs4 import BeautifulSoup
import sys
import os
import multiprocessing as mp
os.environ['KERAS_BACKEND']='theano'
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Embedding
from keras.layers import Dense, Input, Flatten
from keras.layers import merge
from keras.layers import Conv1D, MaxPooling1D, Embedding, Dropout, LSTM, GRU, Bidirectional
from keras.models import Model
from keras import backend as K
from keras.engine.topology import Layer, InputSpec
from keras import initializers
import preprocessor as p
from nltk import tokenize
##Configuration used for data cleaning and word embeddings vector creation
p.set_options(p.OPT.URL, p.OPT.EMOJI,p.OPT.NUMBER,p.OPT.SMILEY)
MAX_SEQUENCE_LENGTH = 10000
MAX_NB_WORDS = 20000
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.25
# np.random.seed(12)
server="/local/data/"
###Load socio-linguistic features data, which consist of LIWC, Empath, and other linguistic features.
data1=pd.read_csv(server+"features/Empath_features1.csv")
data1=data1.drop(["Tag"],axis=1)
data2=pd.read_csv(server+"features/features11.csv")
#Merge both dataframes and normalize on the basis of the number of tweets in an event
data1=pd.merge(data1, data2, on="Event")
"""
Copyright (c) 2021, Electric Power Research Institute
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of DER-VET nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""
BatteryTech.py
This Python class contains methods and attributes specific for technology analysis within StorageVet.
"""
from .EnergyStorage import EnergyStorage
import numpy as np
import pandas as pd
import rainflow
from storagevet.ErrorHandling import *
from storagevet.Library import truncate_float, is_leap_yr
import cvxpy as cvx
class Battery(EnergyStorage):
""" Battery class that inherits from Storage.
"""
def __init__(self, params):
""" Initializes a battery class that inherits from the technology class.
It sets the type and physical constraints of the technology.
Args:
params (dict): params dictionary from dataframe for one case
"""
TellUser.debug(f"Initializing {__name__}")
# create generic storage object
super().__init__(params)
self.hp = params['hp']
self.tag = 'Battery'
# initialize degradation attributes
self.cycle_life = params['cycle_life']
self.degrade_perc = 0
self.soh_initial = 1 #Initial SOH at the start of the project
self.soh=1 #Current SOH, initialized at the start of the project
self.yearly_degrade = params['yearly_degrade'] / 100
self.eol_condition = params['cycle_life_table_eol_condition'] / 100
self.incl_cycle_degrade = bool(params['incl_cycle_degrade'])
self.degrade_data = None
self.counted_cycles = []
def initialize_degradation_module(self, opt_agg):
"""
Notes: Should be called once, after optimization levels are assigned, but before
optimization loop gets called
Args:
opt_agg (DataFrame):
Returns: None
"""
if self.incl_cycle_degrade:
# initialize degradation dataframe
self.degrade_data = pd.DataFrame(index=['Optimization Start']+list(opt_agg.control.unique()))
self.degrade_data['degradation progress %'] = self.degrade_perc
self.degrade_data['state of health %'] = self.soh *100
self.degrade_data['effective energy capacity (kWh)'] = self.degraded_energy_capacity()
self.calc_degradation('Optimization Start', None, None)
def degraded_energy_capacity(self):
""" Updates ene_max_rated and control constraints based on degradation percent
Applies degrade percent to rated energy capacity
TODO: use lookup table for energy cap to degredation percentage
Returns:
Degraded energy capacity
"""
soh_change = self.degrade_perc
new_ene_max = max(self.ene_max_rated * (1 - soh_change), 0)
return new_ene_max
def calc_degradation(self, opt_period, start_dttm, last_dttm):
""" calculate degradation percent based on yearly degradation and cycle degradation
Args:
opt_period: the index of the optimization that occurred before calling this function, None if
no optimization problem has been solved yet
start_dttm (DateTime): Start timestamp to calculate degradation. ie. the first datetime in the optimization
problem
last_dttm (DateTime): End timestamp to calculate degradation. ie. the last datetime in the optimization
problem
Returns:
A percent that represents the energy capacity degradation
"""
# time difference between time stamps converted into years multiplied by yearly degrate rate
if self.incl_cycle_degrade:
cycle_degrade = 0
yearly_degradation = 0
if not isinstance(opt_period, str):
# calculate degradation due to cycling iff energy values are given
energy_series = self.variables_df.loc[start_dttm:last_dttm, 'ene']
# Find the effective energy capacity
eff_e_cap = self.degraded_energy_capacity()
# Alternative: to use rainflow's count_cycles instead of extract_cycles, uncomment the following lines
# use rainflow counting algorithm to get cycle counts
# cycle_counts = rainflow.count_cycles(energy_series, ndigits=4)
#
# aux_df = pd.DataFrame(cycle_counts, columns=['DoD', 'N_cycles'])
# aux_df['Opt window'] = opt_period
#
# # sort cycle counts into user-specified cycle life bins
# digitized_cycles = np.searchsorted(self.cycle_life['Cycle Depth Upper Limit'],[min(i[0]/eff_e_cap, 1) for i in cycle_counts], side='left')
# use rainflow extract function to get information on each cycle
cycle_extract=list(rainflow.extract_cycles(energy_series))
aux_df = pd.DataFrame(cycle_extract, columns=['rng', 'mean','count','i_start','i_end'])
aux_df['Opt window'] = opt_period
# sort cycle counts into user-specified cycle life bins
digitized_cycles = np.searchsorted(self.cycle_life['Cycle Depth Upper Limit'],[min(i[0] / eff_e_cap, 1) for i in cycle_extract], side='left')
aux_df['Input_cycle_DoD_mapping'] = np.array(self.cycle_life['Cycle Depth Upper Limit'][digitized_cycles]*eff_e_cap)
aux_df['Cycle Life Value'] = np.array(self.cycle_life['Cycle Life Value'][digitized_cycles] )
self.counted_cycles.append(aux_df.copy())
# sum up number of cycles for all cycle counts in each bin
cycle_sum = self.cycle_life.loc[:, :]
cycle_sum.loc[:, 'cycles'] = 0
for i in range(len(cycle_extract)):
cycle_sum.loc[digitized_cycles[i], 'cycles'] += cycle_extract[i][2]
# sum across bins to get total degrade percent
# 1/cycle life value is degrade percent for each cycle
cycle_degrade = np.dot(1/cycle_sum['Cycle Life Value'], cycle_sum.cycles)* (1 - self.eol_condition)
if start_dttm is not None and last_dttm is not None:
# add the yearly degradation linearly to the # of years from START_DTTM to (END_DTTM + dt)
days_in_year = 366 if is_leap_yr(start_dttm.year) else 365
portion_of_year = (last_dttm + pd.Timedelta(self.dt, unit='h') - start_dttm) / pd.Timedelta(days_in_year, unit='d')
yearly_degradation = self.yearly_degrade * portion_of_year
# add the degradation due to time passing and cycling for total degradation
degrade_percent = cycle_degrade + yearly_degradation
# record the degradation
# the total degradation after optimization OPT_PERIOD must also take into account the
# degradation that occurred before the battery was in operation (which we saved as SELF.DEGRADE_PERC)
self.degrade_data.loc[opt_period, 'degradation progress %'] = degrade_percent + self.degrade_perc
self.degrade_perc += degrade_percent
soh_new = self.soh_initial - self.degrade_perc
self.soh = self.degrade_data.loc[opt_period, 'state of health %'] = soh_new
# apply degradation to technology (affects physical_constraints['ene_max_rated'] and control constraints)
eff_e_cap = self.degraded_energy_capacity()
TellUser.info(f"BATTERY - {self.name}: effective energy capacity is now {truncate_float(eff_e_cap)} kWh " +
f"({truncate_float(100*(1 - (self.ene_max_rated-eff_e_cap)/self.ene_max_rated), 7)}% of original)")
self.degrade_data.loc[opt_period, 'effective energy capacity (kWh)'] = eff_e_cap
self.effective_soe_max = eff_e_cap * self.ulsoc
self.effective_soe_min = eff_e_cap * self.llsoc
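# Minimal standalone sketch of the cycle-counting step above (illustrative only;
# the energy series, bin edges and capacity below are made up, not StorageVet data):
#
#     import numpy as np
#     import pandas as pd
#     import rainflow
#     energy = pd.Series([0., 50., 10., 60., 5., 55., 0.])
#     cycles = list(rainflow.extract_cycles(energy))  # tuples of (rng, mean, count, i_start, i_end)
#     bin_edges = np.array([0.2, 0.5, 1.0])           # 'Cycle Depth Upper Limit' as fractions of capacity
#     eff_e_cap = 100.0
#     bins = np.searchsorted(bin_edges, [min(c[0] / eff_e_cap, 1) for c in cycles], side='left')
#     # each counted cycle then contributes count / (cycle life of its bin) to the degradation percent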
def constraints(self, mask, **kwargs):
"""Default build constraint list method. Used by services that do not
have constraints.
Args:
mask (DataFrame): A boolean array that is true for indices
corresponding to time_series data included in the subs data set
Returns:
A list of constraints that corresponds the battery's physical
constraints and its service constraints
"""
# create default list of constraints
constraint_list = super().constraints(mask, **kwargs)
if self.incl_binary:
# battery cannot charge and discharge in the same timestep:
# cvx.NonPos(x) enforces x <= 0, so on_c + on_d <= 1
constraint_list += [cvx.NonPos(self.variables_dict['on_c'] +
self.variables_dict['on_d'] - 1)]
return constraint_list
def save_variable_results(self, subs_index):
""" Searches through the dictionary of optimization variables and saves the ones specific to each
DER instance and saves the values it to itself
Args:
subs_index (Index): index of the subset of data for which the variables were solved for
"""
super().save_variable_results(subs_index)
# check for charging and discharging in same time step
eps = 1e-4
if np.any((self.variables_df.loc[subs_index, 'ch'].values >= eps) & (self.variables_df.loc[subs_index, 'dis'].values >= eps)):
TellUser.warning('non-zero charge and discharge powers found in optimization solution. Try binary formulation')
def proforma_report(self, apply_inflation_rate_func, fill_forward_func, results):
""" Calculates the proforma that corresponds to participation in this value stream
Args:
apply_inflation_rate_func:
fill_forward_func:
results (pd.DataFrame):
Returns: A DataFrame with each year in opt_year as the index and
the corresponding value this stream provided.
"""
pro_forma = super().proforma_report(apply_inflation_rate_func, fill_forward_func, results)
if self.hp > 0:
tech_id = self.unique_tech_id()
# the value of the energy consumed by the auxiliary load (housekeeping power) is assumed to be equal to the
# value of energy for DA ETS, real time ETS, or retail ETS.
analysis_years = self.variables_df.index.year.unique()
hp_proforma = pd.DataFrame()
if results.columns.isin(['Energy Price ($/kWh)']).any():
hp_cost = self.dt * -results.loc[:, 'Energy Price ($/kWh)'] * self.hp
for year in analysis_years:
year_monthly = hp_cost[hp_cost.index.year == year]
hp_proforma.loc[pd.Period(year=year, freq='y'), tech_id + 'Aux Load Cost'] = year_monthly.sum()
# fill forward
hp_proforma = fill_forward_func(hp_proforma, None)
# append to the super class's proforma
pro_forma = pd.concat([pro_forma, hp_proforma], axis=1)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
pyplr.oceanops
==============
A module to help with measurements for Ocean Optics spectrometers.
'''
from time import sleep
import numpy as np
import pandas as pd
import spectres
from seabreeze.spectrometers import Spectrometer
class OceanOptics(Spectrometer):
'''Device class for Ocean Optics spectrometer with user-defined methods.
'''
def __init__(self):
super(Spectrometer, self).__init__()
# User defined methods
def measurement(self, integration_time=None, setting={}):
'''Obtain a measurement with an Ocean Optics spectrometer.
If `integration_time` is not specified, will use an adaptive procedure
that avoids saturation by aiming for a maximum reported value of
80-90% of the maximum intensity value for the device. Can take up to a
maximum of ~3.5 mins for lower light levels, though this could be
reduced somewhat by optimising the algorithm.
Parameters
----------
integration_time : int
The integration time to use for the measurement. Leave as None to
adaptively set the integration time based on spectral measurements.
setting : dict, optional
Current setting of the light source (if known), to be included in
the `info`. For example ``{'led' : 5, 'intensity' : 3000}``, or
``{'intensities' : [0, 0, 0, 300, 4000, 200, 0, 0, 0, 0]}``.
The default is ``{}``.
Returns
-------
counts : np.array
Raw intensity counts from the Ocean Optics spectrometer.
info : dict
Companion info for measurement.
'''
if integration_time:
# set the spectrometer integration time (keep it in intgt so the info dict below is valid on this path)
intgt = int(integration_time)
self.integration_time_micros(intgt)
sleep(.01)
# obtain temperature measurements
temps = self.f.temperature.temperature_get_all()
sleep(.01)
# obtain intensity measurements
counts = self.intensities()
# get the maximum reported value
max_reported = max(counts)
print('\tIntegration time: {} ms --> maximum value: {}'.format(
integration_time / 1000, max_reported))
else:
# initial parameters
intgtlims = self.integration_time_micros_limits
maximum_intensity = self.max_intensity
lower_intgt = None
upper_intgt = None
lower_bound = maximum_intensity * .8
upper_bound = maximum_intensity * .9
# start with 1000 micros
intgt = 1000.0
max_reported = 0
# keep sampling with different integration times until the maximum
# reported value is within 80-90% of the maximum intensity value
# for the device
while max_reported < lower_bound or max_reported > upper_bound:
# if current integration time is greater than the upper limit,
# set it to the upper limit
if intgt >= intgtlims[1]:
intgt = intgtlims[1]
# set the spectrometer integration time
self.integration_time_micros(intgt)
sleep(.01)
# obtain temperature measurements
temps = self.f.temperature.temperature_get_all()
sleep(.01)
# obtain intensity measurements
counts = self.intensities()
# get the maximum reported value
max_reported = max(counts)
print('\tIntegration time: {} ms --> maximum value: {}'.format(
intgt / 1000, max_reported))
# if the integration time has reached the upper limit for the
# spectrometer, exit the while loop, having obtained the final
# measurement
if intgt == intgtlims[1]:
break
# if the max_reported value is less than the lower_bound and
# the upper_ingt is not yet known, update the lower_intgt and
# double intgt ready for the next iteration
elif max_reported < lower_bound and upper_intgt is None:
lower_intgt = intgt
intgt *= 2.0
# if the max_reported value is greater than the upper_bound,
# update the upper_intgt and subtract half of the difference
# between upper_intgt and lower_intgt from intgt ready for the
# next iteration
elif max_reported > upper_bound:
upper_intgt = intgt
intgt -= (upper_intgt - lower_intgt) / 2
# if the max_reported value is less than the lower_bound and
# the value of upper_intgt is known, update the lower_intgt and
# add half of the difference between upper_intgt and
# lower_intgt to intgt ready for the next iteration
elif max_reported < lower_bound and upper_intgt is not None:
lower_intgt = intgt
intgt += (upper_intgt - lower_intgt) / 2
info = {
'board_temp': temps[0],
'micro_temp': temps[2],
'integration_time': intgt,
'model': self.model
}
info = {**info, **setting}
return counts, info
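# Example usage (a sketch; assumes a spectrometer is plugged in and that
# seabreeze's Spectrometer.from_first_available() constructor is inherited by
# this class -- the setting dict below is made up):
#
#     oo = OceanOptics.from_first_available()
#     counts, info = oo.measurement()  # adaptive integration time
#     counts, info = oo.measurement(integration_time=20000,
#                                   setting={'led': 5, 'intensity': 3000})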
def dark_measurement(self, integration_times=[1000]):
'''Sample the dark spectrum with a range of integration times.
Do this for a range of temperatures to map the relationship between
temperature and integration time.
'''
data = []
info = []
for intgt in integration_times:
self.integration_time_micros(intgt)
sleep(.05)
c, i = self.measurement(integration_time=intgt)
print('Board temp: {}, integration time: {}'.format(
i['board_temp'], intgt))
data.append(c)
info.append(i)
data = pd.DataFrame(data, columns=self.wavelengths())
info = pd.DataFrame(info)
import nltk
nltk.download('punkt')
nltk.download('stopwords')
import re
from bs4 import BeautifulSoup
import unicodedata
import contractions
import spacy
import pandas as pd
import numpy as np
nlp = spacy.load('en_core_web_sm')
ps = nltk.porter.PorterStemmer()
# Links removal
def remove_links(text):
"""Takes a string and removes web links from it"""
text = re.sub(r'http\S+', '', text) # remove http links
text = re.sub(r'bit.ly/\S+', '', text) # remove bitly links
text = text.strip('[link]') # remove [links]
text = re.sub(r'pic.twitter\S+', '', text)
return text
# Retweet and @user information removal
def remove_users(text):
"""Takes a string and removes retweet and @user information"""
text = re.sub('(RT\s@[A-Za-z]+[A-Za-z0-9-_]+)', '', text) # remove re-tweet
text = re.sub('(@[A-Za-z]+[A-Za-z0-9-_]+)', '', text) # remove tweeted at
return text
# Hash tags removal
def remove_hashtags(text):
"""Takes a string and removes any hash tags"""
text = re.sub('(#[A-Za-z]+[A-Za-z0-9-_]+)', '', text) # remove hash tags
return text
# AUDIO/VIDEO tags or labels removal
def remove_av(text):
"""Takes a string and removes AUDIO/VIDEO tags or labels"""
text = re.sub('VIDEO:', '', text) # remove 'VIDEO:' from start of tweet
text = re.sub('AUDIO:', '', text) # remove 'AUDIO:' from start of tweet
return text
# HTML removal
def strip_html_tags(text):
soup = BeautifulSoup(text, "html.parser")
[s.extract() for s in soup(['iframe', 'script'])]
stripped_text = soup.get_text()
stripped_text = re.sub(r'[\r|\n|\r\n]+', '\n', stripped_text)
return stripped_text
# accent removal
def remove_accented_chars(text):
text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('utf-8', 'ignore')
return text
# contraction expansion
def expand_contractions(text):
return contractions.fix(text)
# lemamtization
def spacy_lemmatize_text(text):
text = nlp(text)
text = ' '.join([word.lemma_ if word.lemma_ != '-PRON-' else word.text for word in text])
return text
# stemming
def simple_stemming(text, stemmer=ps):
text = ' '.join([stemmer.stem(word) for word in text.split()])
return text
# special character removal
def remove_special_characters(text, remove_digits=False):
pattern = r'[^a-zA-Z0-9\s]' if not remove_digits else r'[^a-zA-Z\s]'
text = re.sub(pattern, '', text)
return text
# stopword removal
def remove_stopwords(text, is_lower_case=False, stopwords=None):
if not stopwords:
stopwords = nltk.corpus.stopwords.words('english')
tokens = nltk.word_tokenize(text)
tokens = [token.strip() for token in tokens]
if is_lower_case:
filtered_tokens = [token for token in tokens if token not in stopwords]
else:
filtered_tokens = [token for token in tokens if token.lower() not in stopwords]
filtered_text = ' '.join(filtered_tokens)
return filtered_text
import tqdm # progressbar
def text_pre_processor(text, remove_links_=True, remove_users_=True, remove_hashtags_=True, remove_av_=True,
html_strip=True, accented_char_removal=True, contraction_expansion=True,
text_lower_case=True, text_stemming=False, text_lemmatization=True,
special_char_removal=True, remove_digits=True, stopword_removal=True,
stopword_list=None):
# remove links
if remove_links_:
text = remove_links(text)
# remove users and retweets
if remove_users_:
text = remove_users(text)
# remove hash tags
if remove_hashtags_:
text = remove_hashtags(text)
# remove audio video
if remove_av_:
text = remove_av(text)
# strip HTML
if html_strip:
text = strip_html_tags(text)
# remove extra newlines (often might be present in really noisy text)
text = text.translate(str.maketrans("\n\t\r", "   "))  # map newlines/tabs to spaces (maketrans needs equal-length arguments)
# remove accented characters
if accented_char_removal:
text = remove_accented_chars(text)
# expand contractions
if contraction_expansion:
text = expand_contractions(text)
# lemmatize text
if text_lemmatization:
text = spacy_lemmatize_text(text)
# remove special characters and\or digits
if special_char_removal:
# insert spaces between special characters to isolate them
special_char_pattern = re.compile(r'([{.(-)!}])') # 'I will not go!here' => 'I will not go ! here'
text = special_char_pattern.sub(" \\1 ", text)
text = remove_special_characters(text, remove_digits=remove_digits)
# stem text
if text_stemming and not text_lemmatization:
text = simple_stemming(text)
# lowercase the text
if text_lower_case:
text = text.lower()
# remove stopwords
if stopword_removal:
text = remove_stopwords(text, is_lower_case=text_lower_case,
stopwords=stopword_list)
# remove extra whitespace
text = re.sub(' +', ' ', text)  # collapse runs of spaces: 'I   will     not' => 'I will not'
text = text.strip()
return text
def corpus_pre_processor(corpus):
norm_corpus = []
for doc in tqdm.tqdm(corpus):
norm_corpus.append(text_pre_processor(doc))
return norm_corpus
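# Example usage (illustrative only; the tweet below is made up):
#
#     raw = ["RT @user Check this out! https://t.co/abc #wow <b>GREAT</b> news!!!"]
#     clean = corpus_pre_processor(raw)
#     # links, user handles, hashtags, HTML and stopwords are stripped and the
#     # remaining tokens are lemmatized and lower-cased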
import textblob
def textblob_labls(example):
df_snt_obj = textblob.TextBlob(example['data_text']).sentiment
example['subjectivity'] = df_snt_obj.subjectivity
return example
def count_features(example):
example['n_mentions'] = example['data_text'].count('@')
example['n_hashtags'] = example['data_text'].count('#')
example['n_links'] = example['data_text'].count('https://')
return example
def prepare_demo_df(example):
example = pd.DataFrame.from_dict(example, orient='index')
import numpy as np
import pandas as pd
import argparse
def check_smiles_match(data,screen):
return (data['SMILES'].values==screen['SMILES'].values).all()
def apply_screen(data,col_name,selection_type,selection_thresh,keep):
data = data.sort_values(col_name,ascending=True)
if selection_type=='Fraction':
if keep=='High':
data = data[-int(len(data)*selection_thresh):]
elif keep=='Low':
data = data[0:int(len(data)*selection_thresh)]  # keep the lowest fraction (mirrors the 'High' branch)
else:
print('WARNING: INVALID KEEP TYPE')
elif selection_type=='Cutoff':
if keep=='High':
data = data[data[col_name]>selection_thresh]
elif keep=='Low':
data = data[data[col_name]<selection_thresh]
else:
print('WARNING: INVALID KEEP TYPE')
else:
print('WARNING: INVALID SELECTION TYPE')
return data
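# Example usage of apply_screen (a sketch with made-up data, independent of the
# argparse pipeline below):
#
#     toy = pd.DataFrame({'SMILES': ['C', 'CC', 'CCC', 'CCCC'],
#                         'score': [0.1, 0.9, 0.4, 0.7]})
#     top_half = apply_screen(toy, 'score', 'Fraction', 0.5, 'High')  # two highest scores
#     below_half = apply_screen(toy, 'score', 'Cutoff', 0.5, 'Low')   # scores < 0.5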
parser = argparse.ArgumentParser()
parser.add_argument('--molfile', type=str, required=True)
parser.add_argument('--outfile', type=str, required=True)
parser.add_argument('--screen_file1', type=str, default=None)
parser.add_argument('--selection_type1', type=str, default='Fraction') # Fraction or Cutoff Value
parser.add_argument('--selection_thresh1', type=float, default=0.5)
parser.add_argument('--keep1', type=str, default='High') # High or low
parser.add_argument('--screen_file2', type=str, default=None)
parser.add_argument('--selection_type2', type=str, default='Cutoff') # Fraction or Cutoff Value
parser.add_argument('--selection_thresh2', type=float, default=5.0)
parser.add_argument('--keep2', type=str, default='Low') # High or low
args = parser.parse_args()
data = pd.read_csv(args.molfile)
data = data.drop_duplicates() # Remove duplicates
if args.screen_file1 is not None:
screen1 = pd.read_csv(args.screen_file1)
# Check if smiles match:
if not check_smiles_match(data,screen1):
print('WARNING: SMILES LISTS DO NOT MATCH')
# Add screen
col_name1 = [c for c in screen1.columns if c != 'SMILES'][0]  # first non-SMILES column
data[col_name1]=screen1[col_name1]
if args.screen_file2 is not None:
screen2 = pd.read_csv(args.screen_file2)
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specify the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default behaviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(StringIO(data), chunksize=2)
piece = result.get_chunk()
self.assertEqual(len(piece), 2)
def test_read_text_list(self):
data = """A,B,C\nfoo,1,2,3\nbar,4,5,6"""
as_list = [['A', 'B', 'C'], ['foo', '1', '2', '3'], ['bar',
'4', '5', '6']]
df = self.read_csv(StringIO(data), index_col=0)
parser = TextParser(as_list, index_col=0, chunksize=2)
chunk = parser.read(None)
tm.assert_frame_equal(chunk, df)
def test_iterator(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunk = reader.read(3)
tm.assert_frame_equal(chunk, df[:3])
last_chunk = reader.read(5)
tm.assert_frame_equal(last_chunk, df[3:])
# pass list
lines = list(csv.reader(StringIO(self.data1)))
parser = TextParser(lines, index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
# pass skiprows
parser = TextParser(lines, index_col=0, chunksize=2, skiprows=[1])
chunks = list(parser)
tm.assert_frame_equal(chunks[0], df[1:3])
# test bad parameter (skip_footer)
reader = self.read_csv(StringIO(self.data1), index_col=0,
iterator=True, skip_footer=True)
self.assertRaises(ValueError, reader.read, 3)
treader = self.read_table(StringIO(self.data1), sep=',', index_col=0,
iterator=True)
tm.assertIsInstance(treader, TextFileReader)
# stopping iteration when on chunksize is specified, GH 3967
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
reader = self.read_csv(StringIO(data), iterator=True)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
tm.assert_frame_equal(result[0], expected)
# chunksize = 1
reader = self.read_csv(StringIO(data), chunksize=1)
result = list(reader)
expected = DataFrame(dict(A=[1, 4, 7], B=[2, 5, 8], C=[
3, 6, 9]), index=['foo', 'bar', 'baz'])
self.assertEqual(len(result), 3)
tm.assert_frame_equal(pd.concat(result), expected)
def test_header_not_first_line(self):
data = """got,to,ignore,this,line
got,to,ignore,this,line
index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
data2 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
"""
df = self.read_csv(StringIO(data), header=2, index_col=0)
expected = self.read_csv(StringIO(data2), header=0, index_col=0)
tm.assert_frame_equal(df, expected)
def test_header_multi_index(self):
expected = tm.makeCustomDataframe(
5, 3, r_idx_nlevels=2, c_idx_nlevels=4)
data = """\
C0,,C_l0_g0,C_l0_g1,C_l0_g2
C1,,C_l1_g0,C_l1_g1,C_l1_g2
C2,,C_l2_g0,C_l2_g1,C_l2_g2
C3,,C_l3_g0,C_l3_g1,C_l3_g2
R0,R1,,,
R_l0_g0,R_l1_g0,R0C0,R0C1,R0C2
R_l0_g1,R_l1_g1,R1C0,R1C1,R1C2
R_l0_g2,R_l1_g2,R2C0,R2C1,R2C2
R_l0_g3,R_l1_g3,R3C0,R3C1,R3C2
R_l0_g4,R_l1_g4,R4C0,R4C1,R4C2
"""
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
# skipping lines in the header
df = self.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[
0, 1], tupleize_cols=False)
tm.assert_frame_equal(df, expected)
#### invalid options ####
# no as_recarray
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], as_recarray=True, tupleize_cols=False)
# names
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], names=['foo', 'bar'], tupleize_cols=False)
# usecols
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=[0, 1], usecols=['foo', 'bar'], tupleize_cols=False)
# non-numeric index_col
self.assertRaises(ValueError, self.read_csv, StringIO(data), header=[0, 1, 2, 3],
index_col=['foo', 'bar'], tupleize_cols=False)
def test_header_multiindex_common_format(self):
df = DataFrame([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],
index=['one', 'two'],
columns=MultiIndex.from_tuples([('a', 'q'), ('a', 'r'), ('a', 's'),
('b', 't'), ('c', 'u'), ('c', 'v')]))
# to_csv
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
,,,,,,
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common
data = """,a,a,a,b,c,c
,q,r,s,t,u,v
one,1,2,3,4,5,6
two,7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(df, result)
# common, no index_col
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=None)
tm.assert_frame_equal(df.reset_index(drop=True), result)
# malformed case 1
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[u('a'), u('q')]))
data = """a,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# malformed case 2
expected = DataFrame(np.array([[2, 3, 4, 5, 6],
[8, 9, 10, 11, 12]], dtype='int64'),
index=Index([1, 7]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('r'), u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 0, 1, 2, 2], [
0, 1, 2, 3, 4]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=0)
tm.assert_frame_equal(expected, result)
# mi on columns and index (malformed)
expected = DataFrame(np.array([[3, 4, 5, 6],
[9, 10, 11, 12]], dtype='int64'),
index=MultiIndex(levels=[[1, 7], [2, 8]],
labels=[[0, 1], [0, 1]]),
columns=MultiIndex(levels=[[u('a'), u('b'), u('c')], [u('s'), u('t'), u('u'), u('v')]],
labels=[[0, 1, 2, 2],
[0, 1, 2, 3]],
names=[None, u('q')]))
data = """,a,a,b,c,c
q,r,s,t,u,v
1,2,3,4,5,6
7,8,9,10,11,12"""
result = self.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])
tm.assert_frame_equal(expected, result)
def test_pass_names_with_index(self):
lines = self.data1.split('\n')
no_header = '\n'.join(lines[1:])
# regular index
names = ['index', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=0, names=names)
expected = self.read_csv(StringIO(self.data1), index_col=0)
tm.assert_frame_equal(df, expected)
# multi index
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['index1', 'index2', 'A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), index_col=['index1', 'index2'])
tm.assert_frame_equal(df, expected)
def test_multi_index_no_level_names(self):
data = """index1,index2,A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
data2 = """A,B,C,D
foo,one,2,3,4,5
foo,two,7,8,9,10
foo,three,12,13,14,15
bar,one,12,13,14,15
bar,two,12,13,14,15
"""
lines = data.split('\n')
no_header = '\n'.join(lines[1:])
names = ['A', 'B', 'C', 'D']
df = self.read_csv(StringIO(no_header), index_col=[0, 1],
header=None, names=names)
expected = self.read_csv(StringIO(data), index_col=[0, 1])
tm.assert_frame_equal(df, expected, check_names=False)
# 2 implicit first cols
df2 = self.read_csv(StringIO(data2))
tm.assert_frame_equal(df2, df)
# reverse order of index
df = self.read_csv(StringIO(no_header), index_col=[1, 0], names=names,
header=None)
expected = self.read_csv(StringIO(data), index_col=[1, 0])
tm.assert_frame_equal(df, expected, check_names=False)
def test_multi_index_parse_dates(self):
data = """index1,index2,A,B,C
20090101,one,a,1,2
20090101,two,b,3,4
20090101,three,c,4,5
20090102,one,a,1,2
20090102,two,b,3,4
20090102,three,c,4,5
20090103,one,a,1,2
20090103,two,b,3,4
20090103,three,c,4,5
"""
df = self.read_csv(StringIO(data), index_col=[0, 1], parse_dates=True)
self.assertIsInstance(df.index.levels[0][0],
(datetime, np.datetime64, Timestamp))
# specify columns out of order!
df2 = self.read_csv(StringIO(data), index_col=[1, 0], parse_dates=True)
self.assertIsInstance(df2.index.levels[1][0],
(datetime, np.datetime64, Timestamp))
def test_skip_footer(self):
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'):
data = """A,B,C
1,2,3
4,5,6
7,8,9
want to skip this
also also skip this
"""
result = self.read_csv(StringIO(data), skip_footer=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = self.read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
result = self.read_csv(StringIO(data), nrows=3)
tm.assert_frame_equal(result, expected)
# skipfooter alias
result = read_csv(StringIO(data), skipfooter=2)
no_footer = '\n'.join(data.split('\n')[:-3])
expected = read_csv(StringIO(no_footer))
tm.assert_frame_equal(result, expected)
def test_no_unnamed_index(self):
data = """ id c0 c1 c2
0 1 0 a b
1 2 0 c d
2 2 2 e f
"""
df = self.read_table(StringIO(data), sep=' ')
self.assertIsNone(df.index.name)
def test_converters(self):
data = """A,B,C,D
a,1,2,01/01/2009
b,3,4,01/02/2009
c,4,5,01/03/2009
"""
from pandas.compat import parse_date
result = self.read_csv(StringIO(data), converters={'D': parse_date})
result2 = self.read_csv(StringIO(data), converters={3: parse_date})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(parse_date)
tm.assertIsInstance(result['D'][0], (datetime, Timestamp))
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# produce integer
converter = lambda x: int(x.split('/')[2])
result = self.read_csv(StringIO(data), converters={'D': converter})
expected = self.read_csv(StringIO(data))
expected['D'] = expected['D'].map(converter)
tm.assert_frame_equal(result, expected)
def test_converters_no_implicit_conv(self):
# GH2184
data = """000102,1.2,A\n001245,2,B"""
f = lambda x: x.strip()
converter = {0: f}
df = self.read_csv(StringIO(data), header=None, converters=converter)
self.assertEqual(df[0].dtype, object)
def test_converters_euro_decimal_format(self):
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
self.assertEqual(df2['Number2'].dtype, float)
self.assertEqual(df2['Number3'].dtype, float)
def test_converter_return_string_bug(self):
# GH #583
data = """Id;Number1;Number2;Text1;Text2;Number3
1;1521,1541;187101,9543;ABC;poi;4,738797819
2;121,12;14897,76;DEF;uyt;0,377320872
3;878,158;108013,434;GHI;rez;2,735694704"""
f = lambda x: float(x.replace(",", "."))
converter = {'Number1': f, 'Number2': f, 'Number3': f}
df2 = self.read_csv(StringIO(data), sep=';', converters=converter)
self.assertEqual(df2['Number1'].dtype, float)
def test_read_table_buglet_4x_multiindex(self):
# GH 6607
# Parsing multi-level index currently causes an error in the C parser.
# Temporarily copied to TestPythonParser.
# Here test that CParserError is raised:
with tm.assertRaises(pandas.parser.CParserError):
text = """ A B C D E
one two three four
a b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640
a q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744
x q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""
# it works!
df = self.read_table(StringIO(text), sep='\s+')
self.assertEqual(df.index.names, ('one', 'two', 'three', 'four'))
def test_line_comment(self):
data = """# empty
A,B,C
1,2.,4.#hello world
#ignore this line
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows(self):
data = """# empty
random line
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# this should ignore the first four lines (including comments)
df = self.read_csv(StringIO(data), comment='#', skiprows=4)
tm.assert_almost_equal(df.values, expected)
def test_comment_header(self):
data = """# empty
# second empty line
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# header should begin at the second non-comment line
df = self.read_csv(StringIO(data), comment='#', header=1)
tm.assert_almost_equal(df.values, expected)
def test_comment_skiprows_header(self):
data = """# empty
# second empty line
# third empty line
X,Y,Z
1,2,3
A,B,C
1,2.,4.
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
# skiprows should skip the first 4 lines (including comments), while
# header should start from the second non-commented line starting
# with line 5
df = self.read_csv(StringIO(data), comment='#', skiprows=4, header=1)
tm.assert_almost_equal(df.values, expected)
def test_read_csv_parse_simple_list(self):
text = """foo
bar baz
qux foo
foo
bar"""
df = read_csv(StringIO(text), header=None)
expected = DataFrame({0: ['foo', 'bar baz', 'qux foo',
'foo', 'bar']})
tm.assert_frame_equal(df, expected)
def test_parse_dates_custom_euroformat(self):
text = """foo,bar,baz
31/01/2010,1,2
01/02/2010,1,NA
02/02/2010,1,2
"""
parser = lambda d: parse_date(d, dayfirst=True)
df = self.read_csv(StringIO(text),
names=['time', 'Q', 'NTU'], header=0,
index_col=0, parse_dates=True,
date_parser=parser, na_values=['NA'])
exp_index = Index([datetime(2010, 1, 31), datetime(2010, 2, 1),
datetime(2010, 2, 2)], name='time')
expected = DataFrame({'Q': [1, 1, 1], 'NTU': [2, np.nan, 2]},
index=exp_index, columns=['Q', 'NTU'])
tm.assert_frame_equal(df, expected)
parser = lambda d: parse_date(d, day_first=True)
self.assertRaises(Exception, self.read_csv,
StringIO(text), skiprows=[0],
names=['time', 'Q', 'NTU'], index_col=0,
parse_dates=True, date_parser=parser,
na_values=['NA'])
def test_na_value_dict(self):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
df = self.read_csv(StringIO(data),
na_values={'A': ['foo'], 'B': ['bar']})
expected = DataFrame({'A': [np.nan, 'bar', np.nan, 'bar'],
'B': [np.nan, 'foo', np.nan, 'foo'],
'C': [np.nan, 'foo', np.nan, 'foo']})
tm.assert_frame_equal(df, expected)
data = """\
a,b,c,d
0,NA,1,5
"""
xp = DataFrame({'b': [np.nan], 'c': [1], 'd': [5]}, index=[0])
xp.index.name = 'a'
df = self.read_csv(StringIO(data), na_values={}, index_col=0)
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=[0, 2])
tm.assert_frame_equal(df, xp)
xp = DataFrame({'b': [np.nan], 'd': [5]},
MultiIndex.from_tuples([(0, 1)]))
xp.index.names = ['a', 'c']
df = self.read_csv(StringIO(data), na_values={}, index_col=['a', 'c'])
tm.assert_frame_equal(df, xp)
@tm.network
def test_url(self):
# HTTP(S)
url = ('https://raw.github.com/pydata/pandas/master/'
'pandas/io/tests/data/salary.table')
url_table = self.read_table(url)
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
tm.assert_frame_equal(url_table, local_table)
# TODO: ftp testing
@slow
def test_file(self):
# FILE
if sys.version_info[:2] < (2, 6):
raise nose.SkipTest("file:// not supported with Python < 2.6")
dirpath = tm.get_data_path()
localtable = os.path.join(dirpath, 'salary.table')
local_table = self.read_table(localtable)
try:
url_table = self.read_table('file://localhost/' + localtable)
except URLError:
# fails on some systems
raise nose.SkipTest("failing on %s" %
' '.join(platform.uname()).strip())
tm.assert_frame_equal(url_table, local_table)
def test_parse_tz_aware(self):
import pytz
# #1693
data = StringIO("Date,x\n2012-06-13T01:39:00Z,0.5")
# it works
result = read_csv(data, index_col=0, parse_dates=True)
stamp = result.index[0]
self.assertEqual(stamp.minute, 39)
try:
self.assertIs(result.index.tz, pytz.utc)
except AssertionError: # hello Yaroslav
arr = result.index.to_pydatetime()
result = tools.to_datetime(arr, utc=True)[0]
self.assertEqual(stamp.minute, result.minute)
self.assertEqual(stamp.hour, result.hour)
self.assertEqual(stamp.day, result.day)
def test_multiple_date_cols_index(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
xp = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col='nominal')
tm.assert_frame_equal(xp.set_index('nominal'), df)
df2 = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]},
index_col=0)
tm.assert_frame_equal(df2, df)
df3 = self.read_csv(StringIO(data), parse_dates=[[1, 2]], index_col=0)
tm.assert_frame_equal(df3, df, check_names=False)
def test_multiple_date_cols_chunked(self):
df = self.read_csv(StringIO(self.ts_data), parse_dates={
'nominal': [1, 2]}, index_col='nominal')
reader = self.read_csv(StringIO(self.ts_data), parse_dates={'nominal':
[1, 2]}, index_col='nominal', chunksize=2)
chunks = list(reader)
self.assertNotIn('nominalTime', df)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_multiple_date_col_named_components(self):
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col='nominal')
colspec = {'nominal': ['date', 'nominalTime']}
df = self.read_csv(StringIO(self.ts_data), parse_dates=colspec,
index_col='nominal')
tm.assert_frame_equal(df, xp)
def test_multiple_date_col_multiple_index(self):
df = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]},
index_col=['nominal', 'ID'])
xp = self.read_csv(StringIO(self.ts_data),
parse_dates={'nominal': [1, 2]})
tm.assert_frame_equal(xp.set_index(['nominal', 'ID']), df)
def test_comment(self):
data = """A,B,C
1,2.,4.#hello world
5.,NaN,10.0
"""
expected = [[1., 2., 4.],
[5., np.nan, 10.]]
df = self.read_csv(StringIO(data), comment='#')
tm.assert_almost_equal(df.values, expected)
df = self.read_table(StringIO(data), sep=',', comment='#',
na_values=['NaN'])
tm.assert_almost_equal(df.values, expected)
def test_bool_na_values(self):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': np.array([True, nan, False], dtype=object),
'B': np.array([False, True, nan], dtype=object),
'C': [True, False, True]})
tm.assert_frame_equal(result, expected)
def test_nonexistent_path(self):
# don't segfault pls #2428
path = '%s.csv' % tm.rands(10)
self.assertRaises(Exception, self.read_csv, path)
def test_missing_trailing_delimiters(self):
data = """A,B,C,D
1,2,3,4
1,3,3,
1,4,5"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['D'].isnull()[1:].all())
def test_skipinitialspace(self):
s = ('"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '
'1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, '
'314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, '
'70.06056, 344.98370, 1, 1, -0.689265, -0.692787, '
'0.212036, 14.7674, 41.605, -9999.0, -9999.0, '
'-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128')
sfile = StringIO(s)
# it's 33 columns
result = self.read_csv(sfile, names=lrange(33), na_values=['-9999.0'],
header=None, skipinitialspace=True)
self.assertTrue(pd.isnull(result.ix[0, 29]))
def test_utf16_bom_skiprows(self):
# #2298
data = u("""skip this
skip this too
A\tB\tC
1\t2\t3
4\t5\t6""")
data2 = u("""skip this
skip this too
A,B,C
1,2,3
4,5,6""")
path = '__%s__.csv' % tm.rands(10)
with tm.ensure_clean(path) as path:
for sep, dat in [('\t', data), (',', data2)]:
for enc in ['utf-16', 'utf-16le', 'utf-16be']:
bytes = dat.encode(enc)
with open(path, 'wb') as f:
f.write(bytes)
s = BytesIO(dat.encode('utf-8'))
if compat.PY3:
# somewhat False since the code never sees bytes
from io import TextIOWrapper
s = TextIOWrapper(s, encoding='utf-8')
result = self.read_csv(path, encoding=enc, skiprows=2,
sep=sep)
expected = self.read_csv(s, encoding='utf-8', skiprows=2,
sep=sep)
tm.assert_frame_equal(result, expected)
def test_utf16_example(self):
path = tm.get_data_path('utf16_ex.txt')
# it works! and is the right length
result = self.read_table(path, encoding='utf-16')
self.assertEqual(len(result), 50)
if not compat.PY3:
buf = BytesIO(open(path, 'rb').read())
result = self.read_table(buf, encoding='utf-16')
self.assertEqual(len(result), 50)
def test_converters_corner_with_nas(self):
# skip aberration observed on Win64 Python 3.2.2
if hash(np.int64(-1)) != -2:
raise nose.SkipTest("skipping because of windows hash on Python"
" 3.2.2")
csv = """id,score,days
1,2,12
2,2-5,
3,,14+
4,6-12,2"""
def convert_days(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_days_sentinel(x):
x = x.strip()
if not x:
return np.nan
is_plus = x.endswith('+')
if is_plus:
x = int(x[:-1]) + 1
else:
x = int(x)
return x
def convert_score(x):
x = x.strip()
if not x:
return np.nan
if x.find('-') > 0:
valmin, valmax = lmap(int, x.split('-'))
val = 0.5 * (valmin + valmax)
else:
val = float(x)
return val
fh = StringIO(csv)
result = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days},
na_values=['', None])
self.assertTrue(pd.isnull(result['days'][1]))
fh = StringIO(csv)
result2 = self.read_csv(fh, converters={'score': convert_score,
'days': convert_days_sentinel},
na_values=['', None])
tm.assert_frame_equal(result, result2)
def test_unicode_encoding(self):
pth = tm.get_data_path('unicode_series.csv')
result = self.read_csv(pth, header=None, encoding='latin-1')
result = result.set_index(0)
got = result[1][1632]
expected = u('\xc1 k\xf6ldum klaka (Cold Fever) (1994)')
self.assertEqual(got, expected)
def test_trailing_delimiters(self):
# #2442. grumble grumble
data = """A,B,C
1,2,3,
4,5,6,
7,8,9,"""
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame({'A': [1, 4, 7], 'B': [2, 5, 8],
'C': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_escapechar(self):
# http://stackoverflow.com/questions/13824840/feature-request-for-
# pandas-read-csv
data = '''SEARCH_TERM,ACTUAL_URL
"bra tv bord","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"
"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals serie","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''
result = self.read_csv(StringIO(data), escapechar='\\',
quotechar='"', encoding='utf-8')
self.assertEqual(result['SEARCH_TERM'][2],
'SLAGBORD, "Bergslagen", IKEA:s 1700-tals serie')
self.assertTrue(np.array_equal(result.columns,
['SEARCH_TERM', 'ACTUAL_URL']))
def test_header_names_backward_compat(self):
# #2539
data = '1,2,3\n4,5,6'
result = self.read_csv(StringIO(data), names=['a', 'b', 'c'])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
tm.assert_frame_equal(result, expected)
data2 = 'foo,bar,baz\n' + data
result = self.read_csv(StringIO(data2), names=['a', 'b', 'c'],
header=0)
tm.assert_frame_equal(result, expected)
def test_int64_min_issues(self):
# #2599
data = 'A,B\n0,0\n0,'
result = self.read_csv(StringIO(data))
expected = DataFrame({'A': [0, 0], 'B': [0, np.nan]})
tm.assert_frame_equal(result, expected)
def test_parse_integers_above_fp_precision(self):
data = """Numbers
17007000002000191
17007000002000191
17007000002000191
17007000002000191
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000192
17007000002000194"""
result = self.read_csv(StringIO(data))
expected = DataFrame({'Numbers': [17007000002000191,
17007000002000191,
17007000002000191,
17007000002000191,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000192,
17007000002000194]})
self.assertTrue(np.array_equal(result['Numbers'], expected['Numbers']))
def test_usecols_index_col_conflict(self):
# Issue 4201 Test that index_col as integer reflects usecols
data = """SecId,Time,Price,P2,P3
10000,2013-5-11,100,10,1
500,2013-5-12,101,11,1
"""
expected = DataFrame({'Price': [100, 101]}, index=[
datetime(2013, 5, 11), datetime(2013, 5, 12)])
expected.index.name = 'Time'
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
'Time', 'Price'], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col='Time')
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(data), usecols=[
1, 2], parse_dates=True, index_col=0)
tm.assert_frame_equal(expected, df)
expected = DataFrame(
{'P3': [1, 1], 'Price': (100, 101), 'P2': (10, 11)})
expected = expected.set_index(['Price', 'P2'])
df = self.read_csv(StringIO(data), usecols=[
'Price', 'P2', 'P3'], parse_dates=True, index_col=['Price', 'P2'])
tm.assert_frame_equal(expected, df)
def test_chunks_have_consistent_numerical_type(self):
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
# Assert that types were coerced.
self.assertTrue(type(df.a[0]) is np.float64)
self.assertEqual(df.a.dtype, np.float)
def test_warn_if_chunks_have_mismatched_type(self):
# See test in TestCParserLowMemory.
integers = [str(i) for i in range(499999)]
data = "a\n" + "\n".join(integers + ['a', 'b'] + integers)
with tm.assert_produces_warning(False):
df = self.read_csv(StringIO(data))
self.assertEqual(df.a.dtype, np.object)
def test_usecols(self):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), usecols=(1, 2))
result2 = self.read_csv(StringIO(data), usecols=('b', 'c'))
exp = self.read_csv(StringIO(data))
self.assertEqual(len(result.columns), 2)
self.assertTrue((result['b'] == exp['b']).all())
self.assertTrue((result['c'] == exp['c']).all())
tm.assert_frame_equal(result, result2)
result = self.read_csv(StringIO(data), usecols=[1, 2], header=0,
names=['foo', 'bar'])
expected = self.read_csv(StringIO(data), usecols=[1, 2])
expected.columns = ['foo', 'bar']
tm.assert_frame_equal(result, expected)
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
result = self.read_csv(StringIO(data), names=['b', 'c'],
header=None, usecols=[1, 2])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['b', 'c']]
tm.assert_frame_equal(result, expected)
result2 = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None, usecols=['b', 'c'])
tm.assert_frame_equal(result2, result)
# 5766
result = self.read_csv(StringIO(data), names=['a', 'b'],
header=None, usecols=[0, 1])
expected = self.read_csv(StringIO(data), names=['a', 'b', 'c'],
header=None)
expected = expected[['a', 'b']]
tm.assert_frame_equal(result, expected)
# length conflict, passed names and usecols disagree
self.assertRaises(ValueError, self.read_csv, StringIO(data),
names=['a', 'b'], usecols=[1], header=None)
def test_integer_overflow_bug(self):
# #2601
data = "65248E10 11\n55555E55 22\n"
result = self.read_csv(StringIO(data), header=None, sep=' ')
self.assertTrue(result[0].dtype == np.float64)
result = self.read_csv(StringIO(data), header=None, sep='\s+')
self.assertTrue(result[0].dtype == np.float64)
def test_catch_too_many_names(self):
# Issue 5156
data = """\
1,2,3
4,,6
7,8,9
10,11,12\n"""
tm.assertRaises(Exception, read_csv, StringIO(data),
header=0, names=['a', 'b', 'c', 'd'])
def test_ignore_leading_whitespace(self):
# GH 6607, GH 3374
data = ' a b c\n 1 2 3\n 4 5 6\n 7 8 9'
result = self.read_table(StringIO(data), sep='\s+')
expected = DataFrame({'a': [1, 4, 7], 'b': [2, 5, 8], 'c': [3, 6, 9]})
tm.assert_frame_equal(result, expected)
def test_nrows_and_chunksize_raises_notimplemented(self):
data = 'a b c'
self.assertRaises(NotImplementedError, self.read_csv, StringIO(data),
nrows=10, chunksize=5)
def test_single_char_leading_whitespace(self):
# GH 9710
data = """\
MyColumn
a
b
a
b\n"""
expected = DataFrame({'MyColumn': list('abab')})
result = self.read_csv(StringIO(data), skipinitialspace=True)
tm.assert_frame_equal(result, expected)
def test_chunk_begins_with_newline_whitespace(self):
# GH 10022
data = '\n hello\nworld\n'
result = self.read_csv(StringIO(data), header=None)
self.assertEqual(len(result), 2)
# GH 9735
chunk1 = 'a' * (1024 * 256 - 2) + '\na'
chunk2 = '\n a'
result = pd.read_csv(StringIO(chunk1 + chunk2), header=None)
expected = pd.DataFrame(['a' * (1024 * 256 - 2), 'a', ' a'])
tm.assert_frame_equal(result, expected)
def test_empty_with_index(self):
# GH 10184
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=0)
expected = DataFrame([], columns=['y'], index=Index([], name='x'))
tm.assert_frame_equal(result, expected)
def test_empty_with_multiindex(self):
# GH 10467
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=['x', 'y'])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_with_reversed_multiindex(self):
data = 'x,y,z'
result = self.read_csv(StringIO(data), index_col=[1, 0])
expected = DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(result, expected, check_index_type=False)
def test_empty_index_col_scenarios(self):
data = 'x,y,z'
# None, no index
index_col, expected = None, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# False, no index
index_col, expected = False, DataFrame([], columns=list('xyz')),
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, first column
index_col, expected = 0, DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# int, not first column
index_col, expected = 1, DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, first column
index_col, expected = 'x', DataFrame(
[], columns=['y', 'z'], index=Index([], name='x'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# str, not the first column
index_col, expected = 'y', DataFrame(
[], columns=['x', 'z'], index=Index([], name='y'))
tm.assert_frame_equal(self.read_csv(
StringIO(data), index_col=index_col), expected)
# list of int
index_col, expected = [0, 1], DataFrame([], columns=['z'],
index=MultiIndex.from_arrays([[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str
index_col = ['x', 'y']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['x', 'y']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of int, reversed sequence
index_col = [1, 0]
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
# list of str, reversed sequence
index_col = ['y', 'x']
expected = DataFrame([], columns=['z'], index=MultiIndex.from_arrays(
[[]] * 2, names=['y', 'x']))
tm.assert_frame_equal(self.read_csv(StringIO(data), index_col=index_col), expected,
check_index_type=False)
def test_empty_with_index_col_false(self):
# GH 10413
data = 'x,y'
result = self.read_csv(StringIO(data), index_col=False)
expected = DataFrame([], columns=['x', 'y'])
tm.assert_frame_equal(result, expected)
def test_float_parser(self):
# GH 9565
data = '45e-1,4.5,45.,inf,-inf'
result = self.read_csv(StringIO(data), header=None)
expected = pd.DataFrame([[float(s) for s in data.split(',')]])
tm.assert_frame_equal(result, expected)
def test_int64_overflow(self):
data = """ID
00013007854817840016671868
00013007854817840016749251
00013007854817840016754630
00013007854817840016781876
00013007854817840017028824
00013007854817840017963235
00013007854817840018860166"""
result = self.read_csv(StringIO(data))
self.assertTrue(result['ID'].dtype == object)
self.assertRaises((OverflowError, pandas.parser.OverflowError),
self.read_csv, StringIO(data),
converters={'ID': np.int64})
# Just inside int64 range: parse as integer
i_max = np.iinfo(np.int64).max
i_min = np.iinfo(np.int64).min
for x in [i_max, i_min]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([x])
tm.assert_frame_equal(result, expected)
# Just outside int64 range: parse as string
too_big = i_max + 1
too_small = i_min - 1
for x in [too_big, too_small]:
result = pd.read_csv(StringIO(str(x)), header=None)
expected = pd.DataFrame([str(x)])
tm.assert_frame_equal(result, expected)
def test_empty_with_nrows_chunksize(self):
# GH 9535
expected = pd.DataFrame([], columns=['foo', 'bar'])
result = self.read_csv(StringIO('foo,bar\n'), nrows=10)
tm.assert_frame_equal(result, expected)
result = next(iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10)))
tm.assert_frame_equal(result, expected)
result = pd.read_csv(StringIO('foo,bar\n'), nrows=10, as_recarray=True)
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
result = next(
iter(pd.read_csv(StringIO('foo,bar\n'), chunksize=10, as_recarray=True)))
result = pd.DataFrame(result[2], columns=result[1], index=result[0])
tm.assert_frame_equal(pd.DataFrame.from_records(
result), expected, check_index_type=False)
def test_eof_states(self):
# GH 10728 and 10548
# With skip_blank_lines = True
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
# GH 10728
# WHITESPACE_LINE
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# GH 10548
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# EAT_CRNL_NOP
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
# EAT_COMMENT
data = 'a,b,c\n4,5,6#comment'
result = self.read_csv(StringIO(data), comment='#')
tm.assert_frame_equal(result, expected)
# SKIP_LINE
data = 'a,b,c\n4,5,6\nskipme'
result = self.read_csv(StringIO(data), skiprows=[2])
tm.assert_frame_equal(result, expected)
# With skip_blank_lines = False
# EAT_LINE_COMMENT
data = 'a,b,c\n4,5,6\n#comment'
result = self.read_csv(
StringIO(data), comment='#', skip_blank_lines=False)
expected = pd.DataFrame([[4, 5, 6]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# IN_FIELD
data = 'a,b,c\n4,5,6\n '
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[['4', 5, 6], [' ', None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# EAT_CRNL
data = 'a,b,c\n4,5,6\n\r'
result = self.read_csv(StringIO(data), skip_blank_lines=False)
expected = pd.DataFrame(
[[4, 5, 6], [None, None, None]], columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
# Should produce exceptions
# ESCAPED_CHAR
data = "a,b,c\n4,5,6\n\\"
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# ESCAPE_IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"\\'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
# IN_QUOTED_FIELD
data = 'a,b,c\n4,5,6\n"'
self.assertRaises(Exception, self.read_csv,
StringIO(data), escapechar='\\')
class TestPythonParser(ParserTests, tm.TestCase):
def test_negative_skipfooter_raises(self):
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
with tm.assertRaisesRegexp(ValueError,
'skip footer cannot be negative'):
df = self.read_csv(StringIO(text), skipfooter=-1)
def read_csv(self, *args, **kwds):
kwds = kwds.copy()
kwds['engine'] = 'python'
return | read_csv(*args, **kwds) | pandas.io.parsers.read_csv |
"""This module contains code to recode values to achieve k-anonymity"""
import math
import pandas as pd
from pandas.api.types import (is_categorical_dtype, is_datetime64_any_dtype, is_numeric_dtype)
from kernel.util import is_token_list, must_be_flattened, flatten_set_valued_series, next_string_to_reduce, reduce_string, intersect_token_lists
def recode(series, recoding_rules=None, hierarchies=None):
"""
Takes a series and applies the appropriate generalization function. Returns a generalized series.
Parameters
----------
series: Series
Series to be recoded.
recoding_rules: dict
Dictionary containing recoding rules.
hierarchies: dict
Dictionary containing generalization hierarchies.
Returns
-------
Series
Recoded series.
"""
generalization_function = None
set_valued = False
if is_token_list(series):
generalization_function = recode_tokens
elif must_be_flattened(series):
generalization_function = recode_set_valued
set_valued = True
elif is_numeric_dtype(series):
generalization_function = recode_range
elif is_datetime64_any_dtype(series):
generalization_function = recode_dates
elif is_categorical_dtype(series):
if recoding_rules and series.name in recoding_rules and recoding_rules[series.name] == "string_reduction":
generalization_function = recode_strings
elif series.cat.ordered:
generalization_function = recode_ordinal
else:
generalization_function = recode_nominal
else:
generalization_function = recode_set_valued
set_valued = True
if hierarchies and recoding_rules and series.name in recoding_rules and recoding_rules[series.name] == "hierarchy" and series.name in hierarchies:
hierarchy = hierarchies[series.name]
result = generalization_function(series, hierarchy)
elif set_valued:
result = generalization_function(series, recoding_rules, hierarchies)
else:
result = generalization_function(series)
series = series.map(lambda x: result)
return series
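# Usage sketch for recode() (illustrative only; assumes the kernel.util helpers
# classify these plain scalar series as neither token lists nor set-valued):
# recode(pd.Series([34, 41, 29], name='age'))
# -> every row becomes range(29, 42) via recode_range()
# recode(pd.Series(pd.to_datetime(['2020-01-03', '2020-01-20']), name='dob'))
# -> every row becomes Period('2020-01', 'M') via recode_dates()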
def recode_set_valued(series, recoding_rules, hierarchies):
"""
Generalizes set valued series by flattening
Parameters
----------
series: Series
Series to be recoded.
recoding_rules: dict
Dictionary containing recoding rules.
hierarchies: dict
Dictionary containing generalization hierarchies.
Returns
-------
any
Single value recoded to.
"""
flattened, indexes, is_category = flatten_set_valued_series(series)
if is_categorical_dtype(series) or is_category:
flattened_series = pd.Series(flattened, index=indexes, dtype="category", name=series.name)
else:
flattened_series = pd.Series(flattened, index=indexes, name=series.name)
result = recode(flattened_series, recoding_rules, hierarchies)
return result.iloc[0]
def recode_strings(series):
"""
Generalizes a series of strings by stepwise reduction of strings
Parameters
----------
series: Series
Series to be recoded.
Returns
-------
str
Single value recoded to.
"""
values = set(series.unique())
while len(values) > 1:
longest_element = next_string_to_reduce(values)
values.remove(longest_element)
generalized = reduce_string(longest_element)
values.add(generalized)
return list(values)[0]
def recode_range(series, hierarchy=None):
"""
Generalizes a series of numbers to a range, using hierarchical brackets if provided
Parameters
----------
series: Series
Series to be recoded.
hierarchy: optional
Generalization hierarchy; hierarchical brackets are used when provided.
Returns
-------
range
Single value recoded to.
"""
if len(series.unique()) == 1:
return series.unique().tolist()[0]
if hierarchy:
return recode_range_hierarchical(series, hierarchy)
minimum = math.floor(min(series))
maximum = math.ceil(max(series))
return range(minimum, maximum + 1)
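# Illustrative behaviour (a sketch, not part of the original module):
# recode_range(pd.Series([3, 7, 5]))  -> range(3, 8)   (floor of min to ceil of max, inclusive)
# recode_range(pd.Series([4, 4, 4]))  -> 4             (a single unique value is returned as-is)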
def recode_range_hierarchical(series, hierarchy):
"""
Generalizes a series of numbers using hierarchical ranges
Parameters
----------
series: Series
Series to be recoded.
hierarchies: dict
Generalization hierarchy.
Returns
-------
AnyNode
Single node covering all series items.
"""
nodes_to_consider = list(hierarchy.leaves)
nodes_to_consider.sort(key=lambda node: len(node.range))
min_el = series.min()
max_el = series.max()
node = nodes_to_consider.pop(0)
while not node.is_root:
result = node.range
if min_el in result and max_el in result:
return node
if node.parent not in nodes_to_consider:
nodes_to_consider.append(node.parent)
nodes_to_consider.sort(key=lambda node: len(node.range))
node = nodes_to_consider.pop(0)
return node
def recode_dates(series):
"""
Generalizes a series of datetime objects by suppressing day and month, and then generalizing to a range of years if needed
Parameters
----------
series: Series
Series to be recoded.
Returns
-------
any
Single value recoded to.
"""
result = series.dt.normalize()
if len(result.unique()) > 1:
result = series.dt.to_period('M')
if len(result.unique()) > 1:
result = series.dt.to_period('Y')
if len(result.unique()) > 1:
years = series.apply(lambda x: x.year)
years_range = recode_range(years)
return years_range
return result.tolist()[0]
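# Illustrative behaviour (a sketch, not part of the original module):
# recode_dates(pd.Series(pd.to_datetime(['2020-01-03', '2020-01-20'])))
# -> Period('2020-01', 'M')   # same month, so only the day is suppressed
# recode_dates(pd.Series(pd.to_datetime(['2018-05-01', '2020-07-09'])))
# -> range(2018, 2021)        # different years, so a year range is returned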
def recode_ordinal(series):
"""
Generalizes a series with ordered categorical values and returns either a single value (if all entries share the same value) or a set of values
Parameters
----------
series: Series
Series to be recoded.
Returns
-------
any
Single value recoded to or FrozenSet.
"""
if not | is_categorical_dtype(series) | pandas.api.types.is_categorical_dtype |
from logging import log
import numpy as np
import pandas as pd
from tqdm import tqdm
import scipy.sparse as sp
from sklearn.utils import check_array
from sklearn.feature_extraction.text import (
CountVectorizer,
TfidfTransformer,
TfidfVectorizer
)
from sklearn.metrics.pairwise import cosine_similarity
from transformers import AutoTokenizer
from nltk.corpus import stopwords as stopwords_nltk
#from spacy.lang.en.stop_words import STOP_WORDS as stopwords_spacy
from vespid.features.preprocessing import preprocess_text
from vespid.models.specter import get_keyphrase_embeddings
import hdbscan
from vespid.models.clustering import HdbscanEstimator
from vespid import setup_logger
logger = setup_logger(__name__)
# Separator token used to signal to the (BERT-style) model that two strings
# which are not naturally contiguous have been concatenated.
# Use whenever aggregating/joining documents.
tokenizer = AutoTokenizer.from_pretrained('allenai/specter')
CONCATENATING_TOKEN = tokenizer.sep_token
class ClusterTfidf(TfidfTransformer):
"""
A Cluster/Class-based TF-IDF procedure using scikit-learn's TfidfTransformer as a base.
Adapted from the BERTopic project: https://github.com/MaartenGr/BERTopic.
C-TF-IDF can best be explained as a TF-IDF formula adapted for multiple classes
by joining all documents per class. Thus, each class is converted to a single document
instead of a set of documents. Then, the frequency of each word **t** is extracted for
each class **i** and divided by the total number of words **w**.
Next, the total, unjoined, number of documents across all classes **m** is divided by the
total sum of word **t** across all classes.
"""
def __init__(
self,
cluster_label='cluster_label',
embedding_aggregation_type='mean',
embedding_weights=None,
top_n=30,
ngram_range=(2,3),
stop_words='nltk',
**kwargs
):
'''
Parameters
----------
cluster_label: str. Indicates column in DataFrames provided
that provides cluster membership label for a given document.
embedding_aggregation_type: str. See
self._aggregate_embeddings() for
list of allowed values.
embedding_weights: numpy array of float of shape
(n_documents,) or and HdbscanEstimator object.
If passed a numpy array, the values will be
used to weight the contribution of each document
to the aggregate embedding vector of the
relevant cluster.
If passed an HdbscanEstimator object, it must
be the trained model used to generate the cluster
labels. The model's soft clustering probabilities
will be used to provide weights for embedding
aggregation.
If None, an unweighted aggregation will be performed.
top_n: int. Indicates how many candidate keyphrases to generate
per cluster. Recommended values are between 10 and 30.
ngram_range: 2-tuple of ints of the form (min, max). Indicates the
minimum and maximum keyphrase length to allow.
stop_words: str. Can be one of ['nltk', 'sklearn', 'spacy']. Indicates which
stop word list to use for preprocessing, if any.
If None, stopwords will not be used.
* 'nltk': generates 179 stopwords
* 'spacy': generates 326 stopwords
* 'sklearn': TBD
kwargs: keyword arguments for the sklearn TfidfTransformer class
'''
super().__init__(**kwargs)
self.cluster_label = cluster_label
self.embedding_aggregation_type = embedding_aggregation_type
self.embedding_weights = embedding_weights
self.top_n = top_n
self.ngram_range = ngram_range
self.stop_words = stop_words
def _prepare_text_for_keyphrase_extraction(
self,
data
):
'''
Prepares the text data for keyphrase extraction at
the document cluster level.
Parameters
----------
df: pandas DataFrame. Should contain at least the
columns ['<cluster_label>', 'title', 'abstract'].
Returns
-------
Numpy array of preprocessed text for each document
with shape (n_documents,).
'''
df = data.copy()
# Just in case
df[self.cluster_label] = df[self.cluster_label].astype(int)
titles = preprocess_text(df['title'])
abstracts = preprocess_text(df['abstract'])
return titles + f' {CONCATENATING_TOKEN} ' + abstracts
def _aggregate_embeddings(self, embeddings):
'''
Aggregate vectorized embeddings, possibly in a weighted fashion.
Note that aggregation will default to unweighted calculations
if no weights are provided.
Parameters
----------
embeddings: pandas Series or numpy array (if unweighted)
of embedding vectors or pandas DataFrame (if weighted)
with columns ['embedding', 'weights']
Returns
-------
numpy array of shape (embedding_size,) that
represents the aggregate vector
'''
# Make sure we get arrays even if we're given lists
if isinstance(embeddings, (pd.Series, np.ndarray)):
e = np.array(embeddings.tolist())
weights = None
elif isinstance(embeddings, pd.DataFrame):
e = np.array(embeddings['embedding'].tolist())
if 'weights' in embeddings.columns:
weights = embeddings['weights'].values
else:
weights = None
else:
raise ValueError(f"``embeddings`` is of type {type(embeddings)} "
"which is not supported")
if self.embedding_aggregation_type == 'mean':
return np.average(e, axis=0, weights=weights)
else:
raise ValueError(f"``agg_type`` of '{self.embedding_aggregation_type}' not supported")
def _prepare_data_for_keyphrase_extraction(
self,
df
):
'''
Takes document-level information (e.g. text and cluster labels)
and returns cluster-level
data ready for keyphrase extraction.
Parameters
----------
df: pandas DataFrame. Should contain at least the
columns ['<cluster_label>', 'text', 'embedding'] wherein
'text' is preprocessed and concatenated
titles and abstracts and 'embedding'
is a language vector embedding for the document
(e.g. from SPECTER).
Returns
-------
pandas DataFrame with columns ['<cluster_label>', 'text', 'embedding'],
one row per cluster. The text is a concatenation of all documents
for a given cluster and the embedding is an aggregation of the
embeddings for each document in a given cluster.
'''
data = df.copy()
allowed_aggregation_types = ['mean']
if self.embedding_aggregation_type not in allowed_aggregation_types:
raise ValueError("embedding_aggregation_type of type "
f"{type(self.embedding_aggregation_type)} not supported")
if self.embedding_weights is None or isinstance(self.embedding_weights, np.ndarray):
if self.embedding_weights is None:
pass # do nothing, aggregation function knows what to do
else:
data['weights'] = self.embedding_weights
elif isinstance(self.embedding_weights, HdbscanEstimator):
data['weights'] = self.embedding_weights.soft_cluster_probabilities
else:
raise ValueError("``embedding_weights`` type of "
f"{type(self.embedding_weights)} not supported")
# Pre-process text elements and concatenate titles + abstracts
data['text'] = self._prepare_text_for_keyphrase_extraction(data)
# Concatenate all documents together on a cluster level
tqdm.pandas(desc='Concatenating all documents per cluster')
output = pd.DataFrame(
data.groupby(self.cluster_label)['text'].progress_apply(
lambda t: t.str.cat(sep=f" {CONCATENATING_TOKEN} ")
)
)
# Aggregate document embeddings on a cluster level
#TODO: this is currently the biggest time suck, any way to speed up?
tqdm.pandas(
desc='Aggregating all document embeddings into one embedding per cluster')
cluster_embeddings = pd.DataFrame(data.groupby(self.cluster_label)
.progress_apply(
self._aggregate_embeddings
), columns=['embedding'])
output['embedding'] = output.join(
cluster_embeddings, how='left')['embedding']
output.reset_index(drop=False, inplace=True)
return output
def extract_keyphrase_candidates(
self,
df
):
'''
Using cluster-based tf-idf, extract a bunch of candiate keyphrases
from each cluster to be used as the full set of possible keyphrases
per cluster in downstream modeling.
Parameters
----------
pandas DataFrame. Should contain at least the
columns ['<cluster_label>', 'title', 'abstract', 'embedding'].
Each row is a document.
Returns
-------
pandas DataFrame with columns ['<cluster_label>', 'keyphrases'],
with the latter being lists of strings.
'''
num_documents = len(df)
num_candidates = self.top_n  # use the configured number of candidate keyphrases rather than a hard-coded 30
# List of stopwords to exclude that we know we want to avoid
additional_stopwords = [
'elsevier'
]
if self.stop_words == 'nltk':
stops = set(stopwords_nltk.words('english'))
elif self.stop_words == 'spacy':
# Tokenizer removes punctuation so this must too
#stops = set(t.replace("'", "") for t in stopwords_spacy)
logger.warning("spacy stopwords currently having issues, switching to nltk stopwords...")
stops = set(stopwords_nltk.words('english'))
elif self.stop_words == 'sklearn':
stops = 'english'
elif self.stop_words is not None:
raise ValueError(f"``stop_words`` value of {self.stop_words} is not supported")
stops.update(additional_stopwords)
# Pre-process document-level text
df['text'] = self._prepare_text_for_keyphrase_extraction(df)
# Transform document-level data to cluster-level
df_clusters = self._prepare_data_for_keyphrase_extraction(df)
logger.info("Counting terms across the corpus...")
# Make sure we leave the [SEP] token alone for BERT
#TODO: find a way to use CONCATENATING_TOKEN here without losing regex escapes
tokenizing_pattern = r'(?u)\b\w\w+\b|\[SEP\]'
#TODO: make this part of init()
vectorizer = CountVectorizer(
ngram_range=self.ngram_range,
stop_words=stops,
token_pattern=tokenizing_pattern,
lowercase=False,
#max_features=100_000,
#dtype=np.int32 # default is np.int64
)
#TODO: Make this part of fit()
X = vectorizer.fit_transform(df_clusters['text'])
words = vectorizer.get_feature_names_out()
logger.debug(f"len(words) = {len(words)}")
logger.info("Calculating tf-idf scores...")
tfidf_matrix = self.fit_transform(X, n_samples=num_documents)
# Get words with highest scores for each cluster
# Remember that, for tfidf_matrix[i][j], i == document, j == word
#TODO: find a way to get value-sorted sparse matrix to save on memory
logger.warning("This algorithm can't handle cluster counts in the 20,000 range!")
tfidf_matrix_dense = tfidf_matrix.toarray()
# Get indices for the top_n highest-scoring words for each cluster
# Returns indices sorted in ascending order based on the values they refer to
top_indices = tfidf_matrix_dense.argsort()[:, -num_candidates:]
logger.debug(f"top_indices.shape = {top_indices.shape}")
# Return word-score pairs for top_n of each cluster label integer
# Form is {cluster_num: [(phrase1, score1), (phrase2, score2), etc.]}
logger.info("Extracting topics...")
#TODO: find an efficient numpy-driven way to do this
#topics = {label: [(words[j], tfidf_matrix_dense[i][j]) for j in top_indices[i]][::-1] for i, label in enumerate(df[self.cluster_label])}
topics = {}
for i, label in enumerate(df_clusters[self.cluster_label]):
topics[label] = [words[j] for j in top_indices[i]][::-1]
# Build out a DataFrame for quick merging
topics = pd.DataFrame(pd.Series(topics, name='keyphrases'))\
.reset_index(drop=False).rename(columns={'index': self.cluster_label})
df_clusters = df_clusters.merge(topics, on=self.cluster_label)
return df_clusters
def fit(self, X: sp.csr_matrix, n_samples: int):
"""
Learn the idf vector (global term weights).
Parameters
----------
X: A matrix of term/token counts.
n_samples: Number of total documents prior to
cluster-wise document contatenation.
"""
X = check_array(X, accept_sparse=('csr', 'csc'))
if not sp.issparse(X):
X = sp.csr_matrix(X)
dtype = np.float64
if self.use_idf:
_, n_features = X.shape
df = np.squeeze(np.asarray(X.sum(axis=0)))
avg_nr_samples = int(X.sum(axis=1).mean())
idf = np.log(avg_nr_samples / df)
self._idf_diag = sp.diags(idf, offsets=0,
shape=(n_features, n_features),
format='csr',
dtype=dtype)
return self
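# Rough numeric sketch of the class-based idf computed in fit() (toy numbers, an assumption):
# with an average of ~1000 tokens per concatenated cluster-document and a term whose
# counts sum to 10 across all clusters, idf = log(1000 / 10) ~= 4.6; rarer terms score higher.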
def mmr(doc_embedding,
word_embeddings,
words,
top_n=5,
diversity=0.8):
"""
Calculate Maximal Marginal Relevance (MMR)
between candidate keywords and the document.
MMR considers the similarity of keywords/keyphrases with the
document, along with the similarity of already selected
keywords and keyphrases. This results in a selection of keywords
that maximizes diversity among the keywords while keeping them relevant to the document.
Note that this is copied from the BERTopic project implementation:
https://github.com/MaartenGr/BERTopic/blob/1ffc4569a40bf845d2083bfd63c30c8d648a3772/bertopic/_mmr.py
Parameters
----------
doc_embeddings: numpy array. A single document embedding.
word_embeddings: numpy array. The embeddings of the selected
candidate keywords/phrases
words: iterable of str. The selected candidate keywords/keyphrases
that are represented in ``word_embeddings``.
top_n: int. The number of keywords/keyhprases to return.
diversity: float in the range [0.0, 1.0]. Indicates how diverse
the selected keywords/keyphrases are. The higher the value,
the more priority diversity is given over similarity
to the document.
Returns
-------
numpy array of str - the selected keywords/keyphrases.
"""
# Extract similarity within words, and between words and the document
word_doc_similarity = cosine_similarity(word_embeddings, doc_embedding.reshape(1, -1))
word_similarity = cosine_similarity(word_embeddings)
# Initialize candidates and already choose the best keyword/keyphrase
keywords_idx = [np.argmax(word_doc_similarity)]
candidates_idx = [i for i in range(len(words)) if i != keywords_idx[0]]
for _ in range(top_n - 1):
# Extract similarities within candidates and
# between candidates and selected keywords/phrases
candidate_similarities = word_doc_similarity[candidates_idx, :]
target_similarities = np.max(word_similarity[candidates_idx][:, keywords_idx], axis=1)
# Calculate MMR
mmr = (1-diversity) * candidate_similarities - diversity * target_similarities.reshape(-1, 1)
mmr_idx = candidates_idx[np.argmax(mmr)]
# Update keywords & candidates
keywords_idx.append(mmr_idx)
candidates_idx.remove(mmr_idx)
results = | pd.Series([words[idx] for idx in keywords_idx]) | pandas.Series |
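# Usage sketch for mmr() above (hypothetical shapes and names, not part of the original module):
# doc_emb = np.random.rand(768)
# word_embs = np.random.rand(30, 768)
# candidate_phrases = [f"phrase {i}" for i in range(30)]
# selected = mmr(doc_emb, word_embs, candidate_phrases, top_n=5, diversity=0.8)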
import pandas as pd
import os
import xgboost as xgb
import operator
from matplotlib import pylab as plt
from sklearn import preprocessing
# import data
train = | pd.read_csv("../input/train.csv") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 28 17:01:23 2021
@author: sercan
"""
#Import libraries--------------------------------------------------------------
import streamlit as st
import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import pandas as pd
import xlsxwriter
import xlrd
import base64
from io import BytesIO
plt.style.use('default')
#Define curve fitting functions------------------------------------------------
#y-shear rate
#K-consistency index
#n-flow behavior index
#ty-yield stress
def YPLfunction(y, ty, K, n):
return ty + K*y**n
def PLfunction(y, K, n):
return K*y**n
def BPfunction(y,PV,YP):
return YP + PV*y
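#Quick sketch of the three model equations at a single shear rate (illustrative parameters only):
# PLfunction(511, K=0.5, n=0.7) == 0.5 * 511**0.7
# YPLfunction(511, ty=5.0, K=0.5, n=0.7) == 5.0 + 0.5 * 511**0.7
# BPfunction(511, PV=0.02, YP=8.0) == 8.0 + 0.02 * 511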
#Perform curve fitting and calculate r2----------------------------------------
#PL - power law
#YPL - yield power law
#BP - bingham plastic
def r2(residuals,shear_stress,shear_rate):
ss_res = np.sum(residuals**2)
ss_tot = np.sum((shear_stress-np.mean(shear_stress))**2)
r_squared = 1 - (ss_res / ss_tot)
return r_squared
def PL(shear_stress,shear_rate):
popt, pcov = curve_fit(PLfunction,shear_rate,shear_stress)
K,m =popt[0],popt[1]
residuals = shear_stress- PLfunction(shear_rate, popt[0],popt[1])
r_squared = r2(residuals,shear_stress,shear_rate)
return K,m,r_squared
def YPL(shear_stress,shear_rate):
popt, pcov = curve_fit(YPLfunction,shear_rate,shear_stress)
ty,K,m = popt[0],popt[1],popt[2]
residuals = shear_stress- YPLfunction(shear_rate, popt[0],popt[1],popt[2])
r_squared = r2(residuals,shear_stress,shear_rate)
if popt[0]<0:
K,m,r_squared = PL(shear_stress,shear_rate)
ty = 0
return ty,K,m,r_squared
def BP(shear_stress,shear_rate):
PV = (shear_stress[0] - shear_stress[1])/511
YP = (2*shear_stress[1] - shear_stress[0])
residuals = shear_stress- BPfunction(shear_rate, PV, YP)
r_squared = r2(residuals,shear_stress,shear_rate)
#Calculate equivalent sigma600 (DR)
sigma600 = (YP + PV*600*1.7) / (1.066 * 0.4788)
#Calculate equivalent sigma300 (DR)
sigma300 = (YP + PV*300*1.7) / (1.066 * 0.4788)
return r_squared,PV, YP, sigma600, sigma300
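#Hedged usage sketch with the example 7-speed dataset defined further below (an illustration, not app code):
# rpm = np.array([300, 200, 100, 60, 30, 6, 3])
# dial = np.array([105.1, 90.8, 71.7, 63.4, 55.3, 45.8, 44.0])
# shear_rate = rpm * 1.7  # same RPM-to-shear-rate factor assumed by BP() above
# ty, K, n, r2_ypl = YPL(dial, shear_rate)
# K_pl, n_pl, r2_pl = PL(dial, shear_rate)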
#Define functions for download links, and xlsx conversions---------------------
def to_excel(df):
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1',index=False)
writer.save()
processed_data = output.getvalue()
return processed_data
def get_table_download_link(df):
"""Generates a link allowing the data in a given panda dataframe to be downloaded
in: dataframe
out: href string
"""
val = to_excel(df)
b64 = base64.b64encode(val) # val looks like b'...'
return f'<a href="data:application/octet-stream;base64,{b64.decode()}" download="example_data.xlsx">Please click here to download an example dataset for this app as an excel file.</a>' # decode b'abc' => abc
def download_link(object_to_download, download_filename, download_link_text):
"""
Generates a link to download the given object_to_download.
object_to_download (str, pd.DataFrame): The object to be downloaded.
download_filename (str): filename and extension of file. e.g. mydata.csv, some_txt_output.txt
download_link_text (str): Text to display for download link.
Examples:
download_link(YOUR_DF, 'YOUR_DF.csv', 'Click here to download data!')
download_link(YOUR_STRING, 'YOUR_STRING.txt', 'Click here to download your text!')
"""
if isinstance(object_to_download,pd.DataFrame):
object_to_download = object_to_download.to_csv(index=False)
# some strings <-> bytes conversions necessary here
b64 = base64.b64encode(object_to_download.encode()).decode()
return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
@st.cache(allow_output_mutation=True)
def load_data(file):
df = pd.read_excel(file)
return df
st.header("Drilling Fluid Rheological Model Parameters")
st.write("This web-app is used to analyze API rotational viscometer data by comparing various rheological models.")
st.write("The rheological constants for Yield Power-law (YPL - also called Herschel-Bulkley), Power-law, and Bingham-Plastic models are calculated and compared.")
st.write("Please upload the data using the file uploader on the left side. Please make sure that the data is in excel (.xlsx) format, where the first column is the RPM values and the second column is shear stress values (as viscometer dial readings) for each corresponding RPM.")
st.write("Below link can be used to download an example dataset for this web-app.")
st.write("NOTE: If you are using a 6-speed viscometer, you might be more interested in apiviscometer.herokuapp.com")
d = {'RPM': [300,200,100,60,30,6,3], 'Viscometer Dial Readings (DR)': [105.1,90.8,71.7,63.4,55.3,45.8,44]}
df_template = pd.DataFrame(data=d)
import pandas as pd
import logging
import os
from collections import defaultdict
from annotation.utility import Utility
_logger = logging.getLogger(__name__)
TYPE_MAP_DICT = {"string": "String", "number": "Quantity", "year": "Time", "month": "Time", "day": "Time",
"date": "Time", "entity": 'WikibaseItem'}
# kyao
# Only add one location qualifier until datamart-api can handle multiple locations. 31 July 2020.
ADDITIONAL_QUALIFIER_MAP = {
# ("lat", "lon", "latitude", "longitude"): {"Attribute": "location", "Property": "P276"},
# ("country",): {"Attribute": "country", "Property": "P17"},
# ("admin1",): {"Attribute": "located in the first-level administrative country subdivision",
# "Property": "P2006190001"},
# ("admin2",): {"Attribute": "located in the second-level administrative country subdivision",
# "Property": "P2006190002"},
# ("admin3",): {"Attribute": "located in the third-level administrative country subdivision",
# "Property": "P2006190003"},
("country", "admin1", "admin2", "admin3"): {"Attribute": "located in the administrative territorial entity",
"Property": "P131"},
}
def generate_template_from_df(input_df: pd.DataFrame, dataset_qnode: str, dataset_id: str) -> dict:
"""
Function used for datamart annotation batch mode; returns a dict of DataFrames instead of writing an xlsx file.
"""
# Assumes cell [0,0] is the start of the annotation
if input_df.iloc[0, 0] != 'dataset':
raise Exception('The first column of the dataframe must be the annotations (not the index of the dataframe)')
utility = Utility()
# updated 2020.7.22: it is possible that the header is not at row 7, so we need to search for the header row if it exists
header_row, data_row = utility.find_data_start_row(input_df)
input_df = input_df.set_index(0)
if 'tag' in input_df.iloc[:7, 0]:
annotation_rows = list(range(1, 7)) + [header_row]
else:
annotation_rows = list(range(1, 6)) + [header_row]
content_rows = list(range(data_row, len(input_df)))
annotation_part = input_df.iloc[annotation_rows].fillna("")
content_part = input_df.iloc[content_rows]
# start generate dataframe for templates
dataset_df = _generate_dataset_tab(input_df, dataset_qnode, dataset_id)
attribute_df = _generate_attributes_tab(dataset_qnode, annotation_part)
unit_df = _generate_unit_tab(dataset_qnode, content_part, annotation_part)
extra_df, wikifier_df1 = _process_main_subject(dataset_qnode, content_part, annotation_part, data_row)
wikifier_df2 = _generate_wikifier_part(content_part, annotation_part, data_row)
wikifier_df = pd.concat([wikifier_df1, wikifier_df2])
output_df_dict = {
'dataset_file': dataset_df,
'attributes_file': attribute_df,
'units_file': unit_df,
"extra_edges": extra_df,
"Wikifier_t2wml": wikifier_df,
"wikifier": None,
"qualifiers": None,
}
return output_df_dict
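# Minimal usage sketch (the file name and qnode below are hypothetical):
# annotated = pd.read_excel("annotated_input.xlsx", header=None)
# sheets = generate_template_from_df(annotated, dataset_qnode="Qmy-dataset", dataset_id="my-dataset")
# sheets["attributes_file"]  # -> DataFrame that save_template_file() writes to the "Attributes" tab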
def generate_template(input_path: str, output_path: str, dataset_qnode: str = None) -> None:
"""
generate the template xlsx file from the input xlsx file
:param dataset_qnode:
:param input_path:
:param output_path:
:return:
"""
input_df = pd.read_excel(input_path, index_col=0, header=None)
# generate_template_from_df() also requires a dataset_id; here it is assumed to be the
# qnode without its "Q" prefix (e.g. "Qaid-security" -> "aid-security")
dataset_id = dataset_qnode[1:] if dataset_qnode and dataset_qnode.startswith("Q") else dataset_qnode
output_df_dict = generate_template_from_df(input_df, dataset_qnode=dataset_qnode, dataset_id=dataset_id)
output_folder = output_path[:output_path.rfind("/")]
os.makedirs(output_folder, exist_ok=True)
save_template_file(output_df_dict, output_path)
def save_template_file(output_df_dict: dict, output_path: str) -> None:
with pd.ExcelWriter(output_path) as writer:
output_df_dict["dataset_file"].to_excel(writer, sheet_name='Dataset', index=False)
output_df_dict["attributes_file"].to_excel(writer, sheet_name='Attributes', index=False)
output_df_dict["units_file"].to_excel(writer, sheet_name='Units', index=False)
output_df_dict["extra_edges"].to_excel(writer, sheet_name="Extra Edges", index=False)
output_df_dict["Wikifier_t2wml"].to_excel(writer, sheet_name="Wikifier_t2wml", index=False)
def _generate_dataset_tab(input_df: pd.DataFrame, dataset_qnode: str, dataset_id: str) -> pd.DataFrame:
"""
A sample dataset file looks like: here {dataset_qnode} = "aid-security"
node1 label node2 id
Qaid-security P31 Q1172284 aid-security-P31
Qaid-security label aid-security dataset aid-security-label
Qaid-security P1476 aid-security dataset aid-security-P1476
Qaid-security description aid-security dataset aid-security-description
Qaid-security P2699 aid-security aid-security-P2699
Qaid-security P1813 aid-security aid-security-P1813
:param dataset_qnode: input dataset id
:return:
"""
dataset_qnode_df_list = []
name = input_df.iloc[0, 1] if input_df.shape[1] > 1 and input_df.iloc[0, 1] else '{} dataset'.format(dataset_id)
description = input_df.iloc[0, 2] if input_df.shape[1] > 2 and input_df.iloc[0, 2] else '{} dataset'.format(dataset_id)
url = input_df.iloc[0, 3] if input_df.shape[1] > 3 and input_df.iloc[0, 3] else 'http://not/defined/{}'.format(dataset_id)
dataset_labels = ["P31", "label", "P1476", "description", "P2699", "P1813"]
dataset_node2s = ["Q1172284", '"{}"'.format(name), '"{}"'.format(name),
'"{}"'.format(description), '"{}"'.format(url), dataset_qnode]
for label, node2 in zip(dataset_labels, dataset_node2s):
dataset_qnode_df_list.append({"dataset": dataset_qnode, "label": label, "node2": node2})
dataset_df = pd.DataFrame(dataset_qnode_df_list)
return dataset_df
def _generate_attributes_tab(dataset_qnode: str, annotation_part: pd.DataFrame) -> pd.DataFrame:
"""
code used to generate the template Attributes tab
1. add a row for each column with role = variable or role = qualifier.
"""
attributes_df_list = []
seen_attributes = {}
# for causx country as main subject
# # update 2020.11.11, always check if P131 is needed
# all_column_types = set(annotation_part.T['type'].unique())
# for types, edge_info in ADDITIONAL_QUALIFIER_MAP.items():
# if len(set(types).intersection(all_column_types)) > 0:
# attributes_df_list.append({"Attribute": edge_info["Attribute"],
# "Property": edge_info["Property"], "Role": "qualifier",
# "Relationship": "", "type": "WikibaseItem",
# "label": edge_info["Attribute"],
# "description": edge_info["Attribute"]})
for i in range(annotation_part.shape[1]):
each_col_info = annotation_part.iloc[:, i]
role_info = each_col_info["role"].split(";")
role_lower = role_info[0].lower()
if role_lower in {"variable", "qualifier"}:
# if ";" exists, we need to use those details on variables
if len(role_info) > 1:
relationship = role_info[1]
# otherwise apply this variable / qualifier for all by give empty cell
else:
relationship = ""
attribute = each_col_info["header"]
role_type = each_col_info["type"].lower()
if role_type == "":
continue
if role_type not in TYPE_MAP_DICT:
raise ValueError("Column type {} for column {} is not valid!".format(role_type, i))
data_type = TYPE_MAP_DICT[role_type]  # use the validated, lowercased key
label = "{}".format(attribute) if not each_col_info['name'] else each_col_info['name']
description = "{} column in {}".format(role_lower, dataset_qnode) if not each_col_info['description'] \
else each_col_info['description']
tag = each_col_info['tag'] if 'tag' in each_col_info else ""
# qualifiers and variables have already been deduplicated in validation, so if anything repeats,
# it is meant to be the same.
if attribute not in seen_attributes:
attributes_df_list.append({"Attribute": attribute, "Property": "", "Role": role_lower,
"Relationship": relationship, "type": data_type,
"label": label, "description": description, "tag": tag})
seen_attributes[attribute] = 1
if len(attributes_df_list) == 0:
attributes_df = pd.DataFrame(columns=['Attribute', 'Property', 'label', 'description'])
else:
attributes_df = pd.DataFrame(attributes_df_list)
return attributes_df
def _generate_unit_tab(dataset_qnode: str, content_part: pd.DataFrame, annotation_part: pd.DataFrame) -> pd.DataFrame:
"""
code used to generate the template Units tab
1. list all the distinct units defined in the unit row
2. If there are columns with role == unit, also add them
The output will have 2 columns like:
Q-Node can be empty or a user-specified node; if generated automatically from this script,
it will always be empty and will be generated in wikify_datamart_units_and_attributes.py
Unit Q-Node
person ""
"""
unit_df_list = []
unit_cols = defaultdict(list)
units_set = set()
for i in range(annotation_part.shape[1]):
each_col_info = annotation_part.iloc[:, i]
role = each_col_info["role"].lower()
# if role is unit, record them
if len(role) >= 4 and role[:4] == "unit":
if role == "unit":
unit_cols[""].append(i)
# update 2020.7.24: now allow a unit to correspond only to specific variables
else:
target_variables = role[role.rfind(";") + 1:]
for each_variable in target_variables.split("|"):
unit_cols[each_variable].append(i)
# add units defined in unit
if each_col_info['unit'] != "":
units_set.add(each_col_info['unit'])
if len(unit_cols) > 0:
for each_variable_units in unit_cols.values():
units_set.update(content_part.iloc[:, each_variable_units].agg(", ".join, axis=1).unique())
# sort so the units appear in a stable, human-readable order
for each_unit in sorted(list(units_set)):
unit_df_list.append({"Unit": each_unit, "Q-Node": ""})
if len(unit_df_list) == 0:
unit_df = pd.DataFrame(columns=['Unit', 'Q-Node'])
else:
unit_df = pd.DataFrame(unit_df_list)
return unit_df
def _process_main_subject(dataset_qnode: str, content_part: pd.DataFrame, annotation_part: pd.DataFrame, data_row):
col_offset = 1
wikifier_df_list = []
extra_df_list = []
created_node_ids = set()
for i in range(annotation_part.shape[1]):
each_col_info = annotation_part.iloc[:, i]
role = each_col_info["role"].lower()
if role == "main subject":
allowed_types = {"string", "country", "admin1", "admin2", "admin3"}
each_col_info = annotation_part.iloc[:, i]
type_ = each_col_info["type"].lower()
main_subject_annotation = annotation_part.iloc[:, i]
# generate wikifier file and extra edge file for main subjects when type == string
if type_ == "string":
for row, each in enumerate(content_part.iloc[:, i]):
label = str(each).strip()
node = "{}_{}_{}".format(dataset_qnode, each_col_info["header"], label) \
.replace(" ", "_").replace("-", "_")
# wikifier part should always be updated, as column/row is specified for each cell
wikifier_df_list.append(
{"column": i + col_offset, "row": row + data_row, "value": label,
"context": "main subject", "item": node})
# update 2020.7.24: do not create the node again if it already exists
if node in created_node_ids:
continue
created_node_ids.add(node)
labels = ["label", "description", "P31"]
node2s = ["{} {}".format(main_subject_annotation["header"], label),
main_subject_annotation["description"], "Q35120"]
for each_label, each_node2 in zip(labels, node2s):
id_ = "{}-{}".format(node, each_label)
extra_df_list.append({"id": id_, "node1": node, "label": each_label, "node2": each_node2})
elif type_ not in allowed_types:
raise ValueError("{} is not a legal type among {{{}}}!".format(type_, allowed_types))
# only one main subject so no need to continue
break
if len(extra_df_list) == 0:
extra_df = pd.DataFrame(columns=['id', 'node1', 'label', 'node2'])
else:
extra_df = pd.DataFrame(extra_df_list)
if len(wikifier_df_list) == 0:
wikifier_df = pd.DataFrame(columns=['column', 'row', 'value', 'context', "item"])
#Setting up the data for chapter
#Import the required packages
import pandas as pd
#Read in the data
df = pd.read_csv('all_stocks_5yr.csv')
#Convert the date column into datetime data type
df['date'] = pd.to_datetime(df['date'])
#Filter the data for Apple stocks only (ticker 'AAPL')
df_apple = df[df['Name'] == 'AAPL']
#Import the required packages
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.plotting import ColumnDataSource
#Create the ColumnDataSource object
data = ColumnDataSource(data = {
'x' : df_apple['high'],
'y' : df_apple['low'],
'x1': df_apple['open'],
'y1': df_apple['close'],
'x2': df_apple['date'],
'y2': df_apple['volume'],
})
#Adding titles to plots
#Import the required packages
from bokeh.plotting import figure, show, output_file
#Create the plot with the title
plot3 = figure(title = "5 year time series distribution of volume of Apple stocks traded", title_location = "above",
x_axis_type = 'datetime', x_axis_label = 'date', y_axis_label = 'Volume Traded')
#Create the time series plot
plot3.line(x = 'x2', y = 'y2', source = data, color = 'red')
plot3.circle(x = 'x2', y = 'y2', source = data, fill_color = 'white', size = 3)
#Output the plot
output_file('title.html')
show(plot3)
#Adding legends to plots
#Import the required packages
from bokeh.plotting import figure, show, output_file
#Create the two scatter plots
plot = figure()
#Create the legends
plot.cross(x = 'x', y = 'y', source = data, color = 'red', size = 10, alpha = 0.8, legend = "High Vs. Low")
plot.circle(x = 'x1', y = 'y1', source = data, color = 'green', size = 10, alpha = 0.3, legend = "Open Vs. Close")
#Output the plot
output_file('legend.html')
show(plot)
#Adding colormaps to plots
#Reading in the S&P 500 data
df = pd.read_csv('all_stocks_5yr.csv')
#Filtering for Google or USB
df_multiple = df[(df['Name'] == 'GOOGL') | (df['Name'] == 'USB')]
#Import the required packages
from bokeh.models import CategoricalColorMapper
#Store the data in the ColumnDataSource object
data = ColumnDataSource(df_multiple)
#Create the mapper
category_map = CategoricalColorMapper(
factors = ['GOOGL', 'USB'], palette = ['blue', 'red'])
#Plot the figure
plot = figure()
plot.circle('high', 'low', size = 8, source = data, color = {'field': 'Name', 'transform': category_map})
#Output the plot
output_file('category.html')
show(plot)
#Button widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Button
#Create the button
button = Button(label="Click me", button_type = "success")
#Output the button
output_file("button.html")
show(widgetbox(button))
#Checkbox widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import CheckboxGroup
#Create the checkbox
checkbox = CheckboxGroup(
labels=["Category: 1", "Category: 2", "Category: 3"], active=[0, 1, 2])
#Output the checkbox
output_file("checkbox.html")
show(widgetbox(checkbox))
#Dropdown menu widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Dropdown
#Create the menu
menu = [("Option 1", "item_1"), ("Option 2", "item_2")]
#Create the Dropdown
dropdown = Dropdown(label="Dropdown Menu", button_type="warning", menu=menu)
#Output the dropdown menu
output_file("dropdown.html")
show(widgetbox(dropdown))
#Radio button widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import RadioGroup
#Create the radio button
radio_button = RadioGroup(
labels=["Option 1", "Option 2"], active=0)
#Output the radio button widget
output_file("radiobutton.html")
show(widgetbox(radio_button))
#Slider widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import Slider
#Create the slider widget
slider = Slider(start=0, end=50, value=0, step= 5, title="Simple Slider")
#Output the slider
output_file("slider.html")
show(widgetbox(slider))
#Text input widget
#Import the required packages
from bokeh.io import output_file, show
from bokeh.layouts import widgetbox
from bokeh.models.widgets import TextInput
#Create the text input widget
text_widget = TextInput(value="", title="Type your text here")
#Output the text input widget
output_file("text_input.html")
show(widgetbox(text_widget))
#Hover tooltip
#Import the required packages
from bokeh.models import CategoricalColorMapper
from bokeh.models import HoverTool
from bokeh.io import output_file, show
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import figure
import pandas as pd
#Read in the data and filter for Google and USB stocks
df = pd.read_csv('all_stocks_5yr.csv')
df_multiple = df[(df['Name'] == 'GOOGL') | (df['Name'] == 'USB')]
#Create the hover tooltip
hover_tool = HoverTool(tooltips = [
('Stock Ticker', '@Name'),
('High Price', '@high'),
('Low Price', '@low')
])
#Save the data in a ColumnDataSource object
data = ColumnDataSource(df_multiple)
#Create the categorical color mapper
category_map = CategoricalColorMapper(
factors = ['GOOGL', 'USB'], palette = ['blue', 'red'])
#Create the plot with the hover tooltip
plot = figure(tools = [hover_tool])
plot.circle('high', 'low', size = 8, source = data, color = {'field': 'Name', 'transform': category_map})
#Output the plot
output_file('hover.html')
show(plot)
#Creating selections
#Import the required packages
from bokeh.models import CategoricalColorMapper
from bokeh.models import HoverTool
from bokeh.io import output_file, show
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import figure
#Read in the dataset and filter for Google and USB stocks
df = pd.read_csv('all_stocks_5yr.csv')
df_multiple = df[(df['Name'] == 'GOOGL') | (df['Name'] == 'USB')]
#Save the data into a ColumnDataSource object
data = ColumnDataSource(df_multiple)
#Create the categorical color mapper
category_map = CategoricalColorMapper(
factors = ['GOOGL', 'USB'], palette = ['blue', 'red'])
#Create the plot with the selection tool
plot = figure(tools = 'box_select')
plot.circle('high', 'low', size = 8, source = data,
color = {'field': 'Name', 'transform': category_map}, selection_color = 'green',
nonselection_fill_alpha = 0.3, nonselection_fill_color = 'grey')
#Output the plot
output_file('selection.html')
show(plot)
#Styling the title
#Import the required packages
from bokeh.models import CategoricalColorMapper
from bokeh.models import HoverTool
from bokeh.io import output_file, show
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import figure
#Read in and filter the data for Google and USB stocks
df = pd.read_csv("all_stocks_5yr.csv")
df_multiple = df[(df['Name'] == 'GOOGL') | (df['Name'] == 'USB')]
#Store the data in a ColumnDataSource
data = ColumnDataSource(df_multiple)
#Create the categorical color mapper
category_map = CategoricalColorMapper(
factors = ['GOOGL', 'USB'], palette = ['blue', 'red'])
#Create the plot and configure the title
plot = figure(title = "High Vs. Low Prices (Google & USB)")
plot.title.text_color = "red"
plot.title.text_font = "times"
plot.title.text_font_style = "bold"
plot.circle('high', 'low', size = 8, source = data,
color = {'field': 'Name', 'transform': category_map})
#Output the plot
output_file('title.html')
show(plot)
#Styling the background
#Import the required packages
from bokeh.models import CategoricalColorMapper
from bokeh.models import HoverTool
from bokeh.io import output_file, show
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import figure
#Read in the data and filter for Google and USB stocks
df = pd.read_csv("all_stocks_5yr.csv")
df_multiple = df[(df['Name'] == 'GOOGL') | (df['Name'] == 'USB')]
#Save the data in a ColumnDataSource object
data = ColumnDataSource(df_multiple)
#Create the categorical color mapper
category_map = CategoricalColorMapper(
factors = ['GOOGL', 'USB'], palette = ['blue', 'red'])
#Create the plot and configure the background
plot = figure(title = "High Vs. Low Prices (Google & USB)")
plot.background_fill_color = "yellow"
plot.background_fill_alpha = 0.3
plot.circle('high', 'low', size = 8, source = data,
color = {'field': 'Name', 'transform': category_map})
#Output the plot
output_file('title.html')
show(plot)
#Styling the outline of the plot
#Import the required packages
from bokeh.models import CategoricalColorMapper
from bokeh.models import HoverTool
from bokeh.io import output_file, show
from bokeh.plotting import ColumnDataSource
from bokeh.plotting import figure
#Read in the data and filter for Google and USB stocks
df = pd.read_csv("all_stocks_5yr.csv")
import pytest
from pandas import DataFrame
@pytest.fixture(scope='module')
def model():
from model import Model
from config import DATA_FILES, DATA_MERGE_KEYS
try:
model = Model( DATA_FILES['companies'], DATA_FILES['users'], DATA_MERGE_KEYS['companies'], DATA_MERGE_KEYS['users'] )
model.df = DataFrame({'col1':[1,2,3,4],'col2':['val1','val2','val3','val4']})
from __future__ import print_function
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas import (Series, Index, Int64Index, Timestamp, Period,
DatetimeIndex, PeriodIndex, TimedeltaIndex,
Timedelta, timedelta_range, date_range, Float64Index,
_np_version_under1p10)
import pandas.tslib as tslib
import pandas.tseries.period as period
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: (isinstance(x, DatetimeIndex) or
isinstance(x, PeriodIndex))
self.is_valid_objs = [o for o in self.objs if mask(o)]
self.not_valid_objs = [o for o in self.objs if not mask(o)]
def test_ops_properties(self):
self.check_ops_properties(
['year', 'month', 'day', 'hour', 'minute', 'second', 'weekofyear',
'week', 'dayofweek', 'dayofyear', 'quarter'])
self.check_ops_properties(['date', 'time', 'microsecond', 'nanosecond',
'is_month_start', 'is_month_end',
'is_quarter_start',
'is_quarter_end', 'is_year_start',
'is_year_end', 'weekday_name'],
lambda x: isinstance(x, DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year', 'day', 'second', 'weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series, op))
# attribute access should still work!
s = Series(dict(year=2000, month=1, day=10))
self.assertEqual(s.year, 2000)
self.assertEqual(s.month, 1)
self.assertEqual(s.day, 10)
self.assertRaises(AttributeError, lambda: s.weekday)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx')
expected_list = [Timestamp('2013-01-31'),
Timestamp('2013-02-28'),
Timestamp('2013-03-31'),
Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assert_index_equal(result, expected)
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M',
name='idx', tz='Asia/Tokyo')
expected_list = [Timestamp('2013-01-31', tz='Asia/Tokyo'),
Timestamp('2013-02-28', tz='Asia/Tokyo'),