prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars)
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
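# --- Illustrative helper (not part of the original test module; the function name below
# is invented for this sketch) ---
# It shows why assert_json_roundtrip_equal() resets the index for the "records" and
# "values" orients: those orients do not serialize the index, so it comes back as a
# default RangeIndex after read_json() (and "values" also loses the column labels).
def _example_orient_values_roundtrip():
    df = DataFrame([[1, 2]], index=["r"], columns=["a", "b"])
    result = read_json(df.to_json(orient="values"), orient="values")
    return list(result.index), list(result.columns)  # -> ([0], [0, 1])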
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
        # index, columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df = DataFrame(data=values, index=index)
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = df.copy()
expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize(
"data,msg,orient",
[
('{"key":b:a:d}', "Expected object or value", "columns"),
# too few indices
(
'{"columns":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"Shape of passed values is \(3, 2\), indices imply \(2, 2\)",
"split",
),
# too many columns
(
'{"columns":["A","B","C"],'
'"index":["1","2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
"3 columns passed, passed data had 2 columns",
"split",
),
# bad key
(
'{"badkey":["A","B"],'
'"index":["2","3"],'
'"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',
r"unexpected key\(s\): badkey",
"split",
),
],
)
def test_frame_from_json_bad_data_raises(self, data, msg, orient):
with pytest.raises(ValueError, match=msg):
read_json(StringIO(data), orient=orient)
@pytest.mark.parametrize("dtype", [True, False])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_frame_from_json_missing_data(self, orient, convert_axes, numpy, dtype):
num_df = DataFrame([[1, 2], [4, 5, 6]])
result = read_json(
num_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
assert np.isnan(result.iloc[0, 2])
obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])
result = read_json(
obj_df.to_json(orient=orient),
orient=orient,
convert_axes=convert_axes,
dtype=dtype,
)
if not dtype: # TODO: Special case for object data; maybe a bug?
assert result.iloc[0, 2] is None
else:
assert np.isnan(result.iloc[0, 2])
@pytest.mark.parametrize("inf", [np.inf, np.NINF])
@pytest.mark.parametrize("dtype", [True, False])
def test_frame_infinity(self, orient, inf, dtype):
# infinities get mapped to nulls which get mapped to NaNs during
# deserialisation
df = DataFrame([[1, 2], [4, 5, 6]])
df.loc[0, 2] = inf
result = read_json(df.to_json(), dtype=dtype)
assert np.isnan(result.iloc[0, 2])
@pytest.mark.skipif(
is_platform_32bit(), reason="not compliant on 32-bit, xref #15865"
)
@pytest.mark.parametrize(
"value,precision,expected_val",
[
(0.95, 1, 1.0),
(1.95, 1, 2.0),
(-1.95, 1, -2.0),
(0.995, 2, 1.0),
(0.9995, 3, 1.0),
(0.99999999999999944, 15, 1.0),
],
)
def test_frame_to_json_float_precision(self, value, precision, expected_val):
df = pd.DataFrame([dict(a_float=value)])
encoded = df.to_json(double_precision=precision)
assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'
def test_frame_to_json_except(self):
df = DataFrame([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient="garbage")
def test_frame_empty(self):
df = DataFrame(columns=["jim", "joe"])
assert not df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
# GH 7445
result = pd.DataFrame({"test": []}, index=[]).to_json(orient="columns")
expected = '{"test":{}}'
assert result == expected
def test_frame_empty_mixedtype(self):
# mixed type
df = DataFrame(columns=["jim", "joe"])
df["joe"] = df["joe"].astype("i8")
assert df._is_mixed_type
tm.assert_frame_equal(
read_json(df.to_json(), dtype=dict(df.dtypes)), df, check_index_type=False
)
def test_frame_mixedtype_orient(self): # GH10289
vals = [
[10, 1, "foo", 0.1, 0.01],
[20, 2, "bar", 0.2, 0.02],
[30, 3, "baz", 0.3, 0.03],
[40, 4, "qux", 0.4, 0.04],
]
df = DataFrame(
vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]
)
assert df._is_mixed_type
right = df.copy()
for orient in ["split", "index", "columns"]:
inp = df.to_json(orient=orient)
left = read_json(inp, orient=orient, convert_axes=False)
tm.assert_frame_equal(left, right)
right.index = np.arange(len(df))
inp = df.to_json(orient="records")
left = read_json(inp, orient="records", convert_axes=False)
tm.assert_frame_equal(left, right)
right.columns = np.arange(df.shape[1])
inp = df.to_json(orient="values")
left = read_json(inp, orient="values", convert_axes=False)
tm.assert_frame_equal(left, right)
def test_v12_compat(self, datapath):
df = DataFrame(
[
[1.56808523, 0.65727391, 1.81021139, -0.17251653],
[-0.2550111, -0.08072427, -0.03202878, -0.17581665],
[1.51493992, 0.11805825, 1.629455, -1.31506612],
[-0.02765498, 0.44679743, 0.33192641, -0.27885413],
[0.05951614, -2.69652057, 1.28163262, 0.34703478],
],
columns=["A", "B", "C", "D"],
index=pd.date_range("2000-01-03", "2000-01-07"),
)
df["date"] = pd.Timestamp("19920106 18:21:32.12")
df.iloc[3, df.columns.get_loc("date")] = pd.Timestamp("20130101")
df["modified"] = df["date"]
df.iloc[1, df.columns.get_loc("modified")] = pd.NaT
dirpath = datapath("io", "json", "data")
v12_json = os.path.join(dirpath, "tsframe_v012.json")
df_unser = pd.read_json(v12_json)
tm.assert_frame_equal(df, df_unser)
df_iso = df.drop(["modified"], axis=1)
v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")
df_unser_iso = pd.read_json(v12_iso_json)
tm.assert_frame_equal(df_iso, df_unser_iso)
def test_blocks_compat_GH9037(self):
index = pd.date_range("20000101", periods=10, freq="H")
df_mixed = DataFrame(
OrderedDict(
float_1=[
-0.92077639,
0.77434435,
1.25234727,
0.61485564,
-0.60316077,
0.24653374,
0.28668979,
-2.51969012,
0.95748401,
-1.02970536,
],
int_1=[
19680418,
75337055,
99973684,
65103179,
79373900,
40314334,
21290235,
4991321,
41903419,
16008365,
],
str_1=[
"78c608f1",
"64a99743",
"13d2ff52",
"ca7f4af2",
"97236474",
"bde7e214",
"1a6bde47",
"b1190be5",
"7a669144",
"8d64d068",
],
float_2=[
-0.0428278,
-1.80872357,
3.36042349,
-0.7573685,
-0.48217572,
0.86229683,
1.08935819,
0.93898739,
-0.03030452,
1.43366348,
],
str_2=[
"14f04af9",
"d085da90",
"4bcfac83",
"81504caf",
"2ffef4a9",
"08e2f5c4",
"07e1af03",
"addbd4a7",
"1f6a09ba",
"4bfc4d87",
],
int_2=[
86967717,
98098830,
51927505,
20372254,
12601730,
20884027,
34193846,
10561746,
24867120,
76131025,
],
),
index=index,
)
# JSON deserialisation always creates unicode strings
df_mixed.columns = df_mixed.columns.astype("unicode")
df_roundtrip = pd.read_json(df_mixed.to_json(orient="split"), orient="split")
tm.assert_frame_equal(
df_mixed,
df_roundtrip,
check_index_type=True,
check_column_type=True,
by_blocks=True,
check_exact=True,
)
def test_frame_nonprintable_bytes(self):
# GH14256: failing column caused segfaults, if it is not the last one
class BinaryThing:
def __init__(self, hexed):
self.hexed = hexed
self.binary = bytes.fromhex(hexed)
def __str__(self) -> str:
return self.hexed
hexed = "574b4454ba8c5eb4f98a8f45"
binthing = BinaryThing(hexed)
# verify the proper conversion of printable content
df_printable = DataFrame({"A": [binthing.hexed]})
assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'
# check if non-printable content throws appropriate Exception
df_nonprintable = DataFrame({"A": [binthing]})
msg = "Unsupported UTF-8 sequence length when encoding string"
with pytest.raises(OverflowError, match=msg):
df_nonprintable.to_json()
# the same with multiple columns threw segfaults
df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", "B"])
with pytest.raises(OverflowError):
df_mixed.to_json()
# default_handler should resolve exceptions for non-string types
result = df_nonprintable.to_json(default_handler=str)
expected = f'{{"A":{{"0":"{hexed}"}}}}'
assert result == expected
assert (
df_mixed.to_json(default_handler=str)
== f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'
)
def test_label_overflow(self):
# GH14256: buffer length not checked when writing label
result = pd.DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()
expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'
assert result == expected
def test_series_non_unique_index(self):
s = Series(["a", "b"], index=[1, 1])
msg = "Series index must be unique for orient='index'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="index")
tm.assert_series_equal(
s, read_json(s.to_json(orient="split"), orient="split", typ="series")
)
unser = read_json(s.to_json(orient="records"), orient="records", typ="series")
tm.assert_numpy_array_equal(s.values, unser.values)
def test_series_default_orient(self, string_series):
assert string_series.to_json() == string_series.to_json(orient="index")
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_simple(self, orient, numpy, string_series):
data = string_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = string_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [False, None])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_object(self, orient, numpy, dtype, object_series):
data = object_series.to_json(orient=orient)
result = pd.read_json(
data, typ="series", orient=orient, numpy=numpy, dtype=dtype
)
expected = object_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_empty(self, orient, numpy, empty_series):
data = empty_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = empty_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
else:
expected.index = expected.index.astype(float)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_timeseries(self, orient, numpy, datetime_series):
data = datetime_series.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = datetime_series
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
if orient != "split":
expected.name = None
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("dtype", [np.float64, np.int])
@pytest.mark.parametrize("numpy", [True, False])
def test_series_roundtrip_numeric(self, orient, numpy, dtype):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])
data = s.to_json(orient=orient)
result = pd.read_json(data, typ="series", orient=orient, numpy=numpy)
expected = s.copy()
if orient in ("values", "records"):
expected = expected.reset_index(drop=True)
tm.assert_series_equal(result, expected)
def test_series_to_json_except(self):
s = Series([1, 2, 3])
msg = "Invalid value 'garbage' for option 'orient'"
with pytest.raises(ValueError, match=msg):
s.to_json(orient="garbage")
def test_series_from_json_precise_float(self):
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", precise_float=True)
tm.assert_series_equal(result, s, check_index_type=False)
def test_series_with_dtype(self):
# GH 21986
s = Series([4.56, 4.56, 4.56])
result = read_json(s.to_json(), typ="series", dtype=np.int64)
expected = Series([4] * 3)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype,expected",
[
(True, Series(["2000-01-01"], dtype="datetime64[ns]")),
(False, Series([946684800000])),
],
)
def test_series_with_dtype_datetime(self, dtype, expected):
s = Series(["2000-01-01"], dtype="datetime64[ns]")
data = s.to_json()
result = pd.read_json(data, typ="series", dtype=dtype)
tm.assert_series_equal(result, expected)
def test_frame_from_json_precise_float(self):
df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])
result = read_json(df.to_json(), precise_float=True)
tm.assert_frame_equal(
result, df, check_index_type=False, check_column_type=False
)
def test_typ(self):
s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")
result = read_json(s.to_json(), typ=None)
tm.assert_series_equal(result, s)
def test_reconstruction_index(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])
result = read_json(df.to_json())
tm.assert_frame_equal(result, df)
def test_path(self, float_frame):
with tm.ensure_clean("test.json") as path:
for df in [
float_frame,
self.intframe,
self.tsframe,
self.mixed_frame,
]:
df.to_json(path)
read_json(path)
def test_axis_dates(self, datetime_series):
# frame
json = self.tsframe.to_json()
result = read_json(json)
tm.assert_frame_equal(result, self.tsframe)
# series
json = datetime_series.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, datetime_series, check_names=False)
assert result.name is None
def test_convert_dates(self, datetime_series):
# frame
df = self.tsframe.copy()
df["date"] = Timestamp("20130101")
json = df.to_json()
result = read_json(json)
tm.assert_frame_equal(result, df)
df["foo"] = 1.0
json = df.to_json(date_unit="ns")
result = read_json(json, convert_dates=False)
expected = df.copy()
expected["date"] = expected["date"].values.view("i8")
expected["foo"] = expected["foo"].astype("int64")
tm.assert_frame_equal(result, expected)
# series
ts = Series(Timestamp("20130101"), index=datetime_series.index)
json = ts.to_json()
result = read_json(json, typ="series")
tm.assert_series_equal(result, ts)
@pytest.mark.parametrize("date_format", ["epoch", "iso"])
@pytest.mark.parametrize("as_object", [True, False])
@pytest.mark.parametrize(
"date_typ", [datetime.date, datetime.datetime, pd.Timestamp]
)
def test_date_index_and_values(self, date_format, as_object, date_typ):
data = [date_typ(year=2020, month=1, day=1), pd.NaT]
if as_object:
data.append("a")
ser = pd.Series(data, index=data)
result = ser.to_json(date_format=date_format)
if date_format == "epoch":
expected = '{"1577836800000":1577836800000,"null":null}'
else:
expected = (
'{"2020-01-01T00:00:00.000Z":"2020-01-01T00:00:00.000Z","null":null}'
)
if as_object:
expected = expected.replace("}", ',"a":"a"}')
assert result == expected
@pytest.mark.parametrize(
"infer_word",
[
"trade_time",
"date",
"datetime",
"sold_at",
"modified",
"timestamp",
"timestamps",
],
)
def test_convert_dates_infer(self, infer_word):
# GH10747
from pandas.io.json import dumps
data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]
expected = DataFrame(
[[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]
)
result = read_json(dumps(data))[["id", infer_word]]
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_frame(self, date, date_unit):
df = self.tsframe.copy()
df["date"] = Timestamp(date)
df.iloc[1, df.columns.get_loc("date")] = pd.NaT
df.iloc[5, df.columns.get_loc("date")] = pd.NaT
if date_unit:
json = df.to_json(date_format="iso", date_unit=date_unit)
else:
json = df.to_json(date_format="iso")
result = read_json(json)
expected = df.copy()
expected.index = expected.index.tz_localize("UTC")
expected["date"] = expected["date"].dt.tz_localize("UTC")
tm.assert_frame_equal(result, expected)
def test_date_format_frame_raises(self):
df = self.tsframe.copy()
msg = "Invalid value 'foo' for option 'date_unit'"
with pytest.raises(ValueError, match=msg):
df.to_json(date_format="iso", date_unit="foo")
@pytest.mark.parametrize(
"date,date_unit",
[
("20130101 20:43:42.123", None),
("20130101 20:43:42", "s"),
("20130101 20:43:42.123", "ms"),
("20130101 20:43:42.123456", "us"),
("20130101 20:43:42.123456789", "ns"),
],
)
def test_date_format_series(self, date, date_unit, datetime_series):
ts = Series(Timestamp(date), index=datetime_series.index)
ts.iloc[1] = pd.NaT
ts.iloc[5] = pd.NaT
if date_unit:
json = ts.to_json(date_format="iso", date_unit=date_unit)
else:
json = ts.to_json(date_format="iso")
result = read_json(json, typ="series")
expected = ts.copy()
expected.index = expected.index.tz_localize("UTC")
expected = expected.dt.tz_localize("UTC")
tm.assert_series_equal(result, expected)
def test_date_format_series_raises(self, datetime_series):
        ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)
import pandas as pd
from urllib.request import urlopen
import io
import os
from datetime import datetime
__author__ = '<NAME>, <NAME>'
__copyright__ = '© Pandemic Central, 2021'
__license__ = 'MIT'
__status__ = 'release'
__url__ = 'https://github.com/solveforj/pandemic-central'
__version__ = '3.0.0'
def preprocess_JHU():
print('• Processing JHU Case Data')
# Get all other data from Johns Hopkins
jhu_data = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_US.csv").dropna()
jhu_data['FIPS'] = jhu_data['FIPS'].astype(int).astype(str)
jhu_data = jhu_data[jhu_data['FIPS'].notnull()]
def process_FIPS(fips):
missing_zeroes = "0" * (5-len(fips))
return missing_zeroes + fips
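    # Worked example: process_FIPS("613") -> "00613" (county FIPS codes padded to 5 digits)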
jhu_data['FIPS'] = jhu_data['FIPS'].apply(lambda x : process_FIPS(x))
# Filter for non-errant counties
fips_to_use = pd.read_csv("data/geodata/FIPS_used.csv", dtype={'FIPS': 'str'})
jhu_data = jhu_data[jhu_data['FIPS'].isin(fips_to_use['FIPS'].to_list())]
jhu_data = jhu_data.drop(["Admin2","Province_State","Country_Region","Lat","Long_","Combined_Key","UID","iso2","iso3","code3"], axis=1)
jhu_data = jhu_data.melt(id_vars=['FIPS'], var_name = 'date', value_name = 'confirmed_cases')
jhu_data['date'] = pd.to_datetime(jhu_data['date'])
jhu_data = jhu_data.sort_values(['FIPS', 'date'])
# Case counts are cumulative and will be converted into daily change
jhu_data['confirmed_cases'] = jhu_data.groupby('FIPS')['confirmed_cases'].diff().dropna()
full_data = jhu_data
full_data = full_data.sort_values(['FIPS','date'], axis=0)
full_data = full_data.reset_index(drop=True)
    # Compute 7-day rolling sum of cases for each county
full_data['confirmed_cases'] = pd.Series(full_data.groupby("FIPS")['confirmed_cases'].rolling(7).sum()).reset_index(drop=True)
full_data = full_data[full_data['confirmed_cases'].notnull()]
# Move dates forward by 1 day so that movement averages represent data from past week
    full_data['date'] = pd.to_datetime(full_data['date'])
"""
Prepare training and testing datasets as CSV dictionaries 2.0
Created on 04/26/2019; modified on 11/06/2019
@author: RH
"""
import os
import pandas as pd
import sklearn.utils as sku
import numpy as np
import re
# get all full paths of images
def image_ids_in(root_dir, ignore=['.DS_Store','dict.csv', 'all.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# Get intersection of 2 lists
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
# pair tiles of 20x, 10x, 5x of the same area
def paired_tile_ids_in_old(slide, label, root_dir):
dira = os.path.isdir(root_dir + 'level0')
dirb = os.path.isdir(root_dir + 'level1')
dirc = os.path.isdir(root_dir + 'level2')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 1000
else:
fac = 500
ids = []
for level in range(3):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 0]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 1]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 2]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path'])
return idsa
def tile_ids_in(inp):
ids = []
try:
for id in os.listdir(inp['path']):
if '_{}.png'.format(str(inp['sldnum'])) in id:
ids.append([inp['slide'], inp['level'], inp['path']+'/'+id, inp['BMI'], inp['age'], inp['label']])
except FileNotFoundError:
print('Ignore:', inp['path'])
return ids
# pair tiles of 10x, 5x, 2.5x of the same area
def paired_tile_ids_in(slide, label, root_dir, age=None, BMI=None):
dira = os.path.isdir(root_dir + 'level1')
dirb = os.path.isdir(root_dir + 'level2')
dirc = os.path.isdir(root_dir + 'level3')
if dira and dirb and dirc:
if "TCGA" in root_dir:
fac = 2000
else:
fac = 1000
ids = []
for level in range(1, 4):
dirr = root_dir + 'level{}'.format(str(level))
for id in os.listdir(dirr):
if '.png' in id:
x = int(float(id.split('x-', 1)[1].split('-', 1)[0]) / fac)
y = int(float(re.split('_', id.split('y-', 1)[1])[0]) / fac)
try:
dup = re.split('.p', re.split('_', id.split('y-', 1)[1])[1])[0]
except IndexError:
dup = np.nan
ids.append([slide, label, level, dirr + '/' + id, x, y, dup])
else:
print('Skipping ID:', id)
ids = pd.DataFrame(ids, columns=['slide', 'label', 'level', 'path', 'x', 'y', 'dup'])
idsa = ids.loc[ids['level'] == 1]
idsa = idsa.drop(columns=['level'])
idsa = idsa.rename(index=str, columns={"path": "L0path"})
idsb = ids.loc[ids['level'] == 2]
idsb = idsb.drop(columns=['slide', 'label', 'level'])
idsb = idsb.rename(index=str, columns={"path": "L1path"})
idsc = ids.loc[ids['level'] == 3]
idsc = idsc.drop(columns=['slide', 'label', 'level'])
idsc = idsc.rename(index=str, columns={"path": "L2path"})
idsa = pd.merge(idsa, idsb, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa['x'] = idsa['x'] - (idsa['x'] % 2)
idsa['y'] = idsa['y'] - (idsa['y'] % 2)
idsa = pd.merge(idsa, idsc, on=['x', 'y', 'dup'], how='left', validate="many_to_many")
idsa = idsa.drop(columns=['x', 'y', 'dup'])
idsa = idsa.dropna()
idsa = sku.shuffle(idsa)
idsa['age'] = age
idsa['BMI'] = BMI
else:
idsa = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
return idsa
# Balance CPTAC and TCGA tiles in each class
def balance(pdls, cls):
balanced = pd.DataFrame(columns=['slide', 'label', 'L0path', 'L1path', 'L2path', 'age', 'BMI'])
for i in range(cls):
ref = pdls.loc[pdls['label'] == i]
CPTAC = ref[~ref['slide'].str.contains("TCGA")]
TCGA = ref[ref['slide'].str.contains("TCGA")]
if CPTAC.shape[0] != 0 and TCGA.shape[0] != 0:
ratio = (CPTAC.shape[0])/(TCGA.shape[0])
if ratio < 0.2:
TCGA = TCGA.sample(int(5*CPTAC.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
elif ratio > 5:
CPTAC = CPTAC.sample(int(5*TCGA.shape[0]), replace=False)
ref = pd.concat([TCGA, CPTAC], sort=False)
balanced = pd.concat([balanced, ref], sort=False)
return balanced
# Prepare label at per patient level
def big_image_sum(pmd, path='../tiles/', ref_file='../Fusion_dummy_His_MUT_joined.csv'):
ref = pd.read_csv(ref_file, header=0)
big_images = []
if pmd == 'subtype':
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
if row['subtype_POLE'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_MSI'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Endometrioid'] == 1:
big_images.append([row['name'], 2, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif row['subtype_Serous-like'] == 1:
big_images.append([row['name'], 3, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd == 'histology':
ref = ref.loc[ref['histology_Mixed'] == 0]
for idx, row in ref.iterrows():
if row['histology_Endometrioid'] == 1:
big_images.append([row['name'], 0, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
if row['histology_Serous'] == 1:
big_images.append([row['name'], 1, path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
elif pmd in ['Endometrioid', 'MSI', 'Serous-like', 'POLE']:
# ref = ref.loc[ref['histology_Endometrioid'] == 1]
ref = ref.loc[ref['subtype_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['subtype_{}'.format(pmd)]), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
elif pmd == 'MSIst':
ref = ref.loc[ref['MSIst_0NA'] == 0]
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row['MSIst_MSI-H']), path + "{}/".format(str(row['name'])),
row['age'], row['BMI']])
else:
ref = ref.dropna(subset=[pmd])
for idx, row in ref.iterrows():
big_images.append([row['name'], int(row[pmd]), path + "{}/".format(str(row['name'])), row['age'], row['BMI']])
datapd = pd.DataFrame(big_images, columns=['slide', 'label', 'path', 'age', 'BMI'])
return datapd
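# --- Illustrative sketch (not part of the original script; the function name and the
# 'histology'/cls=2 arguments are example assumptions) ---
# Shows how the helpers above are typically chained before the train/validation/test
# split performed by set_sep_secondary() below.
def _example_build_tile_table(pmd='histology', path='../tiles/'):
    labels = big_image_sum(pmd, path=path)            # one row per slide with label, path, age, BMI
    tiles = pd.concat(
        [paired_tile_ids_in(row['slide'], row['label'], row['path'], row['age'], row['BMI'])
         for _, row in labels.iterrows()],
        sort=False)                                   # one row per paired tile triplet
    return balance(tiles, cls=2)                      # even out CPTAC vs. TCGA within each class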
# TO KEEP SPLIT SAME AS BASELINES. Separate into training and testing; each type uses the same separation
# ratio on big images; the test and train csv files contain tiles' paths.
def set_sep_secondary(alll, path, cls, pmd, batchsize=24):
if pmd == 'subtype':
split = pd.read_csv('../split/ST.csv', header=0)
elif pmd == 'histology':
split = pd.read_csv('../split/his.csv', header=0)
elif pmd == 'Serous-like':
split = pd.read_csv('../split/CNVH.csv', header=0)
elif pmd == 'Endometrioid':
split = pd.read_csv('../split/CNVL.csv', header=0)
else:
split = pd.read_csv('../split/{}.csv'.format(pmd), header=0)
train = split.loc[split['set'] == 'train']['slide'].tolist()
validation = split.loc[split['set'] == 'validation']['slide'].tolist()
test = split.loc[split['set'] == 'test']['slide'].tolist()
trlist = []
telist = []
valist = []
subset = alll
valist.append(subset[subset['slide'].isin(validation)])
telist.append(subset[subset['slide'].isin(test)])
trlist.append(subset[subset['slide'].isin(train)])
test = pd.concat(telist)
    train = pd.concat(trlist)
import numpy as np
import pandas as pd
from rbergomi.rbergomi_utils import *
class rBergomi(object):
"""
Class for generating paths of the rBergomi model.
    Integral equations for reference:
    Y(t) := sqrt(2a + 1) * ∫_0^t (t - u)^a dW(u)
    V(t) := xi * exp(eta * Y(t) - 0.5 * eta^2 * t^(2a + 1))
    S(t) := S0 * exp( ∫_0^t sqrt(V(u)) dB(u) - 0.5 * ∫_0^t V(u) du )
"""
def __init__(self, n=256, N=1024, T=1.0):
"""
Constructor for class.
"""
# Basic assignments.
self.n = n # Steps per year
self.N = N # Paths
self.T = T # Maturity
self.dt = 1.0/n # Step size
self.s = int(n*T) # Steps
self.t = np.linspace(0,T,1+self.s)[np.newaxis,:] # Time grid
def dW(self, α=0.4, β=-0.4, seed=0):
"""
        Generates the correlated 2d Brownian increments that drive the α and β Volterra factors.
"""
self.α = α
self.β = β
s = self.s
# Store required covariance matrices
cov1 = cov(α, self.n)
cov2 = cov(β, self.n)
chol1 = np.linalg.cholesky(cov1)[np.newaxis,np.newaxis,:,:]
chol2 = np.linalg.cholesky(cov2)[np.newaxis,np.newaxis,:,:]
# fn = 'sobol/'+str(seed)+'-'+str(self.N)+'-'+str(4*s)+'.csv'
# random_numbers = np.array(pd.read_csv(fn))
## SHOULD BE OUTSIDE CALIBRATION ROUTINE
np.random.seed(seed)
random_numbers = np.random.normal(size=(self.N,4*s))
# Obviously generalise
dB11 = random_numbers[:,0*s:1*s]
dB12 = random_numbers[:,1*s:2*s]
dB21 = random_numbers[:,2*s:3*s]
dB22 = random_numbers[:,3*s:4*s]
# Prepare for operations
dB1 = np.zeros((self.N,s,2,1))
dB2 = np.zeros((self.N,s,2,1))
dB1[:,:,0,0] = dB11
dB1[:,:,1,0] = dB12
dB2[:,:,0,0] = dB21
dB2[:,:,1,0] = dB22
# Finally, correlate in C-layer
dW1 = np.squeeze(np.matmul(chol1,dB1))
dW2 = np.squeeze(np.matmul(chol2,dB2))
dW = np.zeros((self.N,s,2,2))
dW[:,:,:,0] = dW1
dW[:,:,:,1] = dW2
return dW
# Should promote this for two dimensions given α, β use
def Y(self, dW, α):
"""
Constructs Volterra process from appropriately
correlated 2d Brownian increments.
"""
Y1 = np.zeros((self.N, 1 + self.s)) # Exact integral
Y2 = np.zeros((self.N, 1 + self.s)) # Riemann sum
# Construct Y1 through exact integral
# for i in range(1 + self.s):
# Use np.cumsum here? - must time this
# for i in np.arange(1, 1 + self.s, 1): # See (3.6)
# Y1[:,i] += dW[:,i-1,1] # Assumes kappa = 1
# Construct Y1 through exact integral
Y1[:,1:1+self.s] = dW[:,:self.s,1] # Assumes kappa = 1
# Construct arrays for convolution
Γ = np.zeros(1 + self.s) # Gamma
for k in np.arange(2, 1 + self.s, 1): # Assumes kappa = 1
Γ[k] = g(b(k, α)/self.n, α)
Ξ = dW[:,:,0] # Xi
# Initialise convolution result, GX
ΓΞ = np.zeros((self.N, len(Ξ[0,:]) + len(Γ) - 1))
# Compute convolution, FFT not used for small n
# Not able to compute all paths in C-layer
for i in range(self.N):
ΓΞ[i,:] = np.convolve(Γ, Ξ[i,:])
# Extract appropriate part of convolution
Y2 = ΓΞ[:,:1 + self.s]
# Finally contruct and return full process
Y = np.sqrt(2 * α + 1) * (Y1 + Y2)
return Y
# Yes should raise dimens
def V(self, Yα, Yβ, ξ=1.0, ζ=-0.5, η=1.5):
"""
rBergomi variance process.
SHOULD ALSO WRITE INTEGRATED PROCESS METHOD FOR EFFICIENT LATER USE.
"""
self.ξ = ξ
self.ζ = ζ
self.η = η
α = self.α
β = self.β
t = self.t
Vα = np.exp(ζ*Yα - 0.5*ζ**2 * t**(2*α+1))
Vβ = np.exp(η*Yβ - 0.5*η**2 * t**(2*β+1))
V = ξ * Vα * Vβ
return V
def S(self, V, dB):
"""
rBergomi price process.
"""
dt = self.dt
# Construct non-anticipative Riemann increments
increments = np.sqrt(V[:,:-1]) * dB - 0.5 * V[:,:-1] * dt
# Cumsum is actually a little slower than Python loop. Not terribly
integral = np.cumsum(increments, axis = 1)
S = np.zeros_like(V)
S[:,0] = 1.
S[:,1:] = np.exp(integral)
return S
def surface(self, S, surf):
"""
Provides the implied Black volatility surface for every option
implicitely in a Surface object.
"""
vec_bsinv = np.vectorize(bsinv)
indices = (surf.maturities * self.n).astype(int)
ST = S[:,indices][:,:,np.newaxis]
K = np.array(surf.strikes())[np.newaxis,:,:]
Δ = np.array(surf.forward_deltas())
T = surf.maturities[:,np.newaxis]
call_payoffs = np.maximum(ST - K,0) #- (1-Δ)*(ST - 1)
call_prices = np.mean(call_payoffs, axis=0)
call_vols = vec_bsinv(call_prices, 1., np.squeeze(K), T, ϕ=1)
put_payoffs = np.maximum(K - ST,0) #+ Δ*(ST - 1)
put_prices = np.mean(put_payoffs, axis=0)
put_vols = vec_bsinv(put_prices, 1., np.squeeze(K), T, ϕ=-1)
# don't think helpful when have control
vols = (call_vols + put_vols) / 2
        return pd.DataFrame(vols, index=surf.tenors, columns=surf.deltas)
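# --- Usage sketch (not part of the original module) ---
# A minimal, illustrative driver for the rBergomi class above; the parameter values
# (α, β, ξ, ζ, η, n, N, T) are arbitrary examples rather than calibrated choices, and
# this assumes rbergomi_utils (imported at the top of the file) is available.
if __name__ == "__main__":
    rb = rBergomi(n=256, N=1024, T=1.0)
    dW = rb.dW(α=0.4, β=-0.4, seed=0)            # correlated increments, shape (N, s, 2, 2)
    Y_a = rb.Y(dW[:, :, :, 0], rb.α)             # Volterra process driven by the α factor
    Y_b = rb.Y(dW[:, :, :, 1], rb.β)             # Volterra process driven by the β factor
    V = rb.V(Y_a, Y_b, ξ=0.04, ζ=-0.5, η=1.5)    # variance paths, shape (N, 1 + s)
    print(V.mean(axis=0)[:5])                    # sanity check on the first few time steps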
#Vertical Scrolled Frame class was created by github user @novel-yet-trivial
#Input directory class was adapted from Stack Overflow user @scotty3785
#All other code written by <NAME>
#-------------------------------------------------------------------------------------------------------------------------------------
#Import neccessary packages
from tkinter import *
from tkinter.ttk import *
from tkinter import ttk
from tkinter import filedialog
from astropy.io import fits
from astropy.utils.data import get_pkg_data_filename
import tkinter.messagebox
import pandas as pd
import tkinter as tk
import os
import csv
import astropy.io
#-------------------------------------------------------------------------------------------------------------------------------------
#Define entire code as a function, necessary for turning into EXE
#CMD prompt to make executable : pyinstaller.exe --onefile --noconsole --icon=logoHeader.ico headereditor2.py
def main():
#---------------------------------------------------------------------------------------------------------------------------------
#Portion to initialize and name window
window = Tk()
window.geometry('690x460') #Set window parameters
window.title("FITS Header Editor 2.0") #name window
window.iconbitmap(r"C:\Users\willm\Desktop\Python_Codes\Executable headerEditor\logoHeader.ico")#set icon for program
#---------------------------------------------------------------------------------------------------------------------------------
#Vertical Scrolled Frame class, created by github user @novel-yet-trivial
class VerticalScrolledFrame:
def __init__(self, master, **kwargs):
width = kwargs.pop('width', None)
height = kwargs.pop('height', None)
bg = kwargs.pop('bg', kwargs.pop('background', None))
self.outer = tk.Frame(master, **kwargs)
self.vsb = tk.Scrollbar(self.outer, orient=tk.VERTICAL)
self.vsb.pack(fill=tk.Y, side=tk.RIGHT)
self.canvas = tk.Canvas(self.outer, highlightthickness=0, width=width, height=height, bg=bg)
self.canvas.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True)
self.canvas['yscrollcommand'] = self.vsb.set
self.canvas.bind("<Enter>", self._bind_mouse)
self.canvas.bind("<Leave>", self._unbind_mouse)
self.vsb['command'] = self.canvas.yview
self.inner = tk.Frame(self.canvas, bg=bg)
self.canvas.create_window(4, 4, window=self.inner, anchor='nw')
self.inner.bind("<Configure>", self._on_frame_configure)
self.outer_attr = set(dir(tk.Widget))
def __getattr__(self, item):
if item in self.outer_attr:
return getattr(self.outer, item)
else:
return getattr(self.inner, item)
def _on_frame_configure(self, event=None):
x1, y1, x2, y2 = self.canvas.bbox("all")
height = self.canvas.winfo_height()
self.canvas.config(scrollregion = (0,0, x2, max(y2, height)))
def _bind_mouse(self, event=None):
self.canvas.bind_all("<4>", self._on_mousewheel)
self.canvas.bind_all("<5>", self._on_mousewheel)
self.canvas.bind_all("<MouseWheel>", self._on_mousewheel)
def _unbind_mouse(self, event=None):
self.canvas.unbind_all("<4>")
self.canvas.unbind_all("<5>")
self.canvas.unbind_all("<MouseWheel>")
def _on_mousewheel(self, event):
if event.num == 4 or event.delta > 0:
self.canvas.yview_scroll(-1, "units" )
elif event.num == 5 or event.delta < 0:
self.canvas.yview_scroll(1, "units" )
def __str__(self):
return str(self.outer)
#---------------------------------------------------------------------------------------------------------------------------------
#Portion to initially place canvas on grid. This canvas will be destroyed and written over whenever the clear button is selected.
#Canvas is defined globally so it can be destroyed in the clear function
global canv
canv = VerticalScrolledFrame(window, width=500, borderwidth=1, relief=tk.SUNKEN, background="light gray")#Initialize canvas with VSF function
canv.grid(column=0, row=9) # set canvas on grid
#---------------------------------------------------------------------------------------------------------------------------------
#Input directory, created by Stack Overflow user scotty3785
class FolderSelect(Frame):
def __init__(self,parent=None,folderDescription="",**kw):
Frame.__init__(self,master=parent,**kw)
self.folderPath = StringVar()
self.lblName = Label(self, text=folderDescription, font=("Arial Bold", 10))
self.lblName.grid(row=1,column=0)
self.entPath = Entry(self, textvariable=self.folderPath)
self.entPath.grid(row=1,column=1)
self.btnFind = ttk.Button(self, text="Browse Folder",command=self.setFolderPath)
self.btnFind.grid(row=1,column=2)
def setFolderPath(self):
folder_selected = filedialog.askdirectory()
self.folderPath.set(folder_selected)
@property
def folder_path(self):
return self.folderPath.get()
folderPath = StringVar()
#---------------------------------------------------------------------------------------------------------------------------------
#Section initializes all labels, entry boxes, check buttons, and directory select buttons
lbl = Label(window, text="FITS Header Editor 2.0", font=("Arial Bold", 10))#Set title variable
lbl.grid(column=0, row=0)#Place title on grid
directory1Select = FolderSelect(window,"Choose folder to display/edit FITS header ") #Set directory select one
directory1Select.grid(row=1) #Place on grid
lblTwo = Label(window, text="Enter the keyword you would like to look for", font=("Arial Bold", 10)) #Label for txt entry window
lblTwo.grid(column=0, row=2) #Place label on grid
txt = Entry(window,width=10) #Entry window for keyword
txt.grid(column=1, row=2) #Place entry window on grid
ask = Label(window, text="Would you like to write to csv, display, and/or edit?", font=("Arial Bold", 10)) #Label for check boxes
ask.grid(column=0, row=3) #Place label on grid
chk_state = BooleanVar() #Make check state true or false variable
chk_state.set(False) #Have check box set to false upon opening window
chk_stateOne = BooleanVar() #^
chk_stateOne.set(False)#^
chk_stateTwo = BooleanVar()#^
chk_stateTwo.set(False)#^
chk = Checkbutton(window, text='Display', var=chk_state) #Set first check box and assign to boolean variable
chk.grid(column=1, row=3) #Place check box on grid
chkOne = Checkbutton(window, text='Edit', var=chk_stateOne) #Set second check box and assign to boolean variable
chkOne.grid(column=2, row=3) #Place check box on grid
chkTwo = Checkbutton(window, text='Write', var=chk_stateTwo) #Set third check box and assign to boolean variable
chkTwo.grid(column=3, row=3) #Place check box on grid
lblTwo = Label(window, text="If edit, what would you like to change the keyword to?", font=("Arial Bold", 10)) #Label for edit keyword entry window
lblTwo.grid(column=0, row=4) #Place label on grid
txtTwo = Entry(window,width=10) #Entry window for lbltwo
txtTwo.grid(column=1, row=4) #Place entry window on grid
lblTwo = Label(window, text="If write, what would you like to name your csv file? (Format : example.csv)", font=("Arial Bold", 10)) #Label for naming csv file entry window
lblTwo.grid(column=0, row=5) #Place label on grid
txtThree = Entry(window,width=10) #entry window for naming csv file
txtThree.grid(column=1, row=5) #Place entry window on grid
directory2Select = FolderSelect(window,"If write, choose location to save csv ") #Call directory select function to save csv to certain location
directory2Select.grid(row=6) #Place directory select on grid
#---------------------------------------------------------------------------------------------------------------------------------
#Set count variable
#Set count as global so it can be used in process and clear with value set to one upon running the program. After initially running the program,
#these two lines will not be ran through again. This variable is for placing labels on canvas. Every time a label is placed, count increases.
#If the clear button is selected, count is set back to one so, when the process button is pressed again, count will start by placing label on first row in canvas
global count
count = 1 #Count set to one outside of process function
#---------------------------------------------------------------------------------------------------------------------------------
#Process function for all reasonable actions desired by user
def process():
global count #Call count global again so function recognizes count when called upon
g = 0 #Variable created for exiting function out of a for loop -explained in more detail when called upon
#-----------------------------------------------------------------------------------------------------------------------------
#Error Handling : All error messages kill the process function with return statement
if chk_stateTwo.get() != True and chk_stateOne.get() != True and chk_state.get() != True: #Error if no check box is checked
tk.messagebox.showinfo("Error" , "Choose Write, Edit, or Display")
return
if directory1Select.folder_path == '': #Error if no directory is entered to parse through
tk.messagebox.showinfo("Error", "Enter a directory")
return
if chk_stateOne.get() == True and str(txtTwo.get()) == '': #Error if edit selected but no new value entered
tk.messagebox.showinfo("Error", "Enter new value for chosen keyword")
return
if str(txt.get()) == '': #Error if no object to search for entered
tk.messagebox.showinfo("Error", "Enter keyword to search for")
return
if str(txtThree.get()) == '' and chk_stateTwo.get() == True : #Error if write and no name for csv file
tk.messagebox.showinfo("Error", "Enter name to save csv file")
return
if '.csv' not in str(txtThree.get()) and chk_stateTwo.get() == True : #Error if csv file name entered is not properly formatted
tk.messagebox.showinfo("Error", "Please format csv file as follows : example.csv")
return
if directory2Select.folder_path == '' and chk_stateTwo.get() == True : #Error if write selected and no location to enter csv entered
tk.messagebox.showinfo("Error", "Enter directory to save csv file")
return
#-----------------------------------------------------------------------------------------------------------------------------
        #The only time that there will be a widget in row 8 is after the code has been cleared or process has been run
        #Row 8 is for displaying messages that confirm the action desired has been completed. For example, if row 8 says "All values
        #cleared successfully", at this point in the process function that will be cleared, so another label, like "values
        #successfully displayed" can be placed in row 8
for displ in window.grid_slaves():
if int(displ.grid_info()["row"]) == 8:
displ.grid_forget()
#-----------------------------------------------------------------------------------------------------------------------------
#Get values from all entries. If the values are not needed, ie. newVal when the user doesn't select to input a new value,
#the value will simply not be used
directory = directory1Select.folder_path
directory2 = directory2Select.folder_path
keyWord = str(txt.get())
newVal = str(txtTwo.get())
name = str(txtThree.get())
err = 0 #Variable created to notify user if there are no FTS files in entered directory, explained further when called upon
#-----------------------------------------------------------------------------------------------------------------------------
#All values that are entered to csv file are put into a data frame, which is INITIALIZED here. Data frame has axis titles
#to keep all data in order, but the actual titles displayed in the csv file are placed in the first row of the dataframe.
#This was done because dataframes are formatted in a way that both row and column axes have titles. Although it's
#possible to just title the column axis as Object and have each row axis be the filename, this causes (1,1) in the csv file to
#be an empty cell
csvDf = pd.DataFrame({'File Name' : [], 'Object': []}) #initialize dataframe
frame = pd.DataFrame({'File Name': ['File Name'], 'Object': ['Object']}) #enter dataframe titles
csvDf = [csvDf, frame] #create structure for concatenation
        csvDf = pd.concat(csvDf)
import pandas as pd
from faker import Faker
import random
import os
import numpy as np
import datetime
import arrow
class tools():
def __init__(self,path):
self.path = str(path)
def get_province(self):
ip_data = pd.read_feather(self.path+'/ip_data.feather')
return list(ip_data['province'].unique())
def get_datetime(self,input_datetime = '2022-01-19 15:00:00',count=300):
mid_datetime = arrow.get(input_datetime).naive
s = list(np.random.randint(-10*60,10*60,size=count))
datatime_list = []
for item in s:
offset = datetime.timedelta(seconds=int(item))
time = mid_datetime + offset
datatime_list.append(time)
return datatime_list
def createRandomString(self,len):
result = []
for i in range (len):
raw = ""
range1 = range(58, 65) # between 0~9 and A~Z
range2 = range(91, 97) # between A~Z and a~z
i = 0
while i < 12:
seed = random.randint(48, 122)
if ((seed in range1) or (seed in range2)):
continue
raw += chr(seed)
i += 1
result.append(raw)
return result
def long2ip(self,long):
floor_list=[]
yushu=long
for i in reversed(range(4)): #3,2,1,0
res=divmod(yushu,256**i)
floor_list.append(str(res[0]))
yushu=res[1]
return '.'.join(floor_list)
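    # Worked example: long2ip(16777472) -> '1.0.1.0'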
def get_fakename(self,number=300):
result =[]
fake = Faker(['zh_CN'])
for _ in range(number):
result.append(fake.name())
return result
def get_nickname(self,number=300):
table = pd.read_excel(self.path+'/nickname.xlsx')
result = random.sample(list(table['nickname']), number)
return result
def get_ramdon_ip(self,ip=16777472):
offset = random.randint(1,254)
ip_address = ip+offset
return self.long2ip(ip_address)
def generate_dataset(self,province="上海市",count=300,rate=2/10,start='2022-01-19 15:00:00',end='2022-01-19 18:00:00'):
ip_data = pd.read_feather(self.path+'/ip_data.feather')
selected_ip = ip_data[ip_data['province']==province]
out_selected_ip = ip_data[ip_data['province']!=province]
if len(selected_ip) >= count:
            #Randomly sample row indices
order = np.random.randint(0,len(selected_ip),size=count)
            #Take the sampled rows from the DataFrame
newDf = selected_ip.take(order)
else:
loop = int(count/len(selected_ip))
newDf = selected_ip
for i in range(loop):
                newDf = pd.concat([newDf,selected_ip],sort=False)
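# --- Usage sketch (not part of the original module) ---
# Illustrative only: './data' is an assumed directory that must contain the
# ip_data.feather and nickname.xlsx files the class reads.
if __name__ == "__main__":
    t = tools('./data')
    print(t.get_province()[:5])                       # first few provinces found in ip_data.feather
    print(t.get_datetime('2022-01-19 15:00:00', 10))  # 10 timestamps within ±10 minutes of the input
    print(t.long2ip(16777472))                        # -> '1.0.1.0'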
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
        :description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
:param num_apps; number of applications
:param app_interval; number of days between applications
:NOTE in TED model there are two application scenarios per simulation (one for a min/max exposure scenario)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
result = pd.Series([[]], dtype='bool')
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
            # input variables that change per simulation
ted_empty.num_apps_min = pd.Series([3, 5, 1])
ted_empty.app_interval_min = pd.Series([3, 7, 1])
for i in range (3):
result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
np.array_equal(result[i],expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
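    # Illustrative sketch (an assumption, not the actual ted_exe implementation):
    # daily_app_flag presumably returns a boolean Series of length
    # num_simulation_days that is True only on application days, e.g.
    #     flag = np.full(ted_empty.num_simulation_days, False)
    #     flag[np.arange(num_apps) * app_interval] = True
    # which reproduces the expected patterns above (days 0, 3, 6 for three
    # applications every 3 days; days 0, 7, 14, 21, 28 for five every 7 days).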
def test_set_drift_parameters(self):
"""
        :description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
        :param param_a (result[i][0]); parameter a for spray drift distance calculation
        :param param_b (result[i][1]); parameter b for spray drift distance calculation
        :param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series(9*[[0.,0.,0.]], dtype='float')
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
            # input variables that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
                tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
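    # Illustrative sketch (an assumption, not the actual ted_exe implementation):
    # set_drift_parameters presumably acts as a lookup table keyed on
    # (app_method, boom_hgt, droplet_spec) that returns the (param_a, param_b,
    # param_c) triple used in the spray drift distance equation, e.g.
    #     params = {('aerial', '', 'very_fine_to_fine'): (0.0292, 0.822, 0.6539), ...}
    #     param_a, param_b, param_c = params[(app_method, boom_hgt, droplet_spec)]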
def test_drift_distance_calc(self):
"""
        :description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c = pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
# internally calculated variables
app_rate_frac = pd.Series([0.1,0.25,0.88], dtype='float')
for i in range(3):
result[i] = ted_empty.drift_distance_calc(app_rate_frac[i], param_a[i], param_b[i], param_c[i], ted_empty.max_distance_from_source)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_timestep(self):
"""
:description unittest for function conc_timestep:
:param conc_ini; initial concentration for day (actually previous day concentration)
        :param half_life; half-life of pesticide representing either the foliar dissipation half-life or the aerobic soil metabolism half-life (days)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [9.803896e-4, 0.106066, 1.220703e-3]
try:
# input variable that is internally specified from among options
half_life = pd.Series([35., 2., .1])
# internally calculated variables
conc_ini = pd.Series([1.e-3, 0.15, 1.25])
result = ted_empty.conc_timestep(conc_ini, half_life)
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
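    # Illustrative check (an assumption, not the actual ted_exe implementation):
    # conc_timestep appears to apply one day of first-order decay,
    #     conc = conc_ini * 0.5 ** (1. / half_life)
    # e.g. 1.e-3 * 0.5 ** (1. / 35.) ~= 9.803896e-4 and
    #      0.15  * 0.5 ** (1. / 2.)  ~= 0.106066, matching expected_results.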
def test_conc_initial_canopy_air(self):
"""
:description calculates initial (1st application day) air concentration of pesticide within plant canopy (ug/mL)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param mass_pest; mass of pesticide on treated field (mg)
:param volume_air; volume of air in 1 hectare to a height equal to the height of the crop canopy
        :param biotransfer_factor; the volume-based biotransfer factor; a function of Henry's law constant and log Kow
NOTE: this represents Eq 24 (and supporting eqs 25,26,27) of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [1.152526e-7, 1.281910e-5, 7.925148e-8]
try:
# internal model constants
ted_empty.hectare_to_acre = 2.47105
ted_empty.gms_to_mg = 1000.
ted_empty.lbs_to_gms = 453.592
ted_empty.crop_hgt = 1. #m
ted_empty.hectare_area = 10000. #m2
ted_empty.m3_to_liters = 1000.
ted_empty.mass_plant = 25000. # kg/hectare
ted_empty.density_plant = 0.77 #kg/L
# input variables that change per simulation
ted_empty.log_kow = pd.Series([2., 4., 6.], dtype='float')
ted_empty.log_unitless_hlc = pd.Series([-5., -3., -4.], dtype='float')
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_canopy_air(i, ted_empty.app_rate_min[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_conc_initial_soil_h2o(self):
"""
:description calculates initial (1st application day) concentration in soil pore water or surface puddles(ug/L)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param soil_depth
:param soil_bulk_density; kg/L
:param porosity; soil porosity
:param frac_org_cont_soil; fraction organic carbon in soil
:param app_rate_conv; conversion factor used to convert units of application rate (lbs a.i./acre) to (ug a.i./mL)
:NOTE this represents Eq 3 of Attachment 1-7 of 'Biological Evaluation Chapters for Diazinon ESA Assessment'
(the depth of water in this equation is assumed to be 0.0 and therefore not included here)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [5.067739e-3, 1.828522, 6.13194634]
try:
# internal model constants
ted_empty.app_rate_conv1 = 11.2
ted_empty.soil_depth = 2.6 # cm
ted_empty.soil_porosity = 0.35
ted_empty.soil_bulk_density = 1.5 # kg/L
ted_empty.soil_foc = 0.015
ted_empty.h2o_depth_soil = 0.0
ted_empty.h2o_depth_puddles = 1.3
# internally specified variable
ted_empty.water_type = pd.Series(["puddles", "pore_water", "puddles"])
# input variables that change per simulation
ted_empty.koc = pd.Series([1.e-3, 0.15, 1.25])
ted_empty.app_rate_min = pd.Series([1.e-3, 0.15, 1.25]) # lbs a.i./acre
for i in range(3): #let's do 3 iterations
result[i] = ted_empty.conc_initial_soil_h2o(i, ted_empty.app_rate_min[i], ted_empty.water_type[i])
npt.assert_allclose(result,expected_results,rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_results]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
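    # Illustrative check (an assumption based on the Eq 3 reference above):
    # conc_initial_soil_h2o appears to compute
    #     conc = (app_rate * app_rate_conv1) /
    #            (water_depth + soil_depth * (porosity + bulk_density * koc * foc))
    # with water_depth = 0.0 for pore water and 1.3 cm for puddles, e.g. the
    # pore-water case: (0.15 * 11.2) / (2.6 * (0.35 + 1.5 * 0.15 * 0.015)) ~= 1.8285.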
def test_conc_initial_plant(self):
"""
:description calculates initial (1st application day) dietary based EEC (residue concentration) from pesticide application
(mg/kg-diet for food items including short/tall grass, broadleaf plants, seeds/fruit/pods, and above ground arthropods)
:param application rate; active ingredient application rate (lbs a.i./acre)
:param food_multiplier; factor by which application rate of active ingredient is multiplied to estimate dietary based EECs
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
        result = pd.Series([], dtype='float')
## Copyright 2015-2021 PyPSA Developers
## You can find the list of PyPSA Developers at
## https://pypsa.readthedocs.io/en/latest/developers.html
## PyPSA is released under the open source MIT License, see
## https://github.com/PyPSA/PyPSA/blob/master/LICENSE.txt
"""
Build optimisation problems from PyPSA networks without Pyomo.
Originally retrieved from nomopyomo ( -> 'no more Pyomo').
"""
__author__ = "PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html"
__copyright__ = ("Copyright 2015-2021 PyPSA Developers, see https://pypsa.readthedocs.io/en/latest/developers.html, "
"MIT License")
from .pf import (_as_snapshots, get_switchable_as_dense as get_as_dense)
from .descriptors import (get_bounds_pu, get_extendable_i, get_non_extendable_i,
expand_series, nominal_attrs, additional_linkports,
Dict, get_active_assets, get_activity_mask)
from .linopt import (linexpr, write_bound, write_constraint, write_objective,
set_conref, set_varref, get_con, get_var, join_exprs,
run_and_read_highs, run_and_read_cbc, run_and_read_gurobi,
run_and_read_glpk, run_and_read_cplex, run_and_read_xpress,
define_constraints, define_variables, define_binaries,
align_with_static_component)
import pandas as pd
import numpy as np
from numpy import inf
from distutils.version import LooseVersion
pd_version = LooseVersion(pd.__version__)
agg_group_kwargs = dict(numeric_only=False) if pd_version >= "1.3" else {}
import gc, time, os, re, shutil
from tempfile import mkstemp
import logging
logger = logging.getLogger(__name__)
lookup = pd.read_csv(os.path.join(os.path.dirname(__file__), 'variables.csv'),
index_col=['component', 'variable'])
def define_nominal_for_extendable_variables(n, c, attr):
"""
Initializes variables for nominal capacities for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
lower = n.df(c)[attr+'_min'][ext_i]
upper = n.df(c)[attr+'_max'][ext_i]
define_variables(n, lower, upper, c, attr)
def define_dispatch_for_extendable_and_committable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if c == 'Generator':
ext_i = ext_i.union(n.generators.query('committable').index)
if ext_i.empty:
return
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
define_variables(n, -inf, inf, c, attr, axes=[sns, ext_i], spec='ext', mask=active)
def define_dispatch_for_non_extendable_variables(n, sns, c, attr):
"""
Initializes variables for power dispatch for a given component and a
given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
fix_i = get_non_extendable_i(n, c)
if c == 'Generator':
fix_i = fix_i.difference(n.generators.query('committable').index)
if fix_i.empty: return
nominal_fix = n.df(c)[nominal_attrs[c]][fix_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, fix_i, attr)
lower = min_pu.mul(nominal_fix)
upper = max_pu.mul(nominal_fix)
axes = [sns, fix_i]
active = get_activity_mask(n, c, sns)[fix_i] if n._multi_invest else None
kwargs = dict(spec='non_ext', mask=active)
dispatch = define_variables(n, -inf, inf, c, attr, axes=axes, **kwargs)
dispatch = linexpr((1, dispatch))
define_constraints(n, dispatch, '>=', lower, c, 'mu_lower', **kwargs)
define_constraints(n, dispatch, '<=', upper, c, 'mu_upper', **kwargs)
def define_dispatch_for_extendable_constraints(n, sns, c, attr):
"""
Sets power dispatch constraints for extendable devices for a given
component and a given attribute.
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
"""
ext_i = get_extendable_i(n, c)
if ext_i.empty: return
min_pu, max_pu = get_bounds_pu(n, c, sns, ext_i, attr)
operational_ext_v = get_var(n, c, attr)[ext_i]
nominal_v = get_var(n, c, nominal_attrs[c])[ext_i]
rhs = 0
active = get_activity_mask(n, c, sns)[ext_i] if n._multi_invest else None
kwargs = dict(spec=attr, mask=active)
lhs, *axes = linexpr((max_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '>=', rhs, c, 'mu_upper', axes=axes, **kwargs)
lhs, *axes = linexpr((min_pu, nominal_v), (-1, operational_ext_v), return_axes=True)
define_constraints(n, lhs, '<=', rhs, c, 'mu_lower', axes=axes, **kwargs)
def define_fixed_variable_constraints(n, sns, c, attr, pnl=True):
"""
Sets constraints for fixing variables of a given component and attribute
to the corresponding values in n.df(c)[attr + '_set'] if pnl is True, or
n.pnl(c)[attr + '_set']
Parameters
----------
n : pypsa.Network
c : str
name of the network component
attr : str
name of the attribute, e.g. 'p'
pnl : bool, default True
Whether variable which should be fixed is time-dependent
"""
if pnl:
if attr + '_set' not in n.pnl(c): return
fix = n.pnl(c)[attr + '_set'].loc[sns]
if fix.empty: return
if n._multi_invest:
active = get_activity_mask(n, c, sns)
fix = fix.where(active)
fix = fix.stack()
lhs = linexpr((1, get_var(n, c, attr).stack()[fix.index]),
as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix).unstack().T
else:
if attr + '_set' not in n.df(c): return
fix = n.df(c)[attr + '_set'].dropna()
if fix.empty: return
lhs = linexpr((1, get_var(n, c, attr)[fix.index]), as_pandas=False)
constraints = write_constraint(n, lhs, '=', fix)
set_conref(n, constraints, c, f'mu_{attr}_set')
def define_generator_status_variables(n, sns):
c = 'Generator'
com_i = n.generators.query('committable').index
ext_i = get_extendable_i(n, c)
if not (ext_i.intersection(com_i)).empty:
logger.warning("The following generators have both investment optimisation"
f" and unit commitment:\n\n\t{', '.join((ext_i.intersection(com_i)))}\n\nCurrently PyPSA cannot "
"do both these functions, so PyPSA is choosing investment optimisation "
"for these generators.")
com_i = com_i.difference(ext_i)
if com_i.empty: return
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_binaries(n, (sns, com_i), 'Generator', 'status', mask=active)
def define_committable_generator_constraints(n, sns):
c, attr = 'Generator', 'status'
com_i = n.df(c).query('committable and not p_nom_extendable').index
if com_i.empty: return
nominal = n.df(c)[nominal_attrs[c]][com_i]
min_pu, max_pu = get_bounds_pu(n, c, sns, com_i, 'p')
lower = min_pu.mul(nominal)
upper = max_pu.mul(nominal)
status = get_var(n, c, attr)
p = get_var(n, c, 'p')[com_i]
lhs = linexpr((lower, status), (-1, p))
active = get_activity_mask(n, c, sns)[com_i] if n._multi_invest else None
define_constraints(n, lhs, '<=', 0, 'Generators', 'committable_lb', mask=active)
lhs = linexpr((upper, status), (-1, p))
define_constraints(n, lhs, '>=', 0, 'Generators', 'committable_ub', mask=active)
def define_ramp_limit_constraints(n, sns, c):
"""
Defines ramp limits for a given component with valid ramplimit.
"""
rup_i = n.df(c).query('ramp_limit_up == ramp_limit_up').index
rdown_i = n.df(c).query('ramp_limit_down == ramp_limit_down').index
if rup_i.empty & rdown_i.empty:
return
fix_i = get_non_extendable_i(n, c)
ext_i = get_extendable_i(n, c)
if "committable" in n.df(c):
com_i = n.df(c).query('committable').index.difference(ext_i)
else:
com_i = []
    # index of the snapshot just before the optimisation window (relevant for rolling horizon)
start_i = n.snapshots.get_loc(sns[0]) - 1
pnl = n.pnl(c)
# get dispatch for either one or two ports
attr = ({'p', 'p0'} & set(pnl)).pop()
p_prev_fix = pnl[attr].iloc[start_i]
is_rolling_horizon = (sns[0] != n.snapshots[0]) and not p_prev_fix.empty
if is_rolling_horizon:
active = get_activity_mask(n, c, sns)
p = get_var(n, c, 'p')
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1)
rhs_prev = pd.DataFrame(0, *p.axes)
rhs_prev.loc[sns[0]] = p_prev_fix
else:
active = get_activity_mask(n, c, sns[1:])
p = get_var(n, c, 'p').loc[sns[1:]]
p_prev = get_var(n, c, 'p').shift(1, fill_value=-1).loc[sns[1:]]
rhs_prev = pd.DataFrame(0, *p.axes)
# fix up
gens_i = rup_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# ext up
gens_i = rup_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_up'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (-limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# com up
gens_i = rup_i.intersection(com_i)
if not gens_i.empty:
limit_start = n.df(c).loc[gens_i].eval('ramp_limit_start_up * p_nom')
limit_up = n.df(c).loc[gens_i].eval('ramp_limit_up * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_start - limit_up, status_prev),
(- limit_start, status))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += (limit_up - limit_start) * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '<=', rhs, c, 'mu_ramp_limit_up', **kwargs)
# fix down
gens_i = rdown_i.intersection(fix_i)
if not gens_i.empty:
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]))
rhs = rhs_prev[gens_i] + n.df(c).loc[gens_i].eval('-1 * ramp_limit_down * p_nom')
kwargs = dict(spec='nonext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# ext down
gens_i = rdown_i.intersection(ext_i)
if not gens_i.empty:
limit_pu = n.df(c)['ramp_limit_down'][gens_i]
p_nom = get_var(n, c, 'p_nom')[gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]), (limit_pu, p_nom))
rhs = rhs_prev[gens_i]
kwargs = dict(spec='ext.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
# com down
gens_i = rdown_i.intersection(com_i)
if not gens_i.empty:
limit_shut = n.df(c).loc[gens_i].eval('ramp_limit_shut_down * p_nom')
limit_down = n.df(c).loc[gens_i].eval('ramp_limit_down * p_nom')
status = get_var(n, c, 'status').loc[p.index, gens_i]
status_prev = get_var(n, c, 'status').shift(1, fill_value=-1).loc[p.index, gens_i]
lhs = linexpr((1, p[gens_i]), (-1, p_prev[gens_i]),
(limit_down - limit_shut, status),
(limit_shut, status_prev))
rhs = rhs_prev[gens_i]
if is_rolling_horizon:
status_prev_fix = n.pnl(c)['status'][com_i].iloc[start_i]
rhs.loc[sns[0]] += -limit_shut * status_prev_fix
kwargs = dict(spec='com.', mask=active[gens_i])
define_constraints(n, lhs, '>=', rhs, c, 'mu_ramp_limit_down', **kwargs)
def define_nominal_constraints_per_bus_carrier(n, sns):
for carrier in n.carriers.index:
for bound, sense in [("max", "<="), ("min", ">=")]:
col = f'nom_{bound}_{carrier}'
if col not in n.buses.columns: continue
rhs = n.buses[col].dropna()
lhs = pd.Series('', rhs.index)
for c, attr in nominal_attrs.items():
if c not in n.one_port_components: continue
attr = nominal_attrs[c]
if (c, attr) not in n.variables.index: continue
nominals = get_var(n, c, attr)[n.df(c).carrier == carrier]
if nominals.empty: continue
per_bus = linexpr((1, nominals)).groupby(n.df(c).bus).sum(**agg_group_kwargs)
lhs += per_bus.reindex(lhs.index, fill_value='')
if bound == 'max':
lhs = lhs[lhs != '']
rhs = rhs.reindex(lhs.index)
else:
assert (lhs != '').all(), (
f'No extendable components of carrier {carrier} on bus '
f'{list(lhs[lhs == ""].index)}')
define_constraints(n, lhs, sense, rhs, 'Bus', 'mu_' + col)
def define_nodal_balance_constraints(n, sns):
"""
Defines nodal balance constraint.
"""
def bus_injection(c, attr, groupcol='bus', sign=1):
# additional sign only necessary for branches in reverse direction
if 'sign' in n.df(c):
sign = sign * n.df(c).sign
expr = linexpr((sign, get_var(n, c, attr))).rename(columns=n.df(c)[groupcol])
# drop empty bus2, bus3 if multiline link
if c == 'Link':
expr.drop(columns='', errors='ignore', inplace=True)
return expr
# one might reduce this a bit by using n.branches and lookup
args = [['Generator', 'p'], ['Store', 'p'], ['StorageUnit', 'p_dispatch'],
['StorageUnit', 'p_store', 'bus', -1], ['Line', 's', 'bus0', -1],
['Line', 's', 'bus1', 1], ['Transformer', 's', 'bus0', -1],
['Transformer', 's', 'bus1', 1], ['Link', 'p', 'bus0', -1],
['Link', 'p', 'bus1', get_as_dense(n, 'Link', 'efficiency', sns)]]
args = [arg for arg in args if not n.df(arg[0]).empty]
if not n.links.empty:
for i in additional_linkports(n):
eff = get_as_dense(n, 'Link', f'efficiency{i}', sns)
args.append(['Link', 'p', f'bus{i}', eff])
lhs = (pd.concat([bus_injection(*arg) for arg in args], axis=1)
.groupby(axis=1, level=0)
.sum(**agg_group_kwargs)
.reindex(columns=n.buses.index, fill_value=''))
if (lhs == '').any().any():
raise ValueError("Empty LHS in nodal balance constraint.")
sense = '='
rhs = ((- get_as_dense(n, 'Load', 'p_set', sns) * n.loads.sign)
.groupby(n.loads.bus, axis=1).sum()
.reindex(columns=n.buses.index, fill_value=0))
define_constraints(n, lhs, sense, rhs, 'Bus', 'marginal_price')
def define_kirchhoff_constraints(n, sns):
"""
Defines Kirchhoff voltage constraints
"""
comps = n.passive_branch_components & set(n.variables.index.levels[0])
if len(comps) == 0: return
branch_vars = pd.concat({c:get_var(n, c, 's') for c in comps}, axis=1)
def cycle_flow(ds, sns):
if sns is None:
sns = slice(None)
ds = ds[lambda ds: ds!=0.].dropna()
vals = linexpr((ds, branch_vars.loc[sns, ds.index]), as_pandas=False)
return vals.sum(1)
constraints = []
periods = sns.unique('period') if n._multi_invest else [None]
for period in periods:
n.determine_network_topology(investment_period=period)
subconstraints = []
for sub in n.sub_networks.obj:
branches = sub.branches()
C = pd.DataFrame(sub.C.todense(), index=branches.index)
if C.empty:
continue
carrier = n.sub_networks.carrier[sub.name]
weightings = branches.x_pu_eff if carrier == 'AC' else branches.r_pu_eff
C_weighted = 1e5 * C.mul(weightings, axis=0)
cycle_sum = C_weighted.apply(cycle_flow, sns=period)
snapshots = sns if period == None else sns[sns.get_loc(period)]
cycle_sum.set_index(snapshots, inplace=True)
con = write_constraint(n, cycle_sum, '=', 0)
subconstraints.append(con)
if len(subconstraints) == 0:
continue
constraints.append(pd.concat(subconstraints, axis=1, ignore_index=True))
if constraints:
constraints = pd.concat(constraints).rename_axis(columns='cycle')
set_conref(n, constraints, 'SubNetwork', 'mu_kirchhoff_voltage_law')
def define_storage_unit_constraints(n, sns):
"""
    Defines state of charge (soc) constraints for storage units. In principle
    the constraint states:
previous_soc + p_store - p_dispatch + inflow - spill == soc
"""
sus_i = n.storage_units.index
if sus_i.empty: return
c = 'StorageUnit'
# spillage
has_periods = isinstance(sns, pd.MultiIndex)
active = get_activity_mask(n, c, sns)
upper = get_as_dense(n, c, 'inflow', sns).loc[:, lambda df: df.max() > 0]
spill = define_variables(n, 0, upper, 'StorageUnit', 'spill', mask=active[upper.columns])
# elapsed hours
eh = expand_series(n.snapshot_weightings.stores[sns], sus_i)
# efficiencies
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
eff_dispatch = expand_series(n.df(c).efficiency_dispatch, sns).T
eff_store = expand_series(n.df(c).efficiency_store, sns).T
soc = get_var(n, c, 'state_of_charge')
if has_periods:
cyclic_i = n.df(c).query('cyclic_state_of_charge & '
'~cyclic_state_of_charge_per_period').index
cyclic_pp_i = n.df(c).query('cyclic_state_of_charge & '
'cyclic_state_of_charge_per_period').index
noncyclic_i = n.df(c).query('~cyclic_state_of_charge & '
'~state_of_charge_initial_per_period').index
noncyclic_pp_i = n.df(c).query("~cyclic_state_of_charge & "
"state_of_charge_initial_per_period").index
else:
cyclic_i = n.df(c).query('cyclic_state_of_charge').index
noncyclic_i = n.df(c).query('~cyclic_state_of_charge ').index
# cyclic constraint for whole optimization horizon
previous_soc_cyclic = soc.where(active).ffill().apply(lambda ds: np.roll(ds, 1)).ffill()
# non cyclic constraint: determine the first active snapshot
first_active_snapshot = active.cumsum()[noncyclic_i] == 1
coeff_var = [(-1, soc),
(-1/eff_dispatch * eh, get_var(n, c, 'p_dispatch')),
(eff_store * eh, get_var(n, c, 'p_store'))]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=axes[0], columns=axes[1], fill_value='').values
if (c, 'spill') in n.variables.index:
lhs += masked_term(-eh, get_var(n, c, 'spill'), spill.columns)
lhs += masked_term(eff_stand, previous_soc_cyclic, cyclic_i)
lhs += masked_term(eff_stand[~first_active_snapshot],
soc.shift()[~first_active_snapshot], noncyclic_i)
    # rhs: set the initial state of charge at the beginning of the optimization horizon for non-cyclic units
rhs = -get_as_dense(n, c, 'inflow', sns).mul(eh).astype(float)
rhs[noncyclic_i] = rhs[noncyclic_i].where(~first_active_snapshot,
rhs-n.df(c).state_of_charge_initial, axis=1)
if has_periods:
# cyclic constraint for soc per period - cyclic soc within each period
previous_soc_cyclic_pp = soc.groupby(level=0).transform(lambda ds: np.roll(ds, 1))
lhs += masked_term(eff_stand, previous_soc_cyclic_pp, cyclic_pp_i)
        # set the initial energy at the beginning of each period
first_active_snapshot_pp = (
active[noncyclic_pp_i].groupby(level=0).cumsum() == 1)
lhs += masked_term(eff_stand[~first_active_snapshot_pp],
soc.shift()[~first_active_snapshot_pp],
noncyclic_pp_i)
rhs[noncyclic_pp_i] = (
rhs[noncyclic_pp_i].where(~first_active_snapshot_pp,
rhs - n.df(c).state_of_charge_initial, axis=1))
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge', mask=active)
def define_store_constraints(n, sns):
"""
    Defines energy balance constraints for stores. In principle this states:
previous_e - p == e
"""
stores_i = n.stores.index
if stores_i.empty: return
c = 'Store'
has_periods = isinstance(sns, pd.MultiIndex)
active = get_activity_mask(n, c, sns)
define_variables(n, -inf, inf, axes=[sns, stores_i], name=c, attr='p', mask=active)
# elapsed hours
eh = expand_series(n.snapshot_weightings.stores[sns], stores_i) #elapsed hours
eff_stand = expand_series(1-n.df(c).standing_loss, sns).T.pow(eh)
e = get_var(n, c, 'e')
if has_periods:
cyclic_i = n.df(c).query('e_cyclic & ~e_cyclic_per_period').index
cyclic_pp_i = n.df(c).query('e_cyclic & e_cyclic_per_period').index
noncyclic_i = n.df(c).query('~e_cyclic & ~e_initial_per_period').index
noncyclic_pp_i = n.df(c).query("~e_cyclic & e_initial_per_period").index
else:
cyclic_i = n.df(c).query('e_cyclic').index
noncyclic_i = n.df(c).query('~e_cyclic').index
# cyclic constraint for whole optimization horizon
previous_e_cyclic = e.where(active).ffill().apply(lambda ds: np.roll(ds, 1)).ffill()
# non cyclic constraint: determine the first active snapshot
first_active_snapshot = active.cumsum()[noncyclic_i] == 1
coeff_var = [(-eh, get_var(n, c, 'p')), (-1, e)]
lhs, *axes = linexpr(*coeff_var, return_axes=True)
def masked_term(coeff, var, cols):
return linexpr((coeff[cols], var[cols]))\
.reindex(index=sns, columns=stores_i, fill_value='').values
lhs += masked_term(eff_stand, previous_e_cyclic, cyclic_i)
lhs += masked_term(eff_stand[~first_active_snapshot],
e.shift()[~first_active_snapshot], noncyclic_i)
# rhs set e at beginning of optimization horizon for noncyclic
rhs = pd.DataFrame(0., sns, stores_i)
rhs[noncyclic_i] = rhs[noncyclic_i].where(~first_active_snapshot, -n.df(c).e_initial, axis=1)
if has_periods:
# cyclic constraint for soc per period - cyclic soc within each period
previous_e_cyclic_pp = e.groupby(level=0).transform(lambda ds: np.roll(ds, 1))
lhs += masked_term(eff_stand, previous_e_cyclic_pp, cyclic_pp_i)
        # set the initial energy at the beginning of each period
first_active_snapshot_pp = (
active[noncyclic_pp_i].groupby(level=0).cumsum() == 1)
lhs += masked_term(eff_stand[~first_active_snapshot_pp],
e.shift()[~first_active_snapshot_pp],
noncyclic_pp_i)
rhs[noncyclic_pp_i] = (
rhs[noncyclic_pp_i].where(~first_active_snapshot_pp, -n.df(c).e_initial, axis=1))
define_constraints(n, lhs, '==', rhs, c, 'mu_state_of_charge', mask=active)
def define_growth_limit(n, sns, c, attr):
"""Constraint new installed capacity per investment period.
Parameters
----------
n : pypsa.Network
c : str
network component of which the nominal capacity should be defined
attr : str
name of the variable, e.g. 'p_nom'
"""
if not n._multi_invest: return
ext_i = get_extendable_i(n, c)
if "carrier" not in n.df(c) or n.df(c).empty: return
with_limit = n.carriers.query("max_growth != inf").index
limit_i = n.df(c).query("carrier in @with_limit").index.intersection(ext_i)
if limit_i.empty: return
periods = sns.unique('period')
v = get_var(n, c, attr)
carriers = n.df(c).loc[limit_i, "carrier"]
caps = pd.concat({period: linexpr((1, v)).where(n.get_active_assets(c, period), '')
for period in periods}, axis=1).T[limit_i]
lhs = caps.groupby(carriers, axis=1).sum(**agg_group_kwargs)
rhs = n.carriers.max_growth[with_limit]
define_constraints(n, lhs, '<=', rhs, 'Carrier', 'growth_limit_{}'.format(c))
def define_global_constraints(n, sns):
"""
Defines global constraints for the optimization. Possible types are
1. primary_energy
        Use this to constrain the by-products of primary energy sources, such
        as CO2
2. transmission_volume_expansion_limit
Use this to set a limit for line volume expansion. Possible carriers
are 'AC' and 'DC'
3. transmission_expansion_cost_limit
Use this to set a limit for line expansion costs. Possible carriers
are 'AC' and 'DC'
4. tech_capacity_expansion_limit
        Use this to set a limit for the summed capacity of a carrier (e.g.
        'onwind') for each investment period at chosen nodes. This limit
        could e.g. represent land resource/building restrictions for a
        technology in a certain region. Currently, only the capacities of
        extendable generators have to be below the set limit.
"""
if n._multi_invest:
period_weighting = n.investment_period_weightings["years"]
weightings = n.snapshot_weightings.mul(period_weighting, level=0, axis=0).loc[sns]
else:
weightings = n.snapshot_weightings.loc[sns]
def get_period(n, glc, sns):
period = slice(None)
if n._multi_invest and not np.isnan(glc["investment_period"]):
period = int(glc["investment_period"])
if period not in sns.unique('period'):
logger.warning("Optimized snapshots do not contain the investment "
f"period required for global constraint `{glc.name}`.")
return period
# (1) primary_energy
glcs = n.global_constraints.query('type == "primary_energy"')
for name, glc in glcs.iterrows():
rhs = glc.constant
lhs = ''
carattr = glc.carrier_attribute
emissions = n.carriers.query(f'{carattr} != 0')[carattr]
period = get_period(n, glc, sns)
if emissions.empty: continue
# generators
gens = n.generators.query('carrier in @emissions.index')
if not gens.empty:
em_pu = gens.carrier.map(emissions)/gens.efficiency
em_pu = (weightings["generators"].to_frame('weightings') @\
em_pu.to_frame('weightings').T).loc[period]
p = get_var(n, 'Generator', 'p').loc[sns, gens.index].loc[period]
vals = linexpr((em_pu, p), as_pandas=False)
lhs += join_exprs(vals)
# storage units
sus = n.storage_units.query('carrier in @emissions.index and '
'not cyclic_state_of_charge')
sus_i = sus.index
if not sus.empty:
em_pu = sus.carrier.map(emissions)
soc = get_var(n, 'StorageUnit', 'state_of_charge').loc[sns, sus_i].loc[period]
soc = soc.where(soc!=-1).ffill().iloc[-1]
vals = linexpr((-em_pu, soc), as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= em_pu @ sus.state_of_charge_initial
# stores
n.stores['carrier'] = n.stores.bus.map(n.buses.carrier)
stores = n.stores.query('carrier in @emissions.index and not e_cyclic')
if not stores.empty:
em_pu = stores.carrier.map(emissions)
e = get_var(n, 'Store', 'e').loc[sns, stores.index].loc[period]
e = e.where(e!=-1).ffill().iloc[-1]
vals = linexpr((-em_pu, e), as_pandas=False)
lhs = lhs + '\n' + join_exprs(vals)
rhs -= stores.carrier.map(emissions) @ stores.e_initial
define_constraints(n, lhs, glc.sense, rhs, 'GlobalConstraint', 'mu',
axes=pd.Index([name]), spec=name)
# (2) transmission_volume_expansion_limit
glcs = n.global_constraints.query('type == '
'"transmission_volume_expansion_limit"')
substr = lambda s: re.sub(r'[\[\]\(\)]', '', s)
for name, glc in glcs.iterrows():
car = [substr(c.strip()) for c in glc.carrier_attribute.split(',')]
lhs = ''
period = get_period(n, glc, sns)
for c, attr in (('Line', 's_nom'), ('Link', 'p_nom')):
if n.df(c).empty: continue
ext_i = n.df(c).query(f'carrier in @car and {attr}_extendable').index
ext_i = ext_i[get_activity_mask(n, c, sns)[ext_i].loc[period].any()]
if ext_i.empty: continue
v = linexpr((n.df(c).length[ext_i], get_var(n, c, attr)[ext_i]),
as_pandas=False)
lhs += '\n' + join_exprs(v)
if lhs == '': continue
sense = glc.sense
rhs = glc.constant
define_constraints(n, lhs, sense, rhs, 'GlobalConstraint', 'mu',
                           axes=pd.Index([name]), spec=name)
import pandas as pd
import numpy as np
from numpy import mean
from numpy import std
from numpy import NaN
from sklearn.datasets import make_regression
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
#from xgboost import XGBRFRegressor
import lightgbm as lgb
from lightgbm import LGBMRegressor
# https://www.kaggle.com/shreyagopal/suicide-rate-prediction-with-machine-learning
#from sklearn.linear_model import LinearRegression
dat = "C:/Users/LIUM3478/OneDrive Corp/OneDrive - Atkins Ltd/Work_Atkins/Docker/hjulanalys/wheel_prediction_data.csv"
df = pd.read_csv(dat, encoding = 'ISO 8859-1', sep = ";", decimal=",")
df.head()
df.groupby(['Littera','VehicleOperatorName']).size().reset_index().rename(columns={0:'count'})
y = df[['km_till_OMS']].values
X = df[["LeftWheelDiameter", "Littera", "VehicleOperatorName",
"TotalPerformanceSnapshot", "maxTotalPerformanceSnapshot"]]
# X["Littera_Operator"] = X.Littera + " " + X.VehicleOperatorName
# X.drop(["Littera", "VehicleOperatorName"], axis = 1, inplace=True)
# converting object type to category for gradient boosting algorithms
def obj_to_cat(data):
obj_feat = list(data.loc[:, data.dtypes == 'object'].columns.values)
for feature in obj_feat:
data[feature] = pd.Series(data[feature], dtype="category")
return data
X = obj_to_cat(X)
# Training and Testing Sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 1234)
# calling the native lightgbm training API directly
# converting to lightgbm's internal Dataset format
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
hyper_params = {
'task': 'train',
    'boosting_type': 'gbdt', # boosting type (gradient boosted decision trees)
'objective': 'regression',
    'metric': {'l2', 'auc'}, # evaluation metrics (l2 = mean squared error)
'learning_rate': 0.5,
    'feature_fraction': 1, # fraction of features randomly selected when building each tree
"num_leaves": 20, # number of leaf nodes
}
gbm = lgb.train(params=hyper_params,
train_set=lgb_train, valid_sets=lgb_eval,
num_boost_round=20, verbose_eval=False, early_stopping_rounds=5)
y_pred = gbm.predict(X_test)
y_pred = pd.DataFrame(y_pred)
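# Illustrative next step (a sketch, not from the original script): evaluate the
# hold-out predictions with RMSE, reusing the metrics already imported above.
rmse = np.sqrt(mean_squared_error(y_test, y_pred))
print("validation RMSE:", rmse)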
import glob as glob
import matplotlib as mpl
import matplotlib.patheffects as PathEffects
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
import matplotlib.transforms as transforms
import numpy as np
import pandas as pd
import seaborn as sns
import bz2
import corner
import json
import pathlib
import pickle
import warnings
from astropy import constants as const
from astropy import units as uni
from astropy.io import ascii, fits
from astropy.time import Time
from mpl_toolkits.axes_grid1 import ImageGrid
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
warnings.filterwarnings("ignore", r"Degrees of freedom <= 0 for slice")
def _bad_idxs(s):
if s == "[]":
return []
else:
# Merges indices/idxs specified in `s` into a single numpy array of
# indices to omit
s = s.strip("[]").split(",")
bad_idxs = list(map(_to_arr, s))
bad_idxs = np.concatenate(bad_idxs, axis=0)
return bad_idxs
def _to_arr(idx_or_slc):
# Converts str to 1d numpy array
# or slice to numpy array of ints.
# This format makes it easier for flattening multiple arrays in `_bad_idxs`
if ":" in idx_or_slc:
lower, upper = map(int, idx_or_slc.split(":"))
return np.arange(lower, upper + 1)
else:
return np.array([int(idx_or_slc)])
def compress_pickle(fname_out, fpath_pickle):
data = load_pickle(fpath_pickle)
with bz2.BZ2File(f"{fname_out}.pbz2", "wb") as f:
pickle.dump(data, f)
def decompress_pickle(fname):
data = bz2.BZ2File(fname, "rb")
return pickle.load(data)
def get_evidences(base_dir, relative_to_spot_only=False):
fit_R0 = "fitR0" if "fit_R0" in base_dir else "NofitR0"
species = ["Na", "K", "TiO", "Na_K", "Na_TiO", "K_TiO", "Na_K_TiO"]
model_names_dict = {
"clear": f"NoHet_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+cloud": f"NoHet_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+haze": f"NoHet_FitP0_NoClouds_Haze_{fit_R0}",
"clear+cloud+haze": f"NoHet_FitP0_Clouds_Haze_{fit_R0}",
"clear+spot": f"Het_FitP0_NoClouds_NoHaze_{fit_R0}",
"clear+spot+cloud": f"Het_FitP0_Clouds_NoHaze_{fit_R0}",
"clear+spot+haze": f"Het_FitP0_NoClouds_Haze_{fit_R0}",
"clear+spot+cloud+haze": f"Het_FitP0_Clouds_Haze_{fit_R0}",
}
data_dict = {
sp: {
model_name: load_pickle(f"{base_dir}/HATP23_E1_{model_id}_{sp}/retrieval.pkl")
for (model_name, model_id) in model_names_dict.items()
}
for sp in species
}
lnZ = {}
lnZ_err = {}
for species_name, species_data in data_dict.items():
lnZ[species_name] = {}
lnZ_err[species_name] = {}
for model_name, model_data in species_data.items():
lnZ[species_name][model_name] = model_data["lnZ"]
lnZ_err[species_name][model_name] = model_data["lnZerr"]
df_lnZ = pd.DataFrame(lnZ)
    df_lnZ_err = pd.DataFrame(lnZ_err)
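    # Illustrative continuation (a sketch; the original function is truncated here):
    # evidences are typically compared as Delta lnZ against a reference model,
    # with uncertainties added in quadrature, e.g.
    #     delta_lnZ = df_lnZ - df_lnZ.loc["clear+spot"]
    #     delta_err = np.sqrt(df_lnZ_err**2 + df_lnZ_err.loc["clear+spot"]**2)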
from __future__ import print_function, division
import os, mimetypes
import sys
import torch
import pandas as pd
from collections import defaultdict
import PIL
import random
from tqdm.autonotebook import tqdm
from tqdm import tnrange
import numpy as np
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils, models
from pathlib import Path
from random import shuffle
from scipy import ndimage
#from torchsummary import summary
import torch.nn as nn
import time
import copy
import torch.optim as optim
from torch.optim import lr_scheduler
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
if sys.platform == 'linux':
path = Path('/home/ec2-user/SageMaker/data')
else:
path = Path('C:/Users/francesco.pochetti/Downloads/imagenette')
#################################################
# UNIT TESTS
#################################################
def test_data(dl, ds, bs, size, p=0):
assert dl['train'].dataset == ds['train']
assert dl['valid'].dataset == ds['valid']
assert abs(len(ds['train'])/bs - len(dl['train'])) < 2
assert abs(len(ds['valid'])/bs - len(dl['valid'])) < 2
i,c,s = next(iter(dl['train']))
assert i.shape[0] == bs
assert i.shape[1] == 3
assert i.shape[2] == i.shape[3] == (size+p*2)
i,c,s = next(iter(dl['valid']))
assert i.shape[0] == bs
assert i.shape[1] == 3
assert i.shape[2] == i.shape[3] == (size+p*2)
def test_deprocess(ds_item, size, p):
denorm = DeProcess(imagenet_stats, size, p)
d = denorm(ds_item)
print('shape of re-center-cropped image:', d[0].shape)
return PIL.Image.fromarray(d[random.choice([0,1,2])])
def test_hooks(model, dl, bs):
fst = FastStyleTransfer(dl, *get_model_opt(model))
assert fst.hooks_initialized == True
d = random.choice(['train', 'valid'])
i, c, s = next(iter(fst.dl[d]))
i = i.to(fst.device)
c = c.to(fst.device)
s = s.to(fst.device)
assert torch.allclose(i, c) == True
assert torch.allclose(i, s) == False
fst.vgg(i)
input_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
fst.vgg(c)
content_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
fst.vgg(s)
style_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
assert len(input_act) == len(content_act) == len(style_act) == 5
assert torch.allclose(input_act[1], content_act[1])
assert torch.allclose(input_act[3], content_act[3])
assert style_act[0].shape[0] == style_act[2].shape[0] == style_act[4].shape[0] == bs
fst.close_hooks()
assert fst.hooks_initialized == False
def test_losses(model, dl):
fst = FastStyleTransfer(dl, *get_model_opt(model))
assert fst.hooks_initialized == True
d = random.choice(['train', 'valid'])
i, c, s = next(iter(fst.dl[d]))
i = i.to(fst.device)
c = c.to(fst.device)
s = s.to(fst.device)
#assert torch.allclose(i, c)
assert torch.allclose(i, s) == False
fst.vgg(i)
input_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
print('shape of input_act: ', [o.shape for o in input_act])
fst.vgg(c)
content_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
print('shape of content_act: ', [o.shape for o in content_act])
fst.vgg(s)
style_act = [o.features.clone().detach_().to(fst.device) for o in fst.act]
print('shape of style_act: ', [o.shape for o in style_act])
co_loss = fst.content_mse(input_act[0], content_act[0])
assert isinstance(co_loss, torch.Tensor)
st_loss = fst.gram_mse_loss(input_act[4], style_act[4])
assert isinstance(st_loss, torch.Tensor)
fst.input_act = input_act
fst.content_act = content_act
fst.style_act = style_act
fst.outputs = fst.model(i)
loss, content, style, tv = fst.combined_loss()
assert isinstance(st_loss, torch.Tensor)
fst.close_hooks()
assert fst.hooks_initialized == False
#################################################
# UTILS
#################################################
def _get_files(p, fs, extensions=None):
p = Path(p)
res = [p/f for f in fs if not f.startswith('.')
and ((not extensions) or f'.{f.split(".")[-1].lower()}' in extensions)]
return res
def get_files(path, extensions=None, recurse=False, include=None):
path = Path(path)
extensions = {e.lower() for e in extensions}
if recurse:
res = []
for i,(p,d,f) in enumerate(os.walk(path)): # returns (dirpath, dirnames, filenames)
if include is not None and i==0: d[:] = [o for o in d if o in include]
else: d[:] = [o for o in d if not o.startswith('.')]
res += _get_files(p, f, extensions)
return res
else:
f = [o.name for o in os.scandir(path) if o.is_file()]
return _get_files(path, f, extensions)
class SaveFeatures():
features=None
def __init__(self, m):
self.hook = m.register_forward_hook(self.hook_fn)
def hook_fn(self, module, input, output):
self.features = output
def close(self):
self.hook.remove()
def gram(input):
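    # Gram matrix of the flattened feature maps (channels x channels),
    # normalised by the total number of elements; used by the style loss to
    # compare feature correlations between generated and style images.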
b,c,h,w = input.size()
x = input.view(b*c, -1)
return torch.mm(x, x.t())/input.numel() #*1e6
def build_style_dataframe(path, style):
content_path = path/'coco-images'/'test2015'
image_extensions = set(k for k,v in mimetypes.types_map.items() if v.startswith('image/'))
files = get_files(content_path, image_extensions, recurse=True)
assert len(files) == 81434
style_path = path/'styles'/style
contents = files
styles = [style_path] * len(contents)
assert len(styles) == 81434
te_ = int(len(styles) * 0.001)
tr_ = len(styles) - te_
assert(len(styles) == (te_+tr_))
print(f'Files in validation set: {te_}; Files in training set {tr_}')
splits = ['valid'] * te_ + ['train'] * tr_
shuffle(splits)
df = pd.DataFrame({'content_': contents, 'style_': styles, 'split_': splits})
assert len(df) == 81434
df.to_csv(path/f'{style[:-4]}.csv', index=False)
def calc_loss_ratios(model, path, tmfs, size, bs, vgg, tv_weight=None):
c2s = []
c2t = []
for _ in range(3):
train_ds = StyleTransferDataset(path, train_test='train', transform=tmfs, sample=0.01)
valid_ds = StyleTransferDataset(path, train_test='valid', transform=tmfs, sample=0.5)
dataloaders = {'train': DataLoader(train_ds, batch_size=bs, shuffle=True),
'valid': DataLoader(valid_ds, batch_size=bs)}
fst = FastStyleTransfer(dataloaders, *get_model_opt(model), size=size,
c2s=1, c2t=1, tv_weight=tv_weight, content_weight=1, style_weight=1, vgg=vgg)
fst.train(verbose=False)
d = fst.get_metrics('train')
c2s.append(d['content'].mean()/d['style'].mean())
if tv_weight is not None: c2t.append(d['content'].mean()/d['tv'].mean())
if tv_weight is not None: return np.array(c2s).mean(), np.array(c2t).mean()
return np.array(c2s).mean(), 1.0
def get_model_opt(model, sched=None):
unet = model
optimizer = optim.Adam(filter(lambda p: p.requires_grad, unet.parameters()), lr=1e-3)
if sched: sched = lr_scheduler.CosineAnnealingLR(optimizer, 50)
return [unet, optimizer, sched]
#################################################
# DATASETS & DATALOADERS
#################################################
class StyleTransferDataset(Dataset):
"""Style Transfer dataset."""
def __init__(self, csv_file, train_test, transform=None, sample=None):
data = pd.read_csv(csv_file)
if sample: data = data.sample(int(len(data)*sample))
self.train_test = train_test
data.loc[:,['content_', 'style_']] = data.loc[:,['content_', 'style_']].applymap(lambda x: Path(x))
self.data = data.loc[data.split_==train_test,:].reset_index(drop=True)
self.transform = transform
def __len__(self):
return len(self.data)
def __repr__(self):
item = self.__getitem__(0)
_1 = f'{self.train_test.capitalize()} dataset: {len(self.data)} items\n'
_2 = f'Item: {type(item)} of {len(item)} {type(item[0])}\n'
_3 = f"Item example: 'input':{ item[0].shape},'content':{item[1].shape},'style':{item[2].shape}"
return _1+_2+_3
def __getitem__(self, idx):
if type(idx) == torch.Tensor:
idx = idx.item()
content_img = self.data.content_.iloc[idx]
content_img = PIL.Image.open(content_img)
style_img = self.data.style_.iloc[idx]
style_img = PIL.Image.open(style_img)
#opt_img = np.random.uniform(0, 1, size=(content_img.size + (3,))).astype(np.float32)
#opt_img = ndimage.filters.median_filter(opt_img, [8,8,1])
#item = {'input': PIL.Image.fromarray(np.uint8(opt_img*255)),
# 'content': content_img,
# 'style': style_img}
item = {'content': content_img,
'style': style_img}
if self.transform: item = compose(item, self.transform)
return item['content'], item['content'], item['style']
def compose(x, funcs, *args, order_key='_order', **kwargs):
key = lambda o: getattr(o, order_key, 0)
for f in sorted(list(funcs), key=key): x = f(x, **kwargs)
return x
class Transform(): _order=0
class MakeRGB(Transform):
def __call__(self, item): return {k: v.convert('RGB') for k, v in item.items()}
class ResizeFixed(Transform):
_order=10
def __init__(self, size):
if isinstance(size,int): size=(size,size)
self.size = size
def __call__(self, item): return {k: v.resize(self.size, PIL.Image.BILINEAR) for k, v in item.items()}
class ToByteTensor(Transform):
_order=20
def to_byte_tensor(self, item):
res = torch.ByteTensor(torch.ByteStorage.from_buffer(item.tobytes()))
w,h = item.size
return res.view(h,w,-1).permute(2,0,1)
def __call__(self, item): return {k: self.to_byte_tensor(v) for k, v in item.items()}
class ToFloatTensor(Transform):
_order=30
def to_float_tensor(self, item): return item.float().div_(255.)
def __call__(self, item): return {k: self.to_float_tensor(v) for k, v in item.items()}
class Normalize(Transform):
_order=40
def __init__(self, stats, p=None):
self.mean = torch.as_tensor(stats[0] , dtype=torch.float32)
self.std = torch.as_tensor(stats[1] , dtype=torch.float32)
self.p = p
def normalize(self, item): return item.sub_(self.mean[:, None, None]).div_(self.std[:, None, None])
def pad(self, item): return nn.functional.pad(item[None], pad=(self.p,self.p,self.p,self.p), mode='replicate').squeeze(0)
def __call__(self, item):
if self.p is not None: return {k: self.pad(self.normalize(v)) for k, v in item.items()}
else: return {k: self.normalize(v) for k, v in item.items()}
class PilRandomDihedral(Transform):
_order=15
def __init__(self, p=0.75): self.p=p*7/8 #Little hack to get the 1/8 identity dihedral transform taken into account.
def __call__(self, item):
if random.random()>self.p: return item
return {k: v.transpose(random.randint(0,6)) for k, v in item.items()}
class DeProcess(Transform):
_order=50
def __init__(self, stats, size=None, p=None):
self.mean = torch.as_tensor(stats[0] , dtype=torch.float32)
self.std = torch.as_tensor(stats[1] , dtype=torch.float32)
self.size = size
self.p = p
def de_normalize(self, item): return ((item*self.std[:, None, None]+self.mean[:, None, None])*255.).clamp(0, 255)
def rearrange_axis(self, item): return np.moveaxis(item, 0, -1)
def to_np(self, item): return np.uint8(np.array(item))
def crop(self, item): return item[self.p:self.p+self.size,self.p:self.p+self.size,:]
def de_process(self, item):
if self.size is not None and self.p is not None:
return self.crop(self.rearrange_axis(self.to_np(self.de_normalize(item))))
else:
return self.rearrange_axis(self.to_np(self.de_normalize(item)))
def __call__(self, item):
if isinstance(item, torch.Tensor): return self.de_process(item)
if isinstance(item, tuple): return tuple([self.de_process(v) for v in item])
if isinstance(item, dict): return {k: self.de_process(v) for k, v in item.items()}
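# Illustrative sketch (not part of the original code): one way the transforms above
# could be assembled and applied through compose(). The imagenet_stats values and the
# 256/30 size/padding choices are assumptions made only for this example.
def _example_build_transforms(size=256, pad=30):
    imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    return [MakeRGB(), ResizeFixed(size), PilRandomDihedral(),
            ToByteTensor(), ToFloatTensor(), Normalize(imagenet_stats, p=pad)]
# Usage sketch: item = compose({'content': content_img, 'style': style_img},
#                              _example_build_transforms())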
#################################################
# RESNET UNET
#################################################
def convrelu(in_channels, out_channels, kernel, padding):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel, padding=padding),
nn.ReLU(inplace=True),
)
class ResNetUNet(nn.Module):
def __init__(self, n_class=3):
super().__init__()
self.base_model = models.resnet18(pretrained=True)
self.base_layers = list(self.base_model.children())
self.layer0 = nn.Sequential(*self.base_layers[:3]) # size=(N, 64, x.H/2, x.W/2)
self.layer0_1x1 = convrelu(64, 64, 1, 0)
self.layer1 = nn.Sequential(*self.base_layers[3:5]) # size=(N, 64, x.H/4, x.W/4)
self.layer1_1x1 = convrelu(64, 64, 1, 0)
self.layer2 = self.base_layers[5] # size=(N, 128, x.H/8, x.W/8)
self.layer2_1x1 = convrelu(128, 128, 1, 0)
self.layer3 = self.base_layers[6] # size=(N, 256, x.H/16, x.W/16)
self.layer3_1x1 = convrelu(256, 256, 1, 0)
self.layer4 = self.base_layers[7] # size=(N, 512, x.H/32, x.W/32)
self.layer4_1x1 = convrelu(512, 512, 1, 0)
self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
self.conv_up3 = convrelu(256 + 512, 512, 3, 1)
self.conv_up2 = convrelu(128 + 512, 256, 3, 1)
self.conv_up1 = convrelu(64 + 256, 256, 3, 1)
self.conv_up0 = convrelu(64 + 256, 128, 3, 1)
self.conv_original_size0 = convrelu(3, 64, 3, 1)
self.conv_original_size1 = convrelu(64, 64, 3, 1)
self.conv_original_size2 = convrelu(64 + 128, 64, 3, 1)
self.conv_last = nn.Conv2d(64, n_class, 1)
def forward(self, input):
x_original = self.conv_original_size0(input)
x_original = self.conv_original_size1(x_original)
layer0 = self.layer0(input)
layer1 = self.layer1(layer0)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer4 = self.layer4_1x1(layer4)
x = self.upsample(layer4)
layer3 = self.layer3_1x1(layer3)
x = torch.cat([x, layer3], dim=1)
x = self.conv_up3(x)
x = self.upsample(x)
layer2 = self.layer2_1x1(layer2)
x = torch.cat([x, layer2], dim=1)
x = self.conv_up2(x)
x = self.upsample(x)
layer1 = self.layer1_1x1(layer1)
x = torch.cat([x, layer1], dim=1)
x = self.conv_up1(x)
x = self.upsample(x)
layer0 = self.layer0_1x1(layer0)
x = torch.cat([x, layer0], dim=1)
x = self.conv_up0(x)
x = self.upsample(x)
x = torch.cat([x, x_original], dim=1)
x = self.conv_original_size2(x)
out = self.conv_last(x)
return out
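# Illustrative sketch (not part of the original code): a quick shape check for
# ResNetUNet. Because every downsampling step is undone by an upsample and the result
# is fused with the full-resolution branch, the output keeps the input's spatial size.
# The batch size and 256x256 resolution are arbitrary example values; running this
# downloads the pretrained resnet18 weights.
def _example_resnet_unet_shape_check():
    net = ResNetUNet(n_class=3).eval()
    with torch.no_grad():
        out = net(torch.randn(2, 3, 256, 256))
    assert out.shape == (2, 3, 256, 256)
    return out.shape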
#################################################
# TRANSFORMER NET
#################################################
class TransformerNet(torch.nn.Module):
def __init__(self):
super().__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
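# Illustrative sketch (not part of the original code): the upsample-then-conv layer
# above is a drop-in alternative to a strided ConvTranspose2d, using fixed
# nearest-neighbour interpolation instead of a learned stride to avoid checkerboard
# artifacts (see the distill.pub reference). The sizes below are arbitrary examples.
def _example_upsample_conv_vs_transpose():
    x = torch.randn(1, 64, 32, 32)
    up = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
    tr = torch.nn.ConvTranspose2d(64, 32, kernel_size=4, stride=2, padding=1)
    # both produce a 2x larger feature map of shape (1, 32, 64, 64)
    return up(x).shape, tr(x).shape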
#################################################
# FAST STYLE TRANSFER CLASS
#################################################
class FastStyleTransfer():
def __init__(self, dl, model, opt, sched=None, c2s=1.0, c2t=1.0,
style_weight=1.0, content_weight=1.0,
tv_weight=1.0, size=256, p=30, vgg=16):
self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
self.mseloss = nn.MSELoss()
self.init_vgg(vgg)
self.convs = [i-2 for i,o in enumerate(list(self.vgg.features)) if isinstance(o,nn.MaxPool2d)]
self.model = model.to(self.device)
        self.original_model = copy.deepcopy(model).to(self.device)  # keep an untouched copy so reinitialize_unet() can restore the initial weights
self.opt = opt
self.sched = sched
self.content_weight = content_weight
self.style_weight = style_weight
self.tv_weight = tv_weight
self.dl = dl
self.initialize_hooks()
self.style_act = None
self.training_done = False
self.size = size
self.p = p
self.c2s = c2s
self.c2t = c2t
def init_vgg(self, vgg):
if vgg==16: self.vgg = models.vgg16(pretrained=True).to(self.device)
if vgg==19: self.vgg = models.vgg19(pretrained=True).to(self.device)
self.vgg.eval()
def reinitialize_unet(self): self.model = copy.deepcopy(self.original_model)
def initialize_hooks(self):
self.act = [SaveFeatures(list(self.vgg.features)[idx]) for idx in self.convs]
self.hooks_initialized = True
def close_hooks(self):
for hook in self.act: hook.close()
self.hooks_initialized = False
def vgg_conv_layers(self): return np.array(list(self.vgg.features))[self.convs]
def content_mse(self, input, target): return self.mseloss(input, target) #*1e3
def gram_mse_loss(self, input, target): return self.mseloss(gram(input), gram(target))
def tv_loss(self):
l = (torch.sum(torch.abs(self.outputs[:, :, :, :-1] - self.outputs[:, :, :, 1:])) +
torch.sum(torch.abs(self.outputs[:, :, :-1, :] - self.outputs[:, :, 1:, :])))
return l
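    # tv_loss above is the anisotropic total-variation penalty: the summed absolute
    # differences between horizontally and vertically adjacent pixels of the
    # generated images (self.outputs), which encourages spatially smooth results.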
def combined_loss(self):
style_losses = [self.gram_mse_loss(o, s) for o,s in zip(self.input_act, self.style_act)]
#content_losses = [content_mse(o, s) for o,s in zip(opt_cat, target_cont)]
content_losses = [self.content_mse(self.input_act[2], self.content_act[2])]
style = sum(style_losses) * self.style_weight * self.c2s
content = sum(content_losses) * self.content_weight
loss = content + style
if self.tv_weight is None:
tv = None
else:
tv = self.tv_loss() * self.tv_weight * self.c2t
loss += tv
return loss, content, style, tv
def store_metrics(self, phase, epoch, i):
self.metrics[phase]['epoch'] += [epoch]
self.metrics[phase]['batch'] += [i]
self.metrics[phase]['batch_size'] += [self.inputs.size(0)]
self.metrics[phase]['total_loss'] += [self.loss.cpu().detach().numpy()]
self.metrics[phase]['content_loss'] += [self.content_loss.cpu().detach().numpy()]
self.metrics[phase]['style_loss'] += [self.style_loss.cpu().detach().numpy()]
self.metrics[phase]['tv_loss'] += [0 if self.tv_weight is None else self.tv.cpu().detach().numpy()]
def get_epoch_loss(self, phase):
d = pd.DataFrame(self.metrics[phase])
        d = d.groupby('epoch')[['total_loss', 'batch_size']].sum(). \
            reset_index().sort_values(by='epoch').tail(1)
d = d.total_loss/d.batch_size
return np.array(d)[0]
def get_metrics(self, phase):
df = | pd.DataFrame(self.metrics[phase]) | pandas.DataFrame |
import pandas as pd
import openpyxl
from automate_insurance_pricing.standard_functions import *
def export_glm_coefs_to_excel(features_coefs, features, glm_file_path):
""" Exports to excel the glm rating factors of the features (either a list or a string)
Arguments --> The features coefficients, the features, and the file path used for the export
"""
    new_features = [features] if isinstance(features, str) else features
for feature in new_features:
feature_name = remove_words(feature, feature=('feature', ''), label=('label_enc', ''), bins=('bins', ''), scaled=('scaled', ''), underscore=('_', ' '), double_points=(':', 'x'), special_character1=(']', ''))[:31]
data = {feature_name + ' ' + 'value': [i[0] for i in features_coefs[feature]], 'coef_value': [i[1] for i in features_coefs[feature]]}
df_rating_factor = pd.DataFrame(data)
try:
with pd.ExcelWriter(glm_file_path, engine="openpyxl", mode='a') as writer:
df_rating_factor.to_excel(writer, sheet_name=feature_name)
except:
with | pd.ExcelWriter(glm_file_path, engine="openpyxl", mode='w') | pandas.ExcelWriter |
#! /usr/bin/env python3
import re
import pandas as pd
from datetime import date
import category
import category_name
def is_row_in_category(row, categories):
"""Determines if row['place'] is in the given category
Args:
row (pandas.core.series.Series): Single row [date,place,amount] of a dataframe
category (list): Category list
Returns:
bool: True if row in category, False otherwise
"""
for place in categories:
if re.search(place, row['place'], re.IGNORECASE):
return True
return False
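# Illustrative sketch (not part of the original script): how a single expense row is
# matched against a category keyword list. The row values and the keyword list below
# are made up for the example; the real lists live in category.py.
def _example_is_row_in_category():
    row = pd.Series({'date': '2021-01-05', 'place': 'Uber Trip 1234', 'amount': 12.5})
    return is_row_in_category(row, ['UBER', 'TRANSLINK'])  # True (case-insensitive match)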
def organise_data_by_category(my_dataframe):
"""Parse all spending and populate smaller dataframes by categories
Args:
        my_dataframe (pandas.core.frame.DataFrame): Unparsed dataframe with all uncategorized expenses
Returns:
        dict: A dictionary of dataframes. [key] = category name; [value] = dataframe with all the category's related expenses
"""
print("Organise spendings into categories")
# it is 3 times faster to create a dataframe from a full dictionary rather than appending rows after rows to an already existing dataframe
dic_groc, dic_trans, dic_rest, dic_coffee, dic_bar, dic_misc, dic_bills = {}, {}, {}, {}, {}, {}, {}
g, t, r, c, b, m, f = [0] * 7 # indexes
# Let's go over each rows of the unsorted dataframe and populate the category's dictionary.
for _, row in my_dataframe.iterrows():
if is_row_in_category(row, category.Groceries):
dic_groc[g] = row
g = g + 1
continue
if is_row_in_category(row, category.Transport):
dic_trans[t] = row
t = t + 1
continue
if is_row_in_category(row, category.Restaurant):
dic_rest[r] = row
r = r + 1
continue
if is_row_in_category(row, category.Coffee):
dic_coffee[c] = row
c = c + 1
continue
if is_row_in_category(row, category.Bar):
dic_bar[b] = row
b = b + 1
continue
if is_row_in_category(row, category.Bills):
dic_bills[f] = row
f = f + 1
continue
# If none of the above then let's put it in misc spending
dic_misc[m] = row
m = m + 1
df_groc = pd.DataFrame.from_dict(dic_groc, orient='index', columns=['date', 'place', 'amount'])
df_trans = pd.DataFrame.from_dict(dic_trans, orient='index', columns=['date', 'place', 'amount'])
df_rest = pd.DataFrame.from_dict(dic_rest, orient='index', columns=['date', 'place', 'amount'])
df_coffee = pd.DataFrame.from_dict(dic_coffee, orient='index', columns=['date', 'place', 'amount'])
df_bar = pd.DataFrame.from_dict(dic_bar, orient='index', columns=['date', 'place', 'amount'])
df_misc = pd.DataFrame.from_dict(dic_misc, orient='index', columns=['date', 'place', 'amount'])
df_bills = pd.DataFrame.from_dict(dic_bills, orient='index', columns=['date', 'place', 'amount'])
all_df = {
category_name.GROCERIES: df_groc,
category_name.TRANSPORT: df_trans,
category_name.RESTAURANT: df_rest,
category_name.COFFEE: df_coffee,
category_name.BAR: df_bar,
category_name.MISC: df_misc,
category_name.BILLS: df_bills
}
return all_df
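# Illustrative sketch (not part of the original script): feeding a small made-up
# dataframe of [date, place, amount] rows through the categoriser. Which category each
# row lands in depends on the keyword lists defined in category.py.
def _example_organise_data_by_category():
    df = pd.DataFrame([
        {'date': '2021-01-02', 'place': 'SAFEWAY #123', 'amount': 54.20},
        {'date': '2021-01-03', 'place': 'TRANSLINK', 'amount': 3.05},
    ])
    return organise_data_by_category(df)  # dict of one dataframe per category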
def organise_transport_by_sub_cat(_dfTransport):
"""Parse all spending in transport and populate smaller dataframes by categories
Args:
_dfTransport (pandas.core.frame.DataFrame): Unparsed dataframe with all transport expenses
Returns:
        dict: A dictionary of dataframes. [key] = category name; [value] = dataframe with all the category's related expenses
"""
# it is 3 times faster to create a dataframe from a full dictionary rather than appending rows after rows to an already existing dataframe
dic_carshare, dic_rental, dic_cab, dic_translink, dic_misc, dic_car = {}, {}, {}, {}, {}, {}
csh, r, c, t, m, car = [0] * 6 # indexes
# Let's go over each rows of the unsorted dataframe and populate the category's dictionary.
for _, row in _dfTransport.iterrows():
if is_row_in_category(row, category.TransportCarShare):
dic_carshare[csh] = row
csh = csh + 1
continue
if is_row_in_category(row, category.TransportRental):
dic_rental[r] = row
r = r + 1
continue
if is_row_in_category(row, category.TransportCab):
dic_cab[c] = row
c = c + 1
continue
if is_row_in_category(row, category.TransportTranslink):
dic_translink[t] = row
t = t + 1
continue
if is_row_in_category(row, category.TransportMisc):
dic_misc[m] = row
m = m + 1
continue
if is_row_in_category(row, category.TransportCar):
dic_car[car] = row
car = car + 1
continue
# If none of the above then let's put it in misc spending
dic_misc[m] = row
m = m + 1
df_carshare = pd.DataFrame.from_dict(dic_carshare, orient='index', columns=['date', 'place', 'amount'])
df_rental = pd.DataFrame.from_dict(dic_rental, orient='index', columns=['date', 'place', 'amount'])
df_cab = pd.DataFrame.from_dict(dic_cab, orient='index', columns=['date', 'place', 'amount'])
df_translink = pd.DataFrame.from_dict(dic_translink, orient='index', columns=['date', 'place', 'amount'])
df_misc = | pd.DataFrame.from_dict(dic_misc, orient='index', columns=['date', 'place', 'amount']) | pandas.DataFrame.from_dict |
# -*- coding: utf-8 -*-
"""
:Author: <NAME>
<NAME>
:Date: 2018. 7. 18
"""
import os
import platform
import sys
from copy import deepcopy as dc
from datetime import datetime
from warnings import warn
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import pandas.core.common as com
import statsmodels.api as sm
from matplotlib import font_manager, rc
from pandas import DataFrame
from pandas import Series
from pandas.core.index import MultiIndex
from pandas.core.indexing import convert_to_index_sliceable
from performanceanalytics.charts.performance_summary import create_performance_summary
from .columns import *
from .outcomes import *
from ..io.downloader import download_latest_data
from ..util.checker import not_empty
import dropbox
import io
# Hangul font setting
# noinspection PyProtectedMember
font_manager._rebuild()
if platform.system() == 'Windows':
font_name = font_manager.FontProperties(fname='c:/Windows/Fonts/malgun.ttf').get_name()
elif platform.system() == 'Darwin': # OS X
font_name = font_manager.FontProperties(fname='/Library/Fonts/AppleGothic.ttf').get_name()
else: # Linux
fname = '/usr/share/fonts/truetype/nanum/NanumGothicOTF.ttf'
if not os.path.isfile(fname):
raise ResourceWarning("Please install NanumGothicOTF.ttf for plotting Hangul.")
font_name = font_manager.FontProperties(fname=fname).get_name()
rc('font', family=font_name)
# for fix broken Minus sign
matplotlib.rcParams['axes.unicode_minus'] = False
PERCENTAGE = 'percentage'
WEIGHT = 'weight'
WEIGHT_SUM = 'weight_sum'
START_DATE = datetime(year=2001, month=5, day=31)
QUANTILE = 'quantile'
RANK = 'rank'
RANK_CORRELATION = 'Rank correlation'
class Portfolio(DataFrame):
"""
"""
_benchmark = KOSPI
benchmarks = None
factors = None
@property
def _constructor(self):
return Portfolio
@not_empty
def __init__(self, data=None, index=None, columns=None, dtype=None, copy: bool = False,
start_date: datetime = START_DATE, end_date: datetime = None,
include_holding: bool = False, include_finance: bool = False,
include_managed: bool = False, include_suspended: bool = False):
if not end_date:
end_date = datetime.today()
if data is None:
print('Data is being downloaded from KSIF DROPBOX DATA STORAGE')
dbx = dropbox.Dropbox(
oauth2_access_token='<KEY>', timeout=None)
metadata, f = dbx.files_download('/preprocessed/final_msf.csv')
# metadata, f = dbx.files_download('/preprocessed/merged.csv')
binary_file = f.content
data = pd.read_csv(io.BytesIO(binary_file))
#
_, self.benchmarks, self.factors = download_latest_data(download_company_data=False)
#
# if not include_holding:
# data = data.loc[~data[HOLDING], :]
#
# if not include_finance:
# data = data.loc[data[FN_GUIDE_SECTOR] != '금융', :]
#
# if not include_managed:
# data = data.loc[~data[IS_MANAGED], :]
#
# if not include_suspended:
# data = data.loc[~data[IS_SUSPENDED], :]
#
# data = data.loc[(start_date <= data[DATE]) & (data[DATE] <= end_date), :]
else:
_, self.benchmarks, self.factors = download_latest_data(download_company_data=False)
self.benchmarks = self.benchmarks.loc[
(start_date <= self.benchmarks[DATE]) & (self.benchmarks[DATE] <= end_date), :]
self.factors = self.factors.loc[(start_date <= self.factors.index) & (self.factors.index <= end_date), :]
super(Portfolio, self).__init__(data=data) #, index=index, columns=columns, dtype=dtype, copy=copy)
# self.data = data
def __getitem__(self, key):
from pandas.core.dtypes.common import is_list_like, is_integer, is_iterator
key = com.apply_if_callable(key, self)
# shortcut if the key is in columns
try:
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
except (TypeError, ValueError):
# The TypeError correctly catches non hashable "key" (e.g. list)
# The ValueError can be removed once GH #21729 is fixed
pass
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self._getitem_frame(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not | is_list_like(key) | pandas.core.dtypes.common.is_list_like |
import pandas as pd
import numpy as np
import math
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
def plot_distributions(
data: pd.DataFrame = None, cols: list = [], caps: dict = None
) -> None:
"""
Function to plot the distributions and their potential caps
    :param data: pandas dataframe with the columns to be plotted
:param cols: list of column names to plot
:param caps: Dictionary containing the caps
:return:
"""
if len(cols) == 0:
cols = data.columns
# filter out object cols
cols = [col for col in cols if data[col].dtype != "O"]
for col in cols:
_ = plt.figure(figsize=(6, 6))
ax = sns.kdeplot(data[col], shade=True, legend=False)
if caps:
ind = caps["feature"].index(col)
_ = plt.axvline(
caps["mean"][ind], linestyle="--", color="black", label="mean"
)
if "plus_2_SD" in caps.keys():
_ = plt.axvline(caps["plus_2_SD"][ind], color="red", label="plus 2 SD")
if "plus_3_SD" in caps.keys():
_ = plt.axvline(caps["plus_3_SD"][ind], color="red", label="plus 3 SD")
if "minus_2_SD" in caps.keys():
_ = plt.axvline(
caps["minus_2_SD"][ind], color="red", label="minus 2 SD"
)
if "minus_3_SD" in caps.keys():
_ = plt.axvline(
caps["minus_3_SD"][ind], color="red", label="minus 3 SD"
)
if "75th_Percentile" in caps.keys():
_ = plt.axvline(
caps["75th_Percentile"][ind], color="red", label="75th percentile"
)
if "90th_Percentile" in caps.keys():
_ = plt.axvline(
caps["90th_Percentile"][ind], color="red", label="90th percentile"
)
_ = plt.title(col + " Distribution")
plt.show()
return None
def plot_missing_values(df: pd.DataFrame = None, cols: list = []) -> None:
if len(cols) == 0:
cols = df.columns
_ = plt.figure(figsize=(12, 12))
cmap = sns.cubehelix_palette(8, start=0, rot=0, dark=0, light=0.95, as_cmap=True)
ax = sns.heatmap(data=pd.isnull(df[cols]), cmap=cmap, cbar=False)
_ = ax.set_title("Missing Data")
return None
def plot_category_histograms(data: pd.DataFrame = None, cols: list = []) -> None:
"""
    Function to plot count histograms of the categorical columns
    :param data: pandas dataframe with the columns to be plotted
:param cols: list of column names to plot
:return:
"""
if len(cols) == 0:
cols = data.columns
# filter out object cols
cols = [col for col in cols if data[col].dtype == "O"]
tmp = data.copy(deep=True)
tmp["count"] = [i for i in range(len(data))]
for col in cols:
_ = plt.figure(figsize=(6, 6))
ax = tmp.groupby(col)["count"].count().plot(kind="bar")
_ = ax.set_title(col + " Counts")
return None
def plot_correlations(data=None, cols: list = [], plot_title="Feature Correlations"):
if len(cols) == 0:
cols = data.columns
# filter out object cols
cols = [col for col in cols if data[col].dtype != "O"]
corr = data[cols].corr()
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(240, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
ax = sns.heatmap(
corr,
# mask=mask,
cmap=cmap,
vmin=corr.values.min(),
vmax=corr.values.max(),
center=0,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
)
ax.set_title(plot_title)
return
def plot_outcome_boxes(data=None, outcome: list = [], fts: list = []) -> None:
if len(fts) > 10:
fts = np.array_split(fts, math.ceil(len(fts) / 10))
else:
fts = [fts]
for ft in fts:
for out in outcome:
tmp_fts = outcome + ft
df_long = | pd.melt(data[tmp_fts], id_vars=out, var_name="Feature") | pandas.melt |
import logging
import argparse
import robin_stocks.robinhood as rh
import numpy
import pandas
def historicals(sym: str, interval: str, span: str, bounds: str):
"""Return historical information about a stock."""
return rh.get_stock_historicals(sym,interval=interval,span=span,bounds=bounds)
def watchlist_symbols():
"""Return the symbols in your watchlists."""
my_list_names = set()
symbols = []
watchlistInfo = rh.get_all_watchlists()
for list in watchlistInfo['results']:
listName = list['display_name']
my_list_names.add(listName)
for listName in my_list_names:
list = rh.get_watchlist_by_name(name=listName)
for item in list['results']:
symbol = item['symbol']
symbols.append(symbol)
return symbols
def portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = rh.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = rh.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by rh.open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = rh.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
    # no matching instrument url was found in the holdings data
return "Not found"
def modified_holdings():
""" Retrieves the same dictionary as rh.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from rh.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = rh.build_holdings()
holdings_data = rh.get_open_stock_positions()
for symbol, _ in holdings.items():
bought_at = position_creation_date(symbol, holdings_data)
bought_at = str( | pandas.to_datetime(bought_at) | pandas.to_datetime |
import pandas as pd
import numpy as np
data = []
f = range(1,75,1)
for n in f:
print(n)
in_file = open('/Users/annethessen/NSF_awards/award_data/' + str(n) + '.txt', 'r')
next(in_file)
for line in in_file:
line.strip('\n')
row = line.split('\t')
#print(row[0:24])
data.append(row[0:24])
arr = np.array(data) #dtype=['U7','U150','U25','U50','M8','M8','U25','U25','U25','U25','U25','M8','f8','U25','U25','U25','U25','U25','U25','U25','U25','U25','U25','f8','U500'])
labels = ['AwardNumber','Title','NSFOrganization','Program(s)','StartDate','LastAmendmentDate','PrincipalInvestigator','State','Organization','AwardInstrument','ProgramManager','EndDate','AwardedAmountToDate','Co-PIName(s)','PIEmailAddress','OrganizationStreet','OrganizationCity','OrganizationState','OrganizationZip','OrganizationPhone','NSFDirectorate','ProgramElementCode(s)','ProgramReferenceCode(s)','ARRAAmount','Abstract']
df = | pd.DataFrame(arr, columns=labels, index=['AwardNumber']) | pandas.DataFrame |
import warnings
import numpy as np
import pandas as pd
from pandas.api.types import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_interval_dtype,
is_period_dtype,
is_scalar,
is_sparse,
union_categoricals,
)
from ..utils import is_arraylike, typename
from ._compat import PANDAS_GT_100
from .core import DataFrame, Index, Scalar, Series, _Frame
from .dispatch import (
categorical_dtype_dispatch,
concat,
concat_dispatch,
get_parallel_type,
group_split_dispatch,
hash_object_dispatch,
is_categorical_dtype_dispatch,
make_meta,
make_meta_obj,
meta_nonempty,
tolist_dispatch,
union_categoricals_dispatch,
)
from .extensions import make_array_nonempty, make_scalar
from .utils import (
_empty_series,
_nonempty_scalar,
_scalar_from_dtype,
is_categorical_dtype,
is_float_na_dtype,
is_integer_na_dtype,
)
##########
# Pandas #
##########
@make_scalar.register(np.dtype)
def _(dtype):
return _scalar_from_dtype(dtype)
@make_scalar.register(pd.Timestamp)
@make_scalar.register(pd.Timedelta)
@make_scalar.register(pd.Period)
@make_scalar.register(pd.Interval)
def _(x):
return x
@make_meta.register((pd.Series, pd.DataFrame))
def make_meta_pandas(x, index=None):
return x.iloc[:0]
@make_meta.register(pd.Index)
def make_meta_index(x, index=None):
return x[0:0]
meta_object_types = (pd.Series, pd.DataFrame, pd.Index, pd.MultiIndex)
try:
import scipy.sparse as sp
meta_object_types += (sp.spmatrix,)
except ImportError:
pass
@make_meta_obj.register(meta_object_types)
def make_meta_object(x, index=None):
"""Create an empty pandas object containing the desired metadata.
Parameters
----------
x : dict, tuple, list, pd.Series, pd.DataFrame, pd.Index, dtype, scalar
To create a DataFrame, provide a `dict` mapping of `{name: dtype}`, or
an iterable of `(name, dtype)` tuples. To create a `Series`, provide a
tuple of `(name, dtype)`. If a pandas object, names, dtypes, and index
should match the desired output. If a dtype or scalar, a scalar of the
same dtype is returned.
index : pd.Index, optional
Any pandas index to use in the metadata. If none provided, a
`RangeIndex` will be used.
Examples
--------
>>> make_meta([('a', 'i8'), ('b', 'O')]) # doctest: +SKIP
Empty DataFrame
Columns: [a, b]
Index: []
>>> make_meta(('a', 'f8')) # doctest: +SKIP
Series([], Name: a, dtype: float64)
>>> make_meta('i8') # doctest: +SKIP
1
"""
if is_arraylike(x) and x.shape:
return x[:0]
if index is not None:
index = make_meta(index)
if isinstance(x, dict):
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x.items()}, index=index
)
if isinstance(x, tuple) and len(x) == 2:
return _empty_series(x[0], x[1], index=index)
elif isinstance(x, (list, tuple)):
if not all(isinstance(i, tuple) and len(i) == 2 for i in x):
raise ValueError(
"Expected iterable of tuples of (name, dtype), got {0}".format(x)
)
return pd.DataFrame(
{c: _empty_series(c, d, index=index) for (c, d) in x},
columns=[c for c, d in x],
index=index,
)
elif not hasattr(x, "dtype") and x is not None:
# could be a string, a dtype object, or a python type. Skip `None`,
# because it is implictly converted to `dtype('f8')`, which we don't
# want here.
try:
dtype = np.dtype(x)
return _scalar_from_dtype(dtype)
except Exception:
# Continue on to next check
pass
if is_scalar(x):
return _nonempty_scalar(x)
raise TypeError("Don't know how to create metadata from {0}".format(x))
@meta_nonempty.register(object)
def meta_nonempty_object(x):
"""Create a nonempty pandas object from the given metadata.
Returns a pandas DataFrame, Series, or Index that contains two rows
of fake data.
"""
if is_scalar(x):
return _nonempty_scalar(x)
else:
raise TypeError(
"Expected Pandas-like Index, Series, DataFrame, or scalar, "
"got {0}".format(typename(type(x)))
)
@meta_nonempty.register(pd.DataFrame)
def meta_nonempty_dataframe(x):
idx = meta_nonempty(x.index)
dt_s_dict = dict()
data = dict()
for i, c in enumerate(x.columns):
series = x.iloc[:, i]
dt = series.dtype
if dt not in dt_s_dict:
dt_s_dict[dt] = _nonempty_series(x.iloc[:, i], idx=idx)
data[i] = dt_s_dict[dt]
res = pd.DataFrame(data, index=idx, columns=np.arange(len(x.columns)))
res.columns = x.columns
if PANDAS_GT_100:
res.attrs = x.attrs
return res
_numeric_index_types = (pd.Int64Index, pd.Float64Index, pd.UInt64Index)
@meta_nonempty.register(pd.Index)
def _nonempty_index(idx):
typ = type(idx)
if typ is pd.RangeIndex:
return pd.RangeIndex(2, name=idx.name)
elif typ in _numeric_index_types:
return typ([1, 2], name=idx.name)
elif typ is pd.Index:
return pd.Index(["a", "b"], name=idx.name)
elif typ is pd.DatetimeIndex:
start = "1970-01-01"
# Need a non-monotonic decreasing index to avoid issues with
# partial string indexing see https://github.com/dask/dask/issues/2389
# and https://github.com/pandas-dev/pandas/issues/16515
# This doesn't mean `_meta_nonempty` should ever rely on
# `self.monotonic_increasing` or `self.monotonic_decreasing`
try:
return pd.date_range(
start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
except ValueError: # older pandas versions
data = [start, "1970-01-02"] if idx.freq is None else None
return pd.DatetimeIndex(
data, start=start, periods=2, freq=idx.freq, tz=idx.tz, name=idx.name
)
elif typ is pd.PeriodIndex:
return pd.period_range(
start="1970-01-01", periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.TimedeltaIndex:
start = np.timedelta64(1, "D")
try:
return pd.timedelta_range(
start=start, periods=2, freq=idx.freq, name=idx.name
)
except ValueError: # older pandas versions
start = np.timedelta64(1, "D")
data = [start, start + 1] if idx.freq is None else None
return pd.TimedeltaIndex(
data, start=start, periods=2, freq=idx.freq, name=idx.name
)
elif typ is pd.CategoricalIndex:
if len(idx.categories) == 0:
data = pd.Categorical(_nonempty_index(idx.categories), ordered=idx.ordered)
else:
data = pd.Categorical.from_codes(
[-1, 0], categories=idx.categories, ordered=idx.ordered
)
return pd.CategoricalIndex(data, name=idx.name)
elif typ is pd.MultiIndex:
levels = [_nonempty_index(l) for l in idx.levels]
codes = [[0, 0] for i in idx.levels]
try:
return pd.MultiIndex(levels=levels, codes=codes, names=idx.names)
except TypeError: # older pandas versions
return pd.MultiIndex(levels=levels, labels=codes, names=idx.names)
raise TypeError(
"Don't know how to handle index of type {0}".format(typename(type(idx)))
)
@meta_nonempty.register(pd.Series)
def _nonempty_series(s, idx=None):
# TODO: Use register dtypes with make_array_nonempty
if idx is None:
idx = _nonempty_index(s.index)
dtype = s.dtype
if len(s) > 0:
# use value from meta if provided
data = [s.iloc[0]] * 2
elif is_datetime64tz_dtype(dtype):
entry = pd.Timestamp("1970-01-01", tz=dtype.tz)
data = [entry, entry]
elif is_categorical_dtype(dtype):
if len(s.cat.categories):
data = [s.cat.categories[0]] * 2
cats = s.cat.categories
else:
data = _nonempty_index(s.cat.categories)
cats = s.cat.categories[:0]
data = pd.Categorical(data, categories=cats, ordered=s.cat.ordered)
elif is_integer_na_dtype(dtype):
data = pd.array([1, None], dtype=dtype)
elif is_float_na_dtype(dtype):
data = pd.array([1.0, None], dtype=dtype)
elif is_period_dtype(dtype):
# pandas 0.24.0+ should infer this to be Series[Period[freq]]
freq = dtype.freq
data = [pd.Period("2000", freq), pd.Period("2001", freq)]
elif is_sparse(dtype):
entry = _scalar_from_dtype(dtype.subtype)
if PANDAS_GT_100:
data = pd.array([entry, entry], dtype=dtype)
else:
data = pd.SparseArray([entry, entry], dtype=dtype)
elif is_interval_dtype(dtype):
entry = _scalar_from_dtype(dtype.subtype)
data = pd.array([entry, entry], dtype=dtype)
elif type(dtype) in make_array_nonempty._lookup:
data = make_array_nonempty(dtype)
else:
entry = _scalar_from_dtype(dtype)
data = np.array([entry, entry], dtype=dtype)
out = pd.Series(data, name=s.name, index=idx)
if PANDAS_GT_100:
out.attrs = s.attrs
return out
@union_categoricals_dispatch.register(
(pd.DataFrame, pd.Series, pd.Index, pd.Categorical)
)
def union_categoricals_pandas(to_union, sort_categories=False, ignore_order=False):
return pd.api.types.union_categoricals(
to_union, sort_categories=sort_categories, ignore_order=ignore_order
)
@get_parallel_type.register(pd.Series)
def get_parallel_type_series(_):
return Series
@get_parallel_type.register(pd.DataFrame)
def get_parallel_type_dataframe(_):
return DataFrame
@get_parallel_type.register(pd.Index)
def get_parallel_type_index(_):
return Index
@get_parallel_type.register(_Frame)
def get_parallel_type_frame(o):
return get_parallel_type(o._meta)
@get_parallel_type.register(object)
def get_parallel_type_object(_):
return Scalar
@hash_object_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def hash_object_pandas(
obj, index=True, encoding="utf8", hash_key=None, categorize=True
):
return pd.util.hash_pandas_object(
obj, index=index, encoding=encoding, hash_key=hash_key, categorize=categorize
)
@group_split_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def group_split_pandas(df, c, k, ignore_index=False):
indexer, locations = pd._libs.algos.groupsort_indexer(
c.astype(np.int64, copy=False), k
)
df2 = df.take(indexer)
locations = locations.cumsum()
parts = [
df2.iloc[a:b].reset_index(drop=True) if ignore_index else df2.iloc[a:b]
for a, b in zip(locations[:-1], locations[1:])
]
return dict(zip(range(k), parts))
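# Illustrative sketch (not part of the original module): group_split_pandas partitions
# a frame into k groups according to an integer group code per row. The toy frame and
# codes below are made up for the example.
def _example_group_split_pandas():
    df = pd.DataFrame({"x": [1, 2, 3, 4]})
    codes = np.array([0, 1, 0, 1])
    return group_split_pandas(df, codes, 2)  # {0: rows 0 and 2, 1: rows 1 and 3}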
@concat_dispatch.register((pd.DataFrame, pd.Series, pd.Index))
def concat_pandas(
dfs,
axis=0,
join="outer",
uniform=False,
filter_warning=True,
ignore_index=False,
**kwargs
):
ignore_order = kwargs.pop("ignore_order", False)
if axis == 1:
return pd.concat(dfs, axis=axis, join=join, **kwargs)
# Support concatenating indices along axis 0
if isinstance(dfs[0], pd.Index):
if isinstance(dfs[0], pd.CategoricalIndex):
for i in range(1, len(dfs)):
if not isinstance(dfs[i], pd.CategoricalIndex):
dfs[i] = dfs[i].astype("category")
return pd.CategoricalIndex(
union_categoricals(dfs, ignore_order=ignore_order), name=dfs[0].name
)
elif isinstance(dfs[0], pd.MultiIndex):
first, rest = dfs[0], dfs[1:]
if all(
(isinstance(o, pd.MultiIndex) and o.nlevels >= first.nlevels)
for o in rest
):
arrays = [
concat([i._get_level_values(n) for i in dfs])
for n in range(first.nlevels)
]
return pd.MultiIndex.from_arrays(arrays, names=first.names)
to_concat = (first.values,) + tuple(k._values for k in rest)
new_tuples = np.concatenate(to_concat)
try:
return pd.MultiIndex.from_tuples(new_tuples, names=first.names)
except Exception:
return | pd.Index(new_tuples) | pandas.Index |
# -*- coding:utf-8 -*-
# @Time : 2020/1/122:48
# @Author : liuqiuxi
# @Email : <EMAIL>
# @File : fundfeedsjqdata.py
# @Project : datafeeds
# @Software: PyCharm
# @Remark : This is class of option market
import pandas as pd
import datetime
import copy
from datafeeds.jqdatafeeds import BaseJqData
from datafeeds.utils import BarFeedConfig
from datafeeds import logger
class AFundQuotationJqData(BaseJqData):
LOGGER_NAME = "AFundQuotationJqData"
def __init__(self):
super(AFundQuotationJqData, self).__init__()
self.__adjust_name_dict = {"F": "pre", "B": "post"}
self.__need_adjust_columns = ["close"]
self.__logger = logger.get_logger(name=self.LOGGER_NAME)
def get_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted=None):
securityIds_OTC = []
securityIds_EXC = []
# 判断场内场外基金
for securityId in securityIds:
code_suffix = securityId[securityId.find(".") + 1:]
if code_suffix == "OF":
securityIds_OTC.append(securityId)
elif code_suffix == "SH" or code_suffix == "SZ":
securityIds_EXC.append(securityId)
else:
self.__logger.warning("the securityId: %s did't support in fund quotation, we remove it" % securityId)
# 得到场内基金数据
if len(securityIds_EXC) > 0:
data0 = self.__get_exchange_quotation(securityIds=securityIds_EXC, items=items, frequency=frequency,
begin_datetime=begin_datetime, end_datetime=end_datetime,
adjusted=adjusted)
else:
data0 = pd.DataFrame()
# 得到场外基金数据
if len(securityIds_OTC) > 0:
data1 = self.__get_otc_quotation(securityIds=securityIds_OTC, items=items, frequency=frequency,
begin_datetime=begin_datetime, end_datetime=end_datetime,
adjusted=adjusted)
else:
data1 = pd.DataFrame()
# merge OTC and EXC
if not data0.empty and not data1.empty:
columns = list(set(data0.columns).union(set(data1.columns)))
for column in columns:
if column not in data0.columns:
data0.loc[:, column] = None
if column not in data1.columns:
data1.loc[:, column] = None
data0 = data0.loc[:, columns].copy(deep=True)
data1 = data1.loc[:, columns].copy(deep=True)
data = pd.concat(objs=[data0, data1], axis=0, join="outer")
else:
if data0.empty:
data = data1.copy(deep=True)
elif data1.empty:
data = data0.copy(deep=True)
else:
raise BaseException("[AFundQuotationJqData] something may wrong")
data.reset_index(inplace=True, drop=True)
data.sort_values(by=["securityId", "dateTime"], axis=0, ascending=True, inplace=True)
data.reset_index(inplace=True, drop=True)
non_find_securityIds = list(set(securityIds) - set(data.loc[:, "securityId"]))
if len(non_find_securityIds) > 0:
self.__logger.warning("we can't get securityIds: %s data, please check it" % non_find_securityIds)
return data
def __get_exchange_quotation(self, securityIds, items, frequency, begin_datetime, end_datetime, adjusted):
connect = self.connect()
securityIds = self.wind_to_default(securityIds=securityIds)
frequency = self.get_frequency_cycle(frequency=frequency)
adjusted = self.__adjust_name_dict.get(adjusted, None)
rename_dict = BarFeedConfig.get_jq_data_items().get(self.LOGGER_NAME)
data = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Input
from keras.layers import Convolution1D, GlobalMaxPooling1D
from keras.layers.merge import Concatenate
from sklearn.model_selection import GridSearchCV
from keras.wrappers.scikit_learn import KerasClassifier
# read in high and low expression one_hot files
high_exp = pd.concat(
[pd.read_csv(f"high_exp_one_hot_{i}.csv", index_col=0)
for i in range(1, 5)]
)
low_exp = pd.concat(
[pd.read_csv(f"low_exp_one_hot_{i}.csv", index_col=0)
for i in range(1, 5)]
)
# concatenate to form a single dataframe
data_df = pd.concat([high_exp, low_exp], axis=0)
# function to convert the stringified one-hot matrices stored in the csv files into padded numpy arrays
def string_to_matrix(string):
# convert string to list of one_hot lists
string = str(string)
list_of_strings = string.split('], [')
list_of_lists = [channels.strip().replace('[', '').replace(']', '').replace(',', '').split()
for channels in list_of_strings
if 'nan' not in list_of_strings
]
# add padding
remaining_pad = 181 - len(list_of_lists)
while remaining_pad > 0:
list_of_lists.append(list([0 for x in range(0, 64)]))
remaining_pad = remaining_pad - 1
# return padded one_hot matrix
return np.array(list_of_lists).astype(np.float)
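# Illustrative sketch (not part of the original script): string_to_matrix turns the
# stringified list of one-hot rows stored in the csv into a float matrix padded with
# zero rows up to 181 positions, so every returned array has shape (181, 64). The fake
# input built below is made up for the example.
def _example_string_to_matrix():
    one_hot_row = "[" + ", ".join(["1"] + ["0"] * 63) + "]"
    fake_cell = "[" + ", ".join([one_hot_row] * 2) + "]"  # two sequence positions
    return string_to_matrix(fake_cell).shape  # (181, 64)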
data_df['one_hot_matrix'] = data_df['one_hot_matrix'].apply(string_to_matrix)
# get X and y data from data_df
max_len = 181
width = 64
X = np.zeros((22615, max_len, width))
for idx, one_hot_matrix in enumerate(data_df['one_hot_matrix'].values):
X[idx, :, :] = one_hot_matrix
y = data_df['class'].values
# train/test split
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42)
# tune hyperparameters for simple model
# define simple model per Yoon Kim (2014)
def create_model(filter_sizes=(3, 4, 5), num_filters=10):
# prepare input shape
input_shape = (181, 64)
model_input = Input(shape=input_shape)
z = model_input
# Convolutional block
conv_blocks = []
for sz in filter_sizes:
conv = Convolution1D(filters=num_filters,
kernel_size=sz,
padding="valid",
activation="relu",
strides=1)(z)
conv = GlobalMaxPooling1D()(conv)
conv_blocks.append(conv)
z = Concatenate()(conv_blocks) if len(conv_blocks) > 1 else conv_blocks[0]
z = Dropout(0.5)(z)
model_output = Dense(1, activation="sigmoid")(z)
model = Model(model_input, model_output)
model.compile(loss="binary_crossentropy", optimizer="adam",
metrics=["accuracy"])
return model
# fix random seed for reproducibility
seed = 7
np.random.seed(seed)
model = KerasClassifier(build_fn=create_model, batch_size=50,
epochs=25, verbose=2)
# define the grid search parameters
# model hyperparameters
filter_sizes = [(3, 3, 3), (3, 4, 5), (5, 5, 5),
(3, 5, 7), (7, 7, 7), (5, 7, 10),
(10, 10, 10), (3, 4, 5, 6)]
num_filters = [10, 20, 50, 100, 200]
param_grid = dict(filter_sizes=filter_sizes, num_filters=num_filters)
grid = GridSearchCV(estimator=model, param_grid=param_grid,
cv=10, n_jobs=4, pre_dispatch='n_jobs')
grid_result = grid.fit(x_train, y_train)
# summarize results
print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
grid_df = | pd.DataFrame(grid_result.cv_results_['params']) | pandas.DataFrame |
import os, glob, pandas as pd, sys, csv
OMEGA = "/usr/local/bin/omega2"
ROCS = "/usr/local/bin/rocs"
def omega_conf(smi,maxconfs=2000):
smi_prefix = os.path.splitext(os.path.basename(smi))[0]
os.system ('{0} -in {1} -out {2}_omega.sdf -prefix {2}_omega -warts true -maxconfs {3}'.format(OMEGA, smi, smi_prefix, maxconfs))
def rocs_alignment(conformer, template_database, rocs_maxconfs_output=100):
sdf_prefix = os.path.basename(os.path.splitext(conformer)[0]).split('_')[0]
for template in template_database:
template_id = "_".join(os.path.basename(template).split("_")[0:3])
os.system ('{0} -dbase {1} -query {2} -prefix {3}_{4}_rocs -oformat sdf -maxconfs 30 -outputquery false -qconflabel title'.format(ROCS, conformer, template, sdf_prefix, template_id))
def combine_report_files(report_file, crystal_structure):
dataframeList = []
for report in report_file:
target_template_file_name = "_".join(os.path.splitext(os.path.basename(report))[0].split("_")[0:5])
lig_id_three_letter = target_template_file_name.split("_")[0]
read_rpt = pd.read_csv(report, sep='\t', dtype=str)
if 'Unnamed: 16' in read_rpt:
del read_rpt['Unnamed: 16']
shapequery_changed_table = read_rpt.replace('untitled-query-1', target_template_file_name)
shapequery_changed_table.to_csv(report, sep='\t', index=None)
dataframeList.append(shapequery_changed_table)
single_rpt_csv_file = | pd.concat(dataframeList, axis=0) | pandas.concat |
import logging
import pytest
from pennprov.connection.mprov import MProvConnection
from pennprov.connection.mprov_connection_cache import MProvConnectionCache
from pennprov.api.decorators import MProvAgg
import pandas as pd
logging.basicConfig(level=logging.DEBUG)
connection_key = MProvConnectionCache.Key()
mprov_conn = MProvConnectionCache.get_connection(connection_key)
if mprov_conn:
mprov_conn.create_or_reset_graph()
else:
raise RuntimeError('Could not connect')
sub_stream_1 = mprov_conn.create_collection('output_ecg_1', 1)
sub_stream_2 = mprov_conn.create_collection('output_ecg_2', 1)
@MProvAgg("ecg", 'output_ecg',['x','y'],['x','y'], sub_stream_1)
@pytest.mark.skip(reason="Not a test fn")
def test(n):
return n.groupby('x').count()
@MProvAgg("ecg", 'output_ecg',['x'],['x'], sub_stream_2)
@pytest.mark.skip(reason="Not a test fn")
def testx(n):
return n.groupby('x').count()
def test_main():
# Test the decorators, which will create entities for the dataframe
# elements, and nodes representing the dataframe components
ecg = | pd.DataFrame([{'x':1, 'y': 2}, {'x':3, 'y':4}]) | pandas.DataFrame |
from statsmodels.tsa.stattools import adfuller, acf, kpss
import xgboost as xgb
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.feature_selection import f_regression
from scipy.stats import spearmanr
def size(time_series):
"""Metafeature function returning the number of samples in the dataset.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
return len(time_series)
def maxminvar(time_series):
"""Metafeature function returning the maximum/minimum ratio of the variance, with a rolling window of 240 samples.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
rolling_var = time_series["endogenous"].rolling(24 * 10).var()
return rolling_var.max() / rolling_var.min()
def adf(time_series):
"""Metafeature function returning the p-value of an Augmented Dickey-Fuller test.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
return adfuller(time_series["endogenous"])[1]
def stat_test(time_series):
"""Metafeature function returning the test statistic of a Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
return kpss(time_series["endogenous"], lags="auto")[0]
def cumac(time_series):
"""Metafeature function returning the cumulative abolute autocorrelation of lags 1 to 48.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
return sum(abs(acf(time_series["endogenous"], fft=True, nlags=48)))
def total_splits(time_series):
"""Metafeature function returning the number of splits after training an XGBoost model with 1000 trees and a maximum
depth of 1000.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
    xg_reg = xgb.XGBRegressor(
        objective="reg:squarederror", n_estimators=1000, max_depth=1000
    )
xg_reg.fit(
time_series.drop(columns=["endogenous"]),
pd.DataFrame(data=time_series["endogenous"]),
)
return sum(xg_reg.get_booster().get_score(importance_type="weight").values())
def feature_xgb_interactions(time_series):
"""Metafeature function representing feature importance plus XGBoost model interactions. The mean absolute error of a
zero forecast is divided by the mean absolute error of a trained xgboost model with one tree and maximum depth of
one.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
# split time_series in train and test
train, test = train_test_split(time_series)
ex_train = train.drop(columns=["endogenous"])
ex_test = test.drop(columns=["endogenous"])
end_train = pd.DataFrame(data=train["endogenous"])
end_test = pd.DataFrame(data=test["endogenous"])
# XGBoost regression, train and predict
xg_reg = xgb.XGBRegressor(
objective="reg:squarederror", n_estimators=1, max_depth=1000, learning_rate=1
)
xg_reg.fit(ex_train, end_train)
xgb_preds = pd.Series(data=xg_reg.predict(ex_test), index=end_test.index)
# mae of xgb and initial
mae_xgb = mean_absolute_error(end_test, xgb_preds)
mae_initial = mean_absolute_error(end_test, np.zeros(len(end_test)))
return mae_xgb / mae_initial
def xgb_feature_interactions_minus_linear_relations(time_series):
"""Metafeature function returning the importance of feature interactions in XGBoost minus linear correlations.
Args:
time_series (pd.DataFrame): the dataset for which to optimize the superparameters.
Returns:
float
"""
# split time_series in train and test
length = len(time_series)
ex_train = time_series.drop(columns=["endogenous"])[: int(length * 2 / 3)]
ex_test = time_series.drop(columns=["endogenous"])[int(length * 2 / 3) :]
end_train = pd.DataFrame(data=time_series["endogenous"])[: int(length * 2 / 3)]
end_test = | pd.DataFrame(data=time_series["endogenous"]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import os
def read_data():
# set the path of the raw data
raw_data_path = os.path.join(os.path.pardir, 'data', 'raw')
train_data_file = os.path.join(raw_data_path, 'train.csv')
test_data_file = os.path.join(raw_data_path, 'test.csv')
# Read the data with all default parameteres
train_df = | pd.read_csv(train_data_file, index_col='PassengerId') | pandas.read_csv |
"""Provides functions prepare_data_x and dataclass PrepareDataOutput."""
from dataclasses import dataclass, field
from functools import partial
from typing import Any, Callable, Dict, List, Union
import numpy as np
import pandas as pd
from sklearn.model_selection import KFold
from .util import make_columns_lower_case # type: ignore
NUMBER_REGEX = r"\d*\.?\d+"
TEMP_REGEX = (
rf"({NUMBER_REGEX}-{NUMBER_REGEX}|{NUMBER_REGEX}) ?(º|°)[Cc]"
)
PH_REGEX = rf"[pP][hH] ({NUMBER_REGEX})"
MOL_REGEX = rf".* ({NUMBER_REGEX}) ?[mM]"
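# Illustrative note (not part of the original module): the patterns above are meant to
# pull numbers out of commentary strings, e.g. "25°C" or "30-40°C" for TEMP_REGEX,
# "pH 7.4" for PH_REGEX and "0.5 mM substrate" for MOL_REGEX. The example strings are
# made up for demonstration only.
def _example_commentary_regexes():
    import re
    assert re.search(TEMP_REGEX, "assayed at 25°C").group(1) == "25"
    assert re.search(PH_REGEX, "measured at pH 7.4").group(1) == "7.4"
    assert re.search(MOL_REGEX, "with 0.5 mM substrate").group(1) == "0.5"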
COFACTORS = [
"ATP",
"NADH",
"NAD+",
"NADPH",
"O2",
"NADP+",
"ADP",
]
DIMS = {
"a_substrate": ["substrate"],
"a_ec4_sub": ["ec4_sub"],
"a_enz_sub": ["enz_sub"],
"a_org_sub": ["org_sub"],
"log_km": ["biology"],
"llik": ["ix_test"],
"yrep": ["ix_test"],
}
TEMPERATURE_RANGE = 10.0, 45.0
PH_RANGE = 5.0, 9.0
HARDCODED_NATURAL_SUBSTRATES = {
"1.1.1.49": [391, 11111], # 6-phosphonogluconate
"3.1.1.31": [4217], # 6-phospho-D-glucono-1,5-lactone
}
# types
StanDict = Dict[str, Union[float, int, List[float], List[int]]]
CoordDict = Dict[str, List[str]]
@dataclass
class PrepareDataOutput:
"""Return value of a prepare_data function."""
name: str
coords: Dict[str, Any]
reports: pd.DataFrame
lits: pd.DataFrame
dims: Dict[str, Any]
number_of_cv_folds: int
standict_function: Callable
biology_maps: Dict[str, List[str]]
standict_prior: StanDict = field(init=False)
standict_posterior: StanDict = field(init=False)
standicts_cv: List[StanDict] = field(init=False)
def __post_init__(self):
"""Add stan input dictionaries."""
ix_all = list(range(len(self.lits)))
splits = []
for train, test in KFold(self.number_of_cv_folds, shuffle=True).split(
self.lits
):
assert isinstance(train, np.ndarray)
assert isinstance(test, np.ndarray)
splits.append([list(train), list(test)])
get_standict_xv = partial(
self.standict_function,
lits=self.lits,
coords=self.coords,
likelihood=True,
)
get_standict_main = partial(
self.standict_function,
lits=self.lits,
coords=self.coords,
train_ix=ix_all,
test_ix=ix_all,
)
self.standict_prior, self.standict_posterior = (
get_standict_main(likelihood=likelihood)
for likelihood in (False, True)
)
self.standicts_cv = [
get_standict_xv(train_ix=train_ix, test_ix=test_ix)
for train_ix, test_ix in splits
]
def process_temperature_column(t: pd.Series) -> pd.Series:
"""Convert a series of string temperatures to floats.
If the reported temperature is e.g '1-2', take the mean (so e.g. 1.5).
:param t: A pandas Series of strings
"""
return t.str.split("-").explode().astype(float).groupby(level=0).mean()
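# Illustrative sketch (not part of the original module): plain values pass through and
# reported ranges such as "30-40" are replaced by their midpoint.
def _example_process_temperature_column() -> pd.Series:
    return process_temperature_column(pd.Series(["25", "30-40"]))  # -> [25.0, 35.0]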
def correct_brenda_dtypes(r: pd.DataFrame):
"""Make sure the columns have the right dtypes.
:param r: dataframe of reports
"""
df_out = r.copy()
float_cols = ["ph", "mols", "temperature", "km", "kcat"]
for col in float_cols:
if col in r.columns:
df_out[col] = r[col].astype(float)
return df_out
def add_columns_to_brenda_reports(
r: pd.DataFrame, nat: pd.DataFrame
) -> pd.DataFrame:
"""Add new columns to a table of reports.
:param r: Dataframe of reports
"""
ec4_to_natural_substrates = (
nat.groupby("ec4")["ligand_structure_id"].apply(list).to_dict()
)
out = r.copy()
out["is_natural"] = out.apply(
lambda row: row["ligand_structure_id"]
in ec4_to_natural_substrates[row["ec4"]]
if row["ec4"] in ec4_to_natural_substrates.keys() else False,
axis=1,
)
out["ph"] = out["commentary"].str.extract(PH_REGEX)[0]
out["mols"] = out["commentary"].str.extract(MOL_REGEX)[0]
out["temperature"] = process_temperature_column(
out["commentary"].str.extract(TEMP_REGEX)[0]
)
for ec in [1, 2, 3]:
out["ec" + str(ec)] = [".".join(s.split(".")[:ec]) for s in out["ec4"]]
return out
def preprocess_brenda_reports(
raw_reports: pd.DataFrame, natural_substrates: pd.DataFrame
) -> pd.DataFrame:
"""Correct names, nulls and dtypes of Brenda reports table."""
return (
raw_reports
.replace(["more", -999], np.nan)
.rename(
columns={
"ecNumber": "ec4",
"kmValue": "km",
"turnoverNumber": "kcat",
"ligandStructureId": "ligand_structure_id",
}
)
.pipe(make_columns_lower_case)
.pipe(add_columns_to_brenda_reports, natural_substrates)
.pipe(correct_brenda_dtypes)
)
def prepare_data_brenda(
name: str,
raw_reports: pd.DataFrame,
natural_substrates: pd.DataFrame,
number_of_cv_folds,
) -> PrepareDataOutput:
"""Get PrepareDataOutput for brenda."""
reports = preprocess_brenda_reports(raw_reports, natural_substrates)
biology_cols = ["organism", "ec4", "substrate"]
lit_cols = biology_cols + ["literature"]
cond = (
reports[biology_cols].notnull().all(axis=1).astype(bool)
& reports["km"].notnull()
& reports["km"].gt(1e-10)
& ~reports["ligand_structure_id"].eq(0)
& (
reports["temperature"].isnull()
| reports["temperature"].astype(float).between(*TEMPERATURE_RANGE)
)
& (
reports["ph"].isnull()
| reports["ph"].astype(float).between(*PH_RANGE)
)
& reports["is_natural"]
)
reports["y"] = np.log(reports["km"].values) # type: ignore
reports["biology"] = (
reports[biology_cols].fillna("").apply("|".join, axis=1)
)
reports = reports.loc[cond].copy()
lits = (
reports.groupby(lit_cols)
.agg({"y": "median", "biology": "first"})
.reset_index()
.loc[lambda df: df.groupby("organism")["y"].transform("size") > 50]
.reset_index()
)
coords = {}
lits["literature"] = lits["literature"].astype(str)
lits["biology"] = lits[biology_cols].apply("|".join, axis=1)
lits["ec4_sub"] = lits["ec4"].str.cat(lits["substrate"], sep="|")
lits["org_sub"] = lits["organism"].str.cat(lits["substrate"], sep="|")
fcts = biology_cols + ["ec4_sub", "org_sub", "literature", "biology"]
fcts_with_unknowns = ["substrate", "ec4_sub", "org_sub"]
for fct in fcts:
if fct in fcts_with_unknowns:
lits[fct + "_stan"] = pd.factorize(lits[fct])[0] + 2
coords[fct] = ["unknown"] + pd.factorize(lits[fct])[1].tolist()
else:
lits[fct + "_stan"] = pd.factorize(lits[fct])[0] + 1
coords[fct] = pd.factorize(lits[fct])[1].tolist()
biology_maps = {
col: lits.groupby("biology")[col].first().tolist()
for col in biology_cols
}
return PrepareDataOutput(
name=name,
lits=lits,
coords=coords,
reports=reports,
dims=DIMS,
number_of_cv_folds=number_of_cv_folds,
standict_function=get_standict_brenda,
biology_maps=biology_maps,
)
def get_standict_brenda(
lits: pd.DataFrame,
coords: CoordDict,
likelihood: bool,
train_ix: List[int],
test_ix: List[int],
) -> StanDict:
"""Get a Stan input for the brenda data.
:param lits: Dataframe of lits
:param coords: Dictionary of coordinates
:param likelihood: Whether or not to run in likelihood mode
:param: train_ix: List of indexes of training lits
:param: test_ix: List of indexes of test lits
"""
return listify_dict(
{
"N_biology": lits["biology"].nunique(),
"N_substrate": len(coords["substrate"]),
"N_ec4_sub": len(coords["ec4_sub"]),
"N_org_sub": len(coords["org_sub"]),
"substrate": lits.groupby("biology")["substrate_stan"].first(),
"ec4_sub": lits.groupby("biology")["ec4_sub_stan"].first(),
"org_sub": lits.groupby("biology")["org_sub_stan"].first(),
"N_train": len(train_ix),
"N_test": len(test_ix),
"N": len(list(set(train_ix + test_ix))),
"biology_train": lits.loc[train_ix, "biology_stan"],
"biology_test": lits.loc[test_ix, "biology_stan"],
"ix_train": [i + 1 for i in train_ix],
"ix_test": [i + 1 for i in test_ix],
"y": lits["y"],
"likelihood": int(likelihood),
}
)
def get_standict_sabio(
lits: pd.DataFrame,
coords: CoordDict,
likelihood: bool,
train_ix: List[int],
test_ix: List[int],
) -> StanDict:
"""Get a Stan input for the sabio data.
:param lits: Dataframe of lits
:param coords: Dictionary of coordinates
:param likelihood: Whether or not to run in likelihood mode
:param: train_ix: List of indexes of training lits
:param: test_ix: List of indexes of test lits
"""
return listify_dict(
{
"N_biology": lits["biology"].nunique(),
"N_substrate": len(coords["substrate"]),
"N_ec4_sub": len(coords["ec4_sub"]),
"N_org_sub": len(coords["org_sub"]),
"N_enz_sub": len(coords["enz_sub"]),
"substrate": lits.groupby("biology")["substrate_stan"].first(),
"ec4_sub": lits.groupby("biology")["ec4_sub_stan"].first(),
"org_sub": lits.groupby("biology")["org_sub_stan"].first(),
"enz_sub": lits.groupby("biology")["enz_sub_stan"].first(),
"N_train": len(train_ix),
"N_test": len(test_ix),
"N": len(list(set(train_ix + test_ix))),
"biology_train": lits.loc[train_ix, "biology_stan"],
"biology_test": lits.loc[test_ix, "biology_stan"],
"ix_train": [i + 1 for i in train_ix],
"ix_test": [i + 1 for i in test_ix],
"y": lits["y"],
"likelihood": int(likelihood),
}
)
def listify_dict(d: Dict) -> StanDict:
"""Make sure a dictionary is a valid Stan input.
:param d: input dictionary, possibly with wrong types
"""
out = {}
for k, v in d.items():
if not isinstance(k, str):
raise ValueError(f"key {str(k)} is not a string!")
elif isinstance(v, pd.Series):
out[k] = v.to_list()
elif isinstance(v, np.ndarray):
out[k] = v.tolist()
elif isinstance(v, (list, int, float)):
out[k] = v
else:
raise ValueError(f"value {str(v)} has wrong type!")
return out
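# Example (hypothetical values) of the normalisation listify_dict performs:
# a pandas Series and a numpy array both become plain Python lists, while
# scalars and lists pass through unchanged.
def _example_listify_dict():
    return listify_dict(
        {
            "N": 3,
            "y": pd.Series([0.1, 0.2, 0.3]),
            "x": np.arange(3),
            "likelihood": 1,
        }
    )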
def prepare_hmdb_concs(raw: pd.DataFrame) -> pd.DataFrame:
"""Process raw hmdb table."""
concentration_regex = rf"^({NUMBER_REGEX})"
conc = (
raw["concentration_value"]
.str.extract(concentration_regex)[0]
.astype(float)
)
cond = (
raw["concentration_units"].eq("uM")
& raw["subject_age"].str.contains("Adult")
& raw["subject_condition"].eq("Normal")
& conc.notnull()
)
return raw.loc[cond].copy().assign(concentration_uM=conc)
def prepare_sabio_concentrations(raw: pd.DataFrame) -> pd.DataFrame:
"""Process sabio concentrations table."""
cond = (
raw["parameter.type"].eq("concentration")
& raw["parameter.startValue"].notnull()
& raw["parameter.startValue"].gt(0)
& ~raw["parameter.associatedSpecies"].eq("Enzyme")
& raw["parameter.unit"].eq("M")
)
out = raw.loc[cond].copy()
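    # exp(mean(log(x))) of the start and end values is their geometric mean;
    # the factor of 1000 converts the reported molar values to mM.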
out["concentration_mM"] = np.exp(
np.log(
out[["parameter.startValue", "parameter.endValue"]].multiply(1000)
).mean(axis=1)
)
return out
def prepare_data_sabio(
name: str,
raw_reports: pd.DataFrame,
number_of_cv_folds,
) -> PrepareDataOutput:
"""Get prepared data for the sabio dataset."""
assert isinstance(raw_reports, pd.DataFrame)
reports = raw_reports.rename(
columns={
"Substrate": "reaction_substrates",
"EnzymeType": "enzyme_type",
"PubMedID": "literature",
"Organism": "organism",
"UniprotID": "uniprot_id",
"ECNumber": "ec4",
"parameter.type": "parameter_type",
"parameter.associatedSpecies": "substrate",
"parameter.startValue": "start_value",
"parameter.endValue": "end_value",
"parameter.standardDeviation": "sd",
"parameter.unit": "unit",
"Temperature": "temperature",
"pH": "ph",
}
).replace("-", np.nan)
biology_cols = ["organism", "ec4", "uniprot_id", "substrate"]
lit_cols = biology_cols + ["literature"]
cond = (
reports["parameter_type"].eq("Km")
& reports["enzyme_type"].str.contains("wildtype")
& reports["start_value"].notnull()
& reports["start_value"].gt(0)
& reports["start_value"].lt(2000)
& reports["unit"].eq("M")
& (
reports["temperature"].isnull()
| reports["temperature"].astype(float).between(*TEMPERATURE_RANGE)
)
& (
reports["ph"].isnull()
| reports["ph"].astype(float).between(*PH_RANGE)
)
& reports["literature"].notnull()
)
reports = reports.loc[cond].copy()
reports["y"] = np.log(
reports[["start_value", "end_value"]]
# multiply by 1000 to convert from M to mM
.multiply(1000)
).mean(axis=1)
reports["uniprot_id"] = np.where(
reports["uniprot_id"].str.contains(" "), np.nan, reports["uniprot_id"]
)
reports["biology"] = (
reports[biology_cols].fillna("").apply("|".join, axis=1)
)
lits = (
reports.loc[cond]
.groupby(lit_cols, dropna=False)
.agg(
{"y": "median", "biology": "first", "reaction_substrates": "first"}
)
.reset_index()
.loc[lambda df: df.groupby("organism")["y"].transform("size") > 50]
.reset_index()
)
lits["literature"] = lits["literature"].astype(int).astype(str)
lits["ec4_sub"] = lits["ec4"].str.cat(lits["substrate"], sep="|")
lits["enz_sub"] = np.where(
lits["uniprot_id"].notnull(),
lits["uniprot_id"].str.cat(lits["substrate"], sep="|"),
np.nan,
)
lits["org_sub"] = lits["organism"].str.cat(lits["substrate"], sep="|")
fcts = biology_cols + [
"ec4_sub",
"org_sub",
"enz_sub",
"literature",
"biology",
]
coords = {}
fcts_with_unknowns = ["substrate", "ec4_sub", "org_sub", "enz_sub"]
for fct in fcts:
if fct in fcts_with_unknowns:
lits[fct + "_stan"] = pd.factorize(lits[fct])[0] + 2
coords[fct] = ["unknown"] + pd.factorize(lits[fct])[1].tolist()
else:
lits[fct + "_stan"] = pd.factorize(lits[fct])[0] + 1
            coords[fct] = pd.factorize(lits[fct])[1].tolist()
    biology_maps = {
        col: lits.groupby("biology")[col].first().tolist()
        for col in biology_cols
    }
    return PrepareDataOutput(
        name=name,
        lits=lits,
        coords=coords,
        reports=reports,
        dims=DIMS,
        number_of_cv_folds=number_of_cv_folds,
        standict_function=get_standict_sabio,
        biology_maps=biology_maps,
    )
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
import os
import time
import numpy as np
import random
import math
import ntpath
from typing import List
import scipy.spatial as spatial
from torch import device
from sys_simulator.devices.devices import d2d_user, mobile_user, base_station
import pandas as pd
def bits_gen(n):
return [random.randint(0, 1) for b in range(1, n+1)]
def db_to_power(x):
return 10**(x/10)
def power_to_db(x):
return 10*np.log10(x)
def scaling(x, a_min, a_max):
x = np.clip(x, a_min, a_max)
return (x - a_min)/(a_max - a_min)
def upsample(input, factor):
z_mat = np.zeros([factor-1, len(input[0])])
aux = np.concatenate((input, z_mat), axis=0)
aux2 = np.transpose(aux)
output = np.reshape(aux2, (1, len(input[0])*factor))
return output
def downsample(input, factor):
output = []
for i in range(0, len(input)):
if i % factor == 0:
output.append(input[i])
return output
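# Illustrative round trip (hypothetical signal): upsampling by a factor inserts
# zeros between samples, and downsampling the result by the same factor
# recovers the original sequence.
def _example_resampling():
    signal = np.array([[1.0, 2.0, 3.0]])
    up = upsample(signal, 2)      # [[1., 0., 2., 0., 3., 0.]]
    down = downsample(up[0], 2)   # [1.0, 2.0, 3.0]
    return up, down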
def ber(tx_signal, rx_signal):
return np.sum(np.abs(tx_signal - rx_signal))/len(tx_signal)
def bpsk_theoric(snr):
# snr in dB
snr_mag = [10**(x/10) for x in snr]
return [0.5*math.erfc(np.sqrt(i)) for i in snr_mag]
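# Illustrative check (approximate values): the theoretical BPSK BER is about
# 7.9e-2 at 0 dB SNR and falls below 1e-5 by 10 dB.
def _example_bpsk_theoric():
    snr_db = [0, 2, 4, 6, 8, 10]
    return dict(zip(snr_db, bpsk_theoric(snr_db)))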
def distribute_users(
mobile_users: List[mobile_user],
d2d_users: List[d2d_user],
base_station: base_station
):
center = base_station.position
radius = base_station.radius
for m in mobile_users:
x = (np.random.rand()-0.5)*2*radius+center[0]
y = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x**2))+center[1]
m.set_position((x, y))
for d in d2d_users:
x = (np.random.rand()-0.5)*2*radius+center[0]
y = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x**2))+center[1]
d.set_position((x, y))
def distribute_nodes(nodes, base_station, nodes_height=1.5):
center = base_station.position
radius = base_station.radius
for n in nodes:
x = (np.random.rand()-0.5)*2*radius+center[0]
y = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x**2))+center[1]
n.set_position((x, y, nodes_height))
n.set_distance_to_bs(
spatial.distance.euclidean(n.position, base_station.position)
)
def distribute_mue_validation(nodes: List[mobile_user], base_station):
    if len(nodes) != 1:
        raise ValueError('number of mues must be 1')
    if base_station.position != (0, 0):
        raise ValueError('BS position must be (0,0)')
center = base_station.position
nodes[0].set_position((center[0], center[1]+100))
nodes[0].set_distance_to_bs(spatial.distance.euclidean(
nodes[0].position, base_station.position))
def distribute_pair_fixed_distance_multiple(
nodes_tx: List[d2d_user],
nodes_rx: List[d2d_user],
base_station
):
"""
    Distribute d2d pairs. nodes_tx and nodes_rx should be lists of the same length.
"""
for i in range(len(nodes_tx)):
center = base_station.position
radius = base_station.radius
is_node2_in_circle = False
x1 = (np.random.rand()-0.5)*2*radius+center[0]
y1 = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x1**2))+center[1]
nodes_tx[i].set_position((x1, y1))
nodes_tx[i].set_distance_to_bs(
spatial.distance.euclidean(center, nodes_tx[i].position))
while(not is_node2_in_circle):
angle = np.random.rand()*2*np.pi
x2 = (np.random.rand()-0.5)*2*nodes_tx[i].distance_d2d+x1
y2 = nodes_tx[i].distance_d2d*np.sin(angle)+y1
nodes_bs_distance = spatial.distance.euclidean(
(x2, y2), base_station.position)
if nodes_bs_distance < radius:
nodes_rx[i].set_position((x2, y2))
nodes_rx[i].set_distance_to_bs(nodes_bs_distance)
is_node2_in_circle = True
def distribute_pair_fixed_distance(
nodes,
base_station,
pair_distance,
device_height=1.5
):
center = base_station.position
radius = base_station.radius
is_node2_in_circle = False
x1 = (np.random.rand()-0.5)*2*radius+center[0]
y1 = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x1**2))+center[1]
nodes[0].set_position((x1, y1, device_height))
nodes[0].set_distance_to_bs(
spatial.distance.euclidean(center, nodes[0].position))
while not is_node2_in_circle:
angle = np.random.rand()*2*np.pi
x2 = pair_distance*np.cos(angle) + x1
y2 = pair_distance*np.sin(angle) + y1
nodes_bs_distance = spatial.distance.euclidean(
(x2, y2), base_station.position[0:2]
)
if nodes_bs_distance < radius:
nodes[1].set_position((x2, y2, device_height))
nodes[1].set_distance_to_bs(
spatial.distance.euclidean(
(x2, y2, device_height), base_station.position
)
)
is_node2_in_circle = True
def distribute_pair_random_distance(
nodes,
base_station,
min_distance,
max_distance,
device_height=1.5,
distribution='uniform',
):
center = base_station.position
radius = base_station.radius
is_node2_in_circle = False
if distribution == 'uniform':
x1 = (np.random.rand()-0.5)*2*radius+center[0]
y1 = (np.random.rand()-0.5)*2*(1-np.sqrt(radius**2-x1**2))+center[1]
elif distribution == 'normal':
r = random.gauss(mu=center[0], sigma=450)
r = np.clip(r, -radius, radius)
ang = random.uniform(0, 2*math.pi)
x1 = r * math.cos(ang)
y1 = r * math.sin(ang)
else:
raise Exception('Invalid pairs distribution option.')
nodes[0].set_position((x1, y1, device_height))
nodes[0].set_distance_to_bs(
spatial.distance.euclidean(center, nodes[0].position))
pair_distance = random.uniform(min_distance, max_distance)
while not is_node2_in_circle:
angle = np.random.rand()*2*np.pi
x2 = pair_distance*np.cos(angle) + x1
y2 = pair_distance*np.sin(angle) + y1
nodes_bs_distance = spatial.distance.euclidean(
(x2, y2), base_station.position[0:2]
)
if nodes_bs_distance < radius:
nodes[1].set_position((x2, y2, device_height))
nodes[1].set_distance_to_bs(
spatial.distance.euclidean(
(x2, y2, device_height), base_station.position
)
)
is_node2_in_circle = True
def distribute_rx_fixed_distance(nodes: device, base_station: base_station,
pair_distance: float):
radius = base_station.radius
is_node2_in_circle = False
x1 = nodes[0].position[0]
y1 = nodes[0].position[1]
while not is_node2_in_circle:
angle = np.random.rand()*2*np.pi
x2 = pair_distance*np.cos(angle) + x1
y2 = pair_distance*np.sin(angle) + y1
nodes_bs_distance = spatial.distance.euclidean((x2, y2),
base_station.position)
if nodes_bs_distance < radius:
nodes[1].set_position((x2, y2))
nodes[1].set_distance_to_bs(nodes_bs_distance)
is_node2_in_circle = True
def distribute_d2d_validation(
pairs: List[List[d2d_user]],
base_station: base_station
):
    if len(pairs) != 4:
        raise ValueError('number of d2d pairs must be 4')
    if base_station.position != (0, 0):
        raise ValueError('BS position must be (0,0)')
pairs[0][0].set_position((-250, 250))
pairs[0][1].set_position((-250, 300))
pairs[1][0].set_position((-250, -250))
pairs[1][1].set_position((-250, -300))
pairs[2][0].set_position((250, -250))
pairs[2][1].set_position((250, -300))
pairs[3][0].set_position((250, 250))
pairs[3][1].set_position((250, 300))
for p in pairs:
for n in p:
n.set_distance_to_bs(spatial.distance.euclidean(
n.position, base_station.position))
n.set_distance_d2d(50)
def get_distances_table(nodes):
distances_table = [[spatial.distance.euclidean(
node.position, i.position) for i in nodes] for node in nodes]
return np.array(distances_table)
def ceil(x: float, limit: float):
foo = x if x <= limit else limit
return foo
def get_d2d_links(d2d_nodes_distances_table, d2d_nodes, channel):
it_index = [i for i in range(d2d_nodes_distances_table.shape[0])]
smallest_distance = {'table_position': (99, 99), 'distance': 1e6}
d2d_pairs_table = dict()
d2d_pairs_pathloss_table = dict()
d2d_pairs_index = 0
while(len(it_index) > 0):
for i in it_index:
for j in it_index:
if smallest_distance['distance'] >= \
d2d_nodes_distances_table[i][j] and i != j:
smallest_distance['table_position'] = (i, j)
smallest_distance['distance'] = \
d2d_nodes_distances_table[i][j]
x = smallest_distance['table_position'][0]
y = smallest_distance['table_position'][1]
d2d_pairs_table[f'D2D_LINK:{d2d_pairs_index}'] = \
([f'{d2d_nodes[x].id}',
f'{d2d_nodes[y].id}'], smallest_distance['distance'])
d2d_nodes[x].set_link_id(f'D2D_LINK:{d2d_pairs_index}')
d2d_nodes[y].set_link_id(f'D2D_LINK:{d2d_pairs_index}')
it_index.pop(it_index.index(x))
it_index.pop(it_index.index(y))
d2d_pairs_index = d2d_pairs_index+1
smallest_distance = {'table_position': (99, 99), 'distance': 1e6}
for i in d2d_pairs_table.keys():
d2d_pairs_pathloss_table[i] = \
channel.calculate_pathloss(d2d_pairs_table[i][1])
return d2d_pairs_table, d2d_pairs_pathloss_table
def path_leaf(path):
head, tail = ntpath.split(path)
return tail or ntpath.basename(head)
def jain_index(vec: List[float]):
return np.sum(vec) ** 2 / (len(vec)*np.sum([v ** 2 for v in vec]))
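# Quick sanity check (illustrative values): Jain's fairness index is 1.0 for a
# perfectly fair allocation and 1/n when a single user gets everything.
def _example_jain_index():
    assert jain_index([1.0, 1.0, 1.0, 1.0]) == 1.0
    assert abs(jain_index([1.0, 0.0, 0.0, 0.0]) - 0.25) < 1e-9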
def asMinutes(s):
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
def timeSince(since, percent):
now = time.time()
s = now - since
es = s / (percent)
rs = es - s
return '%s (- %s)' % (asMinutes(s), asMinutes(rs))
def make_dir(dir_name: str):
if not os.path.exists(dir_name):
os.makedirs(dir_name)
def make_dir_timestamp(dir_name: str):
timestr = time.strftime(r"%Y%m%d-%H%M%S")
path = f'{dir_name}/{timestr}'
make_dir(path)
return path, timestr
def load_with_pickle(path: str, mode='rb'):
p_file = open(path, mode)
return pickle.load(p_file)
def save_with_pickle(obj, path: str):
with open(path, 'wb') as p_file:
pickle.dump(obj, p_file)
def sns_confidence_interval_plot(
y_ticks: np.ndarray,
y_label: str,
legend: str,
x_label='Number of D2D pairs'
):
"""x_ticks is the number of d2d pairs.
"""
aux = np.ones((y_ticks.shape[0], np.prod(y_ticks.shape[1:])))
for i in range(len(aux)):
aux[i] *= i + 1
n_d2d = aux.reshape(-1)
aux2 = y_ticks.reshape(-1)
# dataframe
    df = pd.DataFrame({'y_tick': aux2, 'n_d2d': n_d2d})
from __future__ import absolute_import, division, print_function
import pytest
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import DataFrame, Series
from string import ascii_lowercase
from blaze.compute.core import compute
from blaze import dshape, discover, transform
from blaze.expr import symbol, join, by, summary, distinct, shape
from blaze.expr import (merge, exp, mean, count, nunique, sum, min, max, any,
var, std, concat)
from blaze.compatibility import builtins, xfail, assert_series_equal
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
df = DataFrame([['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]], columns=['name', 'amount', 'id'])
ndf = DataFrame([['Alice', 100.0, 1],
['Bob', np.nan, 2],
[np.nan, 50.0, 3]], columns=['name', 'amount', 'id'])
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
dfbig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
def test_series_columnwise():
s = Series([1, 2, 3], name='a')
t = symbol('t', 'var * {a: int64}')
result = compute(t.a + 1, s)
assert_series_equal(s + 1, result)
def test_symbol():
tm.assert_frame_equal(compute(t, df), df)
def test_projection():
tm.assert_frame_equal(compute(t[['name', 'id']], df),
df[['name', 'id']])
def test_eq():
assert_series_equal(compute(t['amount'] == 100, df),
df['amount'] == 100)
def test_selection():
tm.assert_frame_equal(compute(t[t['amount'] == 0], df),
df[df['amount'] == 0])
tm.assert_frame_equal(compute(t[t['amount'] > 150], df),
df[df['amount'] > 150])
def test_arithmetic():
assert_series_equal(compute(t['amount'] + t['id'], df),
df.amount + df.id)
assert_series_equal(compute(t['amount'] * t['id'], df),
df.amount * df.id)
assert_series_equal(compute(t['amount'] % t['id'], df),
df.amount % df.id)
def test_join():
left = DataFrame(
[['Alice', 100], ['Bob', 200]], columns=['name', 'amount'])
right = DataFrame([['Alice', 1], ['Bob', 2]], columns=['name', 'id'])
lsym = symbol('L', 'var * {name: string, amount: int}')
rsym = symbol('R', 'var * {name: string, id: int}')
joined = join(lsym, rsym, 'name')
assert (dshape(joined.schema) ==
dshape('{name: string, amount: int, id: int}'))
result = compute(joined, {lsym: left, rsym: right})
expected = DataFrame([['Alice', 100, 1], ['Bob', 200, 2]],
columns=['name', 'amount', 'id'])
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(joined.fields)
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
left = DataFrame(left, columns=['x', 'y', 'z'])
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
right = DataFrame(right, columns=['x', 'y', 'w'])
lsym = symbol('lsym', 'var * {x: int, y: int, z: int}')
rsym = symbol('rsym', 'var * {x: int, y: int, w: int}')
j = join(lsym, rsym, ['x', 'y'])
expected = [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
expected = DataFrame(expected, columns=['x', 'y', 'z', 'w'])
result = compute(j, {lsym: left, rsym: right})
print(result)
tm.assert_frame_equal(result, expected)
assert list(result.columns) == list(j.fields)
def test_unary_op():
assert (compute(exp(t['amount']), df) == np.exp(df['amount'])).all()
def test_abs():
assert (compute(abs(t['amount']), df) == abs(df['amount'])).all()
def test_neg():
assert_series_equal(compute(-t['amount'], df),
-df['amount'])
@xfail(reason='Projection does not support arithmetic')
def test_neg_projection():
assert_series_equal(compute(-t[['amount', 'id']], df),
-df[['amount', 'id']])
def test_columns_series():
assert isinstance(compute(t['amount'], df), Series)
assert isinstance(compute(t['amount'] > 150, df), Series)
def test_reductions():
assert compute(mean(t['amount']), df) == 350 / 3
assert compute(count(t['amount']), df) == 3
assert compute(sum(t['amount']), df) == 100 + 200 + 50
assert compute(min(t['amount']), df) == 50
assert compute(max(t['amount']), df) == 200
assert compute(nunique(t['amount']), df) == 3
assert compute(nunique(t['name']), df) == 2
assert compute(any(t['amount'] > 150), df) is True
assert compute(any(t['amount'] > 250), df) is False
assert compute(var(t['amount']), df) == df.amount.var(ddof=0)
assert compute(var(t['amount'], unbiased=True), df) == df.amount.var()
assert compute(std(t['amount']), df) == df.amount.std(ddof=0)
assert compute(std(t['amount'], unbiased=True), df) == df.amount.std()
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[-1], df) == df.amount.iloc[-1]
def test_reductions_on_dataframes():
assert compute(count(t), df) == 3
assert shape(compute(count(t, keepdims=True), df)) == (1,)
def test_1d_reductions_keepdims():
series = df['amount']
for r in [sum, min, max, nunique, count, std, var]:
result = compute(r(t.amount, keepdims=True), {t.amount: series})
assert type(result) == type(series)
def test_distinct():
dftoobig = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5],
['Drew', 'M', 200, 5]],
columns=['name', 'sex', 'amount', 'id'])
d_t = distinct(tbig)
d_df = compute(d_t, dftoobig)
tm.assert_frame_equal(d_df, dfbig)
# Test idempotence
tm.assert_frame_equal(compute(d_t, d_df), d_df)
def test_distinct_on():
cols = ['name', 'sex', 'amount', 'id']
df = DataFrame([['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]],
columns=cols)
s = symbol('s', discover(df))
computed = compute(s.distinct('sex'), df)
tm.assert_frame_equal(
computed,
pd.DataFrame([['Alice', 'F', 100, 1],
['Drew', 'M', 100, 5]],
columns=cols),
)
def test_by_one():
result = compute(by(t['name'], total=t['amount'].sum()), df)
expected = df.groupby('name')['amount'].sum().reset_index()
expected.columns = ['name', 'total']
tm.assert_frame_equal(result, expected)
def test_by_two():
result = compute(by(tbig[['name', 'sex']],
total=sum(tbig['amount'])), dfbig)
expected = DataFrame([['Alice', 'F', 200],
['Drew', 'F', 100],
['Drew', 'M', 300]],
columns=['name', 'sex', 'total'])
tm.assert_frame_equal(result, expected)
def test_by_three():
expr = by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum())
result = compute(expr, dfbig)
expected = DataFrame([['Alice', 'F', 204],
['Drew', 'F', 104],
['Drew', 'M', 310]], columns=['name', 'sex', 'total'])
expected.columns = expr.fields
tm.assert_frame_equal(result, expected)
def test_by_four():
t = tbig[['sex', 'amount']]
expr = by(t['sex'], max=t['amount'].max())
result = compute(expr, dfbig)
expected = DataFrame([['F', 100],
['M', 200]], columns=['sex', 'max'])
tm.assert_frame_equal(result, expected)
def test_join_by_arcs():
df_idx = DataFrame([['A', 1],
['B', 2],
['C', 3]],
columns=['name', 'node_id'])
df_arc = DataFrame([[1, 3],
[2, 3],
[3, 1]],
columns=['node_out', 'node_id'])
t_idx = symbol('t_idx', 'var * {name: string, node_id: int32}')
t_arc = symbol('t_arc', 'var * {node_out: int32, node_id: int32}')
joined = join(t_arc, t_idx, "node_id")
want = by(joined['name'], count=joined['node_id'].count())
result = compute(want, {t_arc: df_arc, t_idx: df_idx})
result_pandas = pd.merge(df_arc, df_idx, on='node_id')
gb = result_pandas.groupby('name')
expected = gb.node_id.count().reset_index().rename(columns={
'node_id': 'count'
})
tm.assert_frame_equal(result, expected)
assert list(result.columns) == ['name', 'count']
def test_join_suffixes():
df = pd.DataFrame(
list(dict((k, n) for k in ascii_lowercase[:5]) for n in range(5)),
)
a = symbol('a', discover(df))
b = symbol('b', discover(df))
suffixes = '_x', '_y'
joined = join(a, b, 'a', suffixes=suffixes)
expected = pd.merge(df, df, on='a', suffixes=suffixes)
result = compute(joined, {a: df, b: df})
tm.assert_frame_equal(result, expected)
def test_join_promotion():
a_data = pd.DataFrame([[0.0, 1.5], [1.0, 2.5]], columns=list('ab'))
b_data = pd.DataFrame([[0, 1], [1, 2]], columns=list('ac'))
a = symbol('a', discover(a_data))
b = symbol('b', discover(b_data))
joined = join(a, b, 'a')
assert joined.dshape == dshape('var * {a: float64, b: ?float64, c: int64}')
expected = pd.merge(a_data, b_data, on='a')
result = compute(joined, {a: a_data, b: b_data})
tm.assert_frame_equal(result, expected)
def test_sort():
tm.assert_frame_equal(compute(t.sort('amount'), df),
df.sort('amount'))
tm.assert_frame_equal(compute(t.sort('amount', ascending=True), df),
df.sort('amount', ascending=True))
tm.assert_frame_equal(compute(t.sort(['amount', 'id']), df),
df.sort(['amount', 'id']))
def test_sort_on_series_no_warning(recwarn):
expected = df.amount.order()
recwarn.clear()
assert_series_equal(compute(t['amount'].sort('amount'), df), expected)
# raises as assertion error if no warning occurs, same thing for below
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
assert_series_equal(compute(t['amount'].sort(), df), expected)
with pytest.raises(AssertionError):
assert recwarn.pop(FutureWarning)
def test_field_on_series():
expr = symbol('s', 'var * int')
data = Series([1, 2, 3, 4], name='s')
assert_series_equal(compute(expr.s, data), data)
def test_head():
tm.assert_frame_equal(compute(t.head(1), df), df.head(1))
def test_tail():
tm.assert_frame_equal(compute(t.tail(1), df), df.tail(1))
def test_label():
expected = df['amount'] * 10
expected.name = 'foo'
assert_series_equal(compute((t['amount'] * 10).label('foo'), df),
expected)
def test_relabel():
result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), df)
expected = df.rename(columns={'name': 'NAME', 'id': 'ID'})
tm.assert_frame_equal(result, expected)
def test_relabel_series():
result = compute(t.relabel({'name': 'NAME'}), df.name)
assert result.name == 'NAME'
ts = pd.date_range('now', periods=10).to_series().reset_index(drop=True)
tframe = DataFrame({'timestamp': ts})
def test_map_column():
inc = lambda x: x + 1
result = compute(t['amount'].map(inc, 'int'), df)
expected = df['amount'] + 1
assert_series_equal(result, expected)
def test_map():
f = lambda _, amt, id: amt + id
result = compute(t.map(f, 'real'), df)
expected = df['amount'] + df['id']
assert_series_equal(result, expected)
def test_apply_column():
result = compute(t.amount.apply(np.sum, 'real'), df)
expected = np.sum(df['amount'])
assert result == expected
result = compute(t.amount.apply(builtins.sum, 'real'), df)
expected = builtins.sum(df['amount'])
assert result == expected
def test_apply():
result = compute(t.apply(str, 'string'), df)
expected = str(df)
assert result == expected
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
expected = DataFrame([['Alice', 200],
['Bob', 400],
['Alice', 100]],
columns=['name', 'new'])
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_by_nunique():
result = compute(by(t['name'], count=t['id'].nunique()), df)
expected = DataFrame([['Alice', 2], ['Bob', 1]],
columns=['name', 'count'])
tm.assert_frame_equal(result, expected)
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
expected = df.loc[df.amount < 100, 'name']
result = compute(expr, df)
assert_series_equal(result, expected)
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = DataFrame(left, columns=['id', 'name', 'amount'])
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = DataFrame(right, columns=['city', 'id'])
lsym = symbol('lsym', 'var * {id: int, name: string, amount: real}')
rsym = symbol('rsym', 'var * {city: string, id: int}')
convert = lambda df: set(df.to_records(index=False).tolist())
assert (convert(compute(join(lsym, rsym), {lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')]))
assert (convert(compute(join(lsym, rsym, how='left'),
{lsym: left, rsym: right})) ==
set([(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, np.nan),
(4, 'Dennis', 400, 'Moscow')]))
df = compute(join(lsym, rsym, how='right'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
                          (3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
df = compute(join(lsym, rsym, how='outer'), {lsym: left, rsym: right})
expected = DataFrame([(1., 'Alice', 100., 'NYC'),
(1., 'Alice', 100., 'Boston'),
(2., 'Bob', 200., np.nan),
(3., np.nan, np.nan, 'LA'),
(4., 'Dennis', 400., 'Moscow')],
columns=['id', 'name', 'amount', 'city'])
result = df.sort('id').to_records(index=False)
expected = expected.sort('id').to_records(index=False)
np.array_equal(result, expected)
def test_by_on_same_column():
df = pd.DataFrame([[1, 2], [1, 4], [2, 9]], columns=['id', 'value'])
t = symbol('data', 'var * {id: int, value: int}')
gby = by(t['id'], count=t['id'].count())
expected = DataFrame([[1, 2], [2, 1]], columns=['id', 'count'])
result = compute(gby, {t: df})
tm.assert_frame_equal(result, expected)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 150],
['Bob', 1, 200]], columns=['name', 'count', 'sum'])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 152],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
@pytest.mark.xfail(raises=TypeError,
reason=('pandas backend cannot support non Reduction '
'subclasses'))
def test_summary_by_first():
expr = by(t.name, fst=t.amount[0])
result = compute(expr, df)
assert result == df.amount.iloc[0]
def test_summary_by_reduction_arithmetic():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
result = compute(expr, df)
expected = DataFrame([['Alice', 2, 151],
['Bob', 1, 201]], columns=['name', 'count', 'sum'])
tm.assert_frame_equal(result, expected)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert_series_equal(compute(expr, df), Series({'count': 3, 'sum': 350}))
def test_summary_on_series():
ser = Series([1, 2, 3])
s = symbol('s', '3 * int')
expr = summary(max=s.max(), min=s.min())
assert compute(expr, ser) == (3, 1)
expr = summary(max=s.max(), min=s.min(), keepdims=True)
assert compute(expr, ser) == [(3, 1)]
def test_summary_keepdims():
expr = summary(count=t.id.count(), sum=t.amount.sum(), keepdims=True)
expected = DataFrame([[3, 350]], columns=['count', 'sum'])
tm.assert_frame_equal(compute(expr, df), expected)
def test_dplyr_transform():
df = DataFrame({'timestamp': pd.date_range('now', periods=5)})
t = symbol('t', discover(df))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
lhs = compute(expr, df)
rhs = pd.concat([df, Series(df.timestamp.map(lambda x: x.date()),
name='date').to_frame()], axis=1)
tm.assert_frame_equal(lhs, rhs)
def test_nested_transform():
d = {'timestamp': [1379613528, 1379620047], 'platform': ["Linux",
"Windows"]}
df = DataFrame(d)
t = symbol('t', discover(df))
t = transform(t, timestamp=t.timestamp.map(datetime.fromtimestamp,
schema='datetime'))
expr = transform(t, date=t.timestamp.map(lambda x: x.date(),
schema='datetime'))
result = compute(expr, df)
df['timestamp'] = df.timestamp.map(datetime.fromtimestamp)
df['date'] = df.timestamp.map(lambda x: x.date())
tm.assert_frame_equal(result, df)
def test_like():
expr = t.like(name='Alice*')
expected = DataFrame([['Alice', 100, 1],
['Alice', 50, 3]],
columns=['name', 'amount', 'id'])
result = compute(expr, df).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
def test_strlen():
expr = t.name.strlen()
expected = pd.Series([5, 3, 5], name='name')
result = compute(expr, df).reset_index(drop=True)
assert_series_equal(expected, result)
def test_rowwise_by():
f = lambda _, id, name: id + len(name)
expr = by(t.map(f, 'int'), total=t.amount.sum())
df = pd.DataFrame({'id': [1, 1, 2],
'name': ['alice', 'wendy', 'bob'],
'amount': [100, 200, 300.03]})
expected = pd.DataFrame([(5, 300.03), (6, 300)], columns=expr.fields)
result = compute(expr, df)
tm.assert_frame_equal(result, expected)
def test_datetime_access():
df = DataFrame({'name': ['Alice', 'Bob', 'Joe'],
'when': [datetime(2010, 1, 1, 1, 1, 1)] * 3,
'amount': [100, 200, 300],
'id': [1, 2, 3]})
t = symbol('t', discover(df))
for attr in ['day', 'month', 'minute', 'second']:
expr = getattr(t.when, attr)
assert_series_equal(compute(expr, df),
Series([1, 1, 1], name=expr._name))
def test_frame_slice():
assert_series_equal(compute(t[0], df), df.iloc[0])
assert_series_equal(compute(t[2], df), df.iloc[2])
tm.assert_frame_equal(compute(t[:2], df), df.iloc[:2])
tm.assert_frame_equal(compute(t[1:3], df), df.iloc[1:3])
tm.assert_frame_equal(compute(t[1::2], df), df.iloc[1::2])
tm.assert_frame_equal(compute(t[[2, 0]], df), df.iloc[[2, 0]])
def test_series_slice():
assert compute(t.amount[0], df) == df.amount.iloc[0]
assert compute(t.amount[2], df) == df.amount.iloc[2]
assert_series_equal(compute(t.amount[:2], df), df.amount.iloc[:2])
assert_series_equal(compute(t.amount[1:3], df), df.amount.iloc[1:3])
assert_series_equal(compute(t.amount[1::2], df), df.amount.iloc[1::2])
def test_nelements():
assert compute(t.nelements(), df) == len(df)
assert compute(t.nrows, df) == len(df)
def test_datetime_truncation_minutes():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(20, 'minutes'), data)
expected = Series(['2000-01-01T12:00:00Z', '2000-06-25T12:20:00Z'],
dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_nanoseconds():
data = Series(['2000-01-01T12:10:00.000000005',
'2000-01-01T12:10:00.000000025'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
expected = Series(['2000-01-01T12:10:00.000000000',
'2000-01-01T12:10:00.000000020'],
dtype='M8[ns]', name='s')
result = compute(s.truncate(nanoseconds=20), data)
assert_series_equal(result, expected)
def test_datetime_truncation_weeks():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(2, 'weeks'), data)
expected = Series(['1999-12-19', '2000-06-18'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_days():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
result = compute(s.truncate(days=3), data)
expected = Series(['1999-12-31', '2000-06-25'], dtype='M8[ns]', name='s')
assert_series_equal(result, expected)
def test_datetime_truncation_same_as_python():
data = Series(['2000-01-01T12:10:00Z', '2000-06-25T12:35:12Z'],
dtype='M8[ns]')
s = symbol('s', 'var * datetime')
assert (compute(s.truncate(weeks=2), data[0].to_pydatetime()) ==
datetime(1999, 12, 26).date())
def test_complex_group_by():
expr = by(merge(tbig.amount // 10, tbig.id % 2),
count=tbig.name.count())
result = compute(expr, dfbig) # can we do this? yes we can!
expected = dfbig.groupby([dfbig.amount // 10,
dfbig.id % 2])['name'].count().reset_index()
expected = expected.rename(columns={'name': 'count'})
tm.assert_frame_equal(result, expected)
def test_by_with_complex_summary():
expr = by(t.name, total=t.amount.sum() + t.id.sum() - 1, a=t.id.min())
result = compute(expr, df)
assert list(result.columns) == expr.fields
assert list(result.total) == [150 + 4 - 1, 200 + 2 - 1]
def test_notnull():
assert (compute(nt.name.notnull(), ndf) == ndf.name.notnull()).all()
def test_isnan():
assert (compute(nt.amount.isnan(), ndf) == ndf.amount.isnull()).all()
@pytest.mark.parametrize('keys', [[1], [2, 3]])
def test_isin(keys):
expr = t[t.id.isin(keys)]
result = compute(expr, df)
expected = df.loc[df.id.isin(keys)]
tm.assert_frame_equal(result, expected)
def test_nunique_table():
expr = t.nunique()
result = compute(expr, df)
assert result == len(df.drop_duplicates())
def test_str_concat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s + 'a'
assert (compute(expr, a) == (a + 'a')).all()
def test_str_repeat():
a = Series(('a', 'b', 'c'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.repeat(3)
assert (compute(expr, a) == (a * 3)).all()
def test_str_interp():
a = Series(('%s', '%s', '%s'))
s = symbol('s', "3 * string[1, 'U32']")
expr = s.interp(1)
assert (compute(expr, a) == (a % 1)).all()
def test_timedelta_arith():
series = Series(pd.date_range('2014-01-01', '2014-02-01'))
sym = symbol('s', discover(series))
delta = timedelta(days=1)
assert (compute(sym + delta, series) == series + delta).all()
assert (compute(sym - delta, series) == series - delta).all()
def test_coerce_series():
s = pd.Series(list('123'), name='a')
t = symbol('t', discover(s))
result = compute(t.coerce(to='int64'), s)
    expected = pd.Series([1, 2, 3], name=s.name)
    assert_series_equal(result, expected)
"""
This example shows how to join multiple pandas Series to a DataFrame
For further information take a look at the pandas documentation:
https://pandas.pydata.org/pandas-docs/stable/merging.html
"""
import wapi
import pandas as pd
import matplotlib.pyplot as plt
############################################
# Insert the path to your config file here!
my_config_file = 'path/to/your/config.ini'
############################################
# Create a session to Connect to Wattsight Database
session = wapi.Session(config_file=my_config_file)
## Combine Series with same time index
######################################
# To combine pandas Series that all have the same time index, the simplest
# option is to add a new column for every series
# We first create an empty dataframe
df1 = pd.DataFrame()
import shutil
from glob import glob
from multiprocessing import Pool
from time import time
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import Dataset
from cirrus.netCDFtoTIFF import nc2tiff
from cirrus.util.config import is_goias, lats, logger, lons, ormdtype, settings
from cirrus.util.db import engine, save_df_bd
from cirrus.util.hash import data_frame2hash, generate_file_md5
from cirrus.util.functions import ( # isort:skip
exists_in_the_bank,
get_list_nc,
get_time,
save_hash,
)
def netcsf2sql(file_name: str, rootgrp: Dataset, xr_file, force_save_db):
"""_summary_
Args:
file_name (str): _description_
rootgrp (Dataset): _description_
Returns:
bool: _description_
"""
error = False
for name in settings.vars:
try:
tempc = rootgrp.variables[name][:]
logger.info(
f"Processando varivael {name} de {file_name.split('/')[-1]}"
)
vtime, *_ = [
x.flatten()
for x in np.meshgrid(
get_time(file_name), lats, lons, indexing='ij'
)
]
camadas = {}
if len(np.squeeze(tempc)) == 19:
for c, var in enumerate(np.squeeze(tempc), 1):
camadas[f'{name}_{c:02}'] = var.flatten()
nc2tiff(xr_file, name, f'{name}_{c:02}', file_name, c - 1)
else:
camadas = {f'{name}': np.squeeze(tempc).flatten()}
nc2tiff(xr_file, name, name, file_name)
temp_df = pd.DataFrame(
{
'datetime': vtime,
'goias': is_goias['goias'].array,
**camadas,
'point_gid': is_goias.index,
}
)
temp_df = temp_df.dropna(subset=['goias'])
            temp_df['datetime'] = pd.to_datetime(temp_df['datetime'])
import ntpath
import os
import pickle
import sys
import time
import warnings
import numpy as np
import pandas as pd
from scipy.stats import kurtosis
from scipy.stats import skew
from statsmodels import robust
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
dir_online_features = 'online_features'
columns_intermediate = ['frame_no', 'ts', 'ts_delta', 'protocols', 'frame_len', 'eth_src',
'eth_dst', 'ip_src', 'ip_dst', 'tcp_srcport', 'tcp_dstport',
'http_host', 'sni', 'udp_srcport', 'udp_dstport']
columns_state_features = ["meanBytes", "minBytes", "maxBytes", "medAbsDev", "skewLength",
"kurtosisLength", "q10", "q20", "q30", "q40", "q50", "q60",
"q70", "q80", "q90", "spanOfGroup", "meanTBP", "varTBP",
"medianTBP", "kurtosisTBP", "skewTBP", "network_to", "network_from",
"network_both", "network_to_external", "network_local",
"anonymous_source_destination", "device", "state"]
columns_detect_sequence = ['ts', 'ts_end', 'ts_delta', 'num_pkt', 'state']
save_extracted_features = False
RED = "\033[31;1m"
END = "\033[0m"
path = sys.argv[0]
dir_models = ""
usage_stm = """
Usage: python3 {prog_name} pcap_path model_dir device_name model_name result_path
Uses a model to predict the device activity given network traffic of that device.
Example: python3 {prog_name} yi_camera_sample.pcap tagged-models/us/ yi-camera rf results.csv
Arguments:
pcap_path: path to the pcap file with unknown device activity
model_dir: path to the directory containing the directories of the models
device_name: name of the device that generated the data in pcap_path
model_name: name of the model to be used to predict the device activity in pcap_path;
choose from kmeans, knn, or rf
result_path: path to a CSV file to write results; will be generated if it does not
already exist
Note: The dbscan and spectral algorithms cannot be used for prediction.
For more information, see the README or model_details.md.""".format(prog_name=path)
# is_error is either 0 or 1
def print_usage(is_error):
    print(usage_stm, file=sys.stderr) if is_error else print(usage_stm)
    exit(is_error)
def main():
global dir_models
for arg in sys.argv:
if arg in ("-h", "--help"):
print_usage(0)
print("Running %s..." % path)
if len(sys.argv) != 6:
print("%s%s: Error: 5 arguments required. %d arguments found.%s"
% (RED, path, (len(sys.argv) - 1), END), file=sys.stderr)
print_usage(1)
pcap_path = sys.argv[1]
dir_models = sys.argv[2] + "/" + sys.argv[4]
device = sys.argv[3]
model_name = sys.argv[4]
file_result = sys.argv[5]
user_intermediates = "user-intermediates/"
errors = False
if not pcap_path.endswith('.pcap'):
print("%s%s: Error: \"%s\" is not a pcap (.pcap) file.%s"
% (RED, path, pcap_path, END), file=sys.stderr)
errors = True
elif not os.path.isfile(pcap_path):
print("%s%s: Error: The pcap file \"%s\" does not exist.%s"
% (RED, path, pcap_path, END), file=sys.stderr)
errors = True
if not file_result.endswith('.csv'):
print("%s%s: Error: Output file \"%s\" is not a CSV (.csv) file.%s"
% (RED, path, file_result, END), file=sys.stderr)
errors = True
if not model_name in ("kmeans", "knn", "rf"):
print("%s%s: Error: \"%s\" is not a valid model name. Choose from: kmeans, knn, or rf.%s"
% (RED, path, model_name, END), file=sys.stderr)
errors = True
elif not os.path.isdir(dir_models):
print("%s%s: Error: The model directory \"%s\" is not a directory.%s"
% (RED, path, dir_models, END), file=sys.stderr)
errors = True
else:
file_model = '%s/%s%s.model' % (dir_models, device, model_name)
file_labels = '%s/%s.label.txt' % (dir_models, device)
if not os.path.isfile(file_model):
print("%s%s: Error: The model file %s cannot be found.\n"
" Please regenerate file, check directory name, or check device name.%s"
% (RED, path, file_model, END), file=sys.stderr)
errors = True
if not os.path.isfile(file_labels):
print("%s%s: Error: The label file %s cannot be found.\n"
" Please regenerate file, check directory name, or check device name.%s"
% (RED, path, file_labels, END), file=sys.stderr)
errors = True
if errors:
print_usage(1)
print("Input pcap: %s" % pcap_path)
print("Input model directory: %s" % dir_models)
print("Device name: %s" % device)
print("Model name: %s" % model_name)
print("Output CSV: %s" % file_result)
if not os.path.exists(user_intermediates):
os.system('mkdir -pv %s' % user_intermediates)
file_intermediate = user_intermediates + "/" + ntpath.basename(pcap_path)[:-4] + "txt"
if os.path.isfile(file_intermediate):
print("%s exists. Delete it to reparse the pcap file." % file_intermediate)
else:
os.system("tshark -r %s -Y ip -Tfields -e frame.number -e frame.time_epoch"
" -e frame.time_delta -e frame.protocols -e frame.len -e eth.src"
" -e eth.dst -e ip.src -e ip.dst -e tcp.srcport -e tcp.dstport"
" -e http.host -e udp.srcport -e udp.dstport -E separator=/t > %s"
" 2>/dev/null" % (pcap_path, file_intermediate))
os.system('mkdir -pv `dirname %s`' % file_result)
res = predict(device, file_intermediate)
if res is None or len(res) == 0:
with open(file_result, 'w') as ff:
ff.write('No behavior found for %s from %s' % (device, file_intermediate))
else:
res['device'] = device
res.to_csv(file_result, index=False)
print('Results saved to %s' % file_result)
def predict(device, file_intermediate):
model, labels = load_model(device)
if model is None:
return
res_detect = detect_states(file_intermediate, model, labels, device)
print('Results:')
print(res_detect)
return res_detect
def detect_states(intermediate_file, trained_model, labels, dname=None):
group_size = 100
warnings.simplefilter("ignore", category=DeprecationWarning)
    if not os.path.exists(intermediate_file):
        print('Error: intermediate file %s does not exist' % intermediate_file)
        return
feature_file = None
ss = trained_model['standard_scaler']
pca = trained_model['pca']
trained_model = trained_model['trained_model']
col_names = columns_intermediate
c = columns_state_features.copy()
col_data_points = ['ts', 'ts_end','ts_delta', 'num_pkt']
c.extend(col_data_points)
pd_obj_all = pd.read_csv(intermediate_file, names=col_names, sep='\t')
pd_obj = pd_obj_all.loc[:, ['ts', 'ts_delta', 'frame_len', 'ip_src', 'ip_dst']]
if pd_obj is None or len(pd_obj) < 1: #Nothing in decoded input pcap file
return
num_total = len(pd_obj_all)
print('Total packets: %s' % num_total)
    feature_data = pd.DataFrame()
from numpy.testing import assert_, assert_equal, run_module_suite
from cqed_tools.mf.hamiltonian import *
import pandas as pd
def test_collapse_operators_mf():
params = pd.Series([4, 1.0, 2.0, 3.0], index=['t_levels', 'gamma', 'gamma_phi', 'n_t'], dtype=object)
c_ops = collapse_operators_mf(params)
assert_equal(c_ops[0], destroy(params.t_levels) * np.sqrt(params.gamma * (params.n_t + 1)))
assert_equal(c_ops[1], create(params.t_levels) * np.sqrt(params.gamma * params.n_t))
assert_equal(len(c_ops), 3)
params = pd.Series([14, 0.0, 2.5, 3.7], index=['t_levels', 'gamma', 'gamma_phi', 'n_t'], dtype=object)
c_ops = collapse_operators_mf(params)
assert_equal(len(c_ops), 1)
    params = pd.Series([8, 0.1, 0.0, 5.6], index=['t_levels', 'gamma', 'gamma_phi', 'n_t'], dtype=object)
import docx
import numpy as np
import pandas as pd
import scipy.stats
# TODO: Add kruskal-wallis for median[IQR]
class Table1():
'''
Creates table 1 from pandas Dataframe and allows for export in docx, xls format
Attributes
----------
table : pandas Dataframe
output table
'''
plus_minus = u'\u00B1'
def __init__(self, df, stratification_var, names, keep_one_vars=None, rownames=None, colnames=None, col_ordering=None, row_ordering=None, rounding_digits=1, include_overall=True, overall_colname='Overall', total_row=True, deviation_measure_for_numeric='sd', p_val=True):
'''
Parameters
----------
df : pd.DataFrame
Input dataframe
stratification_var : str
Column stratification variable
names : Dict[str, str]
Specifies variables that are to be in the table based on keys. Values contain name mapping for baseline variables to new names. Also All following parameters and methods use the new names as reference.
keep_one_vars : Dict[str, list], optional
In the case of multilevel variables, allows one to pass name:level items such that within variable name, only the given level is retained as a single row (default:None)
rownames : Dict[str, str], optional
Specify rownames with format old name: new name (default:None)
colnames : Dict[str, str], optional
Specify colnames with format old name: new name (default:None)
col_ordering : list, optional
Order of the stratification variable (default:None)
row_ordering : Dict[str, list], optional
Pass name:order items such that variable name is ordered according to order (default:None)
        rounding_digits : int, optional
            Number of digits to round data to (default:1)
include_overall : bool, optional
Inserts a row-wise total column (default:True)
overall_colname: str, optional
Name of total column (default:'Overall')
total_row: bool, optional
Inserts a row with column-wise totals at top of table (default:True)
deviation_measure_for_numeric: 'str'
For numeric variables, select deviation measure - either 'sd' for standard deviation or 'se' for standard error of the mean (default:'sd')
p_val: bool
Calculate Pearson’s chi-squared test for independence for categorical data or one-way analysis of variance for continuous data and add p_value in a column
'''
assert deviation_measure_for_numeric in ['se', 'sd']
if colnames:
assert isinstance(colnames, dict)
if row_ordering:
assert isinstance(row_ordering, dict)
self.df = df
self.stratification_var = stratification_var
self.names = names
self.keep_one_vars = keep_one_vars
self.rownames = rownames
self.colnames = colnames
self.col_ordering = col_ordering
self.row_ordering = row_ordering
self.rounding_digits = rounding_digits
self.include_overall = include_overall
self.overall_colname = overall_colname
self.total_row = total_row
self.deviation_measure_for_numeric = deviation_measure_for_numeric
self.p_val = p_val
self.reverse_names = {v: k for k, v in self.names.items()}
self.make_table()
@staticmethod
def create(*args, **kwargs):
return Table1(*args, **kwargs).table
def _is_categorical(self, var):
return self.df[var].dtype.name in ['bool', 'object', 'category']
def _p_categorical(self, group):
if self.include_overall:
stat, p, dof, expected = scipy.stats.chi2_contingency(
group.drop(columns=self.overall_colname))
else:
stat, p, dof, expected = scipy.stats.chi2_contingency(group)
return '<0.001' if p < 0.001 else round(p, 3)
def _p_anova(self, var):
unique_levels = [
x for x in self.df[self.stratification_var].unique() if pd.notna(x)]
measures = [self.df.loc[self.df[self.stratification_var]
== level, var] for level in unique_levels]
for i, m in enumerate(measures):
measures[i] = [x for x in m if pd.notna(x)]
stat, p = scipy.stats.f_oneway(*measures)
return '<0.001' if p < 0.001 else round(p, 3)
def _make_categorical_minitable(self, var):
def reformat_categorical(col):
percs = col / group_col_totals[col.name] * 100
percs = percs.round(self.rounding_digits)
return pd.Series([f'{val} ({perc})' for val, perc in zip(col, percs)], index=col.index, name=col.name)
group = self.df.groupby(self.stratification_var)[
var].value_counts(dropna=False).rename().reset_index()
group = group.pivot(var, self.stratification_var)
group.columns = group.columns.droplevel(0)
group.columns.name = ''
group.rename({np.nan: f'Missing {self.names[var]}'}, inplace=True)
group.index.name = self.names[var]
if self.include_overall:
group[self.overall_colname] = group.sum(axis=1)
group_col_totals = group.sum(axis=0)
group = group.fillna(0).astype(int)
if self.p_val:
p = self._p_categorical(group)
group = group.apply(reformat_categorical, axis=0)
if self.p_val:
p_series = pd.Series(
[p] + ['' for _ in range(1, len(group))], index=group.index, name='p-value')
if self.keep_one_vars and self.names[var] in self.keep_one_vars.keys():
to_drop = []
for i in group.index:
if self.rownames and i in self.rownames.keys():
if self.rownames[i] != self.keep_one_vars[self.names[var]]:
to_drop.append(i)
else:
if i != self.keep_one_vars[self.names[var]]:
to_drop.append(i)
group = group.drop(index=to_drop)
group.rename(
{self.keep_one_vars[self.names[var]]: group.index.name}, inplace=True)
if self.p_val:
group['p-value'] = p
else:
header = pd.DataFrame(pd.Series(['' for _ in range(
len(group.columns))], index=group.columns, name=self.names[var])).transpose()
group = pd.concat([header, group])
if self.p_val:
p_series = pd.Series(
[p] + ['' for _ in range(1, len(group))], index=group.index, name='p-value')
group = pd.concat([group, p_series], axis=1)
return group
def _make_numeric_row(self, var):
overall_mn = self.df[var].mean()
mns = self.df.groupby(self.stratification_var)[var].mean()
if self.deviation_measure_for_numeric == 'se':
overall_dev = self.df[var].std() / np.sqrt(len(self.df[var]))
devs = self.df.groupby(self.stratification_var)[var].apply(
lambda x: x.std() / np.sqrt(len(x)))
elif self.deviation_measure_for_numeric == 'sd':
overall_dev = self.df[var].std()
devs = self.df.groupby(self.stratification_var)[var].apply(np.std)
if self.include_overall:
mns = mns.append(
pd.Series(overall_mn, index=[self.overall_colname]))
devs = devs.append(
pd.Series(overall_dev, index=[self.overall_colname]))
overall_mn = round(overall_mn, self.rounding_digits)
mns = mns.round(self.rounding_digits)
devs = devs.round(self.rounding_digits)
ser = pd.DataFrame(pd.Series([f'{mn} {self.plus_minus} {sd}' for mn, sd in zip(
mns, devs)], index=mns.index, name=self.names[var])).transpose()
if self.p_val:
p = self._p_anova(var)
ser['p-value'] = p
return ser
def make_table(self):
self.table = pd.concat([self._make_categorical_minitable(var) if self._is_categorical(
var) else self._make_numeric_row(var) for var in self.names.keys()])
if self.total_row:
self.insert_total_row(return_table=False)
if self.colnames:
self.table = self.table.rename(columns=self.colnames)
if self.row_ordering:
for var, order in self.row_ordering.items():
self.row_reorder(var, order, return_table=False)
if self.rownames:
self.table.rename(self.rownames, inplace=True)
if self.col_ordering:
assert len(self.col_ordering) == len(
self.table.columns), f'Got {len(self.col_ordering)} in col_ordering, expected {len(self.table.columns)}: {self.table.columns}'
self.column_reorder(self.col_ordering)
def column_reorder(self, order, return_table=True):
try:
assert all([o in self.table.columns for o in order])
except AssertionError:
print([o for o in order if o not in self.table.columns])
table = self.table[order]
self.table = table
if return_table:
return self.table
def row_reorder(self, var, order, return_table=True):
og_varname = self.reverse_names[var]
assert self._is_categorical(og_varname)
assert var in self.table.index
try:
assert len(order) == len(self.df[og_varname].unique())
except AssertionError:
unique_levels = self.df[og_varname].unique()
np.place(unique_levels, pd.isna(unique_levels), f'Missing {var}')
discrepancies = set(unique_levels).difference(set(order))
raise ValueError(
f'{discrepancies} found in levels, not provided in order')
i_order = [self.table.index.get_loc(o) for o in order]
new_order = list(np.arange(min(i_order))) + i_order + \
list(np.arange(max(i_order) + 1, len(self.table)))
self.table = self.table.iloc[new_order]
if return_table:
return self.table
def insert_header(self, name, after):
header = pd.DataFrame(pd.Series(['' for _ in range(
len(self.table.columns))], index=self.table.columns, name=name)).transpose()
idx = self.table.index.get_loc(after)
self.table = pd.concat(
[self.table.iloc[:idx + 1], header, self.table.iloc[idx + 1:]])
return self.table
def insert_total_row(self, adornment='n = ', return_table=True):
counts = self.df[self.stratification_var].value_counts(dropna=False)
counts[self.overall_colname] = len(self.df)
counts = pd.Series(
[f'{adornment}{c}' for c in counts], index=counts.index, name='')
        sum_row = pd.DataFrame(counts).transpose()
        self.table = pd.concat([sum_row, self.table])
        if return_table:
            return self.table
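# Illustrative usage sketch (toy data; the column names, levels and variable
# labels below are made up for demonstration):
def _example_table1():
    toy = pd.DataFrame({
        'arm': ['treatment', 'control'] * 20,
        'age': np.random.normal(60, 10, 40),
        'sex': np.random.choice(['male', 'female'], 40),
    })
    # Numeric variables are summarised as mean +/- sd, categoricals as n (%).
    return Table1.create(
        toy,
        stratification_var='arm',
        names={'age': 'Age, years', 'sex': 'Sex'},
    )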
import pandas as pd
def convert_nested_to_dataframe(agg, dates_as_key=True):
'''A function that takes nested elasticsearch response with aggregation and returns a nested dataframe
Warning: This is a recursive function, and rather non-intuitive to understand
    This function takes nested and crossed aggregations and converts them to an easy-to-manipulate pandas dataframe
e.g. Here we have a gender aggregation nested in year which is nested in state
the output we want:
state year gender doc_count
CA 2000 male 2
CA 2000 female 5
CA 2001 male 5
CA 2001 female 5
CA 2002 male 5
CA 2002 female 5
MN 2000 male 2
MN 2000 female 5
MN 2001 male 5
MN 2001 female 5
MN 2002 male 5
MN 2002 female 5
NY 2000 male 2
NY 2000 female 5
NY 2001 male 5
NY 2001 female 5
NY 2002 male 5
NY 2002 female 5
What we do is step down through all the layers of nested data (recursively) until we reach the end,
and from the end, start creating pandas dataframes that get merged back into one giant dataframe
    This function is in an experimental state and is currently only tested on 3 nested levels.
TODO crossed data does not work
:param agg: an aggregation from elasticsearch results with nesting
:type agg: elasticsearch response.aggregation object
:returns: pandas data frame like example above, with nested data
'''
crossed_cats_expanded = []
high_level_returning = False
agg_as_dict = agg.to_dict()
cat_names = [item for item in agg_as_dict.keys() if type(agg_as_dict[item]) is dict]
for cat_name in cat_names: # TODO deal with multiple aggregations at the same level (Crossing)
expanded_buckets = []
merge_vert = False
if not len(getattr(agg, cat_name).buckets):
raise ValueError('There is no count data in the lowest level of nesting. Is your search setup correctly?')
for bucket in getattr(agg, cat_name).buckets:
bucket_as_dict = bucket.to_dict()
if dict not in [type(item) for item in bucket_as_dict.values()]:
# we are at lowest level, begin return
if ('key_as_string' in bucket_as_dict.keys()) and dates_as_key: # change dates to readble format
bucket_as_dict['key'] = bucket['key_as_string']
bucket_as_dict.pop('key_as_string')
bucket_as_dict[cat_name] = bucket_as_dict.pop(
'key') # change the name of the key to something meaningful
expanded_buckets.append(bucket_as_dict) # combine each dict at the lowest level
else:
# We are at some level other than the lowest
level_name = str(bucket.key) # save the name of this level
lower_level_return = convert_nested_to_dataframe(bucket) # and drop down into the next level
expanded_buckets.append(add_category_labels(level_name, cat_name, lower_level_return))
merge_vert = True
if not merge_vert:
dataframe_out = pd.DataFrame(expanded_buckets)
            dataframe_out = dataframe_out.rename(columns=lambda x: x.replace('key', cat_name))
crossed_cats_expanded.append(dataframe_out.reset_index(drop=True))
high_level_returning = True
if high_level_returning:
return | pd.concat(crossed_cats_expanded, axis=1) | pandas.concat |
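# Illustrative sketch: convert_nested_to_dataframe only relies on .to_dict(), attribute
# access to the aggregation name and a .buckets list, so a minimal stand-in object is
# enough to exercise the lowest (un-nested) level of the recursion. The _Fake* classes
# and the 'gender' counts below are invented for demonstration only.
class _FakeBucket:
    def __init__(self, key, doc_count):
        self.key = key
        self._d = {'key': key, 'doc_count': doc_count}
    def to_dict(self):
        return dict(self._d)
class _FakeTerms:
    def __init__(self, buckets):
        self.buckets = buckets
class _FakeAgg:
    def __init__(self, name, buckets):
        self._name = name
        setattr(self, name, _FakeTerms(buckets))
    def to_dict(self):
        return {self._name: {'buckets': [b.to_dict() for b in getattr(self, self._name).buckets]}}
_demo_agg = _FakeAgg('gender', [_FakeBucket('male', 2), _FakeBucket('female', 5)])
_demo_frame = convert_nested_to_dataframe(_demo_agg)  # columns: doc_count, gender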
from datetime import date, timedelta, datetime
import html
from itertools import cycle, chain
import logging
from typing import Dict, Optional, Sequence, Union, Any
import warnings
from bokeh.layouts import gridplot, column
from bokeh.models import ColumnDataSource as CDS, Text, Title, Label
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
# TODO FIXME also handle errors?
# global error list + plotly like number of errors per plot?
def scatter_matrix(
df,
*,
xs: Sequence[str]=None, ys: Sequence[str]=None,
width=None, height=None,
regression=True,
**kwargs,
):
assert len(df) > 0, 'TODO handle this'
# FIXME handle empty df
source = CDS(df)
# TODO what about non-numeric stuff?
xs = df.columns if xs is None else xs
ys = df.columns if ys is None else ys
ys = list(reversed(ys)) # reorder to move meaningful stuff to the top left corner
isnum = lambda c: | is_numeric_dtype(df.dtypes[c]) | pandas.api.types.is_numeric_dtype |
# Standard libraries
import numpy as np
import pandas as pd
# Various
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
def remove_corrupted_images(df):
    # IDs of known corrupted images; return 0 to filter the row out, 1 to keep it
    bad_image_ids = {1, 2, 3}
    if df.ID in bad_image_ids:
        filter = 0
    else:
        filter = 1
    return filter
def clean_data_csv(data_path = "../data/", csv_name = 'stage_1_train.csv',shuffle_data = True):
data_raw = pd.read_csv(data_path + csv_name)
data_raw['filename'] = data_raw['ID'].apply(lambda x: "ID_" + x.split('_')[1] + ".dcm")
data_raw['type'] = data_raw['ID'].apply(lambda x: x.split('_')[2])
data_pivot = data_raw[['Label', 'filename', 'type']].drop_duplicates().pivot(
index='filename', columns='type', values='Label').reset_index()
if shuffle_data:
data = shuffle(data_pivot)
else:
data = data_pivot
data.reset_index(drop=True,inplace=True)
data = pd.DataFrame(data,columns = list(data.columns))
return data
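# Illustrative sketch of the same ID split / pivot logic as clean_data_csv, run on a tiny
# in-memory frame instead of stage_1_train.csv; the IDs and labels below are invented.
import pandas as pd
_demo_raw = pd.DataFrame({
    'ID': ['ID_abc_epidural', 'ID_abc_any', 'ID_def_epidural', 'ID_def_any'],
    'Label': [0, 0, 1, 1]})
_demo_raw['filename'] = _demo_raw['ID'].apply(lambda x: "ID_" + x.split('_')[1] + ".dcm")
_demo_raw['type'] = _demo_raw['ID'].apply(lambda x: x.split('_')[2])
_demo_pivot = _demo_raw[['Label', 'filename', 'type']].drop_duplicates().pivot(
    index='filename', columns='type', values='Label').reset_index()
# one row per filename, one 0/1 column per label type ('any', 'epidural')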
def balanced_images_all_classes(image_list,n=2500,replace=False,random_state = 12345):
image_subset = [
image_list[image_list[category] ==1 ].sample(
n=n,
replace = replace,
random_state = random_state)
for category
in ['epidural',
'intraparenchymal',
'intraventricular',
'subarachnoid',
'subdural']
]
image_subset.append(
image_list[image_list['any'] == 0 ].sample(
n=n*5,
replace = replace,
random_state = random_state)
)
image_subset_combined = pd.concat(image_subset).drop_duplicates()
image_subset_combined = shuffle(image_subset_combined,random_state = random_state)
image_subset_combined.reset_index(drop=True, inplace = True)
return image_subset_combined
def balanced_images_binary(image_list,n=2500,replace=False,random_state = 12345):
image_subset = [image_list[image_list['any'] ==1 ]]
image_subset.append(
image_list[image_list['any'] == 0 ].sample(
n=len(image_subset[0]),
replace = replace,
random_state = random_state)
)
image_subset_combined = | pd.concat(image_subset) | pandas.concat |
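# Illustrative sketch of the balancing idea behind balanced_images_binary, on an invented
# ten-image frame: keep every positive ('any' == 1) row and sample an equal number of
# negatives so both classes end up the same size.
import pandas as pd
_demo_images = pd.DataFrame({'filename': [f'img_{i}.dcm' for i in range(10)],
                             'any': [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]})
_demo_pos = _demo_images[_demo_images['any'] == 1]
_demo_neg = _demo_images[_demo_images['any'] == 0].sample(n=len(_demo_pos),
                                                          random_state=12345)
_demo_balanced = pd.concat([_demo_pos, _demo_neg]).reset_index(drop=True)  # 6 rows, 3 per class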
import pickle
#from .retrieve_marks import norm2
import os
import _pickle as cPickle
#import cache
from scipy.interpolate import interp1d
import pandas as pd
import numpy as np
import glob
import pprint
from scipy.signal import find_peaks
chromlength_human =[249250621,243199373,198022430,191154276,180915260,171115067,159138663,146364022,
141213431,135534747,135006516,133851895,115169878,107349540,102531392,90354753,81195210,78077248,
59128983,63025520,48129895,51304566]
chromlength_yeast =[230218,813184,316620,1531933,576874,270161,1090940,562643,439888,
745751,666816,1078177,924431,784333,1091291,948066]
try:
    import pyBigWig
    import gffutils
except ImportError:
    print("You may need to install pyBigWig and gffutils")
pp = pprint.PrettyPrinter(indent=2)
def smooth(ser, sc):
return np.array(pd.Series(ser).rolling(sc, min_periods=1, center=True).mean())
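# Illustrative check: smooth() is a centred rolling mean over `sc` bins, so an isolated
# spike gets spread over its neighbours. The toy signal below is invented.
_demo_smoothed = smooth([0, 0, 0, 10, 0, 0, 0], 3)  # the spike is averaged over three bins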
# string,string -> bool,[],res
# select a strain and an experimental value and return if available, the files and the resolution
# def is_available(strain, experiment):
# return True,[],1
ROOT = "../DNaseI/data/"
def nan_polate_c(A, kind="linear"):
ok = ~np.isnan(A)
x = np.arange(len(A))
f2 = interp1d(x[ok], A[ok], kind=kind, bounds_error=False)
# print(ok)
return f2(x)
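# Illustrative check: nan_polate_c fills NaN gaps by interpolating between the
# surrounding valid samples. Toy data, invented for demonstration.
_demo_filled = nan_polate_c(np.array([1.0, np.nan, 3.0, np.nan, 5.0]))  # -> [1., 2., 3., 4., 5.]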
def is_available_alias(strain, experiment):
alias = {"Hela": ["Hela", "HeLaS3", "Helas3"],
"Helas3": ["Helas3", "HeLaS3", "Hela"],
"GM12878": ["GM12878", "Gm12878"],
"Gm12878": ["GM12878", "Gm12878"]
}
# alias={"Helas3":["HeLaS3","Hela","Helas3"]}
if strain not in alias.keys():
avail, files, res = is_available(strain, experiment)
else:
for strain1 in alias[strain]:
avail, files, res = is_available(strain1, experiment)
if files != []:
if strain1 != strain:
print("Using alias %s" % strain1)
return avail, files, res
return avail, files, res
def is_available(strain, experiment):
avail_exp = ["MRT", "OKSeq", "OKSeqo", "DNaseI", "ORC2", "ExpGenes", "Faire", "Meth", "Meth450",
"Constant", "OKSeqF", "OKSeqR", "OKSeqS", "CNV", "NFR",
"MCM", "HMM", "GC", "Bubble","G4","G4p","G4m","Ini","ORC1","AT_20","AT_5","AT_30","RHMM","MRTstd",
"RNA_seq","MCMo","MCMp","MCM-beda","Mcm3","Mcm7","Orc2","Orc3"]
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1', "SNS"]
marks_bw = [m + "wig" for m in marks]
Prot = ["Rad21","ORC2"]
#print("La")
if strain in ["Yeast-MCM"]:
lroot = ROOT+"/external/"+strain + "/"
resolutions = glob.glob(lroot + "/*")
#print(lroot + "/*")
resolutions = [r.split("/")[-1] for r in resolutions if "kb" in r]
#print(resolutions)
if len(resolutions) != 0:
exps = glob.glob(lroot + resolutions[0]+"/*")
files = []+exps
exps = [exp.split("/")[-1][:] for exp in exps if "csv" in exp]
print(exps)
for iexp,exp in enumerate(exps):
if exp == experiment:
return True,[files[iexp]],int(resolutions[0].replace("kb",""))
if strain in ["Cerevisae"] and experiment =="MCM-beda":
lroot = ROOT+"/external/Yeast-MCM-bedalov/"
return True,glob.glob(lroot+"/*"),0.001
if experiment not in avail_exp + marks + Prot + marks_bw:
print("Exp %s not available" % experiment)
print("Available experiments", avail_exp + marks + Prot)
return False, [], None
if experiment == "Constant":
return True, [], 1
if experiment == "MRT":
if strain == "Cerevisae":
return True, ["/home/jarbona/ifromprof/notebooks/exploratory/Yeast_wt_alvino.csv"], 1
elif strain == "Raji":
files = glob.glob(ROOT + "/external/timing_final//*_Nina_Raji_logE2Lratio_w100kbp_dw10kbp.dat" )
return True, files, 10
else:
root = ROOT + "/Data/UCSC/hsap_hg19/downloads/ENCODE/wgEncodeUwRepliSeq_V2/compute_profiles/timing_final/"
root = ROOT + "/external/timing_final/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/timing_final_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "MRTstd":
root = ROOT + "/external/Sfrac/"
extract = glob.glob(root + "/*Rep1_chr10.dat")
cells = [e.split("_")[-3] for e in extract]
if strain in cells:
files = glob.glob(root + "/Sfrac_HansenNormDenoised_W100kb_dx10kb_%s*" % strain)
return True, files, 10
if experiment == "ExpGenes":
root = ROOT + "/external/ExpressedGenes/"
extract = glob.glob(root + "/*ExpressedGenes_zero.txt")
# print(extract)
cells = [e.split("/")[-1].replace("ExpressedGenes_zero.txt", "") for e in extract]
print(cells)
if strain in cells:
files = glob.glob(root + "/%sExpressedGenes_zero.txt" % strain)
return True, files, 10
if experiment == "RNA_seq":
root = ROOT + "external//RNA_seq//"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*Tot")
# print(extract)
cells = [e.split("/")[-1].split("_")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + "_Tot/*")
files.sort()
return True, files, 1
if experiment == "NFR":
root = ROOT + "/external/NFR/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
return True, extract, 1
if experiment == "Bubble":
root = ROOT + "/external/Bubble/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bedgraph")
# print(extract)
cells = [e.split("/")[-1].split(".bedgraph")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bedgraph")
files.sort()
return True, files, 1
#print("IRCRRRRRRRRRRRRRRRRRRRR")
if experiment == "ORC1":
#print("LA")
root = ROOT + "/external/ORC1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].split(".bed")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".bed")
files.sort()
            return True, files, 1  # assumed resolution, matching the other .bed tracks above
if (experiment in ["Mcm3","Mcm7","Orc2","Orc3"]) and strain =="Raji":
return True,glob.glob(ROOT+"/external/nina_kirstein/*_"+experiment+"_G1_1kbMEAN.txt") ,1
if experiment in ["MCM","MCMp"]:
#print("LA")
if strain != "Hela":
return False,[],1
root = ROOT + "/external/MCM2-bed/R1/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
#print(extract)
return True, extract, 1
if experiment == "Ini":
root = ROOT + "/external/ini/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.csv")
# print(extract)
cells = [e.split("/")[-1].split(".csv")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".csv")
files.sort()
return True, files, 1
if experiment == "GC":
root = ROOT + "/external//1ColProfiles/*1kbp*" # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 1
if "AT" in experiment:
root = ROOT + "/external//AT_hooks/c__%s.csv"%experiment.split("_")[1] # chr1_gc_native_w1kbp.dat
extract = glob.glob(root)
return True, extract, 5
if experiment == "SNS":
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = []
if strain in ["K562"]:
extract = glob.glob(root + "*.bed")
elif strain in ["HeLaS3","Hela","HeLa"]:
extract=glob.glob(root + "*.csv")
#print("Strain",strain)
#print(extract, root)
if strain not in ["K562","HeLaS3"]:
print("Wrong strain")
print("Only K562")
return False, [], 1
return True, extract, 1
if experiment == "MCMo":
if strain not in ["HeLa", "HeLaS3","Hela"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise
root = ROOT + "/external/MCM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "MCMbw":
if strain not in ["HeLa", "HeLaS3"]:
print("Wrong strain")
print("Only", "HeLa", "HeLaS3")
raise
"""
root = ROOT + "/external/SNS/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1"""
if "G4" in experiment:
root = ROOT + "/external/G4/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
print(extract, root)
return True, extract, 1
if experiment == "CNV":
root = ROOT + "/external/CNV/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.txt")
# print(extract)
cells = [e.split("/")[-1].split(".txt")[0] for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + strain + ".txt")
files.sort()
#print(files)
return True, files, 10
if experiment == "HMM":
root = ROOT + "/external/HMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
# print(extract)
cells = [e.split("/")[-1].replace("wgEncodeBroadHmm", "").replace("HMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "wgEncodeBroadHmm%sHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 10
if experiment == "RHMM":
root = ROOT + "/external/RHMM/"
# root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*.bed")
#print(extract)
cells = [e.split("/")[-1].replace("RHMM.bed", "")
for e in extract]
cells.sort()
if strain in cells:
files = glob.glob(root + "%sRHMM.bed" % strain)
files.sort()
# print(files)
return True, files, 1
if experiment.startswith("OKSeq"):
root = ROOT + "/Data/UCSC/hsap_hg19//local/Okazaki_Hyrien/1kb_profiles/"
root = ROOT + "/external/1kb_profiles//"
extract = glob.glob(root + "*")
cells = [e.split("/")[-1] for e in extract]
cells.sort()
# print(cells)
if strain in cells:
if experiment == "OKSeqo":
files = glob.glob(root + strain + "/*pol*")
if experiment == "OKSeqF":
files = glob.glob(root + strain + "/*F*")
if experiment == "OKSeqR":
files = glob.glob(root + strain + "/*R*")
if experiment in ["OKSeqS", "OKSeq"]:
# print("La")
files = glob.glob(root + strain + "/*R*")
files += glob.glob(root + strain + "/*F*")
files.sort()
return True, files, 1
if experiment == "DNaseI":
root = ROOT + "/external/DNaseI//"
print(root)
if strain == "Cerevisae":
return True, [root + "/yeast.dnaseI.tagCounts.bed"], 0.001
else:
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace("wgEncodeAwgDnaseUwduke",
"").replace("UniPk.narrowPeak", "") for e in extract]
extract2 = glob.glob(root + "../DNaseIK562/*.narrowPeak")
cells2 = [e.split("/")[-1].replace("wgEncodeOpenChromDnase",
"").replace("Pk.narrowPeak", "") for e in extract2]
extract3 = glob.glob(root + "../DNaseIK562/*.bigWig")
cells3 = [e.split("/")[-1].replace("wgEncodeUwDnase",
"").replace("Rep1.bigWig", "") for e in extract3]
# print(extract2, cells2)
extract += extract2
cells += cells2
extract += extract3
cells += cells3
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 0.001
if experiment == "Meth":
root = ROOT + "/external/methylation//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Meth450":
root = ROOT + "/external/methylation450//"
extract = glob.glob(root + "*.bed")
cells = [e.split("/")[-1].replace(".bed", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment == "Faire":
root = ROOT + "/external/Faire//"
extract = glob.glob(root + "*.pk")
cells = [e.split("/")[-1].replace("UncFAIREseq.pk", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
if experiment in Prot:
root = ROOT + "/external/DNaseI//"
extract = glob.glob(root + "/*.narrowPeak")
cells = [e.split("/")[-1].replace(experiment + "narrowPeak", "") for e in extract]
if strain in cells:
files = [extract[cells.index(strain)]]
return True, files, 1
root = ROOT + "/external/proteins//"
extract = glob.glob(root + "/*.csv")
cells = [e.split("/")[-1].replace("_ORC2_miotto.csv", "") for e in extract]
if strain in cells:
files = glob.glob(root + "/%s_ORC2_miotto.csv" % strain)
return True, files, 1
if experiment in marks:
root = ROOT + "/external/histones//"
if experiment == "H2az" and strain == "IMR90":
experiment = "H2A.Z"
extract = glob.glob(root + "/*%s*.broadPeak" % experiment)
#print(extract)
if strain not in ["IMR90"]:
cells = [e.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("Std", "").replace("%sPk.broadPeak" % experiment, "") for e in extract]
# print(extract,cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdPk.broadPeak" %
(strain, experiment))
files += glob.glob(root + "/wgEncodeBroadHistone%s%sPk.broadPeak" %
(strain, experiment))
return True, files, 1
else:
cells = [e.split("/")[-1].split("-")[0] for e in
extract]
# print(extract,cells)
print("Larr")
if strain in cells:
files = glob.glob(root + "/%s-%s.broadPeak" %
(strain, experiment))
return True, files, 1
if experiment[:-3] in marks:
root = ROOT + "/external/histones//"
if strain not in ["IMR90"]:
extract = glob.glob(root + "/*%s*.bigWig" % experiment[:-3])
# print(extract)
cells = []
for c in extract:
if "StdSig" in c:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sStdSig.bigWig" % experiment[:-3], ""))
else:
cells.append(c.split("/")[-1].replace("wgEncodeBroadHistone",
"").replace("%sSig.bigWig" % experiment[:-3], ""))
# print(extract, cells)
if strain in cells:
files = glob.glob(root + "/wgEncodeBroadHistone%s%sStdSig.bigWig" %
(strain, experiment[:-3]))
if files == []:
#print("Warning using Sig")
files = glob.glob(root + "/wgEncodeBroadHistone%s%sSig.bigWig" %
(strain, experiment[:-3]))
# print(files)
return True, files, 1
else:
exp = experiment[:-3]
exp = exp.replace("k","K") # from roadmap epi
extract = glob.glob(root + "/IMR90_%s*wh.csv" % exp)
print(extract)
cells = []
return True, extract, 1
print("Available cells")
pp.pprint(cells)
return False, [], None
def re_sample(x, y, start, end, resolution=1000):
resampled = np.zeros(int(end / resolution - start / resolution)) + np.nan
# print(data)
# print(resampled.shape)
for p, v in zip(x, y):
#print(v)
if not np.isnan(v):
posi = int((p - start) / resolution)
if np.isnan(resampled[min(posi, len(resampled) - 1)]):
resampled[min(posi, len(resampled) - 1)] = 0
resampled[min(posi, len(resampled) - 1)] += v
if int(posi) > len(resampled) + 1:
print("resample", posi, len(resampled))
# raise "Problem"
return np.arange(len(resampled)) * resolution + start, resampled
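# Illustrative check: re_sample sums the values whose positions fall into each
# `resolution`-sized bin between start and end. Toy numbers, invented.
_demo_bins, _demo_vals = re_sample(x=np.array([0, 500, 1500, 2500]),
                                   y=np.array([1.0, 2.0, 3.0, 4.0]),
                                   start=0, end=3000, resolution=1000)
# _demo_vals -> [3., 3., 4.] : the first two points share the 0-1000 bin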
def cut_path(start, end, res=1):
initpos = 0 + start
delta = end - start
path = [0 + initpos]
def cond(x): return x <= end
while (initpos + delta) != int(initpos) and cond(initpos):
ddelta = int(initpos) + res - initpos
initpos += ddelta
ddelta -= initpos
path.append(initpos)
path[-1] = end
if len(path) >= 2 and path[-1] == path[-2]:
path.pop(-1)
return path
def overlap(start, end, res):
r = cut_path(start / res, end / res)
return [ri * res for ri in r]
def overlap_fraction(start, end, res):
assert(start <= end)
v = np.array(overlap(start, end, res))
deltas = (v[1:] - v[:-1]) / res
    indexes = np.array(v[:-1] / res, dtype=int)
return deltas, indexes
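# Illustrative check: overlap_fraction cuts the interval [start, end] at every multiple
# of `res` and reports, for each touched bin, which fraction of that bin the interval
# covers. Toy numbers, invented.
_demo_fracs, _demo_idx = overlap_fraction(500, 2500, 1000)
# _demo_idx   -> [0, 1, 2]
# _demo_fracs -> [0.5, 1. , 0.5]  (half of bin 0, all of bin 1, half of bin 2)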
def create_index_human(strain,exp,resolution=10,root="./"):
#chromlength = [248956422]
data = {iexp:[] for iexp in exp}
for chrom, length in enumerate(chromlength_human, 1):
for iexp in exp:
data[iexp].append(replication_data(strain, iexp,
chromosome=chrom, start=0,
end=length // 1000,
resolution=resolution)[1])
if iexp == "OKSeq":
data[iexp][-1] /= resolution
ran = [np.arange(len(dat)) * 1000 * resolution for dat in data[exp[0]]]
index = {"chrom": np.concatenate([["chr%i"%i]*len(xran) for i,xran in enumerate(ran,1)]),
"chromStart":np.concatenate(ran),
"chromEnd":np.concatenate(ran)}
print(root)
os.makedirs(root,exist_ok=True)
pd.DataFrame(index).to_csv(root+"/index.csv",index=False)
for iexp in exp:
index.update({"signalValue":np.concatenate(data[iexp])})
Df = pd.DataFrame(index)
Df.to_csv(root + "/%s.csv" % iexp, index=False)
def whole_genome(**kwargs):
data = []
def fl(name):
def sanit(z):
z = z.replace("/", "")
return z
if type(name) == dict:
items = list(name.items())
items.sort()
return "".join(["%s-%s" % (p, sanit(str(fl(value)))) for p, value in items])
else:
return name
redo = kwargs.pop("redo")
root = kwargs.get("root", "./")
# print("ic")
if "root" in kwargs.keys():
# print("la")
kwargs.pop("root")
name = root + "data/saved/"+fl(kwargs)
if os.path.exists(name) and not redo:
with open(name, "rb") as f:
return cPickle.load(f)
strain = kwargs.pop("strain")
experiment = kwargs.pop("experiment")
resolution = kwargs.pop("resolution")
for chrom, length in enumerate(chromlength_human, 1):
data.append(replication_data(strain, experiment,
chromosome=chrom, start=0,
end=length//1000,
resolution=resolution, **kwargs)[1])
if len(data[-1]) != int(length / 1000 / resolution - 0 / resolution):
print(strain, experiment, len(data[-1]),
int(length / 1000 / resolution - 0 / resolution))
raise
with open(name, "wb") as f:
cPickle.dump(data, f)
return data
def replication_data(strain, experiment, chromosome,
start, end, resolution, raw=False,
oData=False, bp=True, bpc=False, filename=None,
pad=False, smoothf=None, signame="signalValue"):
marks = ['H2az', 'H3k27ac', 'H3k27me3', 'H3k36me3', 'H3k4me1',
'H3k4me2', 'H3k4me3', 'H3k79me2', 'H3k9ac', 'H3k9me1',
'H3k9me3', 'H4k20me1']
if experiment != "" and os.path.exists(experiment):
filename = experiment
if os.path.exists(strain) and strain.endswith("csv"):
#print(strain)
data=pd.read_csv(strain)
#print(len(data))
sub = data[data.chrom==chromosome][experiment]
y = np.array(sub[int(start/resolution):int(end/resolution)])
print("Sizes",chromosome,len(sub),int(end/resolution))
return (np.arange(len(y))*resolution + start)*1000,y
#chn = list(set(data.chr))
if experiment.endswith("weight"):
from repli1d.retrieve_marks import norm2
with open(experiment, "rb") as f:
w = pickle.load(f)
if len(w) == 4:
[M, S, bestw, Exp] = w
normed = False
else:
[M, S, bestw, Exp, normed] = w
if normed:
smark = replication_data(chromosome=chromosome, start=start,
end=end, strain=strain, experiment="CNV",
resolution=resolution, raw=False, oData=False,
bp=True, bpc=False, filename=None)[1]
smark[smark == 0] = 4
smark[np.isnan(smark)] = 4
CNV = smark
Signals = {}
for mark in Exp:
if "_" in mark:
markn, smoothf = mark.split("_")
smoothf = int(smoothf)
else:
markn = mark
smark = replication_data(chromosome=chromosome, start=start,
end=end, strain=strain, experiment=markn,
resolution=resolution, raw=False, oData=False,
bp=True, bpc=False, filename=None)[1]
if normed:
smark /= CNV
if mark != "Constant":
Signals[mark] = norm2(smark, mean=M[mark], std=S[mark], cut=15)[0]
else:
Signals[mark] = smark
if smoothf is not None:
Signals[mark] = smooth(Signals[mark], smoothf)
# print(bestw)
if type(bestw[0]) in [list, np.ndarray]:
comp = [bestw[0][i]*(-1+2/(1+np.exp(-bestw[1][i]*(Signals[iexp]-bestw[2][i]))))
for i, iexp in enumerate(Exp)]
else:
comp = np.array([bestw[i] * Signals[iexp] for i, iexp in enumerate(Exp)])
y = np.sum(comp, axis=0)
y[y < 0] = 0
x = np.arange(len(y))*resolution + start
return x, y
# print(smark)
if filename is None:
avail, files, resolution_experiment = is_available_alias(strain, experiment)
if not avail:
return [], []
else:
print("Filename", filename)
avail = True
files = [filename]
resolution_experiment = 0.001
if filename.endswith("bigWig") or filename.endswith("bw"):
cell = pyBigWig.open(files[0])
if "chrI" in cell.chroms().keys():
print("Yeast")
#print(cell.chroms())
from repli1d.tools import int_to_roman
#print(int(chromosome))
chromosome = int_to_roman(int(chromosome))
if end is None:
end = int(cell.chroms()['chr%s' % str(chromosome)] / 1000)
#print(start * 1000, end * 1000, int((end - start) / (resolution)))
#Check the end:
endp = end
smaller =False
if end > cell.chroms()["chr%s" % str(chromosome)]/1000:
print("Warning index > end ch")
endp = int(cell.chroms()["chr%s" % str(chromosome)] /1000)
smaller = True
v = [np.nan if s is None else s for s in cell.stats(
"chr%s" % str(chromosome), start * 1000, endp * 1000, nBins=int((endp - start) / resolution))]
if not smaller:
return np.arange(start, end + 100, resolution)[: len(v)], np.array(v)
else:
x = np.arange(start, end + 0.1, resolution)
y = np.zeros_like(x) + np.nan
y[:len(v)] = np.array(v)
return x[:end], y[:end]
if filename.endswith("narrowPeak"):
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
return re_sample(x, y, start, end, resolution)
if filename.endswith("bed"):
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
if "chrI" in set(strain["chrom"]):
print("Yeast")
# print(cell.chroms())
from repli1d.tools import int_to_roman
# print(int(chromosome))
chro = int_to_roman(int(chromosome))
#print(strain)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
#print("La")
#print(data)
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
y = np.ones_like(x)
#print(y)
return re_sample(x, y, start, end, resolution)
if filename.endswith("tagAlign"):
index = ["chrom", "chromStart", "chromEnd", "N", "signalValue","pm"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue) / 1000 # because the value is 1000 ?
return re_sample(x, y, start, end, resolution)
if filename.endswith("csv"):
#index = ["chrom", "chromStart", "chromEnd", "signalValue"]
chro = str(chromosome)
# print(files[0])
strain = pd.read_csv(files[0], sep="\t")
#print(strain.mean())
#print(strain)
#tmpl = "chr%s"
f = 1000 #Needed because start is in kb
if "chrom" not in strain.columns:
strain = pd.read_csv(files[0], sep=",")
# print(strain)
#tmpl = "chrom%s"
f = 1000
# strain.chrom
# sanitize chrom:
def sanitize(ch):
if type(ch) == int:
return "chr%s"%ch
if type(ch) == str:
if "chrom" in ch:
return ch.replace("chrom","chr")
if (not "chr" in ch) and (not "chrom" in ch):
return "chr%s"%ch
return ch
strain["chrom"] = [sanitize(ch) for ch in strain["chrom"]]
#print(strain)
#print(strain.describe())
#print(strain.head())
#print( tmpl % chro)
#print("F",f)
data = strain[(strain.chrom == chro) & (
strain.chromStart >= f * start) & (strain.chromStart < f * end)]
#print("Warning coold shift one")
#print("Data",len(data))
#print(f)
#print(data)
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) #/ f # kb
if signame == "signalValue" and signame not in data.columns:
if "signal" in data.columns:
signame = "signal"
print("Warning changing signalValue to signal")
y = np.array(data[signame])
#print(y)
#print(x[:10])
#print(y[:10])
#print(start,end)
#print(chro,np.mean(y),len(y))
return re_sample(x, y, start * f, end * f, resolution*f)
# print(files)
assert(type(files) == list)
if strain in ["Yeast-MCM"]:
#print(files[0])
index = "/".join(files[0].split("/")[:-1]) + "/index.csv"
index = pd.read_csv(index,sep="\t")
strain = index
exp = pd.read_csv(files[0])
if len(index) != len(exp):
raise ValueError("Wrong size of indexing %i %i"%(len(index) , len(exp)))
strain["signal"] = exp
if "Yeast" in strain:
from repli1d.tools import int_to_roman
chro = int_to_roman(int(chromosome))
else:
chro = chromosome
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000*start) & (strain.chromStart < 1000*end)]
#print(data)
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signal)
if raw:
return x, y
else:
return re_sample(x, y, start, end, resolution)
if experiment in ["MCM","MCMp"]:
#print(chromosome)
files = [f for f in files if "chr%i."%chromosome in f]
#print(files)
files += [f.replace("R1","R2") for f in files]
#print(files)
data = np.sum([np.array(pd.read_csv(f))[:,0] for f in files],axis=0)
x = np.arange(len(data)) # kb
sub = (x> start) & (x < end)
x=x[sub]
        y = np.array(data[sub], dtype=float)
x,y = re_sample(x, y, start, end, resolution)
if experiment == "MCMp":
print(np.nanpercentile(y,50))
peaks, _ = find_peaks(y / np.nanpercentile(y,50),width=1,prominence=1.)
            peaksa = np.zeros_like(y, dtype=bool)
for p in peaks:
peaksa[p]=True
print(len(y),len(peaks),"Peaks")
y[~peaksa]=0
#raise "NT"
return x,y
if experiment == "DNaseI":
if strain == "Cerevisae":
index = ["chrom", "chromStart", "chromEnd", "name", "signalValue"]
print(files[0])
strain = pd.read_csv(files[0], sep="\t", names=index)
chro = str(chromosome)
if oData:
return strain
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
else:
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
if files[0].endswith("narrowPeak"):
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
else:
cell = pyBigWig.open(files[0])
if end is None:
end = cell.chroms()['chr%s' % str(chro)]
v = [np.nan if s is None else s for s in cell.stats(
"chr%s" % str(chro), start * 1000, end * 1000, nBins=int(end - start) // (resolution))]
return np.arange(start, end + 100, resolution)[: len(v)], np.array(v)
if experiment == "Faire":
index = ["chrom", "chromStart", "chromEnd", "name", "score",
"strand", "signalValue", "pValue", "qValue", "peak"]
chro = str(chromosome)
strain = pd.read_csv(files[0], sep="\t", names=index)
data = strain[(strain.chrom == "chr%s" % chro) & (
strain.chromStart > 1000 * start) & (strain.chromStart < 1000 * end)]
if oData:
return data
x = np.array(data.chromStart / 2 + data.chromEnd / 2) / 1000 # kb
y = np.array(data.signalValue)
if "MCM-beda" in experiment:
#print(files[0])
strain = pd.read_csv(files[0],sep="\t")
#strain.MCM = smooth(strain.MCM2_ChEC)
chromosome = {1: "I", 2: "II", 3: "III", 4: "IV", 5: "V", 6: "VI", 7: "VII", 8: "VIII", 9: "IX", 10: "X",
11: "XI", 12: "XII", 13: "XIII", 14: "XIV", 15: "XV", 16: "XVI"}[chromosome]
#print(chromosome)
#print(strain)
data = strain[(strain.chr == "chr%s" % chromosome) & (
strain.coord > 1000 * start) & (strain.coord < 1000 * end)]
#print(data)
if oData:
return data
x = np.array(data.coord) / 1000 # kb
#y = np.array(data.cerevisiae_MCM2ChEC_rep1_library_fragment_size_range_51bpto100bp)
y = np.array(data.cerevisiae_MCM2ChEC_rep1_library_fragment_size_range_all)
if "G4" in experiment:
index = ["chrom", "chromStart", "chromEnd"]
chro = str(chromosome)
if "p" in experiment:
ip = np.argmax(["plus" in f for f in files])
print(files[ip],)
strain = | pd.read_csv(files[ip], sep="\t", names=index) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 11:22:03 2021
@author: Simon
"""
# import necessary packages
import pandas as pd
import time
import networkx as nx
import pickle
import os
import numpy
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from difflib import SequenceMatcher
options = Options()
options.add_argument("--window-size=1920,1200")
driver_p = r'C:\Users\Simon\Downloads\chromedriver'
driver = webdriver.Chrome(executable_path = driver_p)
driver2 = webdriver.Chrome(executable_path = driver_p)
# the leagues and tournaments that were scraped
# all the MAIN tournaments and their challenger/academy leagues
# LCS + NA Academy League
URL_LCS = 'https://lol.gamepedia.com/League_Championship_Series'
# LEC + European Masters
URL_LEC = 'https://lol.gamepedia.com/LoL_European_Championship'
# LCK
URL_LCK = 'https://lol.gamepedia.com/League_of_Legends_Champions_Korea'
# LPL
URL_LPL = 'https://lol.gamepedia.com/LoL_Pro_League'
# CBLOL
URL_CBLOL = 'https://lol.gamepedia.com/Circuit_Brazilian_League_of_Legends'
# OPL (Oceanic Pro League)
URL_OPL = 'https://lol.gamepedia.com/Oceanic_Pro_League'
# PCL (Pacific Championship Series)
URL_PCS = 'https://lol.gamepedia.com/Pacific_Championship_Series'
# TCL (Turkish Champion League)
URL_TCL = 'https://lol.gamepedia.com/Turkish_Championship_League'
# VCS (Vietnam League)
URL_VCS = 'https://lol.gamepedia.com/VCS_A/2017_Season'
# LMS (League of Legends Master Series) Disbanded (Taiwan, Hong Kong, and Macao)
URL_LMS = 'https://lol.fandom.com/wiki/League_of_Legends_Master_Series'
#national_leagues = {}
#national_leagues['LCS/North America'] = URL_LCS # done
#national_leagues['LEC/Europe'] = URL_LEC # done
#national_leagues['LCK/South Korea'] = URL_LCK
#national_leagues['LPL/China'] = URL_LPL
#national_leagues['CBLOL/Brazil'] = URL_CBLOL
#national_leagues['OPL/Oceania'] = URL_OPL
#national_leagues['PCS/PCS'] = URL_PCS
#national_leagues['TCL/Turkey'] = URL_TCL
#national_leagues['VCS/Vietnam'] = URL_VCS
#national_leagues['LMS/Taiwan + Hong Kong + Macao'] = URL_LMS
# LCL (LoL Continental League) # special case
URL_LCL = 'https://lol.gamepedia.com/League_of_Legends_Continental_League'
# LJL (LoL Japan League) # special case
URL_LJL = 'https://lol.gamepedia.com/League_of_Legends_Japan_League'
# LL (Liga Latinoamerica) # special case
URL_LL = 'https://lol.gamepedia.com/Liga_Latinoam%C3%A9rica'
# LST (League of Legends SEA Tour)
URL_LST = 'https://lol.fandom.com/wiki/League_of_Legends_SEA_Tour'
#minor_leagues = {}
#minor_leagues['LCL/Russia'] = URL_LCL
#minor_leagues['LJL/Japan'] = URL_LJL
#minor_leagues['LL/Latin America'] = URL_LL
#minor_leagues['LST/Southeast Asia'] = URL_LST
# MSI (Mid Season Invitationals)
URL_MSI = 'https://lol.fandom.com/wiki/Mid-Season_Invitational'
# Worlds (World Championships)
URL_WORLDS = 'https://lol.fandom.com/wiki/World_Championship'
# IEM (Intel Extreme Masters) + ISL
URL_IEM = 'https://lol.gamepedia.com/Intel_Extreme_Masters'
#national_tourn = {}
#national_tourn['MSI'] = URL_MSI
#national_tourn['WORLDS'] = URL_WORLDS
#national_tourn['IEM'] = URL_IEM
def getSeasonData(URL, name_region, special_case = False, MSI_WORLDS = False, IEM = False):
if name_region == 'LCL/Russia' or name_region == 'LJL/Japan' or name_region == 'LL/Latin America' or name_region == 'LST/Southeast Asia':
special_case = True
if name_region == 'MSI' or name_region == 'WORLDS':
MSI_WORLDS = True
if name_region == 'IEM':
IEM = True
# get the webpage from the link
driver.get(URL)
# some pages could only be scraped by looking at the wikitable
if special_case:
competitions = driver.find_elements_by_class_name('wikitable')
# other pages looking at the hlist would do
else:
competitions = driver.find_elements_by_class_name('hlist')
# get all the hyperlinks to all competitions
# MSI and the Worlds tournaments had a different page setup and required more execution
if MSI_WORLDS:
pre_links = []
# the first two loops acquired all the years of that tournament (2012-2021)
for i in [competitions[2]]:
a_list = i.find_elements_by_tag_name('a')
for j in a_list:
pre_links.append(j.get_attribute('href'))
# these two loops acquired the (sub)-tournaments within that tournament
# e.g. play-ins and the main event
links = []
for link in pre_links:
driver.get(link)
rosters_present = driver.find_elements_by_class_name('tabheader-top')
if rosters_present:
rosters_present = rosters_present[0]
if 'Team Rosters' in rosters_present.text:
links.append(link)
else:
a_list = rosters_present.find_elements_by_tag_name('a')
for m in a_list:
links.append(m.get_attribute('href'))
else:
# IEM was another special case
if IEM:
links = []
for i in competitions:
a_list = i.find_elements_by_tag_name('a')
for j in a_list:
links.append(j.get_attribute('href'))
# any other tournament could be scraped using this bottom code
else:
links = []
for i in competitions[0:2]:
a_list = i.find_elements_by_tag_name('a')
for j in a_list:
links.append(j.get_attribute('href'))
# initialize an empty dataframe, empty edge list, empty collection of gamer tags and player names
teams_df = pd.DataFrame()
edge_list = pd.DataFrame(columns = ['From', 'To', 'n_played'])
gamer_tags = []
player_names = []
# initialize an empty meta dataframe
meta_data = pd.DataFrame(columns = ['gamer_tag', 'full_name', 'role', 'residency', 'country', 'team'])
# start the timer
start_time = time.time()
# go over every hyperlink (every tournament)
for link in links:
# go over every possible team, number from 1 to 30, (max is 17 I believe but just incase)
for team_no in range(1, 30):
team_links = link + "/" + "Team_Rosters?action=edit§ion=" + str(team_no)
            # try except block in case something goes wrong, sometimes data entry is faulty
try:
# get the page of the team in that specific tournament
driver.get(team_links)
# get the box of text with the desired content
textboxs = driver.find_elements_by_class_name('mw-editfont-default')
# turn it into clean text
if textboxs:
clean_text = textboxs[-1].text
skip_crawl_list = ['== League 1 ==', '== League 2 ==', '== League 3 ==', '== League 4 ==', '==Group A==', '==Group B==', '==Group C==', '==Group D==', '==Group E==', '==Group F==', '==Group Stage==']
skip_this_one = False
for i in skip_crawl_list:
if i in clean_text:
skip_this_one = True
if skip_this_one:
continue
if '<!--' and '-->' in clean_text:
text_to_repl = str(clean_text.partition('<!--')[2].partition('-->')[0])
clean_text = clean_text.replace('<!--' + text_to_repl + '-->', '')
# check if the page exists, if it doesn't then it asks you to login to create the page
# or says that you don't have permission
# if this is the case then ignore this 'tournament' as it is not a tournament
login_req_page = driver.find_elements_by_class_name('firstHeading')
break_main = False
for header in login_req_page:
if header.text == 'Login required' or header.text == 'Permission error':
break_main = True
break
if break_main:
break
if textboxs:
team_name = None
# extract the team name
team_name = str(clean_text.partition("{{team|")[2].partition("}}")[0])
# get the lines that contain player info
players = []
                    # add a counter in case data entry is irregular and the while loop would not otherwise terminate
cnt = 0
while 'name=' in clean_text or '{|class="sortable wikitable"' in clean_text:
if 'ExtendedRosterLine' in clean_text:
players.append(clean_text.partition("{{ExtendedRosterLine")[2].partition("}}")[0])
clean_text = clean_text.replace("{{ExtendedRosterLine" + players[-1] + "}}", "")
if 'RosterLineOld' in clean_text:
players.append(clean_text.partition("{{RosterLineOld")[2].partition("}}")[0])
clean_text = clean_text.replace("{{RosterLineOld" + players[-1] + "}}", "")
cnt += 1
if cnt > 100:
break
players = list(set(players))
# get their gamer tag and actual name
player_tags = []
for i in range(len(players)):
gamer_tag = players[i].partition("|player=")[2].partition("|")[0]
name = players[i].partition("|name=")[2].partition("|")[0]
# remove unnecessary spaces
gamer_tag = " ".join(gamer_tag.split())
name = " ".join(name.split())
# go over all the extracted player names
for n in range(len(player_names)):
# if we have a full match then use the name we have
if name == player_names[n]:
name = player_names[n]
gamer_tag = gamer_tags[n]
break
# only add gamer tag and player name to the big list if the name isn't already in there
if name not in player_names:
gamer_tags.append(gamer_tag)
player_names.append(name)
player_tags.append(gamer_tag + " (" + name + ")")
# meta data df order: list(['gamer_tag', 'full_name', 'role', 'residency', 'country'])
if players:
role = " ".join(players[i].partition("|role=")[2].partition("|")[0].split())
residency = " ".join(players[i].partition("|res=")[2].partition("|")[0].split())
country = " ".join(players[i].partition("|flag=")[2].partition("|")[0].split())
if 'role1' in players[i]:
role1 = " ".join(players[i].partition("|role1=")[2].partition("|")[0].split())
role2 = " ".join(players[i].partition("|role2=")[2].partition("|")[0].split())
role = role1 + '/' + role2
temp_df = {'gamer_tag' : gamer_tag, 'full_name' : name, 'role' : role, 'residency' : residency, 'country' : country, 'team' : team_name}
meta_data = meta_data.append(temp_df, ignore_index = True)
# add new data to an existing team name column if the column already exists
if team_name and player_tags:
if team_name in teams_df.columns:
temp_df = pd.DataFrame()
temp_df[team_name] = pd.Series(player_tags)
temp_df[team_name] = temp_df[team_name].astype(str)
teams_df = teams_df.merge(temp_df, left_on = team_name, right_on = team_name, how = 'outer')
# add the team to the dataframe if it isn't in there
else:
teams_df[team_name] = pd.Series(player_tags)
# extract the info about players having played a match together
matching_play_matrix = pd.DataFrame(columns = list(player_tags))
for i in range(len(players)):
if 'r1=' in players[i]:
# sometimes there are two designated roles for a single player
role1_list = list(" ".join(players[i].partition("|r1=")[2].partition("|")[0].replace(',', '')).split())
role2_list = list(" ".join(players[i].partition("|r2=")[2].partition("|")[0].replace(',', '')).split())
role_full_list = []
# if there are 2 roles then the role lists must be merged
if role2_list:
if len(role1_list) != len(role2_list):
if len(role1_list) > len(role2_list):
role1_list = role1_list[0:len(role2_list)]
else:
role2_list = role2_list[0:len(role1_list)]
for y_n in range(len(role1_list)):
if role1_list[y_n] == 'y' or role2_list[y_n] == 'y':
role_full_list.append('y')
# if only r1 exists then we use that
else:
role_full_list = role1_list
matching_play_matrix[player_tags[i]] = pd.Series(role_full_list)
players[i] = players[i].replace("|r1=" + player_tags[i] + "}}", "")
players[i] = players[i].replace("|r2=" + player_tags[i] + "}}", "")
else:
matching_play_matrix[player_tags[i]] = pd.Series(list(" ".join(players[i].partition("|r=")[2].partition("|")[0].replace(',', '')).split()))
# remove duplicates
matching_play_matrix = matching_play_matrix.T.groupby(level=0).first().T
# go over every person twice (for each person match that person with an other person on the same team)
columns_mat = list(matching_play_matrix.columns)
for i in range(len(columns_mat)):
for j in range(i, len(columns_mat)):
# skip if the person is matching with him/herself
if i != j:
# go over every match in the match list
for k in range(len(matching_play_matrix[columns_mat[j]])):
try:
# if they both have a 'y' then it means they played together
if matching_play_matrix[columns_mat[i]][k] == 'y' and matching_play_matrix[columns_mat[j]][k] == 'y':
# create a tuple
tup = [columns_mat[i], columns_mat[j]]
if (((edge_list['From'] == tup[0]) & (edge_list['To'] == tup[1])).any() == False) and (((edge_list['From'] == tup[1]) & (edge_list['To'] == tup[0])).any() == False):
edge_list.loc[len(edge_list) + 1] = [tup[0], tup[1], 1]
# if it does exist then we increase the count
else:
# check if the edge exists
temp = edge_list.loc[(edge_list['From'] == tup[0]) & (edge_list['To'] == tup[1])]
if len(temp) == 1:
edge_list.loc[temp.index[0]]['n_played'] += 1
                                                # if the edge doesn't exist then maybe the reverse edge exists
elif len(temp) == 0:
temp = edge_list.loc[(edge_list['From'] == tup[1]) & (edge_list['To'] == tup[0])]
edge_list.loc[temp.index[0]]['n_played'] += 1
except IndexError:
print(team_links)
continue
except Exception as e:
print(e)
print(team_links)
# remove unnecessary nan values that make the df longer
teams_df = teams_df.apply(lambda x: pd.Series(x.dropna().values))
# remove duplicate rows in meta data
meta_data = meta_data.drop_duplicates()
# add the name of the region as a column
edge_list['Region'] = name_region
filehandler = open('C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/' + name_region.split('/')[0] + '.pckl', 'wb')
pickle.dump([edge_list, teams_df, meta_data], filehandler)
    filehandler.close()
end_time = time.time()
print("Extracting tournament info took: ", round(end_time - start_time, 4), " seconds.")
print("")
return edge_list, teams_df, meta_data
# testing line to see if everything works
edge_list, teams_df, meta_data = getSeasonData(national_leagues['LCK/South Korea'], 'LCK/South Korea')
# 3 loops for the different leagues and tournaments
for nat_league in national_leagues:
edge_list, teams_df, meta_data = getSeasonData(national_leagues[nat_league], nat_league)
for min_league in minor_leagues:
edge_list, teams_df, meta_data = getSeasonData(minor_leagues[min_league], min_league)
for nat_tourn in national_tourn:
edge_list, teams_df, meta_data = getSeasonData(national_tourn[nat_tourn], nat_tourn)
# concatenating all edge lists, meta data and teams data
path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
all_pickles = os.listdir(path)
long_edge_list = pd.DataFrame(columns = ['From', 'To', 'n_played', 'Region'])
long_teams_df = pd.DataFrame()
long_meta_data = pd.DataFrame(columns = ['gamer_tag', 'full_name', 'role', 'residency', 'country', 'team'])
# go over all the pickles
for i in all_pickles:
if i[-5:] == '.pckl':
# drop IEM, too much missing data and incorrect data collection
if (i != 'seen_names.pckl') and (i != 'IEM.pckl'):
file = open(path + i, 'rb')
file_content = pickle.load(file)
            file.close()
long_edge_list = long_edge_list.append(file_content[0], ignore_index = True)
# add new data to an existing team name column if the column already exists
for j in file_content[1].columns:
if j in long_teams_df.columns:
temp_df = pd.DataFrame()
temp_df[j] = pd.Series(file_content[1][j])
temp_df = temp_df.append(pd.DataFrame(long_teams_df[j])).dropna().drop_duplicates()
long_teams_df[j] = pd.Series(list(temp_df[j]))
else:
long_teams_df[j] = pd.Series(file_content[1][j])
long_meta_data = long_meta_data.append(file_content[2], ignore_index = True)
# make sure there are no nan values
long_meta_data.fillna('', inplace = True)
# clean up the meta data information, can check for redundancies in the role residency and country
def removeRedundancies(df, column, collection, lower = False):
# print out the unique column entries
print(set(df[column]))
# loop over all the entries in the column
for i in range(len(df[column])):
# and all the keys in the collection
for j in collection:
# and all the values belonging to that key
for k in collection[j]:
# ensure that it is a string, sometimes nan still pops up
if isinstance(df[column][i], str):
if isinstance(k, str):
if df[column][i].lower() == k.lower():
df[column][i] = j
else:
if df[column][i] == k:
df[column][i] = j
# there may be a dual role, this will occur if there is a '/' present
# only will happen in the role column which will have 'Top' in the collection
if '/' in df[column][i] and 'Top' in collection:
first_role = df[column][i].split('/')[0]
second_role = df[column][i].split('/')[1]
# only acquire a role if it is not empty
if first_role != '':
for j in collection:
for k in collection[j]:
if first_role.lower() == k.lower():
first_role = j
if second_role != '':
for j in collection:
for k in collection[j]:
if second_role.lower() == k.lower():
second_role = j
full_dual_role = first_role + '/' + second_role
df[column][i] = full_dual_role
else:
df[column][i] = first_role
if lower:
df[column][i] = df[column][i].lower()
# we have to go over the column again in case there are duplicate entries as stated below
if 'Top' in collection:
# if the reverse role already exists e.g. full_dual_role = 'Support/Coach'
# and 'Coach/Support' already exists then we also make this entry 'Coach/Support'
for i in range(len(df[column])):
if '/' in df[column][i]:
# reverse the role
reverse_role = (df[column][i].split('/')[1] + '/' + df[column][i].split('/')[0])
if reverse_role in set(df[column][0:i]):
df[column][i] = reverse_role
print()
print(set(df[column]))
# collection of role entries that have to be standardized
role_coll = {}
role_coll['Top'] = ['Top <!--Played as "Yoyo" for the first three rounds-->', 'Top', 't', 'toplane', 'Top <!--李东秀-->', 'Top <!--Played as "noko" in Round 2-->', 'Sub/Top', 'Top Laner', 'top lane']
role_coll['Jungle'] = ['Sub/Jungle', 'Jungle', 'j', 'Jungle <!--申岷升-->', 'J', 'Jungler']
role_coll['Mid'] = ['Mid', 'm', 'Sub/Mid', 'Mid Laner', 'Midlane', 'AP']
role_coll['Ad Carry'] = ['Sub/AD', 'Bot Laner', 'b', 'A', 'Bot', 'AD', 'AD Carry', 'adc']
role_coll['Support'] = ['Support', 'Sub/Support', 'sup', 'S', 'Support <!--Played as "ADC" for the first two rounds-->']
role_coll['Coach'] = ['c', 'C', 'c--> {{ExtendedRosterEnd', 'Coach', 'Assistant Coach', 'Head Coach', 'Strategic Coach', 'coach']
role_coll['Substitute'] = ['Sub', 'sub', 'Substitute']
role_coll['Unknown'] = ['']
role_coll['Top/Ad Carry'] = ['Top, AD']
role_coll['Top/Mid'] = ['Top, AP']
removeRedundancies(long_meta_data, 'role', role_coll)
# collection of region entries that have to be standardized
resid_coll = {}
resid_coll['Unknown/None'] = ['', 'Unrecognized Region', '??', 'None', 'Unknown', 'nan', numpy.nan]
resid_coll['North America'] = ['North America', 'Oceania', 'OCE', 'NA']
resid_coll['Europe'] = ['Europe', 'EU']
resid_coll['South Korea'] = ['South Korea','KR', 'Korea']
resid_coll['China'] = ['CN', 'China']
resid_coll['PCS (Pacific Championship Series)'] = ['PCS', 'Taiwan', 'Taiwan, Hong Kong, and Macao', 'LMS', 'tw', 'SEA', 'TW', 'Southeast Asia']
resid_coll['Brazil'] = ['BR', 'Brazil']
resid_coll['CIS (Commonwealth of Independent States)'] = ['CIS', 'ru', 'Commonwealth of Independent States']
resid_coll['Japan'] = ['Japan', 'JP']
resid_coll['Latin America'] = ['LAT', 'Latin America', 'LAS', 'LAN']
resid_coll['Turkey'] = ['TR', 'Turkey']
resid_coll['Vietnam'] = ['vyn', 'Vietnam', 'VN']
removeRedundancies(long_meta_data, 'residency', resid_coll)
# collection of country entries that have to be standardized
# not all countries were standardized
country_coll = {}
country_coll['China'] = ['China', 'CN', 'cn<!--assumed-->']
country_coll['South Korea'] = ['South Korea', 'Korea', 'KR']
country_coll['Germany'] = ['Germany', 'DE']
country_coll['United Kingdom'] = ['UK', 'United Kingdom']
country_coll['Belgium'] = ['BE', 'Belgium']
country_coll['Netherlands'] = ['Netherlands', 'NL']
country_coll['Vietnam'] = ['Vietnam', 'VN']
country_coll['United States'] = ['United States', 'us', 'usa']
country_coll['Denmark'] = ['Denmark', 'dk']
country_coll['Brazil'] = ['Brazil', 'BR']
country_coll['France'] = ['France', 'FR']
country_coll['Italy'] = ['Italy', 'IT']
country_coll['New Zealand'] = ['NZ', 'New Zealand']
country_coll['Canada'] = ['CA', 'Canada']
country_coll['Sweden'] = ['Sweden']
country_coll['Unknown'] = ['Unknown', 'none', numpy.nan, '']
removeRedundancies(long_meta_data, 'country', country_coll, lower = True)
# drop the duplicates
long_meta_data = long_meta_data.drop_duplicates()
long_meta_data = long_meta_data.reset_index(drop=True)
long_meta_data.fillna('', inplace = True)
# save as csv
#path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
#long_meta_data.to_csv(path_or_buf = path + 'meta_data.csv', index = False)
#long_teams_df.to_csv(path_or_buf = path + 'teams_data.csv', index = False)
#long_edge_list.to_csv(path_or_buf = path + 'final_edge_list.csv', index = False)
############################################################################################
# start the cleaning process of the edge list
path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
# originals
#edge_list = pd.read_csv(path + 'final_edge_list.csv')
#edge_list = edge_list.reset_index(drop = True)
#teams_df = pd.read_csv(path + 'teams_data.csv')
#teams_df = teams_df.reset_index(drop = True)
#meta_data = pd.read_csv(path + 'meta_data.csv')
#meta_data.fillna('', inplace = True)
#meta_data = meta_data.reset_index(drop = True)
# new ones that are semi-cleaned
edge_list = pd.read_csv(path + 'final_refined_edge_list.csv')
edge_list = edge_list.reset_index(drop = True)
teams_df = pd.read_csv(path + 'final_teams_data.csv')
teams_df = teams_df.reset_index(drop = True)
meta_data = pd.read_csv(path + 'final_meta_data.csv')
meta_data.fillna('', inplace = True)
meta_data = meta_data.reset_index(drop = True)
# a similarity metric for strings, used for names here
def similarity(s1, s2):
return SequenceMatcher(None, s1, s2).ratio()
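# Illustrative check: similarity() is SequenceMatcher's ratio, so two spellings of the
# same "tag (name)" string score close to 1 while unrelated players score much lower.
# The example strings are invented.
_demo_close = similarity('SunFox (Alex Kim)', 'sunfox (Alex Kim)')  # close to 1.0
_demo_far = similarity('SunFox (Alex Kim)', 'MoonCat (Ben Lee)')    # much lower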
def evaluateDuplicates(edge_list, meta_deta, teams_df, save = True, check_full_name = False, check_gamer_tag = False):
# load a pickle that contains information about which names we have already compared
file = open(r'C:\Users\Simon\OneDrive\Documents\GitHub\LeagueTeamsNetwork\seen_names.pckl', 'rb')
seen_list = pickle.load(file)
    file.close()
# get a regular list from the edge list dataframe
edge_list_nondf = edge_list.values.tolist()
# turn the edge list into a graph so we can get a list of nodes
G = nx.Graph()
for i in edge_list_nondf:
nx.add_path(G, (i[0], i[1]))
node_list = list(G.nodes())
# start the timer
start_time = time.time()
# go over all the nodes and find new instances of dual players
empty_dict = {}
for i in range(len(node_list)):
if i % 1000 == 0:
print('I am at' + str(i))
print('This took me this long', round(time.time() - start_time, 4))
for j in range(i, len(node_list)):
if i != j:
# extract the full name of each person
full_name1 = node_list[i].partition('(')[2]
full_name2 = node_list[j].partition('(')[2]
# extract the gamer tag of each person
gamer_tag1 = node_list[i].split('(')[0]
gamer_tag2 = node_list[j].split('(')[0]
if check_full_name:
empty_dict[(node_list[i], node_list[j])] = similarity(full_name1, full_name2)
elif check_gamer_tag:
empty_dict[(node_list[i], node_list[j])] = similarity(gamer_tag1, gamer_tag2)
else:
empty_dict[(node_list[i], node_list[j])] = similarity(node_list[i], node_list[j])
# copy the original edge list and start two counters
edge_list_copy = edge_list.copy(deep = True)
meta_data_copy = meta_data.copy(deep = True)
teams_df_copy = teams_df.copy(deep = True)
# cnt is used for indexing
cnt = 0
# new_cnt is used as to stop the main loop
new_cnt = 0
# sort the list based on the names their similarity
sorted_list = list(sorted(empty_dict.items(), key = lambda item: item[1], reverse = True))
new_players = []
new_full_names = []
new_gamer_tags = []
old_players = []
old_full_names = []
old_gamer_tags = []
while True:
i = sorted_list[cnt]
try:
# if we haven't evaluated the combination of names before then we will evaluate
if [i[0][0], i[0][1]] not in seen_list:
# this extracts the full name without the ( )
full_name1 = i[0][0].partition('(')[2][:-1]
# this removes leading and trailing white spaces
full_name1 = " ".join(full_name1.split())
full_name2 = i[0][1].partition('(')[2][:-1]
full_name2 = " ".join(full_name2.split())
gamer_tag1 = i[0][0].split('(')[0]
gamer_tag1 = " ".join(gamer_tag1.split())
gamer_tag2 = i[0][1].split('(')[0]
gamer_tag2 = " ".join(gamer_tag2.split())
# first if statement ignores any entries that have too much missing data
if ((full_name1 != '') or (full_name2 != '')) and ((gamer_tag1 != '') or (gamer_tag2 != '')):
# if the gamer tags are the same (uncapitalized) and the names are the same
# then we automatically pick the left name
if (gamer_tag1.lower() == gamer_tag2.lower()) and (full_name1 == full_name2):
new_players.append(i[0][0])
new_full_names.append(full_name1)
new_gamer_tags.append(gamer_tag1)
old_players.append(i[0][1])
old_full_names.append(full_name2)
old_gamer_tags.append(gamer_tag2)
else:
# open 2 webpages so we can more easily look at the information available
driver.get('https://lol.fandom.com/wiki/Special:Search?fulltext=1&query=' + gamer_tag1)
driver2.get('https://lol.fandom.com/wiki/Special:Search?fulltext=1&query=' + gamer_tag2)
# left if the left name should be adhered,
# right if the right name should be adhered
print(i)
print("Same person?, left/right or no")
answer = input()
# if it is the same person then we change every occurrence of the left/right name to the right/left name
# e.g. (('mcscrag (<NAME>)', 'mcscrag (<NAME>)'), 0.9130434782608695)
# clearly the same person, high similarity metric
# change every instance of 'mcscrag (<NAME>)' to (('mcscrag (<NAME>)' if left was typed
# if the answer is left, then change all the 2nd names of the comparison to the first names
if answer.lower() == 'left':
new_players.append(i[0][0])
new_full_names.append(full_name1)
new_gamer_tags.append(gamer_tag1)
old_players.append(i[0][1])
old_full_names.append(full_name2)
old_gamer_tags.append(gamer_tag2)
if answer.lower() == 'right':
new_players.append(i[0][1])
new_full_names.append(full_name2)
new_gamer_tags.append(gamer_tag2)
old_players.append(i[0][0])
old_full_names.append(full_name1)
old_gamer_tags.append(gamer_tag1)
if answer.lower() == 'no':
seen_list.append([i[0][0], i[0][1]])
new_cnt += 1
cnt += 1
if new_cnt >= 10:
break
except:
continue
# now we can do all the changes that we've saved
for ans in range(len(new_players)):
newest_player = new_players[ans]
newest_full_name = new_full_names[ans]
newest_gamer_tag = new_gamer_tags[ans]
oldest_player = old_players[ans]
oldest_full_name = old_full_names[ans]
oldest_gamer_tag = old_gamer_tags[ans]
# go over the edge list and make changes
for j in range(len(edge_list_copy['From'])):
if edge_list_copy['From'][j] == oldest_player:
edge_list_copy.loc[j, 'From'] = newest_player
if edge_list_copy['To'][j] == oldest_player:
edge_list_copy.loc[j, 'To'] = newest_player
# go over the meta data and make changes
for j in range(len(meta_data_copy['gamer_tag'])):
if (" ".join(meta_data_copy['gamer_tag'][j].split()) == oldest_gamer_tag) and (" ".join(meta_data_copy['full_name'][j].split()) == oldest_full_name):
meta_data_copy.loc[j, 'gamer_tag'] = newest_gamer_tag
meta_data_copy.loc[j, 'full_name'] = newest_full_name
# change all instances in the teams df too
for j in range(len(teams_df_copy.columns)):
for k in range(len(teams_df_copy.iloc[:, j])):
                if teams_df_copy.iloc[k, j] == oldest_player:
                    # assign through a single .iloc call so the update hits the original
                    # frame rather than a temporary copy (avoids chained indexing)
                    teams_df_copy.iloc[k, j] = newest_player
# save the seen names list
if save:
filehandler = open(r'C:\Users\Simon\OneDrive\Documents\GitHub\LeagueTeamsNetwork\seen_names.pckl', 'wb')
pickle.dump(seen_list, filehandler)
        filehandler.close()
# if there are duplicates in the dataframe as a result of the renaming process
# then we have to group by first to tally up the number of played matches
# then we can remove the duplicates
edge_list_copy['n_played'] = edge_list_copy.groupby(['From', 'To', 'Region'])['n_played'].transform('sum')
edge_list_copy = edge_list_copy.drop_duplicates(subset = ['From', 'To', 'Region'])
edge_list_copy = edge_list_copy.reset_index(drop = True)
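    # Illustrative note (not from the original script): on a toy frame such as
    #   pd.DataFrame({'From': ['A', 'A'], 'To': ['B', 'B'],
    #                 'Region': ['EU', 'EU'], 'n_played': [2, 3]})
    # the transform('sum') above overwrites both rows of the duplicate group with the
    # group total (5), and drop_duplicates then keeps a single (A, B, EU, 5) row.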
# drop duplicates in the meta data
meta_data_copy = meta_data_copy.drop_duplicates()
meta_data_copy = meta_data_copy.reset_index(drop = True)
new_teams_df = pd.DataFrame(columns = list(teams_df_copy.columns))
# drop duplicates in the teams dataframe
for j in teams_df_copy.columns:
new_column = pd.DataFrame(teams_df_copy.drop_duplicates(subset = [j])[j])
new_teams_df[list(new_column.columns)[0]] = pd.Series(new_column[list(new_column.columns)[0]])
new_teams_df = new_teams_df.reset_index(drop = True)
return edge_list_copy, meta_data_copy, new_teams_df
edge_list_cleaner, meta_data_cleaner, teams_df_cleaner = evaluateDuplicates(edge_list, meta_data, teams_df)
edge_list = edge_list_cleaner
meta_data = meta_data_cleaner
teams_df = teams_df_cleaner
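# Hedged sketch (not part of the original script): a quick sanity check that could be
# run after the cleaning step above; the column names 'From', 'To' and 'Region' are
# assumed from the edge list built earlier in this script.
def check_no_duplicate_edges(edge_list_df):
    """Return True if every (From, To, Region) combination appears exactly once."""
    return not edge_list_df.duplicated(subset=['From', 'To', 'Region']).any()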
# save as csv
#path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
#meta_data.to_csv(path_or_buf = path + 'final_meta_data.csv', index = False)
#teams_df.to_csv(path_or_buf = path + 'final_teams_data.csv', index = False)
#edge_list.to_csv(path_or_buf = path + 'final_refined_edge_list.csv', index = False)
#
# now that we've evaluated duplicate names we should also clean up the edge list:
# after renaming, an edge that used to read B* - A may now read B - A, which is the
# same (undirected) edge as A - B, so one of the two entries has to be removed
def edgeListDuplicates(edge_list):
# make a copy of the original edge list
edge_list_copy = edge_list.copy(deep = True)
edge_list_copy = edge_list_copy.reset_index(drop = True)
duplicate_entries_index = []
# start the timer
start = time.time()
# go over the edge list, comparing every entry to every other entry
for i in range(len(edge_list_copy)):
if (i % 1000 == 0):
print(i)
print('Time taken: ', round(time.time() - start, 4))
current_row = edge_list_copy.iloc[i]
for j in range(i, len(edge_list_copy)):
if i != j:
# find out if we have the same edge but reversed
if (current_row['From'] == edge_list_copy['To'][j]) and (current_row['To'] == edge_list_copy['From'][j]):
# find out if that edge is in the same league
if current_row['Region'] == edge_list_copy['Region'][j]:
# then we sum up the total matches played
edge_list_copy.loc[i, 'n_played'] += edge_list_copy['n_played'][j]
# and we remove the duplicate edge
duplicate_entries_index.append(j)
edge_list_copy = edge_list_copy.drop(duplicate_entries_index)
return edge_list_copy
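# Hedged sketch (not part of the original script): a vectorised alternative to the
# quadratic scan above. It canonicalises each undirected edge by sorting its two
# endpoints and then sums n_played per (From, To, Region); the column names are
# assumed to match the edge list built earlier in this script.
def edge_list_duplicates_fast(edge_list):
    df = edge_list.copy(deep=True)
    pairs = [tuple(sorted(p)) for p in zip(df['From'], df['To'])]
    df['From'] = [p[0] for p in pairs]
    df['To'] = [p[1] for p in pairs]
    return df.groupby(['From', 'To', 'Region'], as_index=False)['n_played'].sum()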
test = edgeListDuplicates(edge_list)
path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
#test = test.reset_index(drop = True)
#test.to_csv(path_or_buf = path + 'league_2012_2021_edge_list.csv', index = False)
# testing in between to check degrees
path = 'C:/Users/Simon/OneDrive/Documents/GitHub/LeagueTeamsNetwork/'
edge_list = pd.read_csv(path + 'league_2012_2021_edge_list.csv')
import os
import warnings
import unittest
import numpy as np
import pandas as pd
from pyseer.lmm import initialise_lmm
from pyseer.lmm import fit_lmm
from pyseer.lmm import fit_lmm_block
from pyseer.classes import LMM
DATA_DIR = 'tests'
P_BINARY = os.path.join(DATA_DIR, 'subset.pheno')
S = os.path.join(DATA_DIR, 'similarity_subset.tsv.gz')
COV = os.path.join(DATA_DIR, 'covariates.txt')
C = os.path.join(DATA_DIR, 'lmm_cache.npz')
K = os.path.join(DATA_DIR, 'unit_tests_data', 'k.txt')
M = os.path.join(DATA_DIR, 'unit_tests_data', 'm.txt')
def eq_lmm(s1, s2):
"""Test whether two LMM objects are the same"""
diff = set()
for p in ['kmer', 'pattern',
'kstrains', 'nkstrains', 'notes',
'prefilter', 'filter']:
x = getattr(s1, p)
y = getattr(s2, p)
if x != y:
diff.add(p)
for p in ['af', 'prep', 'pvalue',
'kbeta', 'bse', 'frac_h2']:
x = getattr(s1, p)
y = getattr(s2, p)
if not np.isfinite(x) and not np.isfinite(y):
continue
if np.isfinite(x) and not np.isfinite(y):
diff.add(p)
if np.isfinite(y) and not np.isfinite(x):
diff.add(p)
if abs(x - y) > 1E-7:
diff.add(p)
if s1.max_lineage is not None and s2.max_lineage is not None:
p = 'max_lineage'
x = getattr(s1, p)
y = getattr(s2, p)
if not np.isfinite(x) and not np.isfinite(y):
pass
else:
if np.isfinite(x) and not np.isfinite(y):
diff.add(p)
if np.isfinite(y) and not np.isfinite(x):
diff.add(p)
if x != y:
diff.add(p)
elif s1.max_lineage is None and s2.max_lineage is None:
pass
else:
diff.add('max_lineage')
return diff
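# Illustrative usage (not part of the original test module): eq_lmm returns the set of
# attribute names that differ between two LMM results, so the tests below assert
#   self.assertEqual(eq_lmm(result_var, expected_var), set())
# i.e. an empty set means the two results are considered equal.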
class TestInitialiseLmm(unittest.TestCase):
def test_initialise_lmm(self):
p = pd.read_table(P_BINARY,
index_col=0)['binary']
cov = pd.DataFrame([])
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
35.7033778)
self.assertAlmostEqual(z, 0.0)
# covariates
cov = pd.read_table(COV, index_col=0,
header=None)
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
34.55403861)
self.assertAlmostEqual(z, 0.0)
# sample names not matching
b = pd.Series(np.random.random(100),
index=['test_%d' % x for x in range(100)])
with warnings.catch_warnings():
warnings.simplefilter('ignore')
x, y, z = initialise_lmm(b, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 0)
self.assertTrue(not np.isfinite(y.findH2()['nLL'][0]))
self.assertAlmostEqual(z, 0.0)
# save cache
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=C)
# load cache
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=C,
lmm_cache_out=None)
self.assertEqual(x.shape[0], 50)
self.assertAlmostEqual(y.findH2()['nLL'][0],
34.55403861)
self.assertAlmostEqual(z, 0.0)
# different sizes
b = pd.Series(np.random.random(10),
index=['test_%d' % x for x in range(10)])
with self.assertRaises(SystemExit) as cm:
initialise_lmm(b, cov, S,
lmm_cache_in=C,
lmm_cache_out=None)
self.assertEqual(cm.exception.code, 1)
# matching lineage samples
cov = pd.DataFrame([])
s = pd.read_table(S, index_col=0)
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None,
lineage_samples=s.index)
# non-matching lineage samples
with self.assertRaises(SystemExit) as cm:
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None,
lineage_samples=s.index[:-1])
class TestFitLmm(unittest.TestCase):
def test_fit_lmm(self):
p = pd.read_table(P_BINARY,
index_col=0)['binary']
cov = pd.DataFrame([])
x, y, z = initialise_lmm(p, cov, S,
lmm_cache_in=None,
lmm_cache_out=None)
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
None,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# af filtering
var = LMM('variant',
None,
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', None, 0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['af-filter']), True, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# bad-chisq
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
bad_k = np.array([1]*5 + [0]*(p.shape[0]-5))
variants = [(var, p.values, bad_k),]
variant_mat = bad_k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.2544505826463333,
0.263519965703956,
0.2666666666666663,
0.2357022603955158,
0.16116459280507586,
None,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['bad-chisq']), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# pre-filtering
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 0.05, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['pre-filtering-failed']), True, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lrt-filtering
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
results = fit_lmm(y, z,
variants, variant_mat,
False, [], cov,
False, 1, 0.05)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
np.nan, np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(['lrt-filtering-failed']), False, True),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lineage fit
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
m = np.loadtxt(M)[:p.shape[0]]
results = fit_lmm(y, z,
variants, variant_mat,
True, m, cov,
False, 1, 1)
test_results = [LMM('variant', 'pattern', 0.2,
0.28252075514059294,
0.2920532220978148,
0.1513687600644123,
0.1420853593711293,
0.1519818397711344,
0,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), False, False),]
for var, test_var in zip(results, test_results):
self.assertEqual(eq_lmm(var, test_var), set())
# lineage fit + covariates
var = LMM('variant',
'pattern',
0.2,
np.nan, np.nan, np.nan,
np.nan, np.nan, np.nan,
['k%d' % x
for x in range(p[p == 1].shape[0])],
['nk%d' % x
for x in range(p[p == 0].shape[0])],
set(), True, True)
k = np.loadtxt(K)[:p.shape[0]]
variants = [(var, p.values, k),]
variant_mat = k.reshape(-1, 1)
m = np.loadtxt(M)[:p.shape[0]]
        cov = pd.read_table(COV, index_col=0, header=None)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
):
assert isinstance(block_partitions_object, BaseFrameManager)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
self._is_transposed = int(is_transposed)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
map_func = self._prepare_method(
self._build_mapreduce_func(lambda df: df.dtypes)
)
reduce_func = self._build_mapreduce_func(dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
self._dtype_cache = (
self._full_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
)
else:
self._dtype_cache = pandas.Series([])
# reset name to None because we use "__reduced__" internally
self._dtype_cache.name = None
return self._dtype_cache
dtypes = property(_get_dtype)
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(prefix) + str(x))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(prefix) + str(x))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(x) + str(suffix))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(x) + str(suffix))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(),
self.index.copy(),
self.columns.copy(),
self._dtype_cache,
self._is_transposed,
)
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
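    # For reference (illustrative, not from the original source), the pandas-level
    # behaviour being mirrored here is:
    #   pandas.concat([pandas.DataFrame({'a': [1]}), pandas.DataFrame({'b': [2]})])
    # which yields columns ['a', 'b'] with NaN wherever a column was missing; that
    # NaN fill is the part that could be delayed rather than computed eagerly.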
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
            how: The type of join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
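    # Example of the underlying pandas call used above (illustrative, not from the
    # original source):
    #   pandas.Index(['a', 'b']).join(pandas.Index(['b', 'c']), how='outer')
    # returns Index(['a', 'b', 'c'], dtype='object').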
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
if self._is_transposed:
# If others are transposed, we handle that behavior correctly in
# `copartition`, but it is not handled correctly in the case that `self` is
# transposed.
return (
self.transpose()
._append_list_of_managers(
[o.transpose() for o in others], axis ^ 1, **kwargs
)
.transpose()
)
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
new_self, to_append, joined_axis = self.copartition(
axis ^ 1,
others,
join,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of QueryCompiler objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
new_self, to_join, joined_index = self.copartition(
0,
others,
how,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
if len(others) == 1:
others_proxy = pandas.DataFrame(columns=others[0].columns)
else:
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
self_proxy = pandas.DataFrame(columns=self.columns)
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Copartition
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
"""Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
"""Create a function based on the old index and axis.
Args:
old_idx: The old index/columns
Returns:
A function that will be run in each partition.
"""
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = compute_reindex(right_old_idxes[i])
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis,
other[i].data,
reindex_left,
reindex_right,
other[i]._is_transposed,
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object.
"""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To/From Pandas
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the QueryCompiler.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
@classmethod
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManger object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockParitions object to store partitions
Returns:
Returns QueryCompiler containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
# END To/From Pandas
# To NumPy
def to_numpy(self):
"""Converts Modin DataFrame to NumPy Array.
Returns:
NumPy Array of the QueryCompiler.
"""
arr = self.data.to_numpy(is_transposed=self._is_transposed)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def _inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
            how_to_join: The type of join to make (e.g. right, outer).
Returns:
New QueryCompiler with new data and index.
"""
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, sort=False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
        # There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
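    # Illustrative pandas-level behaviour (not from the original source): the
    # "outer join + op" contract means that, for example,
    #   pandas.Series([1], index=['a']).add(pandas.Series([2], index=['b']))
    # yields NaN for both 'a' and 'b', because each label is missing on one side.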
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New QueryCompiler with new data and index.
"""
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()._get_axis_number(axis) if axis is not None else 0
if isinstance(other, type(self)):
# If this QueryCompiler is transposed, copartition can sometimes fail to
# properly co-locate the data. It does not fail if other is transposed, so
# if this object is transposed, we will transpose both and do the operation,
# then transpose at the end.
if self._is_transposed:
return (
self.transpose()
._inter_manager_operations(
other.transpose(), "outer", lambda x, y: func(x, y, **kwargs)
)
.transpose()
)
return self._inter_manager_operations(
other, "outer", lambda x, y: func(x, y, **kwargs)
)
else:
return self._scalar_operations(
axis, other, lambda df: func(df, other, **kwargs)
)
def binary_op(self, op, other, **kwargs):
"""Perform an operation between two objects.
Note: The list of operations is as follows:
- add
- eq
- floordiv
- ge
- gt
- le
- lt
- mod
- mul
- ne
- pow
- rfloordiv
- rmod
- rpow
- rsub
- rtruediv
- sub
- truediv
- __and__
- __or__
- __xor__
Args:
op: The operation. See list of operations above
other: The object to operate against.
Returns:
A new QueryCompiler object.
"""
func = getattr(pandas.DataFrame, op)
return self._inter_df_op_handler(func, other, **kwargs)
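    # Example call (illustrative, not from the original source):
    #   query_compiler.binary_op("add", other_query_compiler, axis=0)
    # resolves to pandas.DataFrame.add via the getattr lookup above.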
def clip(self, lower, upper, **kwargs):
kwargs["upper"] = upper
kwargs["lower"] = lower
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.clip, **kwargs)
if is_list_like(lower) or is_list_like(upper):
df = self._map_across_full_axis(axis, func)
return self.__constructor__(df, self.index, self.columns)
return self._scalar_operations(axis, lower or upper, func)
def update(self, other, **kwargs):
"""Uses other manager to update corresponding values in this manager.
Args:
other: The other manager.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
other, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
def update_builder(df, other, **kwargs):
# This is because of a requirement in Arrow
df = df.copy()
df.update(other, **kwargs)
return df
return self._inter_df_op_handler(update_builder, other, **kwargs)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
first_pass = cond._inter_manager_operations(
other, "left", where_builder_first_pass
)
final_pass = self._inter_manager_operations(
first_pass, "left", where_builder_second_pass
)
return self.__constructor__(final_pass.data, self.index, self.columns)
else:
axis = kwargs.get("axis", 0)
# Rather than serializing and passing in the index/columns, we will
# just change this index to match the internal index.
if isinstance(other, pandas.Series):
other.index = pandas.RangeIndex(len(other.index))
def where_builder_series(df, cond):
if axis == 0:
df.index = pandas.RangeIndex(len(df.index))
cond.index = pandas.RangeIndex(len(cond.index))
else:
df.columns = pandas.RangeIndex(len(df.columns))
cond.columns = pandas.RangeIndex(len(cond.columns))
return df.where(cond, other, **kwargs)
reindexed_self, reindexed_cond, a = self.copartition(
axis, cond, "left", False
)
# Unwrap from list given by `copartition`
reindexed_cond = reindexed_cond[0]
new_data = reindexed_self.inter_data_operation(
axis, lambda l, r: where_builder_series(l, r), reindexed_cond
)
return self.__constructor__(new_data, self.index, self.columns)
# END Inter-Data operations
# Single Manager scalar operations (e.g. add to scalar, list of scalars)
def _scalar_operations(self, axis, scalar, func):
"""Handler for mapping scalar operations across a Manager.
Args:
axis: The axis index object to execute the function on.
scalar: The scalar value to map.
func: The function to use on the Manager with the scalar.
Returns:
A new QueryCompiler with updated data and new index.
"""
if isinstance(scalar, (list, np.ndarray, pandas.Series)):
new_index = self.index if axis == 0 else self.columns
def list_like_op(df):
if axis == 0:
df.index = new_index
else:
df.columns = new_index
return func(df)
new_data = self._map_across_full_axis(
axis, self._prepare_method(list_like_op)
)
if axis == 1 and isinstance(scalar, pandas.Series):
new_columns = self.columns.union(
[label for label in scalar.index if label not in self.columns]
)
else:
new_columns = self.columns
return self.__constructor__(new_data, self.index, new_columns)
else:
return self._map_partitions(self._prepare_method(func))
# END Single Manager scalar operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manger.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
if self._is_transposed:
return (
self.transpose()
.reindex(axis=axis ^ 1, labels=labels, **kwargs)
.transpose()
)
# To reindex, we need a function that will be shipped to each of the
# partitions.
        def reindex_builder(df, axis, old_labels, new_labels, **kwargs):
if axis:
while len(df.columns) < len(old_labels):
df[len(df.columns)] = np.nan
df.columns = old_labels
new_df = df.reindex(columns=new_labels, **kwargs)
# reset the internal columns back to a RangeIndex
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
else:
while len(df.index) < len(old_labels):
df.loc[len(df.index)] = np.nan
df.index = old_labels
new_df = df.reindex(index=new_labels, **kwargs)
# reset the internal index back to a RangeIndex
new_df.reset_index(inplace=True, drop=True)
return new_df
old_labels = self.columns if axis else self.index
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
func = self._prepare_method(
            lambda df: reindex_builder(df, axis, old_labels, labels, **kwargs)
)
# The reindex can just be mapped over the axis we are modifying. This
# is for simplicity in implementation. We specify num_splits here
# because if we are repartitioning we should (in the future).
# Additionally this operation is often followed by an operation that
# assumes identical partitioning. Internally, we *may* change the
# partitioning during a map across a full axis.
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, new_index, new_columns)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
new_index = pandas.RangeIndex(len(self.index))
if not drop:
if isinstance(self.index, pandas.MultiIndex):
# TODO (devin-petersohn) ensure partitioning is properly aligned
new_column_names = pandas.Index(self.index.names)
new_columns = new_column_names.append(self.columns)
index_data = pandas.DataFrame(list(zip(*self.index))).T
result = self.data.from_pandas(index_data).concat(1, self.data)
return self.__constructor__(result, new_index, new_columns)
else:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_columns = self.columns.insert(0, new_column_name)
result = self.insert(0, new_column_name, self.index)
return self.__constructor__(result.data, new_index, new_columns)
else:
# The copies here are to ensure that we do not give references to
# this object for the purposes of updates.
return self.__constructor__(
self.data.copy(), new_index, self.columns.copy(), self._dtype_cache
)
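    # Naming fallback illustrated (not from the original source): pandas applies the
    # same rule, e.g. pandas.DataFrame({'index': [1]}).reset_index() stores the old
    # index in a column called 'level_0' because the name 'index' is already taken.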
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
new_data = self.data.transpose(*args, **kwargs)
# Switch the index and columns and transpose the data within the blocks.
new_manager = self.__constructor__(
new_data, self.columns, self.index, is_transposed=self._is_transposed ^ 1
)
return new_manager
# END Transpose
# Full Reduce operations
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler, which will be handled in the front end.
def _full_reduce(self, axis, map_func, reduce_func=None):
"""Apply function that will reduce the data to a Pandas Series.
Args:
axis: 0 for columns and 1 for rows. Default is 0.
map_func: Callable function to map the dataframe.
reduce_func: Callable function to reduce the dataframe. If none,
then apply map_func twice.
Return:
A new QueryCompiler object containing the results from map_func and
reduce_func.
"""
if reduce_func is None:
reduce_func = map_func
mapped_parts = self.data.map_across_blocks(map_func)
full_frame = mapped_parts.map_across_full_axis(axis, reduce_func)
if axis == 0:
columns = self.columns
return self.__constructor__(
full_frame, index=["__reduced__"], columns=columns
)
else:
index = self.index
return self.__constructor__(
full_frame, index=index, columns=["__reduced__"]
)
def _build_mapreduce_func(self, func, **kwargs):
def _map_reduce_func(df):
series_result = func(df, **kwargs)
if kwargs.get("axis", 0) == 0 and isinstance(series_result, pandas.Series):
# In the case of axis=0, we need to keep the shape of the data
# consistent with what we have done. In the case of a reduction, the
# data for axis=0 should be a single value for each column. By
# transposing the data after we convert to a DataFrame, we ensure that
# the columns of the result line up with the columns from the data.
# axis=1 does not have this requirement because the index already will
# line up with the index of the data based on how pandas creates a
# DataFrame from a Series.
return pandas.DataFrame(series_result).T
return pandas.DataFrame(series_result)
return _map_reduce_func
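    # Why the transpose above matters (illustrative, not from the original source):
    # for axis=0 a reduction such as pandas.DataFrame({'a': [1, 2]}).sum() returns a
    # Series indexed by column name, and pandas.DataFrame(series).T turns it back into
    # a one-row frame whose columns line up with the original columns.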
def count(self, **kwargs):
"""Counts the number of non-NaN objects for each column or row.
Return:
A new QueryCompiler object containing counts of non-NaN objects from each
column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().count(**kwargs)
axis = kwargs.get("axis", 0)
map_func = self._build_mapreduce_func(pandas.DataFrame.count, **kwargs)
reduce_func = self._build_mapreduce_func(pandas.DataFrame.sum, **kwargs)
return self._full_reduce(axis, map_func, reduce_func)
def dot(self, other):
"""Computes the matrix multiplication of self and other.
Args:
other: The other query compiler or other array-like to matrix
multiply with self.
Returns:
Returns the result of the matrix multiply.
"""
if self._is_transposed:
return self.transpose().dot(other).transpose()
def map_func(df, other=other):
if isinstance(other, pandas.DataFrame):
other = other.squeeze()
result = df.squeeze().dot(other)
if is_list_like(result):
return pandas.DataFrame(result)
else:
return pandas.DataFrame([result])
if isinstance(other, BaseQueryCompiler):
if len(self.columns) > 1 and len(other.columns) == 1:
# If self is DataFrame and other is a series, we take the transpose
# to copartition along the columns.
new_self = self
other = other.transpose()
axis = 1
new_index = self.index
elif len(self.columns) == 1 and len(other.columns) > 1:
# If self is series and other is a Dataframe, we take the transpose
# to copartition along the columns.
new_self = self.transpose()
axis = 1
new_index = self.index
elif len(self.columns) == 1 and len(other.columns) == 1:
# If both are series, then we copartition along the rows.
new_self = self
axis = 0
new_index = ["__reduce__"]
new_self, list_of_others, _ = new_self.copartition(
axis, other, "left", False
)
other = list_of_others[0]
reduce_func = self._build_mapreduce_func(
pandas.DataFrame.sum, axis=axis, skipna=False
)
new_data = new_self.groupby_reduce(axis, other, map_func, reduce_func)
else:
if len(self.columns) == 1:
axis = 0
new_index = ["__reduce__"]
else:
axis = 1
new_index = self.index
new_data = self.data.map_across_full_axis(axis, map_func)
return self.__constructor__(new_data, index=new_index, columns=["__reduced__"])
def max(self, **kwargs):
"""Returns the maximum value for each column or row.
Return:
A new QueryCompiler object with the maximum values from each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().max(**kwargs)
mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.max, **kwargs)
return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
def mean(self, **kwargs):
"""Returns the mean for each numerical column or row.
Return:
A new QueryCompiler object containing the mean from each numerical column or
row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().mean(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
sums = self.sum(**kwargs)
counts = self.count(axis=axis, numeric_only=kwargs.get("numeric_only", None))
if sums._is_transposed and counts._is_transposed:
sums = sums.transpose()
counts = counts.transpose()
result = sums.binary_op("truediv", counts, axis=axis)
return result.transpose() if axis == 0 else result
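    # Decomposition illustrated (not from the original source): mean is computed as
    # sum / count so each step stays a map-reduce; for a column holding
    # [1.0, 3.0, NaN] the pieces report sum=4.0 and count=2, giving 2.0, which matches
    # pandas.Series([1.0, 3.0, float('nan')]).mean().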
def min(self, **kwargs):
"""Returns the minimum from each column or row.
Return:
A new QueryCompiler object with the minimum value from each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().min(**kwargs)
mapreduce_func = self._build_mapreduce_func(pandas.DataFrame.min, **kwargs)
return self._full_reduce(kwargs.get("axis", 0), mapreduce_func)
def _process_sum_prod(self, func, **kwargs):
"""Calculates the sum or product of the DataFrame.
Args:
func: Pandas func to apply to DataFrame.
ignore_axis: Whether to ignore axis when raising TypeError
Return:
A new QueryCompiler object with sum or prod of the object.
"""
axis = kwargs.get("axis", 0)
min_count = kwargs.get("min_count", 0)
def sum_prod_builder(df, **kwargs):
return func(df, **kwargs)
builder_func = self._build_mapreduce_func(sum_prod_builder, **kwargs)
if min_count <= 1:
return self._full_reduce(axis, builder_func)
else:
return self._full_axis_reduce(axis, builder_func)
def prod(self, **kwargs):
"""Returns the product of each numerical column or row.
Return:
A new QueryCompiler object with the product of each numerical column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().prod(**kwargs)
return self._process_sum_prod(pandas.DataFrame.prod, **kwargs)
def sum(self, **kwargs):
"""Returns the sum of each numerical column or row.
Return:
A new QueryCompiler object with the sum of each numerical column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().sum(**kwargs)
return self._process_sum_prod(pandas.DataFrame.sum, **kwargs)
def _process_all_any(self, func, **kwargs):
"""Calculates if any or all the values are true.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
axis = kwargs.get("axis", 0)
axis = 0 if axis is None else axis
kwargs["axis"] = axis
builder_func = self._build_mapreduce_func(func, **kwargs)
return self._full_reduce(axis, builder_func)
def all(self, **kwargs):
"""Returns whether all the elements are true, potentially over an axis.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
if self._is_transposed:
# Pandas ignores on axis=1
kwargs["bool_only"] = False
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().all(**kwargs)
return self._process_all_any(lambda df, **kwargs: df.all(**kwargs), **kwargs)
def any(self, **kwargs):
"""Returns whether any the elements are true, potentially over an axis.
Return:
A new QueryCompiler object containing boolean values or boolean.
"""
if self._is_transposed:
if kwargs.get("axis", 0) == 1:
# Pandas ignores on axis=1
kwargs["bool_only"] = False
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().any(**kwargs)
return self._process_all_any(lambda df, **kwargs: df.any(**kwargs), **kwargs)
# END Full Reduce operations
# Map partitions operations
# These operations are operations that apply a function to every partition.
def _map_partitions(self, func, new_dtypes=None):
return self.__constructor__(
self.data.map_across_blocks(func), self.index, self.columns, new_dtypes
)
def abs(self):
func = self._prepare_method(pandas.DataFrame.abs)
return self._map_partitions(func, new_dtypes=self.dtypes.copy())
def applymap(self, func):
remote_func = self._prepare_method(pandas.DataFrame.applymap, func=func)
return self._map_partitions(remote_func)
def invert(self):
remote_func = self._prepare_method(pandas.DataFrame.__invert__)
return self._map_partitions(remote_func)
def isin(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.isin, **kwargs)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self._map_partitions(func, new_dtypes=new_dtypes)
def isna(self):
func = self._prepare_method(pandas.DataFrame.isna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self._map_partitions(func, new_dtypes=new_dtypes)
def memory_usage(self, axis=0, **kwargs):
"""Returns the memory usage of each column.
Returns:
A new QueryCompiler object containing the memory usage of each column.
"""
if self._is_transposed:
return self.transpose().memory_usage(axis=1, **kwargs)
def memory_usage_builder(df, **kwargs):
axis = kwargs.pop("axis")
# We have to manually change the orientation of the data within the
# partitions because memory_usage does not take in an axis argument
# and always does it along columns.
if axis:
df = df.T
result = df.memory_usage(**kwargs)
return result
def sum_memory_usage(df, **kwargs):
axis = kwargs.pop("axis")
return df.sum(axis=axis)
# Even though memory_usage does not take in an axis argument, we have to
# pass in an axis kwargs for _build_mapreduce_func to properly arrange
# the results.
map_func = self._build_mapreduce_func(memory_usage_builder, axis=axis, **kwargs)
reduce_func = self._build_mapreduce_func(sum_memory_usage, axis=axis, **kwargs)
return self._full_reduce(axis, map_func, reduce_func)
def negative(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.__neg__, **kwargs)
return self._map_partitions(func)
def notna(self):
func = self._prepare_method(pandas.DataFrame.notna)
new_dtypes = pandas.Series(
[np.dtype("bool") for _ in self.columns], index=self.columns
)
return self._map_partitions(func, new_dtypes=new_dtypes)
def round(self, **kwargs):
func = self._prepare_method(pandas.DataFrame.round, **kwargs)
return self._map_partitions(func, new_dtypes=self._dtype_cache)
# END Map partitions operations
# String map partition operations
def _str_map_partitions(self, func, new_dtypes=None, **kwargs):
def str_op_builder(df, **kwargs):
str_series = df.squeeze().str
return func(str_series, **kwargs).to_frame()
builder_func = self._prepare_method(str_op_builder, **kwargs)
return self._map_partitions(builder_func, new_dtypes=new_dtypes)
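    # Shape convention illustrated (not from the original source): each partition here
    # is a one-column frame, so df.squeeze() yields a Series, the .str method runs on
    # it, and .to_frame() restores the one-column shape, e.g.
    #   pandas.Series(['a b']).str.split().to_frame()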
def str_split(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.split, new_dtypes=self.dtypes, **kwargs
)
def str_rsplit(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.rsplit, new_dtypes=self.dtypes, **kwargs
)
def str_get(self, i):
return self._str_map_partitions(
pandas.Series.str.get, new_dtypes=self.dtypes, i=i
)
def str_join(self, sep):
return self._str_map_partitions(
pandas.Series.str.join, new_dtypes=self.dtypes, sep=sep
)
def str_contains(self, pat, **kwargs):
kwargs["pat"] = pat
new_dtypes = pandas.Series([bool])
return self._str_map_partitions(
pandas.Series.str.contains, new_dtypes=new_dtypes, **kwargs
)
def str_replace(self, pat, repl, **kwargs):
kwargs["pat"] = pat
kwargs["repl"] = repl
return self._str_map_partitions(
pandas.Series.str.replace, new_dtypes=self.dtypes, **kwargs
)
    def str_repeats(self, repeats):
        # the pandas string accessor method is ``repeat`` (singular)
        return self._str_map_partitions(
            pandas.Series.str.repeat, new_dtypes=self.dtypes, repeats=repeats
        )
def str_pad(self, width, **kwargs):
kwargs["width"] = width
return self._str_map_partitions(
pandas.Series.str.pad, new_dtypes=self.dtypes, **kwargs
)
def str_center(self, width, **kwargs):
kwargs["width"] = width
return self._str_map_partitions(
pandas.Series.str.center, new_dtypes=self.dtypes, **kwargs
)
def str_ljust(self, width, **kwargs):
kwargs["width"] = width
return self._str_map_partitions(
pandas.Series.str.ljust, new_dtypes=self.dtypes, **kwargs
)
def str_rjust(self, width, **kwargs):
kwargs["width"] = width
return self._str_map_partitions(
pandas.Series.str.rjust, new_dtypes=self.dtypes, **kwargs
)
def str_zfill(self, width):
return self._str_map_partitions(
pandas.Series.str.zfill, new_dtypes=self.dtypes, width=width
)
def str_wrap(self, width, **kwargs):
kwargs["width"] = width
return self._str_map_partitions(
pandas.Series.str.wrap, new_dtypes=self.dtypes, **kwargs
)
def str_slice(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.slice, new_dtypes=self.dtypes, **kwargs
)
def str_slice_replace(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.slice_replace, new_dtypes=self.dtypes, **kwargs
)
def str_count(self, pat, **kwargs):
kwargs["pat"] = pat
new_dtypes = pandas.Series([int])
# We have to pass in a lambda because pandas.Series.str.count does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.count(**kwargs), new_dtypes=new_dtypes
)
def str_startswith(self, pat, **kwargs):
kwargs["pat"] = pat
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.startswith does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.startswith(**kwargs), new_dtypes=new_dtypes
)
def str_endswith(self, pat, **kwargs):
kwargs["pat"] = pat
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.endswith does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.endswith(**kwargs), new_dtypes=new_dtypes
)
def str_findall(self, pat, **kwargs):
kwargs["pat"] = pat
# We have to pass in a lambda because pandas.Series.str.findall does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.findall(**kwargs), new_dtypes=self.dtypes
)
def str_match(self, pat, **kwargs):
kwargs["pat"] = pat
return self._str_map_partitions(
pandas.Series.str.match, new_dtypes=self.dtypes, **kwargs
)
def str_len(self):
new_dtypes = pandas.Series([int])
return self._str_map_partitions(pandas.Series.str.len, new_dtypes=new_dtypes)
def str_strip(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.strip, new_dtypes=self.dtypes, **kwargs
)
def str_rstrip(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.rstrip, new_dtypes=self.dtypes, **kwargs
)
def str_lstrip(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.lstrip, new_dtypes=self.dtypes, **kwargs
)
def str_partition(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.partition, new_dtypes=self.dtypes, **kwargs
)
def str_rpartition(self, **kwargs):
return self._str_map_partitions(
pandas.Series.str.rpartition, new_dtypes=self.dtypes, **kwargs
)
def str_lower(self):
# We have to pass in a lambda because pandas.Series.str.lower does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.lower(), new_dtypes=self.dtypes
)
def str_upper(self):
# We have to pass in a lambda because pandas.Series.str.upper does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.upper(), new_dtypes=self.dtypes
)
def str_find(self, sub, **kwargs):
kwargs["sub"] = sub
return self._str_map_partitions(
pandas.Series.str.find, new_dtypes=self.dtypes, **kwargs
)
def str_rfind(self, sub, **kwargs):
kwargs["sub"] = sub
return self._str_map_partitions(
pandas.Series.str.rfind, new_dtypes=self.dtypes, **kwargs
)
def str_index(self, sub, **kwargs):
kwargs["sub"] = sub
return self._str_map_partitions(
pandas.Series.str.index, new_dtypes=self.dtypes, **kwargs
)
def str_rindex(self, sub, **kwargs):
kwargs["sub"] = sub
return self._str_map_partitions(
pandas.Series.str.rindex, new_dtypes=self.dtypes, **kwargs
)
def str_capitalize(self):
# We have to pass in a lambda because pandas.Series.str.capitalize does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.capitalize(), new_dtypes=self.dtypes
)
def str_swapcase(self):
# We have to pass in a lambda because pandas.Series.str.swapcase does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.swapcase(), new_dtypes=self.dtypes
)
def str_normalize(self, form):
return self._str_map_partitions(
pandas.Series.str.normalize, new_dtypes=self.dtypes, form=form
)
def str_translate(self, table, **kwargs):
kwargs["table"] = table
return self._str_map_partitions(
pandas.Series.str.translate, new_dtypes=self.dtypes, **kwargs
)
def str_isalnum(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isalnum does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isalnum(), new_dtypes=new_dtypes
)
def str_isalpha(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isalpha does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isalpha(), new_dtypes=new_dtypes
)
def str_isdigit(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isdigit does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isdigit(), new_dtypes=new_dtypes
)
def str_isspace(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isspace does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isspace(), new_dtypes=new_dtypes
)
def str_islower(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.islower does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.islower(), new_dtypes=new_dtypes
)
def str_isupper(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isupper does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isupper(), new_dtypes=new_dtypes
)
def str_istitle(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.istitle does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.istitle(), new_dtypes=new_dtypes
)
def str_isnumeric(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isnumeric does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isnumeric(), new_dtypes=new_dtypes
)
def str_isdecimal(self):
new_dtypes = pandas.Series([bool])
# We have to pass in a lambda because pandas.Series.str.isdecimal does not exist for python2
return self._str_map_partitions(
lambda str_series: str_series.isdecimal(), new_dtypes=new_dtypes
)
# END String map partitions operations
# Map partitions across select indices
def astype(self, col_dtypes, **kwargs):
"""Converts columns dtypes to given dtypes.
Args:
col_dtypes: Dictionary of {col: dtype,...} where col is the column
name and dtype is a numpy dtype.
Returns:
DataFrame with updated dtypes.
"""
# Group indices to update by dtype for less map operations
dtype_indices = {}
columns = col_dtypes.keys()
numeric_indices = list(self.columns.get_indexer_for(columns))
# Create Series for the updated dtypes
new_dtypes = self.dtypes.copy()
for i, column in enumerate(columns):
dtype = col_dtypes[column]
if (
not isinstance(dtype, type(self.dtypes[column]))
or dtype != self.dtypes[column]
):
# Only add the dtype if it differs from the current one
if dtype in dtype_indices.keys():
dtype_indices[dtype].append(numeric_indices[i])
else:
dtype_indices[dtype] = [numeric_indices[i]]
# Update the new dtype series to the proper pandas dtype
try:
new_dtype = np.dtype(dtype)
except TypeError:
new_dtype = dtype
if dtype != np.int32 and new_dtype == np.int32:
new_dtype = np.dtype("int64")
elif dtype != np.float32 and new_dtype == np.float32:
new_dtype = np.dtype("float64")
new_dtypes[column] = new_dtype
# Update partitions for each dtype that is updated
new_data = self.data
for dtype in dtype_indices.keys():
# Bind `dtype` via a default argument so the value from this loop iteration is
# captured even if execution of the function is deferred.
def astype(df, internal_indices=[], dtype=dtype):
block_dtypes = {}
for ind in internal_indices:
block_dtypes[df.columns[ind]] = dtype
return df.astype(block_dtypes)
new_data = new_data.apply_func_to_select_indices(
0, astype, dtype_indices[dtype], keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns, new_dtypes)
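# Illustrative usage (a sketch with hypothetical names, not taken from this
# codebase): assuming `qc` is a query compiler over columns "a" and "b",
#
#     new_qc = qc.astype({"a": np.float64, "b": "category"})
#     new_qc.dtypes  # reflects float64 / category for the converted columns
#
# Columns that already have the requested dtype are skipped, and columns that
# share a target dtype are converted together in a single map over the partitions.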
# END Map partitions across select indices
# Column/Row partitions reduce operations
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def _full_axis_reduce(self, axis, func, alternate_index=None):
"""Applies map that reduce Manager to series but require knowledge of full axis.
Args:
func: Function to reduce the Manager by. This function takes in a Manager.
axis: axis to apply the function to.
alternate_index: If the resulting series should have an index
different from the current query_compiler's index or columns.
Return:
A new QueryCompiler object containing the reduced data.
"""
result = self.data.map_across_full_axis(axis, func)
if axis == 0:
columns = alternate_index if alternate_index is not None else self.columns
return self.__constructor__(result, index=["__reduced__"], columns=columns)
else:
index = alternate_index if alternate_index is not None else self.index
return self.__constructor__(result, index=index, columns=["__reduced__"])
def first_valid_index(self):
"""Returns index of first non-NaN/NULL value.
Return:
Scalar of index name.
"""
# It may be possible to incrementally check each partition, but this
# computation is fairly cheap.
def first_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.first_valid_index())
func = self._build_mapreduce_func(first_valid_index_builder)
# We get the minimum from each column, then take the min of that to get
# first_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
first_result = self._full_axis_reduce(0, func).min(axis=1).to_pandas().squeeze()
return self.index[first_result]
def idxmax(self, **kwargs):
"""Returns the first occurrence of the maximum over requested axis.
Returns:
A new QueryCompiler object containing the maximum of each column or axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmax(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmax_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmax(**kwargs)
func = self._build_mapreduce_func(idxmax_builder, **kwargs)
return self._full_axis_reduce(axis, func)
def idxmin(self, **kwargs):
"""Returns the first occurrence of the minimum over requested axis.
Returns:
A new QueryCompiler object containing the minimum of each column or axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().idxmin(**kwargs)
axis = kwargs.get("axis", 0)
index = self.index if axis == 0 else self.columns
def idxmin_builder(df, **kwargs):
if axis == 0:
df.index = index
else:
df.columns = index
return df.idxmin(**kwargs)
func = self._build_mapreduce_func(idxmin_builder, **kwargs)
return self._full_axis_reduce(axis, func)
def last_valid_index(self):
"""Returns index of last non-NaN/NULL value.
Return:
Scalar of index name.
"""
def last_valid_index_builder(df):
df.index = pandas.RangeIndex(len(df.index))
return df.apply(lambda df: df.last_valid_index())
func = self._build_mapreduce_func(last_valid_index_builder)
# We get the maximum from each column, then take the max of that to get
# last_valid_index. The `to_pandas()` here is just for a single value and
# `squeeze` will convert it to a scalar.
last_result = self._full_axis_reduce(0, func).max(axis=1).to_pandas().squeeze()
return self.index[last_result]
def median(self, **kwargs):
"""Returns median of each column or row.
Returns:
A new QueryCompiler object containing the median of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().median(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
return self._full_axis_reduce(axis, func)
def nunique(self, **kwargs):
"""Returns the number of unique items over each column or row.
Returns:
A new QueryCompiler object of ints indexed by column or index names.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().nunique(**kwargs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.nunique, **kwargs)
return self._full_axis_reduce(axis, func)
def quantile_for_single_value(self, **kwargs):
"""Returns quantile of each column or row.
Returns:
A new QueryCompiler object containing the quantile of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_single_value(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q", 0.5)
assert type(q) is float
def quantile_builder(df, **kwargs):
try:
return pandas.DataFrame.quantile(df, **kwargs)
except ValueError:
return pandas.Series()
func = self._build_mapreduce_func(quantile_builder, **kwargs)
result = self._full_axis_reduce(axis, func)
if axis == 0:
result.index = [q]
else:
result.columns = [q]
return result
def skew(self, **kwargs):
"""Returns skew of each column or row.
Returns:
A new QueryCompiler object containing the skew of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().skew(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.skew, **kwargs)
return self._full_axis_reduce(axis, func)
def std(self, **kwargs):
"""Returns standard deviation of each column or row.
Returns:
A new QueryCompiler object containing the standard deviation of each column
or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().std(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.std, **kwargs)
return self._full_axis_reduce(axis, func)
def var(self, **kwargs):
"""Returns variance of each column or row.
Returns:
A new QueryCompiler object containing the variance of each column or row.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().var(**kwargs)
# Pandas default is 0 (though not mentioned in docs)
axis = kwargs.get("axis", 0)
func = self._build_mapreduce_func(pandas.DataFrame.var, **kwargs)
return self._full_axis_reduce(axis, func)
# END Column/Row partitions reduce operations
# Column/Row partitions reduce operations over select indices
#
# These operations result in a reduced dimensionality of data.
# This will return a new QueryCompiler object which the front end will handle.
def _full_axis_reduce_along_select_indices(self, func, axis, index):
"""Reduce Manger along select indices using function that needs full axis.
Args:
func: Callable that reduces the dimension of the object and requires full
knowledge of the entire axis.
axis: 0 for columns and 1 for rows. Defaults to 0.
index: Index of the resulting QueryCompiler.
Returns:
A BaseFrameManager object containing the reduced data.
"""
# Convert indices to numeric indices
old_index = self.index if axis else self.columns
numeric_indices = [i for i, name in enumerate(old_index) if name in index]
result = self.data.apply_func_to_select_indices_along_full_axis(
axis, func, numeric_indices
)
return result
def describe(self, **kwargs):
"""Generates descriptive statistics.
Returns:
DataFrame object containing the descriptive statistics of the DataFrame.
"""
# Use pandas to calculate the correct columns
new_columns = (
pandas.DataFrame(columns=self.columns)
.astype(self.dtypes)
.describe(**kwargs)
.columns
)
def describe_builder(df, internal_indices=[], **kwargs):
return df.iloc[:, internal_indices].describe(**kwargs)
# Apply describe and update indices, columns, and dtypes
func = self._prepare_method(describe_builder, **kwargs)
new_data = self._full_axis_reduce_along_select_indices(func, 0, new_columns)
new_index = self.compute_index(0, new_data, False)
return self.__constructor__(new_data, new_index, new_columns)
# END Column/Row partitions reduce operations over select indices
# Map across rows/columns
# These operations require some global knowledge of the full column/row
# that is being operated on. This means that we have to put all of that
# data in the same place.
def _map_across_full_axis(self, axis, func):
return self.data.map_across_full_axis(axis, func)
def _cumulative_builder(self, func, **kwargs):
axis = kwargs.get("axis", 0)
func = self._prepare_method(func, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(
new_data, self.index, self.columns, self._dtype_cache
)
def cummax(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummax(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummax, **kwargs)
def cummin(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cummin(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cummin, **kwargs)
def cumsum(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cumsum(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cumsum, **kwargs)
def cumprod(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().cumprod(**kwargs).transpose()
return self._cumulative_builder(pandas.DataFrame.cumprod, **kwargs)
def diff(self, **kwargs):
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().diff(**kwargs).transpose()
axis = kwargs.get("axis", 0)
func = self._prepare_method(pandas.DataFrame.diff, **kwargs)
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
def eval(self, expr, **kwargs):
"""Returns a new QueryCompiler with expr evaluated on columns.
Args:
expr: The string expression to evaluate.
Returns:
A new QueryCompiler with new columns after applying expr.
"""
columns = self.index if self._is_transposed else self.columns
index = self.columns if self._is_transposed else self.index
# Make a copy of columns and eval on the copy to determine if result type is
# series or not
columns_copy = pandas.DataFrame(columns=self.columns)
columns_copy = columns_copy.eval(expr, inplace=False, **kwargs)
expect_series = isinstance(columns_copy, pandas.Series)
def eval_builder(df, **kwargs):
# pop the `axis` parameter because it was needed to build the mapreduce
# function but it is not a parameter used by `eval`.
kwargs.pop("axis", None)
df.columns = columns
result = df.eval(expr, inplace=False, **kwargs)
return result
func = self._build_mapreduce_func(eval_builder, axis=1, **kwargs)
new_data = self._map_across_full_axis(1, func)
if expect_series:
new_columns = [columns_copy.name]
new_index = index
else:
new_columns = columns_copy.columns
new_index = self.index
return self.__constructor__(new_data, new_index, new_columns)
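# Illustrative usage (hypothetical names): for a compiler with columns "a" and "b",
#
#     qc.eval("c = a + b")   # returns a compiler with columns a, b, c
#     qc.eval("a + b")       # returns a single-column result, because pandas.eval
#                            # yields a Series for bare expressions
#
# The dry run against an empty DataFrame above is what decides which shape to expect.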
def mode(self, **kwargs):
"""Returns a new QueryCompiler with modes calculated for each label along given axis.
Returns:
A new QueryCompiler with modes calculated.
"""
axis = kwargs.get("axis", 0)
def mode_builder(df, **kwargs):
result = df.mode(**kwargs)
# We return a dataframe with the same shape as the input to ensure
# that all the partitions will be the same shape
if not axis and len(df) != len(result):
# Pad columns
append_values = pandas.DataFrame(
columns=result.columns, index=range(len(result), len(df))
)
result = pandas.concat([result, append_values], ignore_index=True)
elif axis and len(df.columns) != len(result.columns):
# Pad rows
append_vals = pandas.DataFrame(
columns=range(len(result.columns), len(df.columns)),
index=result.index,
)
result = pandas.concat([result, append_vals], axis=1)
return pandas.DataFrame(result)
func = self._prepare_method(mode_builder, **kwargs)
new_data = self._map_across_full_axis(axis, func)
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index
new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns))
new_dtypes = self._dtype_cache
if new_dtypes is not None:
new_dtypes.index = new_columns
return self.__constructor__(
new_data, new_index, new_columns, new_dtypes
).dropna(axis=axis, how="all")
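# Example of the padding above (hypothetical): with axis=0 and a 4-row partition
# whose per-column modes have only 2 rows, mode_builder appends 2 all-NaN rows so
# every partition keeps the original row count; the trailing dropna(how="all")
# removes that padding again once the partitions are recombined.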
def fillna(self, **kwargs):
"""Replaces NaN values with the method provided.
Returns:
A new QueryCompiler with null values filled.
"""
axis = kwargs.get("axis", 0)
value = kwargs.get("value")
method = kwargs.get("method", None)
limit = kwargs.get("limit", None)
full_axis = method is not None or limit is not None
if isinstance(value, dict):
value = kwargs.pop("value")
if axis == 0:
index = self.columns
else:
index = self.index
value = {
idx: value[key] for key in value for idx in index.get_indexer_for([key])
}
def fillna_dict_builder(df, func_dict={}):
# We do this to ensure that no matter the state of the columns we get
# the correct ones.
func_dict = {df.columns[idx]: func_dict[idx] for idx in func_dict}
return df.fillna(value=func_dict, **kwargs)
if full_axis:
new_data = self.data.apply_func_to_select_indices_along_full_axis(
axis, fillna_dict_builder, value, keep_remaining=True
)
else:
new_data = self.data.apply_func_to_select_indices(
axis, fillna_dict_builder, value, keep_remaining=True
)
return self.__constructor__(new_data, self.index, self.columns)
else:
func = self._prepare_method(pandas.DataFrame.fillna, **kwargs)
if full_axis:
new_data = self._map_across_full_axis(axis, func)
return self.__constructor__(new_data, self.index, self.columns)
else:
return self._map_partitions(func)
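# Illustrative usage (hypothetical names):
#
#     qc.fillna(value=0)                          # elementwise, handled per partition
#     qc.fillna(value={"a": 0, "b": -1}, axis=0)  # per-column values, routed through the
#                                                 # dict branch above
#     qc.fillna(method="ffill")                   # needs the full axis, so it goes
#                                                 # through _map_across_full_axis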
def quantile_for_list_of_values(self, **kwargs):
"""Returns Manager containing quantiles along an axis for numeric columns.
Returns:
QueryCompiler containing quantiles of original QueryCompiler along an axis.
"""
if self._is_transposed:
kwargs["axis"] = kwargs.get("axis", 0) ^ 1
return self.transpose().quantile_for_list_of_values(**kwargs)
axis = kwargs.get("axis", 0)
q = kwargs.get("q")
numeric_only = kwargs.get("numeric_only", True)
assert isinstance(q, (pandas.Series, np.ndarray, pandas.Index, list))
if numeric_only:
new_columns = self.numeric_columns()
else:
new_columns = [
col
for col, dtype in zip(self.columns, self.dtypes)
if (is_numeric_dtype(dtype) or is_datetime_or_timedelta_dtype(dtype))
]
import re
import pandas as pd
import numpy as np
import nltk
import random
from nltk.corpus import stopwords
from bs4 import BeautifulSoup
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
# Loading dataset
def get_reviews(path):
return pd.read_csv(path, header=0)
# Preprocessing
stops = set(stopwords.words("english"))
tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')
def remove_html(s):
return BeautifulSoup(s, 'lxml').get_text()
def remove_punct(s):
return re.sub(r'[^a-zA-Z]', ' ', s)
def get_words(s):
return s.lower().split()
def remove_stop_words(words):
return [w for w in words if w not in stops]
def review_to_words(review, keep_stop_words=True):
words = get_words(remove_punct(remove_html(review)))
if not keep_stop_words:
words = remove_stop_words(words)
return words
def get_clean_reviews(reviews, keep_stop_words=True, join_words=False):
print('Cleaning and parsing the set of movie reviews...')
clean_train_reviews = []
for i, review in enumerate(reviews):
if (i+1) % 1000 == 0:
print('Review {}'.format(i+1))
words = review_to_words(review, keep_stop_words)
clean_train_reviews.append(' '.join(words) if join_words else words)
return clean_train_reviews
# For word2vec
def review_to_sentences(review, keep_stop_words=True):
raw_sentences = tokenizer.tokenize(review.strip())
sentences = []
for raw_sentence in raw_sentences:
if raw_sentence:
sentences.append(review_to_words(raw_sentence, keep_stop_words))
return sentences
def get_sentences(reviews):
print('Cleaning and getting sentences from the set of movie reviews...')
sentences = []
for i, review in enumerate(reviews):
if (i+1) % 1000 == 0:
print('Review {}'.format(i+1))
sentences += review_to_sentences(review)
return sentences
# Learning
def train_classifier(algorithm, features, train):
print('Train classifier ({})...'.format(algorithm))
estimators = []
if 'rf' in algorithm:
estimators.append(('rf', RandomForestClassifier(n_estimators=100)))
if 'lr' in algorithm:
estimators.append(('lr', LogisticRegression()))
if 'mb' in algorithm:
estimators.append(('mb', MultinomialNB()))
# Training
classifier = VotingClassifier(estimators=estimators, voting='soft')
classifier.fit(features, train['sentiment'])
return classifier
# Outputs
def predict(features, test, classifier, path):
print('Predict...')
results = classifier.predict(features)
output = pd.DataFrame(data={'id': test['id'], 'sentiment': results})
# Write the predictions to the given path (output format assumed: id, sentiment columns)
output.to_csv(path, index=False)
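# Minimal end-to-end sketch of how the helpers above fit together. The file paths,
# the CountVectorizer feature step and the column names ('review', 'id', 'sentiment')
# are assumptions for illustration, not part of this module.
if __name__ == '__main__':
    from sklearn.feature_extraction.text import CountVectorizer

    train = get_reviews('data/labeled_train_reviews.csv')   # assumed path
    test = get_reviews('data/test_reviews.csv')             # assumed path
    train_texts = get_clean_reviews(train['review'], keep_stop_words=False, join_words=True)
    test_texts = get_clean_reviews(test['review'], keep_stop_words=False, join_words=True)

    # Simple bag-of-words features; any vectorizer producing a feature matrix would do.
    vectorizer = CountVectorizer(max_features=5000)
    train_features = vectorizer.fit_transform(train_texts)
    test_features = vectorizer.transform(test_texts)

    classifier = train_classifier('rf+lr+mb', train_features, train)
    predict(test_features, test, classifier, 'output/predictions.csv')  # assumed path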
# -*- coding: utf-8 -*-
"""
Created on July 2017
@author: JulienWuthrich
"""
import pandas as pd
def isDateTime(row):
try:
row.hour
return True
except Exception:
return False
def colDateType(df):
date = []
date_time = []
for col in df.columns:
row = df[col].iloc[0]
try:
row.day
if isDateTime(row):
date_time.append(col)
else:
date.append(col)
except Exception:
pass
return date, date_time
def frameFromDatetimeCol(serie, col):
moment = serie.apply(lambda x: 0 if x.hour < 10 else 1 if 10 <= x.hour < 18 else 2)
day = serie.apply(lambda x: x.day)
month = serie.apply(lambda x: x.month)
year = serie.apply(lambda x: x.year)
weekday = serie.apply(lambda x: 0 if x.weekday() > 4 else 1)
data = dict()
for arg in ["moment", "day", "month", "year", "weekday"]:
data[col + "_" + arg] = vars()[arg]
return pd.DataFrame(data)
def frameFromDateCol(serie, col):
day = serie.apply(lambda x: x.day)
month = serie.apply(lambda x: x.month)
year = serie.apply(lambda x: x.year)
weekday = serie.apply(lambda x: 0 if x.weekday() > 4 else 1)
data = dict()
for arg in ["day", "month", "year", "weekday"]:
data[col + "_" + arg] = vars()[arg]
return pd.DataFrame(data)
def buildColsFromDateCols(df):
date, date_time = colDateType(df)
for col in date_time:
new_df = frameFromDatetimeCol(df[col], col)
df = pd.concat([df, new_df], axis=1)
for col in date:
new_df = frameFromDateCol(df[col], col)
df = pd.concat([df, new_df], axis=1)
return df
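# Minimal usage sketch (hypothetical data; illustrative only):
if __name__ == "__main__":
    sample = pd.DataFrame({
        "created": pd.to_datetime(["2017-07-01 09:30", "2017-07-02 19:45"]),
        "value": [1, 2],
    })
    enriched = buildColsFromDateCols(sample)
    # Expect created_moment, created_day, created_month, created_year, created_weekday
    print(enriched.columns.tolist())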
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# GH 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
import matplotlib.pyplot as plt
import os
import pandas
import numpy as np
from spinedb import SpineDB
import pandas as pd
def get_participating_technologies_in_capacity_market(db_emlab_powerplantdispatchplans, years_to_generate, years_emlab,
db_emlab_powerplants):
"""
This function returns the accepted capacity of all technologies that get revenue from the Capacity Market,
aggregated per technology/fuel combination and per year.
:param db_emlab_powerplantdispatchplans: PPDPs as queried from SpineDB EMLab
:param years_to_generate: COMPETES years corresponding to the EMLab ticks
:param years_emlab: EMLab ticks to inspect
:param db_emlab_powerplants: Power plants as queried from SpineDB EMLab
:return: DataFrame of accepted capacity, indexed by 'technology, fuel' with one column per year
"""
capacity_market_aggregated_per_tech = pd.DataFrame()
for year in years_emlab:
capacity_market_ppdps = [row['object_name'] for row in db_emlab_powerplantdispatchplans if
row['parameter_name'] == 'Market' and
row['parameter_value'] == 'DutchCapacityMarket' and
row['alternative'] == str(year)]
capacity_market_accepted_ppdps = [row['object_name'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] in capacity_market_ppdps and
row['parameter_name'] == 'AcceptedAmount' and
row['parameter_value'] > 0]
list_of_tech_fuel_cominations = []
capacity_market_participating_capacities = []
for ppdp in capacity_market_accepted_ppdps:
plant_name = next(row['parameter_value'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] == ppdp and
row['parameter_name'] == 'Plant')
plant_accepted_amount = next(row['parameter_value'] for row in db_emlab_powerplantdispatchplans if
row['object_name'] == ppdp and
row['parameter_name'] == 'AcceptedAmount')
plant_technology = next(row['parameter_value'] for row in db_emlab_powerplants if
row['object_name'] == plant_name and
row['parameter_name'] == 'TECHTYPENL')
plant_fuel = next(row['parameter_value'] for row in db_emlab_powerplants if
row['object_name'] == plant_name and
row['parameter_name'] == 'FUELNL')
list_of_tech_fuel_cominations.append(plant_technology + ', ' + plant_fuel)
capacity_market_participating_capacities.append(plant_accepted_amount)
df_year = pd.DataFrame({'technology_fuel': list_of_tech_fuel_cominations,
'capacity': capacity_market_participating_capacities})
capacity_market_aggregated_per_tech[year] = df_year.groupby('technology_fuel').sum()
years_dictionary = dict(zip(years_emlab, years_to_generate))
capacity_market_aggregated_per_tech.rename(columns=years_dictionary,
inplace=True)
return capacity_market_aggregated_per_tech.fillna(0)
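# Illustrative call (hypothetical values): the db_* arguments are lists of dicts as
# returned by SpineDB queries, each with 'object_name', 'parameter_name',
# 'parameter_value' and 'alternative' keys, e.g.
#
#     capacity_per_tech = get_participating_technologies_in_capacity_market(
#         db_emlab_powerplantdispatchplans, [2020, 2021], [0, 1], db_emlab_powerplants)
#
# The result is a DataFrame indexed by 'technology, fuel' with one column per year.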
def plot_mcps_with_filter(db_mcps, market, years_to_generate, path_to_plots, title, file_name, yl, ylim):
# MCP Plots
filtered_mcps = [i['object_name'] for i in db_mcps if
i['parameter_name'] == 'Market' and i['parameter_value'] == market]
mcp_x = []
mcp_y = []
print('Creating ' + str(market) + ' MCP plot')
for row in [i for i in db_mcps if i['object_name'] in filtered_mcps]:
if row['parameter_name'] == 'Price':
mcp_x.append(int(row['alternative']) + years_to_generate[0])
mcp_y.append(row['parameter_value'])
fig7 = plt.figure()
axs7 = plt.axes()
plt.grid(b=True)
axs7.plot(mcp_x, mcp_y, 'o')
axs7.set_axisbelow(True)
plt.xlabel('Years')
plt.ylabel(yl)
plt.ylim(ylim)
axs7.set_title(title)
fig7.savefig(path_to_plots + '/' + file_name, bbox_inches='tight', dpi=300)
def plot_annual_balances(annual_balance, years_to_generate, path_to_plots):
# Annual Balance
print('Create Annual Balance plot')
plt.figure()
annual_balance_df = pd.DataFrame(annual_balance, index=years_to_generate)
axs125 = annual_balance_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Supply or Demand (MWh)', fontsize='medium')
axs125.set_title('NL Annual Balance per Technology')
axs125.set_axisbelow(True)
plt.ylim([-0.6e8, 1.75e8])
fig125 = axs125.get_figure()
fig125.savefig(path_to_plots + '/' + 'NL Annual Balance.png', bbox_inches='tight', dpi=300)
def plot_vre_nl_installed_capacity(vre_investment_sums, years_to_generate, path_to_plots):
# VRE Investments plot
print('Create VRE Investments plot')
plt.figure()
vre_investments_df = pd.DataFrame(vre_investment_sums, index=years_to_generate)
axs5 = vre_investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', legend=False)
axs5.set_axisbelow(True)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
axs5.set_title('NL VRE Installed Capacity')
fig5 = axs5.get_figure()
fig5.savefig(path_to_plots + '/' + 'NL Installed Capacity.png', bbox_inches='tight', dpi=300)
def plot_investments(investment_sums, years_to_generate, path_to_plots, look_ahead):
# Investments plot
print('Create Investments plot')
plt.figure()
investments_df = pd.DataFrame(investment_sums,
index=list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1)))
axs6 = investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
axs6.set_axisbelow(True)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
plt.ylim([-4.3e5, 5.5e5])
# leg = plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs6.set_title('EU Capacity Investments per Technology')
fig6 = axs6.get_figure()
fig6.savefig(path_to_plots + '/' + 'EU Investments.png', bbox_inches='tight', dpi=300)
def plot_nl_investments(investment_sums, years_to_generate, path_to_plots, look_ahead):
# NL Investments plot
print('Create NL Investments plot')
plt.figure()
investments_df = pd.DataFrame(investment_sums,
index=list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1)))
axs6 = investments_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Capacity (MW)', fontsize='medium')
# plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs6.set_title('NL Capacity Investments per Technology')
axs6.set_axisbelow(True)
plt.ylim([-20e3, 33e3])
fig6 = axs6.get_figure()
fig6.savefig(path_to_plots + '/' + 'NL Investments.png', bbox_inches='tight', dpi=300)
def plot_co2_emissions(co2_emission_sums, years_to_generate, path_to_plots):
# CO2 emissions plot
print('Create annual CO2 Emission per tech plot')
plt.figure()
co2_df = pd.DataFrame(co2_emission_sums, index=years_to_generate)
axs4 = co2_df.plot.bar(stacked=True, rot=0, colormap='tab20', grid=True, legend=False)
plt.xlabel('Years', fontsize='medium')
plt.ylabel('Emissions (ton CO2)', fontsize='medium')
axs4.set_title('NL CO2 Emissions per Technology')
axs4.set_axisbelow(True)
plt.ylim([0, 3.5e7])
# plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
fig4 = axs4.get_figure()
fig4.savefig(path_to_plots + '/' + 'NL CO2 Emissions.png', bbox_inches='tight', dpi=300)
def plot_nl_unit_generation(path_and_filename_dispatch, year, path_to_plots):
print('Plot NL Unit Generation')
# Plot 3 NL Unit Generation curve
nl_unit_generation_df = pandas.read_excel(path_and_filename_dispatch, 'NL Unit Generation', skiprows=1, index_col=0,
header=0).transpose()
plt.figure()
axs3 = nl_unit_generation_df.plot()
axs3.set_axisbelow(True)
plt.xlabel('Hours', fontsize='medium')
plt.ylabel('Generation (MWh)', fontsize='medium')
plt.legend(fontsize='medium', loc='upper left', bbox_to_anchor=(1, 1.1))
axs3.set_title('NL Unit Generation ' + str(year))
fig3 = axs3.get_figure()
fig3.savefig(path_to_plots + '/' + 'NL Unit Generation ' + str(year) + '.png', bbox_inches='tight', dpi=300)
def plot_and_prepare_hourly_nodal_price_duration_curve(hourly_nodal_prices_df, year, path_to_plots,
price_duration_curves):
# Plot 2.5 Hourly Market Price Duration Curve
print('Create Hourly Nodal Price duration curve')
plt.figure()
axs25 = hourly_nodal_prices_df['NED'].sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
plt.xlabel('Hours')
plt.ylabel('Price (Euro / MWh)')
axs25.set_title('NL Hourly Electricity Spot Market Price Duration Curve ' + str(year))
axs25.set_axisbelow(True)
plt.ylim([0, min(hourly_nodal_prices_df['NED'].max() + 50, 250)])
fig25 = axs25.get_figure()
fig25.savefig(path_to_plots + '/' + 'NL Nodal Prices Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
price_duration_curves[year] = hourly_nodal_prices_df['NED'].sort_values(ascending=False).values
return price_duration_curves
def plot_hourly_nodal_prices(path_and_filename_dispatch, year, path_to_plots):
# Plot 2 Hourly Nodal Prices
print('Read and create hourly nodal prices plot')
hourly_nodal_prices_df = pandas.read_excel(path_and_filename_dispatch, 'Hourly Nodal Prices', skiprows=1,
index_col=0)
# hourly_nodal_prices_df[hourly_nodal_prices_df > 250] = 250
plt.figure()
axs2 = hourly_nodal_prices_df['NED'].plot(grid=True)
axs2.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Price (Euro / MWh)')
plt.xlim([0, 8760])
plt.ylim([0, min(hourly_nodal_prices_df['NED'].max() + 50, 250)])
axs2.set_title('NL Hourly Electricity Spot Market Prices ' + str(year))
fig2 = axs2.get_figure()
fig2.savefig(path_to_plots + '/' + 'NL Nodal Prices ' + str(year) + '.png', bbox_inches='tight', dpi=300)
return hourly_nodal_prices_df
def plot_and_prepare_residual_load_duration_curve(hourly_nl_balance_demand, hourly_nl_balance_df, year, path_to_plots,
residual_load_curves):
# Plot 1.75: Residual Load Curve
print('Create Res Load duration curve')
plt.figure()
hourly_nl_balance_residual_load = hourly_nl_balance_demand.subtract(hourly_nl_balance_df['Wind Onshore']) \
.subtract(hourly_nl_balance_df['Wind Offshore']) \
.subtract(hourly_nl_balance_df['Sun']) \
.subtract(hourly_nl_balance_df['Hydro Conv.'])
axs175 = hourly_nl_balance_residual_load.sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
axs175.set_title('NL Residual Load Duration Curve ' + str(year))
axs175.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Residual Load (MWh)')
plt.xlim([0, 8760])
fig175 = axs175.get_figure()
fig175.savefig(path_to_plots + '/' + 'NL Residual Load Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
residual_load_curves[year] = hourly_nl_balance_residual_load.sort_values(ascending=False).values
return residual_load_curves
def plot_and_prepare_load_duration_curve(hourly_nl_balance_demand, year, path_to_plots, load_duration_curves):
# Plot 1.5: Load duration curve
print('Create Load duration curve plot')
plt.figure()
axs15 = hourly_nl_balance_demand.sort_values(ascending=False).plot(use_index=False, grid=True, legend=False)
axs15.set_title('NL Load Duration Curve ' + str(year))
axs15.set_axisbelow(True)
plt.xlabel('Hours')
plt.ylabel('Load (MWh)')
plt.xlim([0, 8760])
fig15 = axs15.get_figure()
fig15.savefig(path_to_plots + '/' + 'NL Load Duration Curve ' + str(year) + '.png', bbox_inches='tight', dpi=300)
load_duration_curves[year] = hourly_nl_balance_demand.sort_values(ascending=False).values
return load_duration_curves
def prepare_annual_nl_balance(hourly_nl_balance_df, annual_balance, years_to_generate, year):
print('Prepare Annual NL Balance plot data')
hourly_nl_annual = hourly_nl_balance_df.sum()
for index, col in hourly_nl_annual.iteritems():
if index in annual_balance.keys():
annual_balance[index].append(col)
else:
annual_balance[index] = [0] * years_to_generate.index(year) + [col]
return annual_balance
def plot_hourly_nl_balance(path_and_filename_dispatch, path_to_plots, year):
# Plot 1 Hourly NL Balance (per year)
print('Read and Create Hourly NL Balance plot')
hourly_nl_balance_df = pandas.read_excel(path_and_filename_dispatch, 'Hourly NL Balance', skiprows=1, index_col=0,
skipfooter=2, usecols='A:W').replace(np.nan, 0)
hourly_nl_balance_demand = hourly_nl_balance_df['Demand']
hourly_nl_balance_df = hourly_nl_balance_df.drop(['Demand'], axis=1)
hourly_nl_balance_df['Exports'] = -1 * hourly_nl_balance_df['Exports']
hourly_nl_balance_df['H2'] = -1 * hourly_nl_balance_df['H2']
hourly_nl_balance_df['Heat'] = -1 * hourly_nl_balance_df['Heat']
hourly_nl_balance_df['HP'] = -1 * hourly_nl_balance_df['HP']
hourly_nl_balance_df['EVs'] = -1 * hourly_nl_balance_df['EVs']
hourly_nl_balance_df['Storage cons.'] = -1 * hourly_nl_balance_df['Storage cons.']
hourly_nl_balance_df_resampled = hourly_nl_balance_df.copy()
hourly_nl_balance_df_resampled['T'] = hourly_nl_balance_df_resampled.sum(axis=1)
hourly_nl_balance_df_resampled.index = pandas.to_timedelta(hourly_nl_balance_df_resampled.index, unit='H')
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.resample('50H').mean()
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.drop(['T'], axis=1)
hourly_nl_balance_df_resampled = hourly_nl_balance_df_resampled.interpolate(method='cubic')
hourly_nl_balance_df_resampled.index = [i * 50 for i in range(0, len(hourly_nl_balance_df_resampled))]
axs = hourly_nl_balance_df_resampled.plot.area(colormap='tab20', linewidth=0, legend=False)
axs.set_title('Hourly NL Balance - All Technologies ' + str(year))
axs.set_axisbelow(True)
plt.xlabel('Hours', fontsize='medium')
plt.ylabel('Supply or Demand (MWh)', fontsize='medium')
plt.xlim([0, 8760])
# plt.legend(fontsize='medium', loc='best', bbox_to_anchor=(1, 1.1))
fig = axs.get_figure()
fig.savefig(path_to_plots + '/' + 'NL Hourly Balance ' + str(year) + '.png', bbox_inches='tight', dpi=300)
return hourly_nl_balance_df, hourly_nl_balance_demand
def prepare_co2_emission_data(path_and_filename_dispatch, co2_emission_sums, years_to_generate, year):
# Preparing values for CO2 Emissions plot, plot after years iterations
print('Prepare CO2 Emission plot data')
co2_emissions = pandas.read_excel(path_and_filename_dispatch, 'CO2 Emissions tech', skiprows=1, index_col=0)
co2_emissions.columns = [i[0] + ',' + i[1] for i in zip(co2_emissions.columns.values, co2_emissions.iloc[0].values)]
for index, value in co2_emissions.loc['NED'].iteritems():
if index in co2_emission_sums.keys():
co2_emission_sums[index].append(value)
else:
co2_emission_sums[index] = [0] * years_to_generate.index(year) + [value]
# Add 0 to values if not in COMPETES results
for key in co2_emission_sums.keys():
if key not in co2_emissions.columns.values:
co2_emission_sums[key].append(0)
return co2_emission_sums
def prepare_vre_investment_data(path_and_filename_investments, vre_investment_sums, years_to_generate, year):
# Preparing values for VRE Investments plot, plot after years iterations
print('Preparing VRE Investment data')
vre_investments = pandas.read_excel(path_and_filename_investments, 'VRE investment', skiprows=2)
for index, row in vre_investments[vre_investments['Bus'] == 'NED'].iterrows():
if row['WindOn'] in vre_investment_sums.keys():
vre_investment_sums[row['WindOn']].append(row['Initial'])
else:
vre_investment_sums[row['WindOn']] = [0] * years_to_generate.index(year) + [row['Initial']]
# Add 0 to values if not in COMPETES results
for key in vre_investment_sums.keys():
if key not in vre_investments[vre_investments['Bus'] == 'NED']['WindOn'].values:
vre_investment_sums[key].append(0)
return vre_investment_sums
def prepare_investment_and_decom_data(path_and_filename_investments, investment_sums, years_to_generate, year,
emlab_spine_powerplants_tech_dict, emlab_spine_powerplants_fuel_dict,
emlab_spine_technologies, look_ahead, nl_investment_sums):
print('Loading investment and decom data')
decommissioning = pandas.read_excel(path_and_filename_investments, 'Decommissioning', skiprows=2, usecols='A:C')
decommissioning = decommissioning.dropna()
nl_decommissioning = decommissioning[decommissioning['node'] == 'NED'].copy()
investments = pandas.read_excel(path_and_filename_investments, 'New Generation Capacity', skiprows=2, usecols="A:D")
investments = investments.dropna()
nl_investments = investments[investments['Node'] == 'NED'].copy()
investment_sums, investments = prepare_investment_data(investments, investment_sums, years_to_generate, year,
emlab_spine_technologies, look_ahead)
nl_investment_sums, nl_investments = prepare_investment_data(nl_investments, nl_investment_sums, years_to_generate,
year, emlab_spine_technologies, look_ahead)
investment_sums = prepare_decom_data(decommissioning, emlab_spine_powerplants_tech_dict, investment_sums,
years_to_generate, year, investments, emlab_spine_powerplants_fuel_dict,
look_ahead)
nl_investment_sums = prepare_decom_data(nl_decommissioning, emlab_spine_powerplants_tech_dict, nl_investment_sums,
years_to_generate, year, nl_investments, emlab_spine_powerplants_fuel_dict,
look_ahead)
return investment_sums, nl_investment_sums
def prepare_decom_data(decommissioning, emlab_spine_powerplants_tech_dict, investment_sums, years_to_generate, year,
investments, emlab_spine_powerplants_fuel_dict, look_ahead):
print('Preparing Decom plot data')
decommissioning['Technology'] = [
emlab_spine_powerplants_fuel_dict[i] + ', ' + emlab_spine_powerplants_tech_dict[i] + ' (D)' for i in
decommissioning['unit'].values]
decommissioning_grouped_and_summed = decommissioning.groupby('Technology')['MW'].sum()
index_years = list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1))
for tech, mw_sum in decommissioning_grouped_and_summed.iteritems():
if tech not in investment_sums.keys():
investment_sums[tech] = [0] * len(index_years)
investment_sums[tech][index_years.index(year + look_ahead)] = -1 * mw_sum
return investment_sums
def get_year_online_by_technology(db_emlab_technologies, fuel, techtype, current_competes_tick):
technologies_by_fuel = [i['object_name'] for i in db_emlab_technologies if
i['parameter_name'] == 'FUELNEW' and i['parameter_value'] == fuel]
technologies_by_techtype = [i['object_name'] for i in db_emlab_technologies if
i['parameter_name'] == 'FUELTYPENEW' and i['parameter_value'] == techtype]
technology = next(name for name in technologies_by_fuel if name in technologies_by_techtype)
expected_permit_time = next(int(i['parameter_value']) for i in db_emlab_technologies if
i['object_name'] == technology and i['parameter_name'] == 'expectedPermittime')
expected_lead_time = next(int(i['parameter_value']) for i in db_emlab_technologies if
i['object_name'] == technology and i['parameter_name'] == 'expectedLeadtime')
build_time = expected_permit_time + expected_lead_time
return current_competes_tick + build_time
def prepare_investment_data(investments, investment_sums, years_to_generate, year, emlab_spine_technologies,
look_ahead):
# Preparing values for Investments plot, plot after years iterations
print('Preparing Investment plot data')
investments['CombinedIndex'] = [i[0] + ', ' + i[1] for i in
zip(investments['FUEL'].values, investments['FuelType'].values)]
index_years = list(range(years_to_generate[0], years_to_generate[-1] + look_ahead + 1))
for index, row in investments.iterrows():
# Extracting buildtime
online_in_year = get_year_online_by_technology(emlab_spine_technologies, row['FUEL'], row['FuelType'], year)
if row['CombinedIndex'] not in investment_sums.keys():
investment_sums[row['CombinedIndex']] = [0] * len(index_years)
investment_sums[row['CombinedIndex']][index_years.index(online_in_year)] += row['MW']
return investment_sums, investments
def prepare_annual_installed_capacity(path_and_filename_dispatch, emlab_spine_powerplants_tech_dict,
annual_installed_capacity, year, years_to_generate):
installed_capacity_df = pandas.read_excel(path_and_filename_dispatch, 'Initial generation capacity', skiprows=2)
installed_capacity_df = installed_capacity_df[installed_capacity_df['i'] == 'NED']
installed_capacity_df['Technology'] = [emlab_spine_powerplants_tech_dict[i] for i in
installed_capacity_df['h'].values]
installed_capacity_df = installed_capacity_df.groupby(['Technology']).sum()
for tech, row in installed_capacity_df.iterrows():
if tech in annual_installed_capacity.keys():
annual_installed_capacity[tech].append(row['MW'])
else:
annual_installed_capacity[tech] = [0] * years_to_generate.index(year) + [row['MW']]
return annual_installed_capacity
def plot_annual_installed_capacity(annual_installed_capacity, years_to_generate, path_to_plots):
print('Annual installed capacity NL')
plt.figure()
annual_installed_capacity_df = pd.DataFrame(annual_installed_capacity, index=years_to_generate)
"""Author: <NAME>
This contains the main Spomato class to be used to access the Spotify API and create new playlists based on the user's
defined criteria.
"""
import os
import pandas as pd
import spotipy
class Spomato():
"""Object used to access spotify API through spotipy and generate playlists.
This can take a combination user's saved tracks, playlists, and/or artist's songs to generate a playlist of a
specified length. This was conceived to use the Tomato Timer method as Spotify playlists.
This does require the user to provide a user API token from the spotify API. The API scopes used by this library are
playlist-read-private, playlist-modify-private, and user-library-read.
Parameters
----------
access_token : str
A valid Spotify Access token.
Attributes
----------
data : dictionary
Dictionary storing available data structures to create playlists.
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read
current_user_id : str
The string id of the user of the access token used to create the spotipy session.
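Examples
--------
Illustrative only; ``'<token>'`` stands in for a real access token with the
scopes listed above::

    spomato = Spomato(access_token='<token>')
    spomato.get_api_data(data_key='default', source={'savedtracks': None})
    spomato.data['default']  # DataFrame of song_id / time pairs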
"""
def __init__(self,
access_token=None):
"""Initialization function that sets access token and generates initial spotipy session.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
self.access_token = access_token
self.data = {}
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def update_token(self, access_token):
"""Updates the token and spotify session with the provided access_token. Generally used if your access token
has expired.
Parameters
----------
access_token : str
A valid Spotify Access token. This requires the scopes playlist-read-private, playlist-modify-private,
and user-library-read.
Returns
-------
None
"""
# update the class access token and the spotipy session
self.access_token = access_token
self.spotipy_session = self._get_spotipy_session()
self.current_user_id = self.spotipy_session.current_user()['id']
def _get_spotipy_session(self):
"""Internal Function to create a new spotify session.
Returns
-------
spotipy_session : spotipy.client.Spotify
A spotipy session to access the spotify API.
"""
return spotipy.Spotify(auth=self.access_token)
@staticmethod
def _parse_album(album_data, market='US'):
"""Parses the album data returned from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
album_data : dict
A dictionary of album data from Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the album data and parse the track data
series_list = []
album_tracks = album_data['tracks']['items']
for record in album_tracks:
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_user_playlist(data, market='US'):
"""Parses a user playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['tracks']['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_public_playlist(data, market='US'):
"""Parses public playlist data set from the Spotify API and returns the song information as a pandas DataFrame.
Parameters
----------
data : dictionary
Contains songs in a playlist from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the playlist data and parse the track data
series_list = []
data = data['items']
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
@staticmethod
def _parse_saved_tracks(data, market='US'):
"""Parses a the saved songs data set of the user from the Spotify API and returns the song information as a
pandas DataFrame.
Parameters
----------
data : dictionary
Contains saved songs of the user from the Spotify API
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
pandas.DataFrame
A dataframe of song ids and time for each song
"""
# iterate over each record in the saved track data and parse the individual track data
series_list = []
for item in data:
record = item['track']
songid = record['id']
markets = record['available_markets']
# time is stored in milliseconds, divide to convert to seconds.
time = record['duration_ms']/1000
# filter out any songs that are not in the specified market
if market in markets:
series = pd.Series([songid, time], index=['song_id', 'time'])
series_list.append(series)
if len(series_list) > 0:
song_df = pd.concat(series_list, axis=1).transpose()
else:
song_df = pd.DataFrame(columns=['song_id', 'time'])
return song_df
def _cache_data(self, data_key, file_path):
"""Export the results of a dataset of song ids to local filesystem as a csv.
Parameters
----------
data_key : str
Key of the dataset to save
file_path : str
Full path of filename to save the file.
Returns
-------
None
"""
# use pandas dataframe write function to save file
self.data[data_key].to_csv(file_path, index=False)
def _load_cached_data(self, data_key, file_path):
"""Load a Saved Dataset into the Spomato data dictionary. Requires a csv with columns of 'song_id' and 'time'.
Parameters
----------
data_key : str
Key to associate the loaded dataset in the data dictionary.
file_path : str
Full path of filename to load the file.
Returns
-------
None
"""
data = pd.read_csv(file_path)
# ensure the required columns are in the dataset else raise error
if 'song_id' not in data.columns:
raise ValueError('Column song_id not found in loaded data file.')
if 'time' not in data.columns:
raise ValueError('Column time not found in loaded data file.')
# data looks correct, add dataset to data
self.data[data_key] = data
def get_file_data(self,
data_key='default',
file_path=None,
overwrite=False):
"""Loads a file of song data into Spomato to be used for generating new playlists.
Parameters
----------
data_key : str
Key to associate the dataset in the data dictionary.
file_path : str
Full path of filename if loading or saving dataset.
overwrite : bool
Boolean to determine if the dataset should be overwritten if it already exists.
Returns
-------
None
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if file_path is not None and not isinstance(file_path, str):
raise TypeError('Argument file_path must be of type string')
# check if the data key already exists to ensure data is not unexpectedly overwritten
if data_key in self.data.keys() and overwrite is False:
msg = (f'Dataset {data_key} already exists and reset argument is set to False. '
'Set reset to True to overwrite dataset.')
raise ValueError(msg)
# read the data from file if the file exists
if os.path.isfile(file_path):
self._load_cached_data(data_key=data_key,
file_path=file_path)
else:
raise ValueError('File path {f} does not exist.'.format(f=file_path))
def get_api_data(self,
data_key='default',
file_path=None,
source=None,
reset=False,
market='US'):
"""Generates a song dataset to load into Spomato to be used for generating new playlists.
Parameters
----------
data_key : str
Key to associate the dataset in the data dictionary.
file_path : str
If not None, the dataset generated will also be saved to the specified file path..
source : dict
Contains all sources you want to use in generating the dataset. The dictionary is keyed by one of 3 source
types: savedtracks, playlist, or artist. For savedtracks the value can be None, as no further data is
required. For playlist or artist, the value should contain a list of all spotify ids of the appropriate
type. If not specified, it defaults to your saved tracks.
reset : bool
Boolean to determine if the dataset should be regenerated if it already exists.
market : str
A string representation of the Spotify market to filter on. Default is 'US'
Returns
-------
None
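Examples
--------
Illustrative source dictionaries (the ids shown are placeholders)::

    {'savedtracks': None}
    {'playlist': ['<playlist_id_1>', '<playlist_id_2>']}
    {'artist': ['<artist_id>'], 'savedtracks': None}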
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if file_path is not None and not isinstance(file_path, str):
raise TypeError('Argument file_path must be of type string')
if source is not None and not isinstance(source, dict):
raise TypeError('Argument source must be of type dict')
if not isinstance(reset, (bool, int)):
raise TypeError('Argument reset must be of type bool or int')
if not isinstance(market, str):
raise TypeError('Argument market must be of type string')
# check if the data key already exists to ensure data is not unexpectedly overwritten
if data_key in self.data.keys() and reset is False:
msg = (f'Dataset {data_key} already exists and reset argument is set to False. '
'Set reset to True to overwrite dataset.')
raise ValueError(msg)
# default the data source to the user's saved tracks if not specified
if source is None:
source = {'savedtracks': None}
# generate the dataset and save it into the Spomato object
self.data[data_key] = self._get_new_data(source=source,
market=market)
# Cache the data if the file_path is specified
if file_path:
self._cache_data(data_key=data_key,
file_path=file_path)
def _get_playlist_dataframe(self,
source_list,
market):
"""Short summary.
Parameters
----------
source_list : list
A list of playlist ids to source songs from
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of songs with song id and time.
"""
        # get the user's playlists so each source id can be fetched as a user playlist or a public playlist
playlist_df = self.get_playlists()
playlist_list = []
for pl_id in source_list:
if not playlist_df[playlist_df.playlist_id == pl_id].empty:
pl_json = self.spotipy_session.user_playlist(self.current_user_id, pl_id)
pl_df = self._parse_user_playlist(pl_json, market)
playlist_list.append(pl_df)
else:
pl_json = self.spotipy_session.playlist_tracks(pl_id)
pl_df = self._parse_public_playlist(pl_json, market)
playlist_list.append(pl_df)
if len(playlist_list) == 0:
raise ValueError('No valid playlists.')
        # concatenate the dataframes of all the playlists and remove any duplicates
data = pd.concat(playlist_list)
data.drop_duplicates(inplace=True)
return data
def _get_artist_dataframe(self,
source_list,
market):
"""Short summary.
Parameters
----------
source_list : list
            A list of artist ids to source songs from
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of songs with song id and time.
"""
# iterate over each artist, get the data from the Spotify API, and parse the song data
artist_list = []
for artist in source_list:
artist_songs = self._get_artist_data(artist, market)
artist_list.append(artist_songs)
        # concatenate the dataframes of all the artists and remove any duplicates
data = pd.concat(artist_list)
data.drop_duplicates(inplace=True)
return data
def _get_new_data(self,
source=None,
market='US'):
"""Creates a new dataset from the specified source list and returns a pandas DataFrame of song ids and times.
Parameters
----------
source : dict
Contains all sources you want to use in generating the dataset. The dictionary is keyed by one of 3 source
types: savedtracks, playlist, or artist. For savedtracks the value can be None, as no further data is
required. For playlist or artist, the value should contain a list of all spotify ids of the appropriate
type.
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
# if the source is not specified, default to the saved tracks of the current user.
if source is None:
source = {'savedtracks': None}
elif not isinstance(source, dict):
raise ValueError('Argument source must be of type dict or None.')
elif len(source.keys()) == 0:
raise ValueError('Argument source must contain at least 1 valid key from: savedtracks, artist, playlist')
else:
for key in source.keys():
if key not in ['savedtracks', 'artist', 'playlist']:
raise ValueError(f'{key} is not a valid data source type.')
# iterate over the source types in the source dictionary and parse out the data
data_list = []
for sourcetype in source.keys():
if sourcetype == 'savedtracks':
# print 'SAVEDTRACKS'
data = self._get_saved_tracks(market)
data_list.append(data)
elif sourcetype == 'playlist':
playlist_data = self._get_playlist_dataframe(source_list=source['playlist'],
market=market)
data_list.append(playlist_data)
elif sourcetype == 'artist':
artist_data = self._get_artist_dataframe(source_list=source['artist'],
market=market)
data_list.append(artist_data)
        # concatenate the dataframes of all the source types and remove any duplicates
data = pd.concat(data_list)
data.drop_duplicates(inplace=True)
return data
def pick_tracks(self,
data_key,
time=25,
extra=5,
time_limit=None):
"""Using a specified dataset, this generates a subset of the dataframe of songs that fit the time constraints.
Parameters
----------
data_key : str
Name of the dataset to use stored in the data object in Spomato
time : int
The length in minutes to make the playlist
        extra : int or float
            The amount of buffer time (in minutes) to add on to the end of the playlist.
        time_limit : int or float
            The maximum song length in minutes to include in the playlist.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
if not isinstance(data_key, str):
raise TypeError('Argument data_key must be of type string')
if not isinstance(time, (int, float)):
raise TypeError('Argument time must be of type int or float')
if not isinstance(extra, (int, float)):
raise TypeError('Argument extra must be of type int or float')
if time_limit is not None and not isinstance(time_limit, (int, float)):
raise TypeError('Argument time_limit must be of type int or float')
track_df = self.data[data_key]
# the time in our dataframe is specified in seconds, we need to convert the times
time *= 60
extra *= 60
# if time limit is not specified, default it to one third of the parameter time
if time_limit is None:
time_limit = time/3.0
else:
time_limit *= 60
# filter out any records that are longer than the time limit
track_df = track_df[track_df['time'] <= time_limit]
# iterate adding songs to the selected track until the time is reached
time_used = 0
track_list = []
done = False
while not done:
# filter down to tracks that fit in the remaining time
track_df = track_df[track_df.time <= (time + extra - time_used)]
# if the total time is greater than the specified time, mark the iteration done.
if time_used > time:
done = True
# if the filtered song list is empty, there are no songs left, so mark iteration done
elif track_df.empty:
done = True
# otherwise, take a random track from the dataframe, add to the track list, and remove it from being
# selected again
else:
track = track_df.sample().iloc[0]
track_df = track_df[track_df.song_id != track.song_id]
track_list.append(track)
time_used += track.time
        # concatenate all of the selected tracks into a dataframe.
picked_track_df = pd.concat(track_list, axis=1).T
return picked_track_df
def _get_saved_tracks(self, market):
"""Access the spotify API to get the saved tracks for a user and returns a dataframe of song ids and times.
Parameters
----------
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pd.DataFrame
A dataframe of song ids generated from the sources.
"""
# iterate over a user's saved tracks until all have been accessed and parsed
end = False
i = 0
track_df_list = []
while not end:
data = self.spotipy_session.current_user_saved_tracks(limit=50, offset=i*50)['items']
if len(data) > 0:
track_df = self._parse_saved_tracks(data, market)
track_df_list.append(track_df)
i += 1
else:
end = True
        # concatenate the created dataframes and remove any duplicates
track_df = pd.concat(track_df_list).reset_index(drop=True)
track_df.drop_duplicates(inplace=True)
return track_df
def _get_artist_data(self, artist_id, market):
"""Access the spotify API to get an artist's tracks and returns a dataframe of song ids and times.
Parameters
----------
        artist_id : str
            The Spotify id of the artist whose tracks will be pulled.
market : str
A string representation of the Spotify market to filter on.
Returns
-------
pandas.DataFrame
A dataframe of song ids and times generated from the sources.
"""
# get all of the artist's albums ids and parse out the json for each
artist_albums = self.spotipy_session.artist_albums(artist_id)
album_ids = [x['id'] for x in artist_albums['items']]
album_jsons = self.spotipy_session.albums(album_ids)['albums']
# iterate over each album and parse out the songs
songdf = []
for album in album_jsons:
if market in album['available_markets']:
songs = self._parse_album(album, market)
songdf.append(songs)
        # concatenate the results from each album into a single dataframe
data = pd.concat(songdf)
return data
def get_playlists(self):
"""Access the spotify API to get the playlists for a user and returns a dataframe of names and ids.
Returns
-------
pandas.DataFrame
A dataframe consisting of the current user's playlist names and playlist ids.
"""
# get the user's playlist and parse the playlist name and id
pl_json = self.spotipy_session.current_user_playlists()
series_index = ['playlist_name', 'playlist_id']
playlist_list = [ | pd.Series([pl['name'], pl['id']], index=series_index) | pandas.Series |
import csv
import numpy as np
import ipaddress
import sys
from pathlib import Path
import pandas as pd
from scapy.all import *
def sort_by_time(elem):
return elem[1]
def csv_numpy(packet_path):
"""
Take the csv files, convert it to "list" structure or "ndarray" structure. Use either of the two
structure for the future data analysis.
:param packet_path: path of the csv file
:return: return the traffic data in a data structure of "list" or "ndarray"
"""
reader = csv.reader(open(packet_path, "r"), delimiter=",")
query_in_list = list(reader)
query_in_list.pop(0)
query_in_array = np.array(query_in_list).astype(float)
query_in_list = query_in_array.tolist()
# for p in query_in_list:
# p.remove(p[0])
return query_in_list
def buflo(query_data, d, f, T):
'''
apply BuFlo countermeasure to traffic data. In BuFlo, it will send a packet of length d every ρ milliseconds until
communications cease and at least τ milliseconds of time have elapsed
:param d, determines the size of fixed-length packets
:param f, determines the rates or frequency (in milliseconds) at which we send packets
:param T, determines the minimum amount of time (in milliseconds) for which we must send packets.
:return:
'''
outgoing = []
incoming = []
for p in query_data:
if p[2] <= d:
overhead = d - p[2]
p[2] = d
# p = np.append(p, overhead)
p.append(overhead)
if p[3] == 1:
outgoing.append(p)
else:
incoming.append(p)
t_start_out = outgoing[0][1]
t_start_in = incoming[0][1]
n_out = 0
for p in outgoing:
p[1] = t_start_out + n_out * (1 / f)
n_out += 1
n_in = 0
for p in incoming:
p[1] = t_start_in + n_in * (1 / f)
n_in += 1
outgoing.reverse()
    for p in list(outgoing):
        if p[1] <= T:
            outgoing.remove(p)
print("pop")
outgoing.reverse()
incoming.reverse()
    for p in list(incoming):
        if p[1] <= T:
            incoming.remove(p)
incoming.reverse()
buflo_data_list = outgoing + incoming
# buflo_data_list.sort()
# echo_df1 = pd.DataFrame(buflo_data_list, columns=['index', 'time', 'size', 'direction', 'overhead'])
# echo_df1.to_csv('sports_update_5_30s_buflo1' + ".csv")
buflo_data_list.sort(key=sort_by_time)
echo_df2 = pd.DataFrame(buflo_data_list, columns=['index', 'time', 'size', 'direction', 'overhead'])
echo_df2.to_csv('sports_update_5_30s_buflo2' + ".csv")
print(' f')
def buflo_ordered(csv_path, query_data, d, f, t):
'''
apply BuFlo countermeasure to traffic data. In BuFlo, it will send a packet of length d every ρ milliseconds until
communications cease and at least τ milliseconds of time have elapsed. The order of packets are not changed in this method
:param d, determines the size of fixed-length packets
:param f, determines the rates or frequency (in milliseconds) at which we send packets
:param t, determines the minimum amount of time (in milliseconds) for which we must send packets.
:return:
'''
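    # Illustrative numbers (units as the code interprets them: packets are spaced 1/f apart
    # and at least t*f packets are sent): with d=512, f=10 and t=30, every real packet is
    # padded or chopped to 512 bytes and dummy packets fill the schedule up to 300 packets.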
buflo_path = 'buflo/'
info_path = 'info/'
pf = Path(csv_path)
trace_name = pf.name[0:-4]
start_t = query_data[0][1] #to record the start time of this query
end_time = query_data[-1][1]#to record the end time of this query
index = 0
    total_packet = t * f  # the minimum number of packets to send
total_overhead = 0
original_size = 0
buflo_data = []
for p in query_data:
original_size = original_size + p[2]
for p in query_data:
        if p[2] <= d and len(p) == 4:  # size of this packet is smaller than d; pad the packet
overhead = d - p[2]
total_overhead = total_overhead + overhead
p[2] = d
p.append(overhead)
p[1] = round(start_t + index * (1 / f),2)
p[0] = index
p.append('padded')
index += 1
elif len(p) == 4: # size of this packet is larger than d. Chop the packet
p_left = p[2] - d
p[2] = d
p[1] = round(start_t + index * (1 / f) ,2)
p[0] = index
p.append(0)
p.append('chopped') # a dummy packet will be added
final_left = p_left % d
if final_left == 0:
n_new = int(p_left / d)
else:
n_new = int(p_left / d) + 1
if n_new == 0: # if just one dummy packet are needed
index += 1
new_p = [index, round(start_t + index * (1 / f),2), d, p[3], d - p_left, 'new']
total_overhead = total_overhead + (d - p_left)
query_data.insert(index, new_p) # add dummy packet
while n_new > 0: # if more dummy packets are needed
if n_new == 1:
index += 1
new_p = [index, round(start_t + index * (1 / f),2), d, p[3], d - final_left, 'new']
total_overhead = total_overhead + (d - final_left)
query_data.insert(index, new_p) # add a dummy packet
else:
index += 1
new_p = [index, round(start_t + index * (1 / f),2), d, p[3], 0, 'new']
query_data.insert(index, new_p) # add a dummy packet
n_new = n_new - 1
index += 1
#if index > total_packet != 0:
# query_data = query_data[0:index]
# break
if index < total_packet:
for i in range(index, total_packet):
seed = -1 + 2 * random.random()
direction = float(np.sign(seed))
dummy_packet = [i+1, round(start_t + (i + 1) * (1 / f),2), d, direction, d, 'dummy']
total_overhead = total_overhead + d
query_data.insert(i+1, dummy_packet)
time_delay = query_data[-1][1] - end_time
# query_data.append(last_packet)
echo_df2 = | pd.DataFrame(query_data, columns=['index', 'time', 'size', 'direction', 'overhead', 'status']) | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: notebooks/04_Create_Acs_Indicators.ipynb (unless otherwise specified).
__all__ = ['getColName', 'getColByName', 'addKey', 'nullIfEqual', 'sumInts', 'age5', 'age18', 'age24', 'age64', 'age65',
'bahigher', 'carpool', 'drvalone', 'elheat', 'empl', 'fam', 'female', 'femhhs', 'heatgas', 'hisp', 'hh25inc',
'hh40inc', 'hh60inc', 'hh75inc', 'hhchpov', 'hhm75', 'hhs', 'hsdipl', 'lesshs', 'male', 'mhhi', 'drvalone',
'novhcl', 'nohhint', 'othercom', 'paa', 'p2more', 'pasi', 'pubtran', 'pwhite', 'sclemp', 'tpop', 'trav14',
'trav14', 'trav45', 'trav44', 'unempr', 'unempr', 'walked', 'createAcsIndicator']
# Cell
#@title Run This Cell: Misc Function Declarations
# These functions right here are used in the calculations below.
# Finds a column matchings a substring
def getColName (df, col): return df.columns[df.columns.str.contains(pat = col)][0]
def getColByName (df, col): return df[getColName(df, col)]
# Pulls a column from one dataset into a new dataset.
# This is not a crosswalk. calls getColByName()
def addKey(df, fi, col):
key = getColName(df, col)
val = getColByName(df, col)
fi[key] = val
return fi
# Return 0 if two specified columns are equal.
def nullIfEqual(df, c1, c2):
return df.apply(lambda x:
x[getColName(df, c1)]+x[getColName(df, c2)] if x[getColName(df, c1)]+x[getColName(df, c2)] != 0 else 0, axis=1)
# I'm thinking this doesnt need to be a function..
def sumInts(df): return df.sum(numeric_only=True)
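# Illustrative use of the helpers above (column names are hypothetical):
#   df = pd.DataFrame({'B01001_001E_Total': [100], 'tract': ['000100']})
#   getColName(df, 'B01001_001E')                   # -> 'B01001_001E_Total'
#   fi = addKey(df, pd.DataFrame(), 'B01001_001E')  # copies the matched column into fi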
# Cell
#@title Run This Cell: Create age5
#File: age5.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import numpy
import glob
def age5( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_027E_Total_Female_Under_5_years',
'B01001_003E_Total_Male_Under_5_years',
'B01001_001E_Total' , 'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_003E_Total_Male_Under_5_years' ]
+ df[ 'B01001_027E_Total_Female_Under_5_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age18
#File: age18.py
#Author: <NAME>
#Date: 4/16/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age18( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_001E_Total',
'B01001_004E_Total_Male_5_to_9_years',
'B01001_005E_Total_Male_10_to_14_years' ,
'B01001_006E_Total_Male_15_to_17_years',
'B01001_028E_Total_Female_5_to_9_years',
'B01001_029E_Total_Female_10_to_14_years' ,
'B01001_030E_Total_Female_15_to_17_years']
columns = df.filter(regex='001E|004E|005E|006E|028E|029E|030E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='004E|005E|006E|028E|029E|030E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: Create age24
#File: age24.py
#Author: <NAME>
#Date: 9/8/21
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B01001 - SEX BY AGE
# Universe: Total population
# Table Creates: tpop, female, male, age5 age18 age24 age64 age65
#purpose:
#input: #output:
import pandas as pd
import glob
def age24( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B01001_007E_Total_Male_18_and_19_years',
'B01001_008E_Total_Male_20_years',
'B01001_009E_Total_Male_21_years' ,
'B01001_010E_Total_Male_22_to_24_years' ,
'B01001_031E_Total_Female_18_and_19_years' ,
'B01001_032E_Total_Female_20_years' ,
'B01001_033E_Total_Female_21_years' ,
'B01001_034E_Total_Female_22_to_24_years',
'tract']
columns.extend(columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df[ 'B01001_007E_Total_Male_18_and_19_years' ]
+ df[ 'B01001_008E_Total_Male_20_years' ]
+ df[ 'B01001_009E_Total_Male_21_years' ]
+ df[ 'B01001_010E_Total_Male_22_to_24_years' ]
+ df[ 'B01001_031E_Total_Female_18_and_19_years' ]
+ df[ 'B01001_032E_Total_Female_20_years' ]
+ df[ 'B01001_033E_Total_Female_21_years' ]
+ df[ 'B01001_034E_Total_Female_22_to_24_years' ]
) / df['B01001_001E_Total'] * 100
return fi
# Cell
#@title Run This Cell: age64
import pandas as pd
import glob
def age64( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='012E|013E|014E|015E|016E|017E|018E|019E|036E|037E|038E|039E|040E|041E|042E|043E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: age65
import pandas as pd
import glob
def age65( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='020E|021E|022E|023E|024E|025E|044E|045E|046E|047E|048E|049E').sum(axis=1)
) / df['B01001_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: bahigher
import pandas as pd
import glob
def bahigher( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='005E|006E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='005E|006E').sum(axis=1)
) / df['B06009_001E'] * 100
return fi
# Cell
#@title Run This Cell: - carpool
import pandas as pd
import glob
def carpool( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|017E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_017E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: - drvalone
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
# Final Dataframe
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -elheat
import pandas as pd
import glob
def elheat( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='B25040_004E|B25040_001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B25040_004E').sum(axis=1)
) / ( df.filter(regex='B25040_001E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -empl
import pandas as pd
import glob
def empl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -fam
import pandas as pd
import glob
def fam( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -female
import pandas as pd
import glob
def female( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['female'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -femhhs
import pandas as pd
import glob
def femhhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['femhhs'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -heatgas
import pandas as pd
import glob
def heatgas( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: hisp
import pandas as pd
import glob
def hisp( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total',
'B03002_012E_Total_Hispanic_or_Latino']
columns = df.filter(regex='001E|012E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
fi['final'] = ( df.filter(regex='012E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: hh25inc
import pandas as pd
import glob
def hh25inc( df, columnsToInclude ):
df.columns = df.columns.str.replace(r"[$]", "")
fi = pd.DataFrame()
columns = ['B19001_001E_Total',
"B19001_002E_Total_Less_than_10,000",
"B19001_003E_Total_10,000_to_14,999",
"B19001_004E_Total_15,000_to_19,999",
"B19001_005E_Total_20,000_to_24,999"]
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey col: ', col, df.columns)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E|003E|004E|005E').sum(axis=1)
) / df['B19001_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -hh40inc
import pandas as pd
import glob
def hh40inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh60inc
import pandas as pd
import glob
def hh60inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hh75inc
import pandas as pd
import glob
def hh75inc( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhchpov
import pandas as pd
import glob
def hhchpov( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhm75
import pandas as pd
import glob
def hhm75( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hhs
import pandas as pd
import glob
def hhs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -hsdipl
import pandas as pd
import glob
def hsdipl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -lesshs
import pandas as pd
import glob
def lesshs( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@ title Run This Cell: -male
import pandas as pd
import glob
def male( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
# @title Run This Cell : Create MHHI
#File: mhhi.py
#Author: <NAME>
#Date: 1/24/19
#Section: Bnia
#Email: <EMAIL>
#Description:
# Uses ACS Table B19001 - HOUSEHOLD INCOME IN THE PAST 12 MONTHS (IN 2016 INFLATION-ADJUSTED DOLLARS)
# Universe: Households
# Table Creates: hh25 hh40 hh60 hh75 hhm75, mhhi
#purpose: Produce Sustainability - Percent of Population that Walks to Work Indicator
#input:
#output:
import pandas as pd
import glob
def mhhi( df, columnsToInclude = [] ):
info = pd.DataFrame(
[
['B19001_002E', 0, 10000],
['B19001_003E', 10000, 4999 ],
['B19001_004E', 15000, 4999 ],
['B19001_005E', 20000, 4999 ],
['B19001_006E', 25000, 4999 ],
['B19001_007E', 30000, 4999],
['B19001_008E', 35000, 4999 ],
['B19001_009E', 40000, 4999 ],
['B19001_010E', 45000, 4999 ],
['B19001_011E', 50000, 9999 ],
['B19001_012E', 60000, 14999],
['B19001_013E', 75000, 24999 ],
['B19001_014E', 100000, 24999 ],
['B19001_015E', 125000, 24999 ],
['B19001_016E', 150000, 49000 ],
['B19001_017E', 200000, 1000000000000000000000000 ],
],
columns=['variable', 'lower', 'range']
)
# Final Dataframe
data_table = pd.DataFrame()
for index, row in info.iterrows():
data_table = addKey(df, data_table, row['variable'])
    # Accumulate totals across the columns.
    # Midpoint: divide the cumulative total in the last column by 2.
temp_table = data_table.cumsum(axis=1)
temp_table['midpoint'] = (temp_table.iloc[ : , -1 :] /2) # V3
temp_table['midpoint_index'] = False
temp_table['midpoint_index_value'] = False # Z3
temp_table['midpoint_index_lower'] = False # W3
temp_table['midpoint_index_range'] = False # X3
temp_table['midpoint_index_minus_one_cumulative_sum'] = False #Y3
# step 3 - csa_agg3: get the midpoint index by "when midpoint > agg[1] and midpoint <= agg[2] then 2"
# Get CSA Midpoint Index using the breakpoints in our info table.
for index, row in temp_table.iterrows():
# Get the index of the first column where our midpoint is greater than the columns value.
midpoint = row['midpoint']
midpoint_index = 0
# For each column (except the 6 columns we just created)
        # The tract's midpoint was less than the first column's value ('B19001_002E_Total_Less_than_$10,000')
if( midpoint < int(row[0]) or row[-6] == False ):
temp_table.loc[ index, 'midpoint_index' ] = 0
else:
for column in row.iloc[:-6]:
# set midpoint index to the column with the highest value possible that is under midpoint
if( midpoint >= int(column) ):
if midpoint==False: print (str(column) + ' - ' + str(midpoint))
temp_table.loc[ index, 'midpoint_index' ] = midpoint_index +1
midpoint_index += 1
# temp_table = temp_table.drop('Unassigned--Jail')
for index, row in temp_table.iterrows():
temp_table.loc[ index, 'midpoint_index_value' ] = data_table.loc[ index, data_table.columns[row['midpoint_index']] ]
temp_table.loc[ index, 'midpoint_index_lower' ] = info.loc[ row['midpoint_index'] ]['lower']
temp_table.loc[ index, 'midpoint_index_range' ] = info.loc[ row['midpoint_index'] ]['range']
temp_table.loc[ index, 'midpoint_index_minus_one_cumulative_sum'] = row[ row['midpoint_index']-1 ]
# This is our denominator, which cant be negative.
for index, row in temp_table.iterrows():
if row['midpoint_index_value']==False:
temp_table.at[index, 'midpoint_index_value']=1;
#~~~~~~~~~~~~~~~
# Step 3)
# Run the Calculation
# Calculation = (midpoint_lower::numeric + (midpoint_range::numeric * ( (midpoint - midpoint_upto_agg) / nullif(midpoint_total,0)
# Calculation = W3+X3*((V3-Y3)/Z3)
    # v3 -> midpoint of households == total / 2
    # w3 -> lower limit of the income range containing the midpoint of the household total == row['lower']
    # x3 -> width of the interval containing the median == row['range']
    # z3 -> number of households within the interval containing the median == row['total']
    # y3 -> cumulative frequency up to, but NOT including, the median interval
#~~~~~~~~~~~~~~~
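    # Worked example (hypothetical tract): if half the households (midpoint = 250) falls in the
    # $50,000-$59,999 bin (lower = 50000, range = 9999) with 40 households in that bin and
    # 230 cumulative households below it, then median = 50000 + 9999 * ((250 - 230) / 40) ~= 55000.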
def finalCalc(x):
return ( x['midpoint_index_lower']+ x['midpoint_index_range']*(
( x['midpoint']-x['midpoint_index_minus_one_cumulative_sum'])/ x['midpoint_index_value'] )
)
temp_table['final'] = temp_table.apply(lambda x: finalCalc(x), axis=1)
temp_table[columnsToInclude] = df[columnsToInclude]
return temp_table
# Cell
#@ title Run This Cell: -nilf
import pandas as pd
import glob
def drvalone( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: novhcl
import pandas as pd
import glob
def novhcl( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B08201_002E_Total_No_vehicle_available','B08201_001E_Total']
columns = df.filter(regex='002E|003E|004E|005E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='002E').sum(axis=1)
) / df['B08201_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: nohhint
import pandas as pd
import glob
def nohhint( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B28011_001E_Total',
'B28011_002E_Total_With_an_Internet_subscription',
'B28011_003E_Total_With_an_Internet_subscription_Dial-up_alone',
'B28011_004E_Total_With_an_Internet_subscription_Broadband_such_as_cable,_fiber_optic,_or_DSL',
'B28011_005E_Total_With_an_Internet_subscription_Satellite_Internet_service',
'B28011_006E_Total_With_an_Internet_subscription_Other_service',
'B28011_007E_Total_Internet_access_without_a_subscription',
'B28011_008E_Total_No_Internet_access']
columns = df.filter(regex='008E|001E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'col: ', col)
fi = addKey(df, fi, col)
print(' ')
# Calculate
fi['nohhint'] = ( df.filter(regex='008E').sum(axis=1)
) / df['B28011_001E_Total:'] * 100
return fi
# Cell
#@ title Run This Cell: -othercom
import pandas as pd
import glob
def othercom( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['othercom'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: paa
import pandas as pd
import glob
def paa( df, columnsToInclude ):
fi = pd.DataFrame()
columns = ['B03002_001E_Total:',
'B03002_004E_Total_Not_Hispanic_or_Latino_Black_or_African_American_alone']
columns = df.filter(regex='001E|004E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
print('addKey df',df.columns,'fi',fi.columns,'col: ', col)
fi = addKey(df, fi, col)
fi['paa'] = ( df.filter(regex='004E').sum(axis=1)
) / df['B03002_001E_Total:'] * 100
return fi
# Cell
#@title Run This Cell: -p2more
import pandas as pd
import glob
def p2more( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='B08101_009E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pasi ***
import pandas as pd
import glob
def pasi( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='001E|049E|009E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['final'] = ( df.filter(regex='006E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: -pubtran
import pandas as pd
import glob
def pubtran( df, columnsToInclude ):
fi = pd.DataFrame()
columns = df.filter(regex='025E|001E|049E').columns.values
columns = numpy.append(columns, columnsToInclude)
for col in columns:
fi = addKey(df, fi, col)
fi['pubtran'] = ( df.filter(regex='025E').sum(axis=1)
) / ( df.filter(regex='B08101_001E|B08101_049E').sum(axis=1)
) * 100
return fi
# Cell
#@title Run This Cell: pwhite
import pandas as pd
import glob
def pwhite( df, columnsToInclude ):
fi = | pd.DataFrame() | pandas.DataFrame |
import importlib
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath, utils
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import time
fileSiteNo = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteNoLst-1979')
siteNoLst = pd.read_csv(fileSiteNo, header=None, dtype=str)[0].tolist()
df = pd.DataFrame(index=siteNoLst, columns=usgs.newC)
df.index.name = 'siteNo'
dfCorr = df.copy()
dfRmse = df.copy()
dirWRTDS = os.path.join(kPath.dirWQ, 'modelStat', 'WRTDS-D', 'All')
dirOut = os.path.join(dirWRTDS, 'output')
dirPar = os.path.join(dirWRTDS, 'params')
t0 = time.time()
for kk, siteNo in enumerate(siteNoLst):
print('{}/{} {:.2f}'.format(
kk, len(siteNoLst), time.time()-t0))
saveFile = os.path.join(dirOut, siteNo)
dfP = | pd.read_csv(saveFile, index_col=None) | pandas.read_csv |
import numpy as np
import pandas as pd
from pytest import approx
from evidently.analyzers.stattests import z_stat_test
from evidently.analyzers.stattests.chisquare_stattest import chi_stat_test
def test_freq_obs_eq_freq_exp() -> None:
    # observed and expected frequencies are the same
reference = pd.Series([1, 2, 3, 4, 5, 6]).repeat([16, 18, 16, 14, 12, 12])
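    # .repeat([16, 18, ...]) expands element-wise: sixteen 1s, eighteen 2s, ... for 88 values in total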
current = | pd.Series([1, 2, 3, 4, 5, 6]) | pandas.Series |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Generates the data for Fig 2C-D.
The sex plot data are saved in the files mort_sexo_ICU.csv and
mort_sexo_HOSP.csv, while the vaccination data is saved in the files
mort_vacina_ICU.csv and mort_vacina_HOSP.csv.
Column descriptions (X=male, female, vac, unvac):
age_grp = age group
mean_age_X = mean value of the age inside this age group with the condition X
mort_X = mortality in this age group with the condition X
"""
import numpy as np
import datetime
import pandas as pd
import matplotlib.pyplot as plt
ref = datetime.date(2019, 12, 31)
data_init = pd.read_csv('../Data/SRAG_filtered_morb.csv')
ages = [0, 18, 30, 40, 50, 65, 75, 85, np.inf]
nsep = len(ages) - 1
data_init['AGE_GRP'] = ''
for i in range(nsep):
if i == nsep-1:
data_init.loc[(data_init.NU_IDADE_N>=ages[i]),'AGE_GRP'] = 'AG85+'
else:
data_init.loc[(data_init.NU_IDADE_N>=ages[i])&(data_init.NU_IDADE_N<ages[i+1]), 'AGE_GRP'] = 'AG{}t{}'.format(ages[i],ages[i+1])
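# e.g. with ages = [0, 18, 30, ...], a 25-year-old is labeled 'AG18t30' and anyone 85 or older 'AG85+'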
data_init = data_init[data_init.AGE_GRP != '']
data_init['VACC'] = (data_init.VACINA_COV==1)
names = ['all', 'ICU', 'HOSP']
datas = [data_init, data_init[~pd.isna(data_init.DT_ENTUTI)], \
data_init[ | pd.isna(data_init.DT_ENTUTI) | pandas.isna |
# write some code using unittest to test our assignment function
import unittest
from my_lambdata.assignment import add_names_column
from pandas import DataFrame
df=DataFrame({"abbrev":["EA","VA","MA","HA","EAY","HAY"]})
class TestMyAssignment(unittest.TestCase):
def test_assignment(self):
df= | DataFrame({"abbrev":["EA","VA","MA","HA","EAY","HAY"]}) | pandas.DataFrame |
### RF TRAINING AND EVALUATION FOR MULTICLASS CLINICAL OUTCOMES ###
# The script is divided in 4 parts:
# 1. Data formatting
# 2. Hyperparameter Tuning (HT_results)
# 3. Model training and cross validation (CV_results)
# 4. Model training and predictions (TEST_results)
## Intended to be run with arguments:
# bsub "python RF_training_mc_outcome.py Day1 D1_raw_data D1_no_pda D1_no_pda_MC_OUTCOME"
###############################################
##### 1. DATA PREPARATION AND ASSESSMENT #####
###############################################
import os
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import sys, getopt
# TO PASS THE ARGUMENTS:
day = sys.argv[1]
data_type = sys.argv[2]
demog = sys.argv[3]
outcome = sys.argv[4]
# Example:
# day = 'Day1'
# data_type = 'D1_raw_data'
# demog = 'D1_pda'
# outcome = 'D1_pda_MC_OUTCOME'
# RESULTS PATHS:
# results_root = results_root_path
# assessment_path = results_root+'assessment/'
# ht_path = results_root+'HT_results/'
# cv_path = results_root+'CV_results/'
# test_path = results_root+'TEST_results/'
# TEST_features_importance = results_root+'/TEST_features_importance/'
# TEST_trained_models = results_root+'/TEST_trained_models/'
# TO READ THE INPUT DATA (The datasets have been previously created to include only the relevant variables)
# root_path = root_path
file_name = 'file_name.txt'
TTdata = root_path + file_name
df = pd.read_table(TTdata)
df = df.set_index('AE')
input_variables = list(df.columns)
with open(assessment_path+'input_data_column_names.txt', "w") as output:
output.write(str(input_variables))
# DATA PROCESSING: split features and targets, and convert the data to arrays
# Outcome (or labels) are the values we want to predict
outcome = pd.DataFrame(df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
descriptors = df.drop(['id', 'da', 'pdr', 'pdrm', 'pdm'], axis = 1)
descriptors_list = list(descriptors.columns)
with open(assessment_path+'input_data_features.txt', "w") as output:
output.write(str(descriptors_list))
# TRAINING/VALIDATION (TV, for hyperparameter tuning) and TEST (Tt, for model evaluation) Sets:
# Split the data into training and testing sets:
TV_features_df, Tt_features_df, TV_outcome_df, Tt_outcome_df = train_test_split(descriptors, outcome,
test_size = 0.30, random_state = 11,
stratify=outcome) # Important to keep the % of classes similar in TV and Tt
# To transform to numpy arrays without index:
TV_features = np.array(TV_features_df)
Tt_features = np.array(Tt_features_df)
TV_outcome = np.array(TV_outcome_df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
Tt_outcome = np.array(Tt_outcome_df[['id', 'da', 'pdr', 'pdrm', 'pdm']])
# Percentage of indviduals in each class:
TV_class_frac = TV_outcome_df.sum(axis = 0, skipna = True)*100/len(TV_outcome)
Tt_class_frac = Tt_outcome_df.sum(axis = 0, skipna = True)*100/len(Tt_outcome)
# Save it:
fractions = pd.DataFrame(columns=['id', 'da', 'pdr', 'pdrm', 'pdm'])
fractions = fractions.append(TV_class_frac,ignore_index=True)
fractions = fractions.append(Tt_class_frac,ignore_index=True)
fractions = fractions.set_index([pd.Index(['TV', 'Test'])])
fractions.to_csv(assessment_path+'perc_class_split.csv', index=True)
target_names = ['id', 'da', 'pdr', 'pdrm', 'pdm']
target_names2 = ['ID', 'DA', 'PDR', 'PDRM', 'PDM']
n_classes = len(target_names)
# To transform the binary multiclass representation into a one-column representation used to save prediction results in step 4.
def f(row):
if row['id'] == [1]:
val = 'ID'
elif row['da'] == [1]:
val = 'DA'
elif row['pdr'] == [1]:
val = 'PDR'
elif row['pdrm'] == [1]:
val = 'PDRM'
elif row['pdm'] == [1]:
val = 'PDM'
else:
val = '0'
return val
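# Example of the one-hot -> label mapping performed by f (values are illustrative):
#   a row with id=0, da=1, pdr=0, pdrm=0, pdm=0 maps to the single label 'DA'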
Tt_outcome_df['real'] = Tt_outcome_df.apply(f, axis=1)
Tt_outcome_real = Tt_outcome_df.drop(target_names, axis = 1)
print('All done for 1. DATA PREPARATION AND ASSESSMENT')
###############################################
##### 2.HYPERPARAMETER TUNING ###########
###############################################
from sklearn.model_selection import RandomizedSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.metrics import roc_auc_score, make_scorer
# 4. Hyperparameter tuning: Stratified CV with Random Grid
# Number of trees in random forest
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
# Number of features to consider at every split
max_features = ['auto', 'log2'] # auto = sqrt
# Maximum number of levels in tree
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
# Minimum number of samples required to split a node
min_samples_split = [2, 5, 10]
# Minimum number of samples required at each leaf node
min_samples_leaf = [1, 2, 4]
# Method of selecting samples for training each tree
bootstrap = [True, False]
# Create the random grid
random_grid = {
'estimator__n_estimators': n_estimators,
'estimator__max_features': max_features,
'estimator__max_depth': max_depth,
'estimator__min_samples_split': min_samples_split,
'estimator__min_samples_leaf': min_samples_leaf,
'estimator__bootstrap': bootstrap
}
# print(random_grid)
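# One sampled configuration from the grid above might look like (illustrative):
#   {'estimator__n_estimators': 800, 'estimator__max_features': 'log2',
#    'estimator__max_depth': 30, 'estimator__min_samples_split': 5,
#    'estimator__min_samples_leaf': 2, 'estimator__bootstrap': True}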
# Use the random grid to search for best hyperparameters
# First create the base model to tune
model_to_set = OneVsRestClassifier(RandomForestClassifier(class_weight='balanced'))
# Random search of parameters, using 5 fold cross validation,
# search across 100 different combinations, and use all available cores
# The score chosen to select the best combination of parameters is weghted roc_auc with a one vs rest approach
scoring = make_scorer(roc_auc_score, multi_class="ovr",average="weighted")
rf_random = RandomizedSearchCV(estimator = model_to_set, param_distributions = random_grid,
# scoring = 'roc_auc', # With very unbalanced datasets, using accuracy as scoring metric to choose the best combination of parameters will not be the best strategy. Use ROC Area instead
scoring = scoring,
return_train_score=True,
#n_iter = 100, cv = 5,
n_iter = 100, cv = 5,
verbose=2,
random_state=42,
n_jobs = -1)
# Note: For integer/None inputs, if the estimator is a classifier and y is either binary or multiclass, StratifiedKFold is used. In all other cases, KFold is used.
# Fit the random search model
rf_random.fit(TV_features, TV_outcome)
## To see the best parameters and results:
# rf_random.best_params_
# rf_random.cv_results_
# rf_random.best_score_
# rf_random.best_index_
# rf_random.scorer_
# To save the csv with the following details:
# Hyperparameter tuning results:
results = pd.DataFrame.from_dict(rf_random.cv_results_)
results.to_csv(os.path.join(ht_path,'HT_results.csv'))
# Hyperparameter tuning best parameters and best score (roc auc)
best_parameters = | pd.DataFrame(rf_random.best_params_, index=[0]) | pandas.DataFrame |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as md
import datetime
import seaborn as sns
from scipy.stats import pearsonr
from matplotlib import cm as cm
from statsmodels.tsa.stattools import adfuller
import calendar
import warnings
import itertools
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
import data_plot as dp
import arima_model_fit as ar
#import prophet
#from fbprophet import Prophet
###########Loading the data into dataframes############################################################################################################################
# Load the raw daily files month by month
y = 2015
new_data = pd.DataFrame()
sample_times = []
for y in range(2014,2015,1):
print (y)
for m in range(1,13,1):
        no_of_days = calendar.monthrange(y, m)[1]
for d in range (1,no_of_days+1,1):
# data = pd.read_csv("C:\\Users\\ahilan\\Dropbox\\Research\\Solar Forecast\\Solar Asia 2018\\Data\\Year %d\\D120318_%d%02d%02d_0000.csv"%(y,y,m, d));
# data = pd.read_csv("C:\\Users\\kahil\\Documents\\Dropbox\\Dropbox\\Research\\Solar Forecast\\Solar Asia 2018\\Data\\Year %d\\D120318_%d%02d%02d_0000.csv"%(y,y,m, d));
if (pd.to_datetime(data['Date/time'][2]) - | pd.to_datetime(data['Date/time'][1]) | pandas.to_datetime |
#%%
import numpy as np
import pandas as pd
import futileprot.viz
import futileprot.fitderiv
import altair as alt
import tqdm
import altair_saver
import scipy.stats
import scipy.signal
colors, palette = futileprot.viz.altair_style()
# Add metadata
DATE = '2021-09-13'
RUN_NO = 1
STRAINS = 'DoubleKO'
MEDIUM = 'glucose-acetate'
med1, med2 = MEDIUM.split('-')
# Load the measurement data
data = | pd.read_csv(f'./output/{DATE}_r{RUN_NO}_{STRAINS}_{MEDIUM}_labeled_regions.csv') | pandas.read_csv |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 19 17:54:32 2019
@author: kazuki.onodera
"""
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.externals import joblib
import os, gc
from itertools import combinations
import utils
utils.start(__file__)
PREF = 'f008'
alpha = 0.5
min_samples_leaf = 10
smooth_coeff = 1.0
impute = True
comb_range = (1600, 2000)
# Categorical columns targeted for encoding
categorical_wo_version = [
'Census_OSUILocaleIdentifier',
'AVProductsInstalled',
'Census_FirmwareVersionIdentifier',
'Wdft_IsGamer',
'Census_ThresholdOptIn',
'RtpStateBitfield',
'Census_IsSecureBootEnabled',
'AVProductsEnabled',
'HasTpm',
'IsProtected',
'Census_PrimaryDiskTypeName',
'PuaMode',
'DefaultBrowsersIdentifier',
'IsSxsPassiveMode',
'OrganizationIdentifier',
'Census_IsAlwaysOnAlwaysConnectedCapable',
'ProductName',
'GeoNameIdentifier',
'Census_IsVirtualDevice',
'Census_PowerPlatformRoleName',
'Census_IsTouchEnabled',
'Census_OSSkuName',
'OsPlatformSubRelease',
'Census_FlightRing',
'Census_OSEdition',
'Census_IsPortableOperatingSystem',
'Firewall',
'OsBuildLab',
'Census_DeviceFamily',
'Census_IsPenCapable',
'SMode',
'Platform',
'Census_IsFlightingInternal',
'Census_OEMNameIdentifier',
'Census_InternalBatteryType',
'OsBuild',
'Census_HasOpticalDiskDrive',
'Census_IsWIMBootEnabled',
'Census_OSBuildRevision',
'CityIdentifier',
'IeVerIdentifier',
'Census_ProcessorClass',
'OsSuite',
'Census_IsFlightsDisabled',
'Census_ChassisTypeName',
'LocaleEnglishNameIdentifier',
'Census_OSArchitecture',
'CountryIdentifier',
'Census_OSInstallLanguageIdentifier',
'Census_OSInstallTypeName',
'Census_OSBuildNumber',
'AutoSampleOptIn',
'OsVer',
'SkuEdition',
'UacLuaenable',
'Census_OEMModelIdentifier',
'Census_OSBranch',
'Processor',
'Census_ProcessorModelIdentifier',
'Census_ActivationChannel',
'IsBeta',
'Census_MDC2FormFactor',
'Census_OSWUAutoUpdateOptionsName',
'AVProductStatesIdentifier',
'Census_GenuineStateName',
'Census_FirmwareManufacturerIdentifier',
'Wdft_RegionIdentifier',
'Census_ProcessorManufacturerIdentifier',
'OsBuildLab_major',
'OsBuildLab_minor',
'OsBuildLab_build',
'OsBuildLab_architecture',
]
X_train = pd.read_feather('../data/train.f')[categorical_wo_version]
X_test = | pd.read_feather('../data/test.f') | pandas.read_feather |
#!/usr/bin/env python
import logging
from pathlib import Path
from typing import Set, List, Dict
import numpy as np
import pandas as pd
import typer
from rich.logging import RichHandler
def main(nextclade_csv: Path, metadata_output: Path):
from rich.traceback import install
install(show_locals=True, width=120, word_wrap=True)
logging.basicConfig(
format="%(message)s",
datefmt="[%Y-%m-%d %X]",
level=logging.INFO,
handlers=[RichHandler(rich_tracebacks=True, tracebacks_show_locals=True)],
)
df_nextclade = pd.read_table(nextclade_csv, sep=';')
# remove surrounding brackets, split on ',' to get Series of list of str AA mutations
aasubs: pd.Series = df_nextclade['aaSubstitutions'].str.replace(r'^(\(|\))$', '', regex=True).str.split(',')
sample_aas = sample_to_aa_mutations(aasubs, df_nextclade['seqName'])
samples = list(sample_aas.keys())
unique_aas = get_sorted_aa_mutations(sample_aas)
arr_aas = fill_aa_mutation_matrix(sample_aas, samples, unique_aas)
dfaa = | pd.DataFrame(arr_aas, index=samples, columns=unique_aas) | pandas.DataFrame |
# test_ForecasterAutoreg.py
import pytest
from pytest import approx
import numpy as np
import pandas as pd
from skforecast import __version__
from skforecast.ForecasterAutoreg import ForecasterAutoreg
from sklearn.linear_model import LinearRegression
def test_init_lags():
'''
    Check creation of self.lags attribute at initialization.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=10)
assert (forecaster.lags == np.arange(10) + 1).all()
forecaster = ForecasterAutoreg(LinearRegression(), lags=[1, 2, 3])
assert (forecaster.lags == np.array([1, 2, 3])).all()
forecaster = ForecasterAutoreg(LinearRegression(), lags=range(1, 4))
assert (forecaster.lags == np.array(range(1, 4))).all()
forecaster = ForecasterAutoreg(LinearRegression(), lags=np.arange(1, 10))
assert (forecaster.lags == np.arange(1, 10)).all()
def test_init_lags_exceptions():
'''
    Check exceptions when initializing lags.
'''
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=-10)
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=range(0, 4))
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=np.arange(0, 4))
with pytest.raises(Exception):
ForecasterAutoreg(LinearRegression(), lags=[0, 1, 2])
def test_create_lags():
'''
Check matrix of lags is created properly
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
results = forecaster.create_lags(y=np.arange(10))
correct = (np.array([[2., 1., 0.],
[3., 2., 1.],
[4., 3., 2.],
[5., 4., 3.],
[6., 5., 4.],
[7., 6., 5.],
[8., 7., 6.]]),
np.array([3., 4., 5., 6., 7., 8., 9.]))
assert (results[0] == correct[0]).all()
assert (results[1] == correct[1]).all()
def test_create_lags_exceptions():
'''
Check exceptions when creating lags.
'''
with pytest.raises(Exception):
forecaster = ForecasterAutoreg(LinearRegression(), lags=10)
forecaster.create_lags(y=np.arange(5))
def test_fit_exceptions():
'''
Check exceptions during fit.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=5)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(10))
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=pd.Series(np.arange(10)))
def test_fit_last_window():
'''
Check last window stored during fit.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
forecaster.fit(y=np.arange(50))
assert (forecaster.last_window == np.array([47, 48, 49])).all()
def test_predict_exceptions():
'''
    Check exceptions when predicting.
'''
forecaster = ForecasterAutoreg(LinearRegression(), lags=3)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50))
forecaster.predict(steps=10, exog=np.arange(10))
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(50))
forecaster.predict(steps=10)
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(50))
forecaster.predict(steps=10, exog=np.arange(5))
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50), exog=np.arange(50))
forecaster.predict(steps=10, exog=np.arange(5))
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50))
forecaster.predict(steps=10, last_window=[1,2,3])
with pytest.raises(Exception):
forecaster.fit(y=np.arange(50))
forecaster.predict(steps=10, last_window= | pd.Series([1, 2]) | pandas.Series |
# Time-series forecasting with ARMA
import pandas as pd
import statsmodels.api as sm
import matplotlib.pyplot as plt
from statsmodels.graphics.api import qqplot
from statsmodels.tsa.arima_model import ARMA
# Create the data
data = [5922, 5308, 5546, 5975, 2704, 1767, 4111, 5542, 4726, 5866, 6183, 3199, 1471, 1325, 6618, 6644, 5337, 7064,
2912, 1456, 4705, 4579, 4990, 4331, 4481, 1813, 1258, 4383, 5451, 5169, 5362, 6259, 3743, 2268, 5397, 5821,
6115, 6631, 6474, 4134, 2728, 5753, 7130, 7860, 6991, 7499, 5301, 2808, 6755, 6658, 7644, 6472, 8680, 6366,
5252, 8223, 8181, 10548, 11823, 14640, 9873, 6613, 14415, 13204, 14982, 9690, 10693, 8276, 4519, 7865, 8137,
10022, 7646, 8749, 5246, 4736, 9705, 7501, 9587, 10078, 9732, 6986, 4385, 8451, 9815, 10894, 10287, 9666, 6072,
5418]
data = | pd.Series(data) | pandas.Series |
#!/usr/bin/env python
### Up to date as of 10/2019 ###
'''Section 0: Import python libraries
This code has a number of dependencies, listed below.
They can be installed using the virtual environment "slab23"
that is setup using script 'library/setup3env.sh'.
Additional functions are housed in file 'slab2functions.py'
and imported below.
There are some additional dependencies used by the function file
that do not need to be installed separately.
'''
# stdlib imports
from datetime import datetime
import os.path
import argparse
import numpy as np
from pandas import DataFrame
import pandas as pd
import warnings
import slab2functions as s2f
import math
import mapio.gmt as gmt
from functools import partial
from multiprocess import Pool
import loops as loops
from scipy import ndimage
import psutil
import cProfile
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def main(args):
'''Section 1: Setup
In this section:
(1) Identify necessary input files
(2) Load parameters from '[slab]input.par'
(3) Define optional boxes for PDF/print testing
(4) Define output file names
(5) Gathering optional arguments, setting defaults
(6) Define search ellipsoid parameters
(7) Define Average active source profiles
(8) Define reference model (Slab1.0 and/or slab guides)
(9) Define Trench Locations
(10) Open and modify input dataset
(11) Calculate seismogenic zone thickness
(12) Record variable parameters used for this model
(13) Define search grid
(14) Identify tomography datasets
(15) Initialize arrays for Section 2 '''
print('Start Section 1 of 7: Setup')
print(' Loading inputs...')
''' ------ (1) Identify necessary input files ------ '''
trenches = 'library/misc/trenches_usgs_2017_depths.csv'
agesFile = 'library/misc/interp_age.3.2g.nc'
ageerrorsFile = 'library/misc/interp_ageerror.3.2g.nc'
polygonFile = 'library/misc/slab_polygons.txt'
addFile = 'library/misc/addagain.csv'
parFile = args.parFile
pd.options.mode.chained_assignment = None
warnings.filterwarnings("ignore", message="invalid value encountered in less")
warnings.filterwarnings("ignore", message="invalid value encountered in true_divide")
warnings.filterwarnings("ignore", message="invalid value encountered in greater")
warnings.filterwarnings("ignore", message="invalid value encountered in double_scalars")
''' ------ (2) Load parameters from '[slab]input.par' ------'''
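    # Expected layout of '[slab]input.par' (illustrative): whitespace-separated "name = value"
    # triplets, one per line, e.g.
    #     slab   =   alu
    #     grid   =   0.2
    # Only lines with at least three tokens are parsed below.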
for line in open(parFile):
plist = line.split()
if len(plist)>2:
if plist[0] == 'inFile':
inFile = plist[2]
if plist[0] == 'use_box':
use_box = plist[2]
if plist[0] == 'latmin':
latmin = np.float64(plist[2])
if plist[0] == 'latmax':
latmax = np.float64(plist[2])
if plist[0] == 'lonmin':
lonmin = np.float64(plist[2])
if plist[0] == 'lonmax':
lonmax = np.float64(plist[2])
if plist[0] == 'slab':
slab = plist[2]
if plist[0] == 'grid':
grid = np.float64(plist[2])
if plist[0] == 'radius1':
radius1 = np.float64(plist[2])
if plist[0] == 'radius2':
radius2 = np.float64(plist[2])
if plist[0] == 'sdr':
sdr = np.float64(plist[2])
if plist[0] == 'ddr':
ddr = np.float64(plist[2])
if plist[0] == 'taper':
taper = np.float64(plist[2])
if plist[0] == 'T':
T = np.float64(plist[2])
if plist[0] == 'node':
node = np.float64(plist[2])
if plist[0] == 'filt':
filt = np.float64(plist[2])
if plist[0] == 'maxdist':
maxdist = np.float64(plist[2])
if plist[0] == 'minunc':
minunc = np.float64(plist[2])
if plist[0] == 'mindip':
mindip = np.float64(plist[2])
if plist[0] == 'minstk':
minstk = np.float64(plist[2])
if plist[0] == 'maxthickness':
maxthickness = np.float64(plist[2])
if plist[0] == 'seismo_thick':
seismo_thick = np.float64(plist[2])
if plist[0] == 'dipthresh':
dipthresh = np.float64(plist[2])
if plist[0] == 'fracS':
fracS = np.float64(plist[2])
if plist[0] == 'kdeg':
kdeg = np.float64(plist[2])
if plist[0] == 'knot_no':
knot_no = np.float64(plist[2])
if plist[0] == 'rbfs':
rbfs = np.float64(plist[2])
# loop through to find latest slab input file if specified
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
if inFile == 'latest':
yearmax = 0
monthmax = 0
for filename in os.listdir('Input'):
if filename.endswith('.csv'):
try:
slabname,datei,instring = filename.split('_')
except:
continue
if slabname == polyname and instring == 'input.csv':
try:
monthi, yeari = datei.split('-')
except:
continue
yeari = int(yeari)
monthi = int(monthi)
if yeari >= yearmax:
yearmax = yeari
inFile = 'Input/%s'%filename
if monthi > monthmax:
monthmax = monthi
inFile = 'Input/%s'%filename
print (' using input file: %s'%inFile)
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
if args.undergrid is None:
if slab == 'mue':
print ('This slab is truncated by the Caribbean (car) slab, argument -u cardepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'cot':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'sul':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'phi':
print ('This slab is truncated by the Halmahera (hal) slab, argument -u haldepgrid is required')
print ('Exiting .... ')
exit()
if slab == 'ryu':
print ('This slab is truncated by the Kurils-Japan (kur) slab, argument -u kurdepgrid is required')
print ('Exiting .... ')
exit()
else:
undergrid = args.undergrid
''' ------ (4) Define output file names ------ '''
date = datetime.today().strftime('%m.%d.%y')
now = datetime.now()
time = '%s.%s' % (now.hour, now.minute)
folder = '%s_slab2_%s' % (slab, date)
os.system('mkdir Output/%s'%folder)
outFile = 'Output/%s/%s_slab2_res_%s.csv' % (folder, slab, date)
dataFile = 'Output/%s/%s_slab2_dat_%s.csv' % (folder, slab, date)
nodeFile = 'Output/%s/%s_slab2_nod_%s.csv' % (folder, slab, date)
fillFile = 'Output/%s/%s_slab2_fil_%s.csv' % (folder, slab, date)
rempFile = 'Output/%s/%s_slab2_rem_%s.csv' % (folder, slab, date)
clipFile = 'Output/%s/%s_slab2_clp_%s.csv' % (folder, slab, date)
these_params = 'Output/%s/%s_slab2_par_%s.csv' % (folder, slab, date)
datainfo = 'Output/%s/%s_slab2_din_%s.csv' % (folder, slab, date)
nodeinfo = 'Output/%s/%s_slab2_nin_%s.csv' % (folder, slab, date)
suppFile = 'Output/%s/%s_slab2_sup_%s.csv' % (folder, slab, date)
nodexFile = 'Output/%s/%s_slab2_nox_%s.csv' % (folder, slab, date)
nodeuFile = 'Output/%s/%s_slab2_nou_%s.csv' % (folder, slab, date)
depTextFile = 'Output/%s/%s_slab2_dep_%s.txt' % (folder, slab, date)
depGridFile = 'Output/%s/%s_slab2_dep_%s.grd' % (folder, slab, date)
strTextFile = 'Output/%s/%s_slab2_str_%s.txt' % (folder, slab, date)
strGridFile = 'Output/%s/%s_slab2_str_%s.grd' % (folder, slab, date)
dipTextFile = 'Output/%s/%s_slab2_dip_%s.txt' % (folder, slab, date)
dipGridFile = 'Output/%s/%s_slab2_dip_%s.grd' % (folder, slab, date)
uncTextFile = 'Output/%s/%s_slab2_unc_%s.txt' % (folder, slab, date)
uncGridFile = 'Output/%s/%s_slab2_unc_%s.grd' % (folder, slab, date)
thickTextFile = 'Output/%s/%s_slab2_thk_%s.txt' % (folder, slab, date)
thickGridFile = 'Output/%s/%s_slab2_thk_%s.grd' % (folder, slab, date)
savedir = 'Output/%s'%folder
''' ------ (3) Define optional boxes for PDF/print testing ------'''
if args.test is not None:
testlonmin = args.test[0]
testlonmax = args.test[1]
testlatmin = args.test[2]
testlatmax = args.test[3]
if testlonmin < 0:
testlonmin += 360
if testlonmax < 0:
testlonmax += 360
testarea = [testlonmin, testlonmax, testlatmin, testlatmax]
printtest = True
os.system('mkdir Output/PDF%s' % (slab))
os.system('mkdir Output/multitest_%s' % (slab))
f = open(datainfo, 'w+')
f.write('dataID, nodeID, used_or_where_filtered')
f.write('\n')
f.close()
f = open(nodeinfo, 'w+')
f.write('nodeID, len(df), status, details')
f.write('\n')
f.close()
else:
# an area not in range of any slab polygon
testarea = [220, 230, 15, 20]
printtest = False
''' --- (5) Gathering optional arguments, setting defaults ---'''
if use_box == 'yes':
check = 1
slab = s2f.rectangleIntersectsPolygon(lonmin, lonmax, latmin,
latmax, polygonFile)
if isinstance(slab, str):
slab = slab
else:
try:
slab = slab[0]
except:
print('System exit because box does not intersect slab polygon')
raise SystemExit()
elif use_box == 'no':
check = 0
lon1, lon2, lat1, lat2 = s2f.determine_polygon_extrema(slab,
polygonFile)
lonmin = float(lon1)
lonmax = float(lon2)
latmin = float(lat1)
latmax = float(lat2)
else:
print('use_box in slab2input.par must be "yes" or "no"')
raise SystemExit()
''' ------ (6) Define search ellipsoid parameters ------'''
alen = radius1
blen = radius2
ec = math.sqrt(1-((math.pow(blen, 2))/(math.pow(alen, 2))))
mdist = alen * ec
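# For illustration only (radii assumed, not taken from the parameter file):
# with radius1 = 150 km and radius2 = 75 km,
#   ec    = sqrt(1 - 75**2 / 150**2) ~= 0.866    (eccentricity)
#   mdist = 150 * 0.866              ~= 129.9 km (centre-to-focus distance)
# mdist is later passed to loops.loop1 together with alen and blen for the
# per-node data search.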
''' ------ (7) Define Average active source profiles ------'''
# Different because alu is variable E/W
if slab == 'alu':
AA_data = pd.read_csv('library/avprofiles/alu_av5.csv')
global_average = False
elif slab == 'him':
AA_data = pd.read_csv('library/avprofiles/him_av.csv')
global_average = False
elif slab == 'kur' or slab == 'izu':
AA_source = 'library/avprofiles/%s_av.txt' % 'jap'
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
AA_data = AA_data[AA_data.dist < 125]
global_average = False
# Use RF data like AA data to constrain flat slab in Mexico
elif slab == 'cam':
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
RF_data = pd.read_csv('library/avprofiles/cam_RF_av.csv')
AA_data = pd.concat([AA_data,RF_data],sort=True)
global_average = False
else:
global_average = False
# See if there is an average active source profile for this slab
try:
AA_source = 'library/avprofiles/%s_av.txt' % slab
AA_data = pd.read_table(AA_source, delim_whitespace=True,\
header=None, names=['dist', 'depth'])
# If there is no profile for this slab, use the global profile
except:
AA_global = pd.read_csv('library/avprofiles/global_as_av2.csv')
AA_data = AA_global[['dist', 'depth']]
global_average = True
if slab == 'phi' or slab == 'mue':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'cot':
AA_data = AA_data[AA_data.dist < 10]
if slab == 'ita' or slab == 'puy':
AA_data = AA_data[AA_data.dist < 1]
''' ------ (8) Define reference model (Slab1.0 and/or slab guides) ------'''
polyname = slab
if slab == 'kur' or slab == 'izu':
polyname = 'jap'
# Search for slab guides in library/slabguides
slabguide = None
slabguide2 = None
for SGfile in os.listdir('library/slabguides'):
if SGfile[0:3] == polyname:
SGfile1 = SGfile
slabguide = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
# Find secondary slab guide for regions where there are two
if polyname == 'sum' or polyname == 'man' or polyname == 'phi' or polyname =='sam' or polyname == 'sco' or polyname == 'mak' or polyname == 'jap':
for f in os.listdir('library/slabguides'):
if f[0:3] == polyname and f != SGfile:
print ('f',f)
SGfile2 = f
slabguide2 = gmt.GMTGrid.load('library/slabguides/%s'%SGfile2)
break
break
# Get Slab1.0 grid where applicable
try:
depgrid = s2f.get_grid(slab, 'depth')
except:
print (' Slab1.0 does not exist in this region, using slab guide')
depgrid = gmt.GMTGrid.load('library/slabguides/%s'%SGfile1)
slabguide = None
# Calculate strike and dip grids
strgrid, dipgrid = s2f.mkSDgrd(depgrid)
slab1data = s2f.mkSlabData(depgrid, strgrid, dipgrid, printtest)
slab1data.to_csv('gradtest.csv',header=True,index=False)
# Add slab guide data to Slab1.0 grids where necessary
if slabguide is not None:
print ('slab guide for this model:',slabguide)
guidestr, guidedip = s2f.mkSDgrd(slabguide)
guidedata = s2f.mkSlabData(slabguide, guidestr, guidedip, printtest)
if SGfile1 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
elif slab == 'ryu':
guidedata = guidedata[guidedata.lon>137]
slab1data = slab1data[slab1data.lat<=137]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
if slabguide2 is not None:
print ('secondary slab guide for this model:',slabguide2)
guidestr, guidedip = s2f.mkSDgrd(slabguide2)
guidedata = s2f.mkSlabData(slabguide2, guidestr, guidedip, printtest)
if SGfile2 == 'phi_SG_north':
guidedata = guidedata[guidedata.lat>14]
slab1data = pd.concat([slab1data, guidedata],sort=True)
slab1data = slab1data.reset_index(drop=True)
#slab1data.to_csv('slab1data.csv',header=True,index=False)
''' ------ (9) Define Trench Locations ------'''
TR_data = pd.read_csv(trenches)
if slab == 'izu' or slab == 'kur':
TR_data = TR_data[TR_data.slab == 'jap']
else:
TR_data = TR_data[TR_data.slab == slab]
TR_data = TR_data.reset_index(drop=True)
TR_data.loc[TR_data.lon < 0, 'lon']+=360
''' ------ (10) Open and modify input dataset ------'''
eventlistALL = pd.read_table('%s' % inFile, sep=',', dtype={
'lon': np.float64, 'lat': np.float64,'depth': np.float64,
'unc': np.float64, 'etype': str, 'ID': np.int, 'mag': np.float64,
'S1': np.float64, 'D1': np.float64, 'R1': np.float64,
'S2': np.float64, 'D2': np.float64, 'R2': np.float64,
'src': str, 'time': str, 'mlon': np.float64, 'mlat': np.float64,
'mdep': np.float64})
ogcolumns = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src']
kagancols = ['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', \
'S1', 'D1', 'R1','S2', 'D2', 'R2', 'src', 'mlon', 'mlat', 'mdep']
eventlist = eventlistALL[kagancols]
if printtest:
lat65 = eventlist[eventlist.lat>65]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.lat <= 65]', datainfo,'df')
dataGP = eventlist[eventlist.etype == 'GP']
if len(dataGP) > 0:
s2f.addToDataInfo(dataGP, 0, 'eventlist = eventlist[eventlist.etype != GP]', datainfo,'df')
eventlist = eventlist[eventlist.lat <= 65]
eventlist = eventlist[eventlist.etype != 'GP']
maxID = eventlistALL['ID'].max()
# Add/Remove manually identified points that don't follow general rules
remdir = 'library/points_to_remove/current_files'
for badFile in os.listdir(remdir):
if badFile[0:3] == slab or badFile[0:3] == 'ALL' or ((slab == 'izu' or slab == 'kur') and badFile[0:3] == 'jap'):
print (' manually removing points listed in:',badFile)
donotuse = pd.read_csv('%s/%s'%(remdir,badFile))
eventlist = s2f.removePoints(donotuse, eventlist, lonmin,
lonmax, latmin, latmax, printtest, datainfo, True, slab)
doubleuse = pd.read_csv(addFile)
eventlist, maxID = s2f.doublePoints(doubleuse, eventlist, maxID)
if slab == 'kur':
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 100
if slab == 'sul' or slab == 'man':
eventlist = eventlist[eventlist.etype != 'CP']
if slab == 'him':
eventlist = eventlist[eventlist.src != 'schulte']
if slab == 'sumz' or slab == 'kur' or slab == 'jap' or slab == 'izu':
if printtest:
lat65 = eventlist[eventlist.etype=='TO']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != TO]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'TO']
if slab == 'kurz':
if printtest:
lat65 = eventlist[eventlist.etype=='ER']
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist = eventlist[eventlist.etype != ER]', datainfo,'df')
eventlist = eventlist[eventlist.etype != 'ER']
if slab == 'sol':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon <= 149)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lon > 149)]
TR_data = TR_data[TR_data.lon>149]
if slab == 'man':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lon >= 120)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype == BA) & (eventlist.lon >= 120)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | ((eventlist.lon < 120)|(eventlist.lat > 15))]
if slab == 'sum':
if printtest:
lat65 = eventlist[(eventlist.etype == 'BA') & (eventlist.lat > 21)]
if len(lat65)>0:
s2f.addToDataInfo(lat65, 0, 'eventlist[(eventlist.etype != BA) | (eventlist.lon > 149)]', datainfo,'df')
eventlist = eventlist[(eventlist.etype != 'BA') | (eventlist.lat <= 21)]
if slab == 'ryu':
ryutodata = eventlist[(eventlist.etype == 'TO')&(eventlist.lon>133)]
if slab == 'hel':
eventlist.loc[eventlist.etype == 'RF', 'etype'] = 'CP'
if slab == 'puyz' or slab == 'mak':
eventlist = eventlist[eventlist.src != 'ddgap']
# Set default uncertainties for events without uncertainties
eventlist.loc[eventlist.etype == 'EQ', 'unc'] = 15.0
eventlist.loc[eventlist.etype == 'CP', 'unc'] = 5.0
eventlist.loc[eventlist.etype == 'BA', 'unc'] = 1.0
eventlist.loc[eventlist.etype == 'TO', 'unc'] = 40.0
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <5), 'unc'] = 5.0
if slab == 'puy':
eventlist.loc[(eventlist.etype == 'ER') & (eventlist.unc <15), 'unc'] = 15.0
eventlist.loc[eventlist.mlon < 0, 'mlon'] += 360
# Ensure all data are within longitudes 0-360
eventlist.loc[eventlist.lon < 0, 'lon']+=360
# Define mean depth of bathymetry (for constraining interp outboard trench)
meanBAlist = eventlist[eventlist.etype == 'BA']
meanBA = meanBAlist['depth'].mean()
del eventlistALL
''' ----- (11) Calculate seismogenic zone thickness ------ '''
# define seismogenic thickness parameters. change if needed
maxdep = 65
maxdepdiff = 20
origorcentl = 'c'
origorcentd = 'c'
slaborev = 'e'
lengthlim = -50
ogcolumns = eventlist.columns
eventlist = s2f.getReferenceKagan(slab1data, eventlist, origorcentl, origorcentd)
if slab != 'hin':
seismo_thick, taper_start = s2f.getSZthickness(eventlist,folder,slab,maxdep,maxdepdiff,origorcentl,origorcentd,slaborev,savedir,lengthlim)
else:
seismo_thick = 20
taper_start = 20
if slab == 'hel' or slab == 'car' or slab == 'mak':
seismo_thick = 40
if slab == 'sol':
seismo_thick = 40
if slab == 'alu' or slab == 'cot' or slab == 'sul':
seismo_thick = 10
if slab == 'sol':
eventlistE = eventlist[eventlist.lon>148]
eventlistW = eventlist[eventlist.lon<=148]
eventlistE = s2f.cmtfilter(eventlistE,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistE,eventlistW],sort=True)
if slab == 'sum':
eventlistS = eventlist[eventlist.lat<=22]
eventlistN = eventlist[eventlist.lat>22]
eventlistS = s2f.cmtfilter(eventlistS,seismo_thick,printtest,datainfo,slab)
eventlist = pd.concat([eventlistS,eventlistN],sort=True)
if slab != 'hal' and slab != 'him' and slab != 'pam' and slab != 'hin' and slab != 'sol' and slab != 'sum' and slab != 'cas':
eventlist = s2f.cmtfilter(eventlist,seismo_thick,printtest,datainfo,slab)
eventlist = eventlist[ogcolumns]
''' ------ (12) Record variable parameters used for this model ------'''
f = open(these_params, 'w+')
f.write('Parameters used to create file for slab_Date_time: %s_%s_%s \n' \
%(slab, date, time))
f.write('\n')
f.close()
f = open(these_params, 'a')
f.write('inFile: %s \n' % inFile)
f.write('use_box: %s \n' % use_box)
f.write('latmin: %s \n' % str(latmin))
f.write('latmax: %s \n' % str(latmax))
f.write('lonmin: %s \n' % str(lonmin))
f.write('lonmax: %s \n' % str(lonmax))
f.write('slab: %s \n' % slab)
f.write('grid: %s \n' % str(grid))
f.write('radius1: %s \n' % str(radius1))
f.write('radius2: %s \n' % str(radius2))
f.write('alen: %s \n' % str(alen))
f.write('blen: %s \n' % str(blen))
f.write('sdr: %s \n' % str(sdr))
f.write('ddr: %s \n' % str(ddr))
f.write('taper: %s \n' % str(taper))
f.write('T: %s \n' % str(T))
f.write('node: %s \n' % str(node))
f.write('filt: %s \n' % str(filt))
f.write('maxdist: %s \n' % str(maxdist))
f.write('mindip: %s \n' % str(mindip))
f.write('minstk: %s \n' % str(minstk))
f.write('maxthickness: %s \n' % str(maxthickness))
f.write('seismo_thick: %s \n' % str(seismo_thick))
f.write('dipthresh: %s \n' % str(dipthresh))
f.write('fracS: %s \n' % str(fracS))
f.write('knot_no: %s \n' % str(knot_no))
f.write('kdeg: %s \n' % str(kdeg))
f.write('rbfs: %s \n' % str(rbfs))
if slab == 'mue' or slab == 'phi' or slab == 'cot' or slab == 'sul' or slab == 'ryu':
f.write('undergrid: %s \n' % str(undergrid))
f.close()
''' ------ (13) Define search grid ------ '''
print(' Creating search grid...')
#Creates a grid over the slab region
regular_grid = s2f.create_grid_nodes3(grid, lonmin, lonmax, latmin, latmax)
grid_in_polygon = s2f.createGridInPolygon2(regular_grid, slab, polygonFile)
lons = grid_in_polygon[:, 0]
lats = grid_in_polygon[:, 1]
lons = np.round(lons,decimals=1)
lats = np.round(lats,decimals=1)
lons[lons <0] += 360
slab1guide,slab1query = s2f.makeReference(slab1data,lons,lats,grid,printtest,slab)
''' ------ (14) Identify tomography datasets ------ '''
## Identify how many tomography datasets are included
tomo_data = eventlist[eventlist.etype == 'TO']
if len(tomo_data) > 0 and slab != 'sam':
sources = tomo_data.src
TOsrc = set()
for x in sources:
TOsrc.add(x)
tomo_sets = TOsrc
tomo = True
else:
tomo_sets = 0
tomo = False
premulti = pd.DataFrame()
postmulti = pd.DataFrame()
OGmulti = pd.DataFrame()
elistAA = pd.DataFrame()
loncuts,latcuts,elistcuts = s2f.getlatloncutoffs(lons,lats,eventlist,printtest)
''' ------ (15) Initialize arrays for Section 2 ------ '''
# Creates list of events that were used for the model based on ID
used_all = np.zeros((1, 2))
used_TO = np.zeros((1, 2))
warnings.filterwarnings('ignore', 'Mean of empty slice.')
pd.options.mode.chained_assignment = None
'''Section 2: First loop
This Accomplishes:
1) Calculate error for each used tomography model.
This is accomplished by determining the difference between measured
depths for tomography and earthquake data, which will be used
outside of the loop.
2) Identify data to constrain depth/coordinate of center of Benioff Zone.
2a) Identify local strike, dip, and depth of Slab1.0.
If Slab 1.0 does not exist, acquire strike from closest trench
location with a strike oriented perpendicularly to this lon/lat.
If extending beyond Slab1.0 depths perpendicularly, find nearest and
most perpendicular point on Slab1.0, and define depth to
search from based on dip and depth of that point on Slab1.0. The
dip is defined as the dip of the local Slab1.0 point.
If extending along strike from Slab1.0, define depth to search from
based on mean depth of data within defined radius of node. The
dip of the node is defined as 0.
2b) Filter by ellipsoid oriented perpendicularly to Slab1.0.
If the local dip is less than mindip, orient ellipsoid vertically
and along strike found in (2a).
If the local dip is greater than mindip, orient ellipsoid
perpendicular to strike/dip found in (2a).
The long axis of the ellipse is defined as radius1, the short axis
is defined as radius2.
The shallow extent of the ellipsoid is defined as sdr at depths
above seismo_thick, and is tapered to 3*sdr at depths greater
than seismo_thick.
The deep extent of the ellipsoid is defined as sdr at depths above
seismo_thick, and is tapered to ddr at depths greater than
seismo_thick.
2c) Nodes outboard of the trench are only constrained by bathymetry.
Nodes inboard of the trench are constrained by all but bathymetry.
2d) Conditionally add average active source/average receiver functions.
If within the distance of the longest AS profile from the trench
identify the average AS profile depth at that distance from
trench. If there is no active source point within the search
ellipsoid defined in (2b), add an average active source data
point to the set of data to constrain the depth at this node.
Receiver functions in cam and alu are being utilized similarly with
defined distances from trench and distances along strike from
key profiles that need to be utilized in the absence of
seismicity.
2e) If information other than tomography is available above 300 km
depth, all tomography is filtered at that node.
2f) If less than two data points are available to constrain a node, no
depth is resolved at that node.
2g) If |strike of Slab1.0 at node - strike of Slab1.0 at farthest data|
> minstk, filter data at ends until < minstk.
If this node is outside of Slab1.0, reduce long axis of search
ellipsoid prior to starting filters.
The output of this loop is two numpy arrays and list of nodes with data:
used_TO: local difference between tomography and earthquake depths and
a tomography dataset identifier
used_all: indices for the data used and their associated nodes
This one is created to prevent the need for re-filtering
in later loops
'''
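# Rough illustration only -- the actual bounds are computed inside loops.loop1 /
# slab2functions, and the shape of the taper there may differ. This sketch just
# restates the rule described above: the shallow reach of the search ellipsoid
# grows from sdr toward 3*sdr and the deep reach from sdr toward ddr once a node
# lies below seismo_thick. The linear taper over an assumed 'taperwidth' (km) is
# a guess made purely for illustration.
def _sketch_ellipsoid_extents(nodedepth, sdr, ddr, seismo_thick, taperwidth=100.0):
    if nodedepth <= seismo_thick:
        return sdr, sdr  # (shallow extent, deep extent) within the seismogenic zone
    frac = min((nodedepth - seismo_thick) / taperwidth, 1.0)
    shallow = sdr + frac * (3.0 * sdr - sdr)
    deep = sdr + frac * (ddr - sdr)
    return shallow, deep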
print("Start Section 2 of 7: First loop")
lons1 = (np.ones(len(lons))*-9999).astype(np.float64)
lats1 = (np.ones(len(lons))*-9999).astype(np.float64)
deps1 = (np.ones(len(lons))*-9999).astype(np.float64)
strs1 = (np.ones(len(lons))*-9999).astype(np.float64)
dips1 = (np.ones(len(lons))*-9999).astype(np.float64)
nIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
aleng = (np.ones(len(lons))*-9999).astype(np.float64)
bleng = (np.ones(len(lons))*-9999).astype(np.float64)
cleng = (np.ones(len(lons))*-9999).astype(np.float64)
sleng = (np.ones(len(lons))*-9999).astype(np.float64)
dleng = (np.ones(len(lons))*-9999).astype(np.float64)
elons1 = (np.ones(len(lons))*-9999).astype(np.float64)
elats1 = (np.ones(len(lons))*-9999).astype(np.float64)
edeps1 = (np.ones(len(lons))*-9999).astype(np.float64)
estrs1 = (np.ones(len(lons))*-9999).astype(np.float64)
edips1 = (np.ones(len(lons))*-9999).astype(np.float64)
enIDs1 = (np.ones(len(lons))*-9999).astype(np.float64)
ealeng = (np.ones(len(lons))*-9999).astype(np.float64)
ebleng = (np.ones(len(lons))*-9999).astype(np.float64)
ecleng = (np.ones(len(lons))*-9999).astype(np.float64)
esleng = (np.ones(len(lons))*-9999).astype(np.float64)
edleng = (np.ones(len(lons))*-9999).astype(np.float64)
if args.nCores is not None:
if args.nCores > 1 and args.nCores < 8:
pooling = True
elif args.nCores == 1:
pooling = False
else:
pooling = False
else:
pooling = False
cutcount = 1
allnewnodes = None
for cut in range(len(loncuts)):
theselats = latcuts[cut]
theselons = loncuts[cut]
theseevents = elistcuts[cut]
indices = range(len(theselats))
if cut == 0:
i2 = 0
cutcount+=1
if pooling:
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, theseevents,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices) #$$#
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
lons1[i2] = thisnode[0]
lats1[i2] = thisnode[1]
deps1[i2] = thisnode[2]
strs1[i2] = thisnode[3]
dips1[i2] = thisnode[4]
nIDs1[i2] = thisnode[5]
aleng[i2] = thisnode[6]
bleng[i2] = thisnode[7]
cleng[i2] = thisnode[8]
sleng[i2] = thisnode[14]
dleng[i2] = thisnode[15]
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
newnodes = thisnode[12]
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not thisnode[13] and np.isfinite(thisnode[2]):
elons1[i2] = thisnode[0]
elats1[i2] = thisnode[1]
edeps1[i2] = thisnode[2]
estrs1[i2] = thisnode[3]
edips1[i2] = thisnode[4]
enIDs1[i2] = thisnode[5]
ealeng[i2] = thisnode[6]
ebleng[i2] = thisnode[7]
ecleng[i2] = thisnode[8]
esleng[i2] = thisnode[14]
edleng[i2] = thisnode[15]
i2 += 1
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aaleng, ableng, acleng, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, asleng, adleng = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, theseevents, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
lons1[i2] = alon
lats1[i2] = alat
deps1[i2] = alocdep
strs1[i2] = alocstr
dips1[i2] = alocdip
nIDs1[i2] = anID
aleng[i2] = aaleng
bleng[i2] = ableng
cleng[i2] = acleng
sleng[i2] = asleng
dleng[i2] = adleng
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if len(newnodes)>0:
if allnewnodes is not None:
allnewnodes = np.vstack((allnewnodes,newnodes))
else:
allnewnodes = newnodes
if not anydata and np.isfinite(alocdep):
elons1[i2] = alon
elats1[i2] = alat
edeps1[i2] = alocdep
estrs1[i2] = alocstr
edips1[i2] = alocdip
enIDs1[i2] = anID
ealeng[i2] = aaleng
ebleng[i2] = ableng
ecleng[i2] = acleng
esleng[i2] = asleng
edleng[i2] = adleng
i2 += 1
lons1 = lons1[lons1>-999]
lats1 = lats1[lats1>-999]
deps1 = deps1[(deps1>-999)|np.isnan(deps1)]
strs1 = strs1[strs1>-999]
dips1 = dips1[dips1>-999]
nIDs1 = nIDs1[nIDs1>-999]
aleng = aleng[aleng>-999]
bleng = bleng[bleng>-999]
cleng = cleng[cleng>-999]
sleng = sleng[sleng>-999]
dleng = dleng[dleng>-999]
elons1 = elons1[edleng>-999]
elats1 = elats1[edleng>-999]
edeps1 = edeps1[(edeps1>-999)|np.isnan(edeps1)]
estrs1 = estrs1[edleng>-999]
edips1 = edips1[edleng>-999]
enIDs1 = enIDs1[edleng>-999]
ealeng = ealeng[edleng>-999]
ebleng = ebleng[edleng>-999]
ecleng = ecleng[edleng>-999]
esleng = esleng[edleng>-999]
edleng = edleng[edleng>-999]
testdf = pd.DataFrame({'lon':lons1,'lat':lats1,'depth':deps1,'strike':strs1,'dip':dips1,'id':nIDs1,'alen':aleng,'blen':bleng,'clen':cleng,'slen':sleng,'dlen':dleng})
testdf.to_csv('firstloop.csv',header=True,index=False,na_rep=np.nan)
if allnewnodes is not None:
theseIDs = []
for i in range(len(allnewnodes)):
if allnewnodes[i,1]>0:
thisnID = int('%i%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*10))
else:
thisnID = int('%i0%i'%(allnewnodes[i,0]*10,allnewnodes[i,1]*-10))
theseIDs.append(thisnID)
newlonsdf1 = pd.DataFrame({'lon':allnewnodes[:,0],'lat':allnewnodes[:,1],'nID':theseIDs})
newlonsdf = newlonsdf1.drop_duplicates(['nID'])
theselons = newlonsdf['lon'].values
theselats = newlonsdf['lat'].values
if grid == 0.2:
grid2 = 0.1
elif grid == 0.1:
grid2 = 0.05
else:
grid2 = grid
slab1guide,slab1query = s2f.makeReference(slab1data,theselons,theselats,grid2,printtest,slab)
newlats = []
newlons = []
newdeps = []
newstrs = []
newdips = []
newnIDs = []
newalen = []
newblen = []
newclen = []
newslen = []
newdlen = []
enewlats = []
enewlons = []
enewdeps = []
enewstrs = []
enewdips = []
enewnIDs = []
enewalen = []
enewblen = []
enewclen = []
enewslen = []
enewdlen = []
if pooling:
indices = range(len(theselons))
pool1 = Pool(args.nCores)
partial_loop1 = partial(loops.loop1, theselons, theselats, testarea, slab,
depgrid, strgrid, dipgrid, slab1query, eventlist,
seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID,
AA_data, TR_data, maxdist, maxthickness, minstk,
tomo_sets, meanBA,slab1guide,grid,slab1data,dipthresh,datainfo,nodeinfo)
pts = pool1.map(partial_loop1, indices)
pool1.close()
pool1.join()
for i in range(len(indices)):
thisnode = pts[i]
if thisnode[13]:
newlons.append(thisnode[0])
newlats.append(thisnode[1])
newdeps.append(thisnode[2])
newstrs.append(thisnode[3])
newdips.append(thisnode[4])
newnIDs.append(thisnode[5])
newalen.append(thisnode[6])
newblen.append(thisnode[7])
newclen.append(thisnode[8])
newslen.append(thisnode[14])
newdlen.append(thisnode[15])
nused_TO = thisnode[9]
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = thisnode[10]
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = thisnode[11]
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not thisnode[13] and np.isfinite(thisnode[2]):
enewlons.append(thisnode[0])
enewlats.append(thisnode[1])
enewdeps.append(thisnode[2])
enewstrs.append(thisnode[3])
enewdips.append(thisnode[4])
enewnIDs.append(thisnode[5])
enewalen.append(thisnode[6])
enewblen.append(thisnode[7])
enewclen.append(thisnode[8])
enewslen.append(thisnode[14])
enewdlen.append(thisnode[15])
else:
for nodeno in range(len(theselons)):
alon, alat, alocdep, alocstr, alocdip, anID, aalen, ablen, aclen, aused_TO, aused_tmp, atrimmedAA, newnodes, anydata, aslen, adlen = loops.loop1(theselons, theselats, testarea, slab, depgrid, strgrid, dipgrid, slab1query, eventlist, seismo_thick, alen, blen, mdist, sdr, ddr, mindip, maxID, AA_data, TR_data, maxdist, maxthickness, minstk, tomo_sets, meanBA, slab1guide, grid, slab1data, dipthresh, datainfo, nodeinfo, nodeno)
if anydata:
newlons.append(alon)
newlats.append(alat)
newdeps.append(alocdep)
newstrs.append(alocstr)
newdips.append(alocdip)
newnIDs.append(anID)
newalen.append(aalen)
newblen.append(ablen)
newclen.append(aclen)
newslen.append(aslen)
newdlen.append(adlen)
nused_TO = aused_TO
if len(nused_TO) > 0:
if used_TO is not None:
used_TO = np.vstack((used_TO, nused_TO))
nused_all = aused_tmp
if len(nused_all) > 0:
if used_all is not None:
used_all = np.vstack((used_all, nused_all))
AAadd = atrimmedAA
if len(AAadd)>0:
if AAadd['unc'].mean() > 5:
AAadd['etype'] = 'RF'
elistAA = pd.concat([elistAA, AAadd],sort=True)
if not anydata and np.isfinite(alocdep):
enewlons.append(alon)
enewlats.append(alat)
enewdeps.append(alocdep)
enewstrs.append(alocstr)
enewdips.append(alocdip)
enewnIDs.append(anID)
enewalen.append(aalen)
enewblen.append(ablen)
enewclen.append(aclen)
enewslen.append(aslen)
enewdlen.append(adlen)
#np.savetxt('%s_diptest.csv'%slab, allnewnodes, header='lon,lat,depth,strike,dip',fmt='%.2f', delimiter=',',comments='')
if printtest:
fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(131)
con = ax1.scatter(lons1,lats1,c=dips1,s=10,edgecolors='none',cmap='plasma')
ax1.set_ylabel('Latitude')
ax1.axis('equal')
plt.grid()
title = 'Diptest'
ax1.set_title(title)
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax2 = fig.add_subplot(132)
con = ax2.scatter(allnewnodes[:,0], allnewnodes[:,1],c=allnewnodes[:,1],s=10,edgecolors='none',cmap='plasma')
ax2.set_xlabel('Longitude')
ax2.set_ylabel('Latitude')
ax2.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
ax3 = fig.add_subplot(133)
con = ax3.scatter(newlons, newlats,c=newdips,s=10,edgecolors='none',cmap='plasma')
ax3.set_xlabel('Longitude')
ax3.set_ylabel('Latitude')
ax3.axis('equal')
plt.grid()
cbar = fig.colorbar(con)
cbar.set_label('Dip')
figtitle = 'diptest.png'
fig.savefig(figtitle)
plt.close()
lons1 = np.append(lons1, [newlons])
lats1 = np.append(lats1, [newlats])
deps1 = np.append(deps1, [newdeps])
strs1 = np.append(strs1, [newstrs])
dips1 = np.append(dips1, [newdips])
nIDs1 = np.append(nIDs1, [newnIDs])
aleng = np.append(aleng, [newalen])
bleng = np.append(bleng, [newblen])
cleng = np.append(cleng, [newclen])
sleng = np.append(sleng, [newslen])
dleng = np.append(dleng, [newdlen])
elons1 = np.append(elons1, [enewlons])
elats1 = np.append(elats1, [enewlats])
edeps1 = np.append(edeps1, [enewdeps])
estrs1 = np.append(estrs1, [enewstrs])
edips1 = np.append(edips1, [enewdips])
enIDs1 = np.append(enIDs1, [enewnIDs])
ealeng = np.append(ealeng, [enewalen])
ebleng = np.append(ebleng, [enewblen])
ecleng = np.append(ecleng, [enewclen])
esleng = np.append(esleng, [enewslen])
edleng = np.append(edleng, [enewdlen])
#print ('lon',len(elons1),'lat',len(elats1),'ogdep',len(edeps1),'ogstr',len(estrs1),'ogdip',len(edips1),'nID',len(enIDs1),'alen',len(ealeng),'blen',len(ebleng),'clen',len(ecleng),'slen',len(esleng),'dlen',len(edleng))
emptynodes = pd.DataFrame({'lon':elons1,'lat':elats1,'ogdep':edeps1,'ogstr':estrs1,'ogdip':edips1,'nID':enIDs1,'alen':ealeng,'blen':ebleng,'clen':ecleng,'slen':esleng,'dlen':edleng})
#emptynodes.to_csv('emptynodes.csv',header=True,index=False)
refdeps = pd.DataFrame({'lon':lons1, 'lat':lats1, 'ogdep':deps1})
if global_average:
''' # need to fix this after adjusting based on BA depth at trench
AA_global['depthtest'] = (AA_global['depth'].values*100).astype(int)
for index, row in elistAA.iterrows():
depthAA = row['depth']
depthtestAA = int(100*row['depth'])
thisdepth = AA_global[AA_global.depthtest == depthtestAA]
uncAA = thisdepth['unc'].values[0]
elistAA.loc[elistAA.depth == depthAA, 'unc'] = uncAA*2
'''
elistAA['unc'] = 10.0
elistcuts.append(elistAA)
eventlist2 = pd.concat(elistcuts,sort=True)
eventlist = eventlist2.reset_index(drop=True)
del eventlist2
eventlist = eventlist.drop_duplicates(['ID'])
eventlist = eventlist.reset_index(drop=True)
# Remove first line of zeros
used_TO = used_TO[~np.all(used_TO ==0, axis=1)]
used_all = used_all[~np.all(used_all ==0, axis=1)]
'''Section 3: Calculate tomography uncertainties
Here we use the output from the first loop to calculate tomography uncertainties.
For each tomography dataset, we calculate the standard deviation of the distribution of "differences".
We apply this standard deviation as the uncertainty value for each tomography datum from that dataset.
'''
print("Start Section 3 of 7: Assigning tomography uncertainties")
if tomo:
for idx, src in enumerate(tomo_sets):
tomog = used_TO[:][used_TO[:, 1] == idx]
tmp_std = np.std(tomog[:, 0])
if tmp_std > 40.:
tmp_std = 40.
elif tmp_std < 15.:
tmp_std = 15.
elif np.isnan(tmp_std):
tmp_std = 40
eventlist['unc'][eventlist['src'] == src] = tmp_std
'''Section 4: Second loop
The purpose of this loop is to determine a set of "pre-shifted" slab points that do not utilize receiver function data.
This output dataset will represent a transition from slab surface at shallow depths to slab center at deeper depths.
The only output from this loop is an array of the form [ lat lon dep unc nodeID ]
'''
print("Start Section 4 of 7: Second loop")
bzlons, bzlats, bzdeps, stds2, nIDs2 = [], [], [], [], []
lats2, lons2, str2, dip2, centsurf = [], [], [], [], []
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
baleng, bbleng, bcleng, onlyto = [], [], [], []
rlist = pd.DataFrame()
if pooling:
pool2 = Pool(args.nCores)
npass = args.nCores
partial_loop2 = partial(loops.loop2, testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng)
indices = range(len(lats1))
pts2 = pool2.map(partial_loop2, indices)
pool2.close()
pool2.join()
for i in range(len(indices)):
thisnode = pts2[i]
if np.isfinite(thisnode[0]):
bzlons.append(thisnode[0])
bzlats.append(thisnode[1])
bzdeps.append(thisnode[2])
stds2.append(thisnode[3])
nIDs2.append(thisnode[4])
lats2.append(thisnode[5])
lons2.append(thisnode[6])
str2.append(thisnode[7])
dip2.append(thisnode[8])
centsurf.append(thisnode[9])
baleng.append(thisnode[20])
bbleng.append(thisnode[21])
bcleng.append(thisnode[22])
onlyto.append(thisnode[23])
if np.isfinite(thisnode[10]):
bilats.append(thisnode[10])
bilons.append(thisnode[11])
binods.append(thisnode[12])
bistds.append(thisnode[13])
biindx.append(thisnode[14])
bistrs.append(thisnode[15])
bidips.append(thisnode[16])
bideps.append(thisnode[17])
rlist = thisnode[18]
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*thisnode[4]
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = thisnode[19]
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
del pts2
else:
npass = 1
for nodeno in range(len(lats1)):
bpeak_lon, bpeak_lat, bpeak_depth, bstd, bnID, blat, blon, bcstr, bcdip, bcentsurf, bbilats, bbilons, bbinods, bbistds, bbiindx, bbistrs, bbidips, bbideps, brlist, bpremulti, alen, blen, clen, onlyt = loops.loop2(testarea, lons1, lats1, nIDs1, deps1, strs1, dips1, used_all, eventlist, sdr, ddr, seismo_thick, slab, maxthickness, rlist, mindip, aleng, bleng, cleng, nodeno)
if np.isfinite(bpeak_lon):
bzlons.append(bpeak_lon)
bzlats.append(bpeak_lat)
bzdeps.append(bpeak_depth)
stds2.append(bstd)
nIDs2.append(bnID)
lats2.append(blat)
lons2.append(blon)
str2.append(bcstr)
dip2.append(bcdip)
centsurf.append(bcentsurf)
baleng.append(alen)
bbleng.append(blen)
bcleng.append(clen)
onlyto.append(onlyt)
if np.isfinite(bbilats):
bilats.append(bbilats)
bilons.append(bbilons)
binods.append(bbinods)
bistds.append(bbistds)
biindx.append(bbiindx)
bistrs.append(bbistrs)
bidips.append(bbidips)
bideps.append(bbideps)
rlist = brlist
if len(rlist) > 0:
removeIDs = np.array(rlist.ID)
thisID = np.ones(len(removeIDs))*bnID
removearray = list(zip(thisID, removeIDs))
removeIDID = np.array(removearray)
used_all = used_all[~(np.in1d(used_all[:, 1], removeIDID) & np.in1d(used_all[:, 0], thisID))]
multi = bpremulti
if len(multi) > 0:
premulti = pd.concat([premulti, multi],sort=True)
tmp_res = pd.DataFrame({'bzlon':bzlons,'bzlat':bzlats,'depth':bzdeps,'stdv':stds2,'nID':nIDs2,'lat':lats2,'lon':lons2,'ogstr':str2,'ogdip':dip2,'centsurf':centsurf,'alen':baleng,'blen':bbleng,'clen':bcleng,'onlyto':onlyto})
for j in range(len(bilats)):
lon = bilons[j]
lat = bilats[j]
nID = binods[j]
stdv = bistds[j]
stk = bistrs[j]
dep = bideps[j]
dip = bidips[j]
if dip <= mindip:
peak_depth = s2f.findMultiDepth(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, alen, printtest)
peak_lon = lon
peak_lat = lat
else:
peak_lon, peak_lat, peak_depth = s2f.findMultiDepthP(lon, lat, nID, tmp_res, grid, premulti, stk, slab, dep, dip, alen, printtest)
tmp_res.loc[tmp_res.nID == nID, 'bzlon'] = peak_lon
tmp_res.loc[tmp_res.nID == nID, 'bzlat'] = peak_lat
tmp_res.loc[tmp_res.nID == nID, 'depth'] = peak_depth
tmp_res = s2f.addGuidePoints(tmp_res, slab)
if slab == 'sol':
tmp_res = tmp_res[(tmp_res.bzlon>142) & (tmp_res.bzlon<164)]
if slab == 'sul':
tmp_res = tmp_res[(tmp_res.bzlon<123.186518923) | (tmp_res.depth<100)]
tmp_res = tmp_res[(tmp_res.bzlon<122.186518923) | (tmp_res.depth<200)]
# Save data used to file
used_IDs = used_all[:, 1]
used_data = eventlist[eventlist['ID'].isin(used_IDs)]
used_data = used_data[['lat', 'lon', 'depth', 'unc', 'etype', 'ID', 'mag', 'time', 'S1', 'D1', 'R1', 'S2', 'D2', 'R2', 'src']]
used_data = used_data.drop_duplicates(['ID'])
used_data.loc[used_data.lon < 0, 'lon']+=360
if slab == 'hel':
used_data.loc[used_data.etype == 'CP', 'etype']='RF'
used_data.to_csv(dataFile, header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
#tmp_res.to_csv('nodetest.csv', header=True, index=False, float_format='%0.2f', na_rep = float('nan'), chunksize=100000)
'''Section 5: Calculate shifts
Here we use the output of the second loop to calculate shifting locations for non-RF results.
A user-specified lithospheric thickness can be read in, or the lithosphere thickness will be calculated using the nearest oceanic plate age.
The taper and fracshift are set in the parameter file for each subduction zone; fracshift was determined by testing each individual
subduction zone to match seismicity. Shift direction is determined by the strike and dip of a surface created using the output from the second loop.
A clipping mask is also created in this section using the shifted output data.
'''
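# Conceptual sketch only -- the real shift is computed by s2f.slabShift_noGMT
# below. It decomposes a perpendicular offset of magnitude fracS * thickness
# into vertical and horizontal components given the local strike and dip, using
# a flat-earth approximation; the sign convention (shifting toward shallower
# depths, up-dip) is an assumption made for illustration.
def _sketch_shift(lon, lat, depth, strike, dip, thickness, fracS):
    shift = fracS * thickness                 # km moved normal to the local surface
    dz = shift * math.cos(math.radians(dip))  # vertical component
    dh = shift * math.sin(math.radians(dip))  # horizontal component (up-dip)
    az = math.radians(strike - 90.0)          # up-dip azimuth (dip 90 deg right of strike)
    dlat = dh * math.cos(az) / 111.19         # ~111.19 km per degree of latitude
    dlon = dh * math.sin(az) / (111.19 * math.cos(math.radians(lat)))
    return lon + dlon, lat + dlat, depth - dz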
print("Start Section 5 of 7: Calculate shifts")
# Calculate shift for each node
print(" Calculating shift...")
surfnode = 0.5
data0 = tmp_res[(tmp_res.stdv > -0.000001)&(tmp_res.stdv < 0.000001)]
tmp_res = tmp_res[(tmp_res.stdv < -0.000001)|(tmp_res.stdv > 0.000001)]
if use_box == 'yes':
if lonmin<0:
lonmin+=360
if lonmax<0:
lonmax+=360
TR_data = TR_data[(TR_data.lon<lonmax)&(TR_data.lon>lonmin)]
TR_data = TR_data[(TR_data.lat<latmax)&(TR_data.lat>latmin)]
TR_data = TR_data.reset_index(drop=True)
# Read in age grid files
ages = gmt.GMTGrid.load(agesFile)
ages_error = gmt.GMTGrid.load(ageerrorsFile)
shift_out, maxthickness = s2f.slabShift_noGMT(tmp_res, node, T, TR_data, seismo_thick, taper, ages, ages_error, filt, slab, maxthickness, grid, 'bzlon', 'bzlat', 'depth', fracS, npass, meanBA, printtest, kdeg, knot_no, rbfs, use_box)
del ages
del ages_error
tmp_res['pslon'] = tmp_res['lon'].values*1.0
tmp_res['pslat'] = tmp_res['lat'].values*1.0
tmp_res['psdepth'] = tmp_res['depth'].values*1.0
tmp_res = tmp_res[['pslon', 'pslat', 'bzlon', 'bzlat', 'psdepth', 'stdv', 'nID', 'ogstr', 'ogdip', 'centsurf', 'alen', 'blen', 'clen']]
shift_out = shift_out.merge(tmp_res)
shift_out.loc[shift_out.pslon < 0, 'pslon']+=360
shift_out['avstr'] = np.nan
shift_out['avdip'] = np.nan
shift_out['avrke'] = np.nan
'''Section 6: Third loop
The purpose of this loop is to produce the final location measurements for the slab.
Here we edit the input data by adding the shift to the depths, then calculate a PDF with receiver functions included.
The only output from this loop is an array with all results necessary to build the output.
Output is of the format [ lat lon dep unc shift_mag shift_unc avg_str avg_dip avg_rak pre-shift_dep pre-shift_str pre-shift_dip nodeID ]
'''
print("Start Section 6 of 7: Third (final) loop")
bilats, bilons, binods, bistds = [], [], [], []
biindx, bistrs, bidips, bideps = [], [], [], []
if pooling:
pool3 = Pool(args.nCores)
partial_loop3 = partial(loops.loop3, shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper)
indices = shift_out['nID'].values
pts3 = pool3.map(partial_loop3, indices)
pool3.close()
pool3.join()
for i in range(len(indices)):
thisnode = pts3[i]
if np.isfinite(thisnode[0]):
nID = thisnode[13]
shift_out.loc[shift_out.nID == nID, 'depth'] = thisnode[0]
shift_out.loc[shift_out.nID == nID, 'stdv'] = thisnode[1]
shift_out.loc[shift_out.nID == nID, 'avstr'] = thisnode[2]
shift_out.loc[shift_out.nID == nID, 'avdip'] = thisnode[3]
shift_out.loc[shift_out.nID == nID, 'avrke'] = thisnode[4]
shift_out.loc[shift_out.nID == nID, 'lon'] = thisnode[15]
shift_out.loc[shift_out.nID == nID, 'lat'] = thisnode[16]
if np.isfinite(thisnode[5]):
bilats.append(thisnode[5])
bilons.append(thisnode[6])
binods.append(thisnode[7])
bistds.append(thisnode[8])
biindx.append(thisnode[9])
bistrs.append(thisnode[10])
bidips.append(thisnode[11])
bideps.append(thisnode[12])
multi = thisnode[14]
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
del pts3
else:
for nodeno in shift_out['nID'].values:
crdepth, crstd, crstrike, crdip, crrake, cbilats, cbilons, cbinods, cbistds, cbiindx, cbistrs, cbidips, cbideps, cnID, cpostmulti, cpeak_lon, cpeak_lat = loops.loop3(shift_out, testarea, used_all, eventlist, sdr, ddr, seismo_thick, these_params, slab, maxthickness, mindip, taper, nodeno)
if np.isfinite(crdepth):
nID = cnID
shift_out.loc[shift_out.nID == nID, 'depth'] = crdepth
shift_out.loc[shift_out.nID == nID, 'stdv'] = crstd
shift_out.loc[shift_out.nID == nID, 'avstr'] = crstrike
shift_out.loc[shift_out.nID == nID, 'avdip'] = crdip
shift_out.loc[shift_out.nID == nID, 'avrke'] = crrake
shift_out.loc[shift_out.nID == nID, 'lon'] = cpeak_lon
shift_out.loc[shift_out.nID == nID, 'lat'] = cpeak_lat
if np.isfinite(cbilats):
bilats.append(cbilats)
bilons.append(cbilons)
binods.append(cbinods)
bistds.append(cbistds)
biindx.append(cbiindx)
bistrs.append(cbistrs)
bidips.append(cbidips)
bideps.append(cbideps)
multi = cpostmulti
if len(multi) > 0:
postmulti = pd.concat([postmulti, multi],sort=True)
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
from datetime import datetime, timedelta
import re
import numpy as np
import pytest
from pandas._libs import iNaT
import pandas._libs.index as _index
import pandas as pd
from pandas import DataFrame, DatetimeIndex, NaT, Series, Timestamp, date_range
import pandas._testing as tm
def test_fancy_getitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s["1/2/2009"] == 48
assert s["2009-1-2"] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[Timestamp(datetime(2009, 1, 2))] == 48
with pytest.raises(KeyError, match=r"^'2009-1-3'$"):
s["2009-1-3"]
tm.assert_series_equal(
s["3/6/2009":"2009-06-05"], s[datetime(2009, 3, 6) : datetime(2009, 6, 5)]
)
def test_fancy_setitem():
dti = date_range(
freq="WOM-1FRI", start=datetime(2005, 1, 1), end=datetime(2010, 1, 1)
)
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s["1/2/2009"] = -2
assert s[48] == -2
s["1/2/2009":"2009-06-05"] = -3
assert (s[48:54] == -3).all()
def test_dti_reset_index_round_trip():
dti = date_range(start="1/1/2001", end="6/1/2001", freq="D")._with_freq(None)
d1 = DataFrame({"v": np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype("M8[ns]")
d3 = d2.set_index("index")
tm.assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=["Date", "Value"])
df = df.set_index("Date")
assert df.index[0] == stamp
assert df.reset_index()["Date"][0] == stamp
@pytest.mark.slow
def test_slice_locs_indexerror():
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10) for i in range(100000)]
s = Series(range(100000), times)
s.loc[datetime(1900, 1, 1) : datetime(2100, 1, 1)]
def test_slicing_datetimes():
# GH 7523
# unique
df = DataFrame(
np.arange(4.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
# duplicates
df = DataFrame(
np.arange(5.0, dtype="float64"),
index=[datetime(2001, 1, i, 10, 00) for i in [1, 2, 2, 3, 4]],
)
result = df.loc[datetime(2001, 1, 1, 10) :]
tm.assert_frame_equal(result, df)
result = df.loc[: datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10) : datetime(2001, 1, 4, 10)]
tm.assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11) :]
expected = df.iloc[1:]
tm.assert_frame_equal(result, expected)
result = df.loc["20010101 11":]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_datetime_tz_pytz():
from pytz import timezone as tz
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz("US/Central").localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil():
from dateutil.tz import tzutc
from pandas._libs.tslibs.timezones import dateutil_gettz as gettz
tz = (
lambda x: tzutc() if x == "UTC" else gettz(x)
) # handle special case for utc in dateutil
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="America/New_York")
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
tm.assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz("UTC"))] = ts[4]
tm.assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz("America/Chicago"))] = ts[4]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex():
N = 50
# testing with timezone, GH #2785
rng = date_range("1/1/1990", periods=N, freq="H", tz="US/Eastern")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
# GH#18435 strings get a pass from tzawareness compat
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
lb = "1990-01-01 04:00:00-0500"
rb = "1990-01-01 07:00:00-0500"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# But we do not give datetimes a pass on tzawareness compat
# TODO: do the same with Timestamps and dt64
msg = "Cannot compare tz-naive and tz-aware datetime-like objects"
naive = datetime(1990, 1, 1, 4)
with tm.assert_produces_warning(FutureWarning):
# GH#36148 will require tzawareness compat
result = ts[naive]
expected = ts[4]
assert result == expected
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4)] = ts[4]
tm.assert_series_equal(result, ts)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result = ts[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = 0
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# GH#36148 will require tzawareness compat
result[datetime(1990, 1, 1, 4) : datetime(1990, 1, 1, 7)] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
msg = r"Invalid comparison between dtype=datetime64\[ns, US/Eastern\] and datetime"
with pytest.raises(TypeError, match=msg):
# tznaive vs tzaware comparison is invalid
# see GH#18376, GH#18162
ts[(ts.index >= lb) & (ts.index <= rb)]
lb = pd.Timestamp(datetime(1990, 1, 1, 4)).tz_localize(rng.tzinfo)
rb = pd.Timestamp(datetime(1990, 1, 1, 7)).tz_localize(rng.tzinfo)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
tm.assert_series_equal(result, ts)
def test_getitem_setitem_periodindex():
from pandas import period_range
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
tm.assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
tm.assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
tm.assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result.iloc[4:8] = ts.iloc[4:8]
tm.assert_series_equal(result, ts)
def test_datetime_indexing():
index = date_range("1/1/2000", "1/7/2000")
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp("1/8/2000")
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
with pytest.raises(KeyError, match=re.escape(repr(stamp))):
s[stamp]
s[stamp] = 0
assert s[stamp] == 0
"""
test duplicates in time series
"""
@pytest.fixture
def dups():
dates = [
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
return Series(np.random.randn(len(dates)), index=dates)
def test_constructor(dups):
assert isinstance(dups, Series)
assert isinstance(dups.index, DatetimeIndex)
def test_is_unique_monotonic(dups):
assert not dups.index.is_unique
def test_index_unique(dups):
uniques = dups.index.unique()
expected = DatetimeIndex(
[
datetime(2000, 1, 2),
datetime(2000, 1, 3),
datetime(2000, 1, 4),
datetime(2000, 1, 5),
]
)
assert uniques.dtype == "M8[ns]" # sanity
tm.assert_index_equal(uniques, expected)
assert dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = dups.index.tz_localize("US/Eastern")
dups_local.name = "foo"
result = dups_local.unique()
expected = DatetimeIndex(expected, name="foo")
expected = expected.tz_localize("US/Eastern")
assert result.tz is not None
assert result.name == "foo"
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [
Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)
] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_duplicate_dates_indexing(dups):
ts = dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
tm.assert_series_equal(result, expected)
else:
tm.assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
expected = Series(np.where(mask, 0, ts), index=ts.index)
tm.assert_series_equal(cp, expected)
key = datetime(2000, 1, 6)
with pytest.raises(KeyError, match=re.escape(repr(key))):
ts[key]
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice():
idx = DatetimeIndex(["1/1/2000", "1/2/2000", "1/2/2000", "1/3/2000", "1/4/2000"])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts["1/2/2000":]
expected = ts[1:]
tm.assert_series_equal(result, expected)
result = ts["1/2/2000":"1/3/2000"]
expected = ts[1:4]
tm.assert_series_equal(result, expected)
def test_groupby_average_dup_values(dups):
result = dups.groupby(level=0).mean()
expected = dups.groupby(dups.index).mean()
tm.assert_series_equal(result, expected)
def test_indexing_over_size_cutoff():
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create a large list of non-periodic datetimes
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(
np.random.randn(len(dates), 4), index=dates, columns=list("ABCD")
)
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_over_size_cutoff_period_index(monkeypatch):
# GH 27136
monkeypatch.setattr(_index, "_SIZE_CUTOFF", 1000)
n = 1100
idx = pd.period_range("1/1/2000", freq="T", periods=n)
assert idx._engine.over_size_threshold
s = Series(np.random.randn(len(idx)), index=idx)
pos = n - 1
timestamp = idx[pos]
assert timestamp in s.index
# it works!
s[timestamp]
assert len(s.loc[[timestamp]]) > 0
def test_indexing_unordered():
# GH 2437
rng = date_range(start="2011-01-01", end="2011-01-15")
ts = Series(np.random.rand(len(rng)), index=rng)
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
expected.index = expected.index._with_freq(None)
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
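# A minimal, self-contained sketch of the PeriodIndex partial-string indexing
# behaviour exercised in the tests above; the hourly range and random values
# below are arbitrary assumptions, not taken from the test data.
import numpy as np
import pandas as pd
rng = pd.period_range("1/1/1990", periods=24, freq="H")
ts = pd.Series(np.random.randn(24), index=rng)
assert ts["1990-01-01 04"] == ts.iloc[4]  # a partial string resolves to a single hour
assert len(ts["1990-01-01 04":"1990-01-01 07"]) == 4  # label slices are inclusive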
"""
Utility function to load and process the output files of a DeepProfiler run.
"""
import os
import pathlib
import numpy as np
import pandas as pd
import warnings
from pycytominer import aggregate
from pycytominer.cyto_utils import load_npz, infer_cp_features
class AggregateDeepProfiler:
"""This class holds all functions needed to load and annotate the DeepProfiler (DP) run.
Attributes
----------
profile_dir : str
file location of the output profiles from DeepProfiler
(e.g. `/project1/outputs/results/features/`)
aggregate_operation : ['median', 'mean']
method of aggregation
aggregate_on : ['site', 'well', 'plate']
up to which level to aggregate
filename_delimiter : default = '_'
delimiter for the filenames of the profiles (e.g. B02_4.npz).
file_extension : default = '.npz'
extension of the profile file.
index_df : pandas.DataFrame
load in the index.csv file from DeepProfiler, provided by an input index file.
filenames : list of paths
list of pathlib.PurePath objects that point to the npz files.
aggregated_profiles : pandas.DataFrame
df to hold the metadata and profiles.
file_aggregate : dict
dict that holds the file names and metadata.
Is used to load in the npz files in the correct order and grouping.
output_file : str
If provided, will write annotated profiles to folder. Defaults to "none".
Methods
-------
aggregate_deep()
Given an initialized AggregateDeepProfiler() class, run this function to output
level 3 profiles (aggregated profiles with annotated metadata).
"""
def __init__(
self,
index_file,
profile_dir,
aggregate_operation="median",
aggregate_on="well",
filename_delimiter="_",
file_extension=".npz",
output_file="none",
):
"""
__init__ function for this class.
Arguments
---------
index_file : str
file location of the index.csv from DP
See above for all other parameters.
"""
assert aggregate_operation in [
"median",
"mean",
], "Input of aggregate_operation is incorrect, it must be either median or mean"
assert aggregate_on in [
"site",
"well",
"plate",
], "Input of aggregate_on is incorrect, it must be either site or well or plate"
self.index_df = pd.read_csv(index_file, dtype=str)
self.profile_dir = profile_dir
self.aggregate_operation = aggregate_operation
self.aggregate_on = aggregate_on
self.filename_delimiter = filename_delimiter
self.file_extension = file_extension
if not self.file_extension.startswith("."):
self.file_extension = f".{self.file_extension}"
self.output_file = output_file
def build_filenames(self):
"""
Create file names indicated by plate, well, and site information
"""
self.filenames = self.index_df.apply(
self.build_filename_from_index, axis="columns"
)
self.filenames = [
pathlib.PurePath(f"{self.profile_dir}/{x}") for x in self.filenames
]
def build_filename_from_index(self, row):
"""
Builds the name of the profile files
"""
plate = row["Metadata_Plate"]
well = row["Metadata_Well"]
site = row["Metadata_Site"]
filename = f"{plate}/{well}_{site}{self.file_extension}"
return filename
def extract_filename_metadata(self, npz_file, delimiter="_"):
"""
Extract metadata (site, well and plate) from the filename.
The input format of the file: path/plate/well_site.npz
Arguments
---------
npz_file : str
file path
delimiter : str
the delimiter used in the naming convention of the files. default = '_'
Returns
-------
loc : dict
dict with metadata
"""
base_file = os.path.basename(npz_file).strip(".npz").split(delimiter)
site = base_file[-1]
well = base_file[-2]
plate = str(npz_file).split("/")[-2]
loc = {"site": site, "well": well, "plate": plate}
return loc
def setup_aggregate(self):
"""
Sets up the file_aggregate attribute. This is a helper function to aggregate_deep().
The file_aggregate dictionary contains the file locations and metadata for each grouping.
If, for example, we are grouping by well, then the keys of self.file_aggregate would be:
plate1/well1, plate1/well2, plate2/well1, etc.
"""
if not hasattr(self, "filenames"):
self.build_filenames()
self.file_aggregate = {}
for filename in self.filenames:
file_info = self.extract_filename_metadata(
filename, self.filename_delimiter
)
file_key = file_info[self.aggregate_on]
if self.aggregate_on == "site":
file_key = (
f"{file_info['plate']}/{file_info['well']}_{file_info['site']}"
)
if self.aggregate_on == "well":
file_key = f"{file_info['plate']}/{file_info['well']}"
if file_key in self.file_aggregate:
self.file_aggregate[file_key]["files"].append(filename)
else:
self.file_aggregate[file_key] = {}
self.file_aggregate[file_key]["files"] = [filename]
self.file_aggregate[file_key]["metadata"] = file_info
def aggregate_deep(self):
"""
Main function of this class. Aggregates the profiles into a pandas DataFrame.
For each key in file_aggregate, the profiles are loaded, concatenated, and then aggregated.
If files are missing, a warning is raised and execution continues.
After aggregation, the metadata is concatenated back onto the dataframe.
Returns
-------
df_out : pandas.DataFrame
dataframe with all metadata and the feature space.
This is the input to any further pycytominer or pycytominer-eval processing
"""
if not hasattr(self, "file_aggregate"):
self.setup_aggregate()
self.aggregated_profiles = []
self.aggregate_merge_col = f"Metadata_{self.aggregate_on.capitalize()}_Position"
# Iterates over all sites, wells or plates
for metadata_level in self.file_aggregate:
# uses custom load function to create df with metadata and profiles
arr = [load_npz(x) for x in self.file_aggregate[metadata_level]["files"]]
# empty dataframes from missing files are deleted
arr = [x for x in arr if not x.empty]
# if no files were found, there is a mismatch between the index and the output files
if not len(arr):
warnings.warn(
f"No files for the key {metadata_level} could be found.\nThis program will continue, but be aware that this might induce errors!"
)
continue
df = | pd.concat(arr) | pandas.concat |
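# A rough usage sketch of the class above, kept as comments because the paths
# below are placeholders (assumptions), not real files:
# ap = AggregateDeepProfiler(
#     index_file="/project1/inputs/metadata/index.csv",
#     profile_dir="/project1/outputs/results/features/",
#     aggregate_operation="median",
#     aggregate_on="well",
# )
# level3_profiles = ap.aggregate_deep()  # aggregated profiles with annotated metadata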
#!/usr/bin/env python
""" findNeighbour4 is a server providing relatedness information for bacterial genomes via a Restful API.
See documentation for full details of its functionality.
There are unit tests for the server component. To run them:
# starting a test RESTFUL server
# NOTE: this uses the default testing config file at config/default_test_config.json, which launches a server on 5020
nohup pipenv run python3 findNeighbour4_server.py &
# And then launching unit tests with
pipenv run python3 -m unittest test/test_server.py # tests the server running on the default testing port, 5020
"""
# import libraries
import os
import requests
import json
import warnings
import datetime
import pandas as pd
import markdown
import codecs
import uuid
# only used for unit testing
from Bio import SeqIO
import unittest
from urllib.parse import urljoin as urljoiner
RESTBASEURL = "http://127.0.0.1:5020"
print(
"Running unit tests against a server expected to be operational on {0}".format(
RESTBASEURL
)
)
ISDEBUG = True
LISTEN_TO = "127.0.0.1" # only local addresses
def isjson(content):
"""returns true if content parses as json, otherwise false. used by unit testing."""
try:
json.loads(content.decode("utf-8"))
return True
except json.decoder.JSONDecodeError:
return False
def tojson(content):
"""json dumps, formatting dates as isoformat"""
def converter(o):
if isinstance(o, datetime.datetime):
return o.isoformat()
else:
return json.JSONEncoder.default(o)
return json.dumps(content, default=converter)
def do_GET(relpath):
"""makes a GET request to relpath.
Used for unit testing."""
url = urljoiner(RESTBASEURL, relpath)
# print("GETing from: {0}".format(url))
session = requests.Session()
session.trust_env = False
# print out diagnostics
# print("About to GET from url {0}".format(url))
response = session.get(url=url, timeout=None)
# print("Result:")
# print("code: {0}".format(response.status_code))
# print("reason: {0}".format(response.reason))
try:
"text: {0}".format(response.text[:100])
except UnicodeEncodeError:
# which is what happens if you try to display a gz file as text, which it isn't
warnings.warn(
"Response cannot be coerced to unicode; is this a gz file? The response content had {0} bytes.".format(
len(response.text)
)
)
warnings.warn("headers: {0}".format(response.headers))
session.close()
return response
def do_POST(relpath, payload):
"""makes a POST request to relpath.
Used for unit testing.
payload should be a dictionary"""
url = urljoiner(RESTBASEURL, relpath)
# print out diagnostics
# print("POSTING to url {0}".format(url))
if not isinstance(payload, dict):
raise TypeError("not a dict {0}".format(payload))
response = requests.post(url=url, data=payload)
# print("Result:")
# print("code: {0}".format(response.status_code))
# print("reason: {0}".format(response.reason))
# print("content: {0}".format(response.content))
return response
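# For orientation: the two helpers above are thin wrappers used by every test below,
# e.g. do_GET("/api/v2/guids") to list stored samples and
# do_POST("/api/v2/insert", payload={"guid": ..., "seq": ...}) to add one.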
def render_markdown(md_file):
"""render markdown as html"""
with codecs.open(md_file, mode="r", encoding="utf-8") as f:
text = f.read()
html = markdown.markdown(text, extensions=["tables"])
return html
class test_reset(unittest.TestCase):
"""tests route /api/v2/reset; ensures that guids are removed"""
def runTest(self):
relpath = "/api/v2/guids"
res = do_GET(relpath)
n_pre = len(res.json()) # len(json.loads(str(res.text))) # get all the guids
#print("Pre insert guids", res.json())
guid_to_insert = "{0}_{1}".format(n_pre + 1, uuid.uuid4().hex)
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
seq = str(record.seq)
relpath = "/api/v2/insert"
#print("inserting", guid_to_insert)
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
relpath = "/api/v2/guids"
res = do_GET(relpath)
#print("post-insert guids", res.json())
n_post = len(res.json()) # get all the guids
self.assertEqual(n_post, n_pre + 1)
relpath = "/api/v2/reset"
res = do_POST(relpath, payload={})
#print("Reset message:", res.json())
self.assertEqual(res.status_code, 200)
relpath = "/api/v2/guids"
res = do_GET(relpath)
#print("Post-reset guids", res.json())
n_post_reset = len(res.json()) # get all the guids
#print("number post reset", n_post_reset)
self.assertTrue(n_post_reset == 0)
class test_guid_name_validity(unittest.TestCase):
"""tests whether insertion of guids which don't conform to expectations is permitted"""
def runTest(self):
relpath = "/api/v2/guids"
res = do_GET(relpath)
n_pre = len(json.loads(str(res.text))) # get all the guids
guid_to_insert = "X" * 90 # too long
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
seq = str(record.seq)
relpath = "/api/v2/insert"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
self.assertEqual(res.status_code, 403)
relpath = "/api/v2/guids"
res = do_GET(relpath)
n_post = len(json.loads(str(res.text))) # get all the guids
self.assertEqual(n_post, n_pre)
class test_guids(unittest.TestCase):
"""tests routes /guids, /valid_guids and /invalid_guids"""
def runTest(self):
relpath = "/api/v2/reset"
res = do_POST(relpath, payload={})
relpath = "/api/v2/guids"
res = do_GET(relpath)
self.assertEqual(0, len(json.loads(str(res.text)))) # get all the guids
guid_to_insert = "valid"
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
seq = str(record.seq)
relpath = "/api/v2/insert"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
seq2 = "".join("N" * len(seq))
guid_to_insert = "invalid"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq2})
relpath = "/api/v2/guids"
res = do_GET(relpath)
self.assertEqual(
set(["valid", "invalid"]), set(json.loads(str(res.text)))
) # get all the guids
relpath = "/api/v2/valid_guids"
res = do_GET(relpath)
self.assertEqual(
set(["valid"]), set(json.loads(str(res.text)))
) # get all the guids
relpath = "/api/v2/invalid_guids"
res = do_GET(relpath)
self.assertEqual(
set(["invalid"]), set(json.loads(str(res.text)))
) # get all the guids
class test_guid_validity(unittest.TestCase):
"""tests routes /validity_guids"""
def runTest(self):
relpath = "/api/v2/reset"
res = do_POST(relpath, payload={})
relpath = "/api/v2/guids"
res = do_GET(relpath)
self.assertEqual(0, len(json.loads(str(res.text)))) # get all the guids
guid_to_insert = "valid_guid"
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
seq = str(record.seq)
relpath = "/api/v2/insert"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
seq2 = "".join("N" * len(seq))
guid_to_insert = "invalid_guid"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq2})
relpath = "/api/v2/invalid_guid/valid"
res = do_GET(relpath)
valid_code = json.loads(str(res.text))
self.assertEqual(valid_code, 1)
relpath = "/api/v2/valid_guid/valid"
res = do_GET(relpath)
valid_code = json.loads(str(res.text))
self.assertEqual(valid_code, 0)
relpath = "/api/v2/missing_guid/valid"
res = do_GET(relpath)
valid_code = json.loads(str(res.text))
self.assertEqual(valid_code, -1)
class test_cl2network(unittest.TestCase):
"""tests return of a change_id number from clustering engine"""
def runTest(self):
relpath = "/api/v2/reset"
res = do_POST(relpath, payload={})
# add four samples, two mixed
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
originalseq = list(str(record.seq))
guids_inserted = list()
relpath = "/api/v2/guids"
res = do_GET(relpath)
n_pre = len(json.loads(str(res.text))) # get all the guids
for i in range(1, 4):
seq = originalseq
if i % 2 == 0:
is_mixed = True
guid_to_insert = "mixed_cl2_{0}".format(n_pre + i)
else:
is_mixed = False
guid_to_insert = "nomix_cl2_{0}".format(n_pre + i)
# make i mutations at position 500,000
offset = 500000
for j in range(i):
mutbase = offset + j
ref = seq[mutbase]
if is_mixed is False:
if not ref == "T":
seq[mutbase] = "T"
if not ref == "A":
seq[mutbase] = "A"
if is_mixed is True:
seq[mutbase] = "N"
seq = "".join(seq)
guids_inserted.append(guid_to_insert)
relpath = "/api/v2/insert"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
self.assertEqual(res.status_code, 200)
# run the clustering engine.
os.system("pipenv run python3 findNeighbour4_clustering.py")
# do tests
relpath = "/api/v2/clustering/SNV12_ignore/cluster_ids"
res = do_GET(relpath)
self.assertEqual(res.status_code, 200)
retVal = json.loads(res.text)
# plot the cluster with the highest clusterid
relpath = "/api/v2/clustering/SNV12_ignore/{0}/network".format(max(retVal))
res = do_GET(relpath)
self.assertEqual(res.status_code, 200)
jsonresp = json.loads(str(res.text))
self.assertTrue(isinstance(jsonresp, dict))
self.assertTrue("elements" in jsonresp.keys())
# plot the cluster with the highest clusterid
res = None
relpath = "/api/v2/clustering/SNV12_ignore/{0}/minimum_spanning_tree".format(
max(retVal)
)
res = do_GET(relpath)
self.assertEqual(res.status_code, 200)
jsonresp = json.loads(str(res.text))
self.assertTrue(isinstance(jsonresp, dict))
self.assertTrue("elements" in jsonresp.keys())
class test_msa_2(unittest.TestCase):
"""tests route /api/v2/multiple_alignment/guids, with additional samples."""
def runTest(self):
relpath = "/api/v2/reset"
res = do_POST(relpath, payload={})
relpath = "/api/v2/guids"
res = do_GET(relpath)
n_pre = len(json.loads(str(res.text))) # get all the guids
inputfile = "COMPASS_reference/R39/R00000039.fasta"
with open(inputfile, "rt") as f:
for record in SeqIO.parse(f, "fasta"):
originalseq = list(str(record.seq))
inserted_guids = ["guid_ref"]
seq = "".join(originalseq)
res = do_POST("/api/v2/insert", payload={"guid": "guid_ref", "seq": seq})
for k in range(0, 1):
# form one clusters
for i in range(0, 3):
guid_to_insert = "msa2_{1}_guid_{0}".format(n_pre + k * 100 + i, k)
inserted_guids.append(guid_to_insert)
muts = 0
seq = originalseq
# make i mutations at position 500,000
offset = 500000
if k == 1:
for j in range(1000000, 1000100): # make 100 mutants at position 1m
mutbase = offset + j
ref = seq[mutbase]
if not ref == "T":
seq[mutbase] = "T"
if not ref == "A":
seq[mutbase] = "A"
muts += 1
for j in range(i):
mutbase = offset + j
ref = seq[mutbase]
if not ref == "T":
seq[mutbase] = "T"
if not ref == "A":
seq[mutbase] = "A"
muts += 1
seq = "".join(seq)
# print("Adding TB sequence {2} of {0} bytes with {1} mutations relative to ref.".format(len(seq), muts, guid_to_insert))
self.assertEqual(len(seq), 4411532) # check it's the right sequence
relpath = "/api/v2/insert"
res = do_POST(relpath, payload={"guid": guid_to_insert, "seq": seq})
self.assertTrue(isjson(content=res.content))
info = json.loads(res.content.decode("utf-8"))
self.assertEqual(info, "Guid {0} inserted.".format(guid_to_insert))
relpath = "/api/v2/multiple_alignment/guids"
payload = {"guids": ";".join(inserted_guids), "output_format": "html"}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertTrue(b"</table>" in res.content)
payload = {"guids": ";".join(inserted_guids), "output_format": "json"}
res = do_POST(relpath, payload=payload)
self.assertTrue(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertFalse(b"</table>" in res.content)
d = json.loads(res.content.decode("utf-8"))
expected_keys = set(
[
"variant_positions",
"invalid_guids",
"valid_guids",
"expected_p1",
"sample_size",
"df_dict",
"what_tested",
"outgroup",
"creation_time",
"fconst",
]
)
self.assertEqual(set(d.keys()), set(expected_keys))
payload = {"guids": ";".join(inserted_guids), "output_format": "json-records"}
res = do_POST(relpath, payload=payload)
self.assertTrue(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertFalse(b"</table>" in res.content)
d = json.loads(res.content.decode("utf-8"))
payload = {"guids": ";".join(inserted_guids), "output_format": "fasta"}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
payload = {"guids": ";".join(inserted_guids), "output_format": "json-fasta"}
res = do_POST(relpath, payload=payload)
self.assertTrue(isjson(res.content))
self.assertEqual(res.status_code, 200)
retVal = json.loads(res.content.decode("utf_8"))
self.assertTrue(isinstance(retVal, dict))
self.assertEqual(set(retVal.keys()), set(["fasta"]))
relpath = "/api/v2/multiple_alignment/guids"
payload = {
"guids": ";".join(inserted_guids),
"output_format": "html",
"what": "N",
}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertTrue(b"</table>" in res.content)
relpath = "/api/v2/multiple_alignment/guids"
payload = {
"guids": ";".join(inserted_guids),
"output_format": "html",
"what": "M",
}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertTrue(b"</table>" in res.content)
relpath = "/api/v2/multiple_alignment/guids"
payload = {
"guids": ";".join(inserted_guids),
"output_format": "html",
"what": "N_or_M",
}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertTrue(b"</table>" in res.content)
relpath = "/api/v2/multiple_alignment/guids"
payload = {
"guids": ";".join(inserted_guids),
"output_format": "interactive",
"what": "N_or_M",
}
res = do_POST(relpath, payload=payload)
self.assertFalse(isjson(res.content))
self.assertEqual(res.status_code, 200)
self.assertTrue(b"</html>" in res.content)
relpath = "/api/v2/multiple_alignment/guids"
payload = {
"guids": ";".join(inserted_guids),
"output_format": "json-records",
"what": "N",
}
res = do_POST(relpath, payload=payload)
self.assertEqual(res.status_code, 200)
self.assertTrue(isjson(res.content))
d = json.loads(res.content.decode("utf-8"))
df = | pd.DataFrame.from_records(d) | pandas.DataFrame.from_records |
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 29 11:29:34 2020
@author: Pavan
"""
import pandas as pd
pd.set_option('mode.chained_assignment', None)
import numpy as np
import math
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
mpl.rcParams['font.family'] = 'serif'
import scipy.stats as stats
import itertools
from datetime import datetime, date
import os
import yfinance as yf
# from functools import partial
from american_option_pricing import american_option
import density_utilities as du
import prediction_ensemble_py as pe
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
"""
#######################################################################################
Import Data
#######################################################################################
"""
data = pd.read_excel('spy.xlsx', index_col=None)
current_date = date(2020,9,29)
expiry_date = date(2020,10,2)
days_to_expiry = np.busday_count( current_date, expiry_date)-1
# min_p_profit = 35
# hor_leg_factor = 0.05
forecast_dens = False
save_results = True
save_plots = True
Strategies = []
Strategies = ["Butterfly","Double Broken Wing Butterfly","Iron Condor"]
# Strategies = ["Iron Condor"]
"""
#######################################################################################
Get Risk Free Date
#######################################################################################
"""
print("\n Gathering Risk Free Rate")
rf_eod_data = yf.download("^IRX", start="2020-07-01", end= current_date.strftime("%Y-%m-%d"))
for col in rf_eod_data.columns:
rf_eod_data[col] = pd.to_numeric(rf_eod_data[col],errors='coerce')
rf_eod_data=rf_eod_data.fillna(method='ffill')
rf_eod_data['interest']=((1+(rf_eod_data['Adj Close']/100))**(1/252))-1
rf_eod_data['annualized_interest']=252*(((1+(rf_eod_data['Adj Close']/100))**(1/252))-1)
rf_value =rf_eod_data['annualized_interest'].iloc[-1]
print("\nCurrent Risk Free Rate is :",'{:.3f}%'.format(rf_value*100))
"""
#######################################################################################
Data Cleaning
#######################################################################################
"""
def wrang_1(df, col_names):
for col in col_names:
df[col] = df[col].str.rstrip('%')
df[col] = pd.to_numeric(df[col],errors='coerce')
df[col] = [float(x)/100.0 for x in df[col].values]
return df
convert_cols = ["Impl Vol", "Prob.ITM","Prob.OTM","Prob.Touch"]
data = wrang_1(data,convert_cols)
def label_type(row):
if row['Symbol'][0] == "." :
return 'Option'
return 'Stock'
data['Type']=data.apply(lambda row: label_type(row), axis=1)
data['Expiry_Date']= data.Symbol.str.extract('(\d+)')
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: pd.to_datetime(str(x), format='%y%m%d'))
expiry_date_str = expiry_date.strftime("%Y%m%d")
data['Expiry_Date'] = data['Expiry_Date'].fillna(pd.Timestamp(expiry_date_str))
data['Expiry_Date'] = data['Expiry_Date'].apply(lambda x: x.strftime('%Y_%m_%d'))
#TODO: Change the logic for new symbol. Works only for this year.
data['Group']= data.Symbol.apply(lambda st: st[st.find(".")+1:st.find("20")])
data['Group'] = np.where(data['Type'] == "Stock", data['Symbol'],data['Group'])
data['Chain_ID'] = data['Group']+"_"+data['Expiry_Date']
data['Spread'] = data['Bid']-data['Ask']
stock_data = data[data['Type'] == "Stock"]
stock_data.rename(columns={"Description": "stock_Description",
"Last": "stock_Last",
"High":"stock_High",
"Low":"stock_Low",
"Open" : "stock_Open",
"Volume":"stock_Volume",
"Bid":"stock_Bid",
"Ask":"stock_Ask",
"Impl Vol":"stock_Impl_Vol",
"Spread":"stock_Spread"}, inplace=True)
stock_data = stock_data[["stock_Description","stock_Last","stock_High","stock_Low",
"stock_Open","stock_Volume","stock_Bid","stock_Ask",
"stock_Impl_Vol","stock_Spread","Chain_ID"]]
option_data = data[data['Type']=="Option"]
option_data['Option_type'] = option_data.loc[:,'Description'].str.split(' ').str[-1]
final_dataset = | pd.merge(option_data,stock_data,on=['Chain_ID']) | pandas.merge |
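# A minimal, self-contained illustration of the percent-string cleanup that wrang_1
# performs above; the toy column and values are assumptions for demonstration only.
import pandas as pd
_demo = pd.DataFrame({"Prob.ITM": ["12.5%", "50%", "87.5%"]})
_demo["Prob.ITM"] = pd.to_numeric(_demo["Prob.ITM"].str.rstrip("%"), errors="coerce") / 100.0
assert _demo["Prob.ITM"].tolist() == [0.125, 0.5, 0.875]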
from collections import Counter
from mypymatch.Matcher import Matcher
import numpy as np
import pandas as pd
from datetime import date, timedelta
import datetime
from tqdm import tqdm
from joblib import Parallel, delayed
import multiprocessing as mp
from collections import defaultdict
import os
import re
import sys
import scipy.stats as stats
import sys
sys.path.append(sys.argv[0])
sys.setrecursionlimit(1000000)
from statsmodels.tools.sm_exceptions import ConvergenceWarning
import warnings
warnings.filterwarnings(
"error",
message='Maximum number of iterations has been exceeded',
category=ConvergenceWarning)
#######################
def turnover_class(Y):
"""
Y: Rank_Turn / DayOut
generate rules converting y to y'; the nominal outcome value is converted into sub-classes:
heavy sell (a 100,000 dollar sell); sell; zero; buy; heavy buy
returns an array of y' aligned with y, and the classification rule used for each outcome
"""
col = Y.name
Y = pd.DataFrame(Y)
Y = Y.reset_index().sort_values(by=col) # to fast the speed
if col == 'Rank_Turn' or col =='ref':
turnover_threshold = 0.1
mask_z = (Y[col] == 1.0) # rank 1.0 means turnover = 0
mask_b = (Y[col] < 1.0) & (Y[col] >= turnover_threshold)
mask_hb = Y[col] < turnover_threshold
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_z, 'Zero')
Y.cls = Y.cls.mask(mask_b, 'Norm')
Y.cls = Y.cls.mask(mask_hb, 'Popular')
elif col == 'PopRef':
mask_p = (Y[col] == 1.0) # value=1 means Poppular
mask_n = (Y[col] == 0.0)
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_n, 'NonPopular')
Y.cls = Y.cls.mask(mask_p, 'Popular')
elif col == 'DayOut':
turnover_threshold = 50
mask_hs = Y[col] < -1 * turnover_threshold
mask_s = (Y[col] < 0) & (Y[col] >= (-1 * turnover_threshold))
mask_z = Y[col] == 0
mask_b = (Y[col] > 0) & (Y[col] <= turnover_threshold)
mask_hb = Y[col] > turnover_threshold
Y = Y.assign(cls='')
Y.cls = Y.cls.mask(mask_hs, 'HeavySell')
Y.cls = Y.cls.mask(mask_s, 'Sell')
Y.cls = Y.cls.mask(mask_z, 'Zero')
Y.cls = Y.cls.mask(mask_b, 'Buy')
Y.cls = Y.cls.mask(mask_hb, 'HeavyBuy')
return Y
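# For illustration (hypothetical values): with the 'Rank_Turn' rules above, the
# values 1.0, 0.5 and 0.05 are classed as 'Zero', 'Norm' and 'Popular' respectively,
# and the returned frame comes back sorted by the outcome column.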
class MyRows:
def __init__(self, rows, outcome_col):
self.value = rows
self.dependent_name = outcome_col
self.dependent = rows[self.dependent_name]
self.dep_val = self.dependent
self.ref_name = None
global args
dep = args.dep
if dep =='mix':
self.ref_name = 'ref'
self.dep_val = rows['ref']
self.corr_data = self.value
def get_correlated_features(self, alpha=np.nan):
"""
Drop features that are not significantly correlated with the outcome from the data sample
Parameters:
df: pd.DataFrame, feature columns + outcome column
outcome_col: object, the column name of the outcome
alpha: float, significance level for the t-test used to keep the correlated variables.
----
return: df : pd.DataFrame ; correlated features + outcome col
"""
if np.isnan(alpha):
global args
alpha = args.alpha
df = self.value
outcome_col = self.dependent_name
#df = pd.get_dummies(df)
if pd.DataFrame.isna(df).any().any():
raise ValueError('Input feature dataframe contains NaN.')
if len(df) < 3:
return df
# change '-' in the column names into '_'
df.columns = df.columns.str.strip().str.replace('-', '_')
# only get numerical columns to check if
no_col = df.select_dtypes(
include=['int', 'float',
'int64',
'float64']).columns
if outcome_col in no_col:
no_col = no_col.drop(outcome_col)
if 'ref' in no_col:
no_col = no_col.drop('ref')
for col in no_col:
arr = df[col]
outcome = df[outcome_col]
corr, pvalue = stats.pearsonr(arr, outcome)
if pvalue > alpha:
# if fail to reject the null hypothesis that the correlation
# coefficient IS NOT significantly different from 0.
df = df.drop(col, axis=1) # remove the column
df = df.reset_index(drop=True)
self.corr_data = df
return df
def find_best_question(Rows, question_excluded):
"""Find the best question to ask by iterating over every feature / value
and calculating the information gain.
param: question_excluded, questions already asked while building the tree"""
Rows.get_correlated_features()
rows, outcome_col = Rows.corr_data, Rows.dependent_name
best_ceffect = 0 # keep track of the best information gain
best_question = None # keep train of the feature / value that produced it
question_list = rows.columns.drop(Rows.dependent_name)
if Rows.ref_name:
question_list = question_list.drop(Rows.ref_name)
qkeys, qcount = np.unique(question_excluded, return_counts=True)
qdict = defaultdict(list)
maxAskCount = 2 # delete the cols(questions) that are asked twice
for (c, k) in zip(qcount, qkeys):
qdict[c].append(k)
if maxAskCount in qdict.keys():
# if a col has already been asked maxAskCount times,
# remove it from the question list
for item in qdict[maxAskCount]:
if item in question_list:
question_list = question_list.drop(item)
if len(question_list) == 0:
import warnings
warnings.warn('Find Best Question: Rows is empty')
return best_ceffect, best_question
# get the question list for processing
testQlist = []
for col in question_list: # for each feature
values = list(set(rows[col])) # unique values in the column
if is_numeric(
values[0]): # if there are too many numeric values in the value set, reduce them to a coarser grid
global args
SplitCount = args.sp
if len(values) > SplitCount:
values = np.linspace(min(rows[col]), max(
rows[col]), SplitCount)[1:-1]
else:
if len(values) == 2:
values = values[:-1]
# if the question set is not numeric and has only 2 unique values,
# one is the complement of the other, so drop one
# here.
for val in values: # for each value
testQlist += [Question(col, val)]
# Start multiprocessing
cpu_cores = mp.cpu_count()
if cpu_cores > 10:
num_cores = int(np.floor(mp.cpu_count() / 4))
else:
num_cores = 1
# for q in tqdm(testQlist, desc='calculating p_value and gini for {} rows'.format(len(Rows.value))):
# q.cal_pvalue_gain(Rows)
def para_run(Q): # Q is a Question Instance
global args
alpha = args.alpha
Q.cal_causal_effect_gain(Rows, alpha)
return Q
resQlist = Parallel(n_jobs=num_cores)(delayed(para_run)(q)for q in tqdm(
testQlist, desc='calculating p_value and gini for {} rows'.format(len(Rows.value))))
res_df = pd.DataFrame(columns=['col', 'val', 'ceffect', 'gain'])
for q in resQlist:
res_df.loc[len(res_df), ['col', 'val', 'ceffect', 'gain']] = [
q.column, q.value, q.ceffect, q.gain]
#weights = [0.15, 0.85]
weights = [0.8, 0.2]
# p-value: the smaller the better; gain: the bigger the better (q.gain
# stores the gini information gain from the split)
# weighted rank over Causal Effect (bigger the better) and gini info gain
res_df['ranks'] = res_df[['ceffect', 'gain']].mul(weights).sum(1)
res_df = res_df.sort_values(
by=['ranks'],
ascending=False).reset_index(
drop=True)
best_question = Question(
res_df.col[0],
res_df.val[0])
best_ceffect = res_df.ceffect[0]
return best_ceffect, best_question
def is_numeric(value):
"""Test if a value is numeric."""
return isinstance(value, int) or isinstance(value, float)
class Question:
"""A Question is used to partition a dataset.
This class just records a 'column number' (e.g., 0 for Color) and a
'column value' (e.g., Green). The 'match' method is used to compare
the feature value in an example to the feature value stored in the
question. See the demo below.
"""
def __init__(self, column, value):
self.column = column
self.value = value
def match(self, example):
# Compare the feature value in an example to the
# feature value in this question.
val = example[self.column]
if is_numeric(val):
return val < self.value
else:
return val == self.value
def cal_causal_effect_gain(self, Rows, alpha):
Rows.get_correlated_features()
rows, outcome_col = Rows.corr_data, Rows.dependent_name
# try splitting the dataset
true_rows, false_rows = partition(rows, self)
TrRows = MyRows(true_rows, Rows.dependent_name)
FlRows = MyRows(false_rows, Rows.dependent_name)
# Skip this split if it doesn't divide the
# dataset.
if len(true_rows) == 0 or len(false_rows) == 0:
self.gain = 0
self.ceffect = 0
return
try:
# Get Prospensity_matched dataset
matchdf = prospensity_match(rows, self, outcome_col)
except BaseException: # if the match failed, fall back to the original dataset
Yvar = self.column
Yvar_new = varnameQuestion(self)
true_rows, false_rows = partition(rows, self)
true_rows = true_rows.rename(columns={Yvar: Yvar_new})
false_rows = false_rows.rename(columns={Yvar: Yvar_new})
true_rows[Yvar_new] = 1
false_rows[Yvar_new] = 0
matchdf = true_rows.append(false_rows)
try:
# Calculate the p-value from this split
cau_effect, pvalue = match_ttest(matchdf, self, outcome_col)
except BaseException:
pvalue = 1.0
cau_effect = 0
# Calculate the information gain from this split
current_uncertainty = gini_r(Rows)
gain = info_gain(TrRows, FlRows, current_uncertainty)
self.gain = gain
# if pass the significant test ( two groups are significantly
# different)
if pvalue <= alpha:
self.ceffect = cau_effect
else:
self.ceffect = 0
def __repr__(self):
# This is just a helper method to print
# the question in a readable format.
condition = "=="
if is_numeric(self.value):
condition = "<"
return "{0} {1} {2:.2f}" .format(
self.column, condition, self.value)
return "%s %s %s" % (
self.column, condition, str(self.value))
def partition(rows, question):
"""Partitions a dataset.
For each row in the dataset, check if it matches the question. If
so, add it to 'true rows', otherwise, add it to 'false rows'.
"""
true_rows = pd.DataFrame(columns=rows.columns)
false_rows = pd.DataFrame(columns=rows.columns)
for i in rows.index:
row = rows.loc[i, :]
if question.match(row):
true_rows = true_rows.append(row)
else:
false_rows = false_rows.append(row)
return true_rows, false_rows
def varnameQuestion(question):
Yvar = question.column
Value = question.value
if is_numeric(Value) and (type(Value) is not bool):
critic = '_leq_'
val_str = str(np.round(Value, 2)).replace('.', '_')
else:
critic = '_is_'
val_str = str(Value)
return Yvar + critic + val_str
def prospensity_match(df_rows, question, outcome_col):
'''
according to the binary question, return matched df_rows
'''
Yvar = question.column
Yvar_new = varnameQuestion(question)
# get categorical variables into dummy variables
split_rows = pd.get_dummies(df_rows.drop([Yvar], axis=1), drop_first=True)
true_rows, false_rows = partition(split_rows, question)
# get the binary attribute value colum as Yvar
true_rows = true_rows.rename(columns={Yvar: Yvar_new})
false_rows = false_rows.rename(columns={Yvar: Yvar_new})
true_rows[Yvar_new] = 1
false_rows[Yvar_new] = 0
# # Before getting into propensity match, we should exclude cols that cause
# # perfect separations
#
# categorical_cols = list(
# df_rows.select_dtypes(
# include=[
# 'bool',
# 'object']).columns)
# if Yvar in categorical_cols:
# categorical_cols.remove(Yvar) # in case of yvar is an bool or object
#
# # if only categorical_cols in the df_rows cols, then directly return
# # without matching
#
# if len(categorical_cols) + 2 == len(df_rows.columns):
# df_rows = df_rows.rename(columns={Yvar: Yvar_new})
# return df_rows
m = Matcher(
false_rows,
true_rows,
yvar=Yvar_new,
exclude=[outcome_col])
# np.random.seed(20170925)
acc = m.fit_scores(balance=True, ret=True)
if abs(
acc -
0.5) < 0.01: # if it is already a balanced dataset, then no need for propensity match
return m.data
else:
try:
m.predict_scores()
except Exception as e:
# if propensity score prediction fails, fall back to random scores
print('Predict Score Error:{}, We adopt random scores here'.format(e))
m.data['scores'] = np.random.rand(len(m.data))
return m.data
m.match(method="min", nmatches=1, threshold=0.0001)
m.assign_weight_vector()
return m.matched_data
def match_ttest(matchdf, question, outcome_col):
classification_var = varnameQuestion(question)
try:
X = matchdf[matchdf[classification_var] == question.value][outcome_col]
Y = matchdf[matchdf[classification_var] != question.value][outcome_col]
except BaseException:
print(matchdf.columns + '+' + classification_var)
from scipy.stats import ttest_ind
if len(X) < 2 or len(Y) < 2:
return 0
else:
caueffect = np.abs(np.mean(X) - np.mean(Y))
tstats, pvalue = ttest_ind(X, Y)
return caueffect, pvalue
class Leaf:
"""A Leaf node classifies data.
This holds a dictionary of class (e.g., "Apple") -> number of times
it appears in the rows from the training data that reach this leaf.
"""
def __init__(self, Rows):
res = turnover_class(Rows.dep_val)
keys, count = np.unique(res.cls, return_counts=True)
self.predictions = dict(zip(keys, count))
self.value = max(self.predictions, key=self.predictions.get)
self.real = []
class Decision_Node:
"""A Decision Node asks a question.
This holds a reference to the question, and to the two child nodes.
"""
def __init__(self,
question,
true_branch,
false_branch, Rows):
self.question = question
self.true_branch = true_branch
self.false_branch = false_branch
self.dependent = Rows.dependent
self.real = [] # if pruned as a leaf, save it here
res = turnover_class(Rows.dep_val)
keys, count = np.unique(res.cls, return_counts=True)
self.predictions = dict(zip(keys, count))
def build_tree(Rows, height, question_excluded=[]):
"""Builds the tree.
Rules of recursion: 1) Believe that it works. 2) Start by checking
for the base case (no further information gain). 3) Prepare for
giant stack traces.
"""
# Base case: no further info gain
# Since we can ask no further questions,
# we'll return a leaf.
if height == 0 or len(Rows.value.columns) == 1:
return Leaf(Rows)
# Try partitioing the dataset on each of the unique attribute,
# calculate the information gain,
# and return the question that produces the highest gain.
best_ceffect, question = find_best_question(Rows, question_excluded)
if not question: # if no questions can be asked, then return a leaf node
return Leaf(Rows)
else:
question_excluded.append(question.column)
# If we reach here, we have found a useful feature / value
# to partition on.
Rows.get_correlated_features()
true_rows, false_rows = partition(Rows.corr_data, question)
if len(true_rows) == 0 or len(false_rows) == 0:
return Leaf(Rows)
elif len(false_rows) == 0 and len(true_rows) == 0:
raise ValueError('Empty rows from partition')
else:
TrRows = MyRows(true_rows, Rows.dependent_name)
FlRows = MyRows(false_rows, Rows.dependent_name)
# Recursively build the true branch.
# CurrentNodePos = LastNodePos+1
true_branch = build_tree(TrRows, height - 1, question_excluded)
# Recursively build the false branch.
false_branch = build_tree(FlRows, height - 1, question_excluded)
# Return a Question node.
# This records the best feature / value to ask at this point,
# the last node,
# as well as the branches to follow
# depending on the answer.
return Decision_Node(question, true_branch, false_branch, Rows)
# BEST PARTITION
def gini(counts):
"""Calculate the Gini Impurity for a list of counts.
There are a few different ways to do this, I thought this one was
the most concise. See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
Smaller the purer
"""
impurity = 1
sum_count = sum([val for val in counts.values()])
for lbl in counts:
prob_of_lbl = counts[lbl] / float(sum_count)
impurity -= prob_of_lbl ** 2
return impurity
def gini_r(Rows):
"""Calculate the Gini Impurity for a list of rows.
There are a few different ways to do this, I thought this one was
the most concise. See:
https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
"""
counts = Leaf(Rows).predictions
return gini(counts)
def info_gain(leftRows, rightRows, current_uncertainty):
"""Information Gain.
The uncertainty of the starting node, minus the weighted impurity of
two child nodes.
bigger the better
"""
left = leftRows.value
right = rightRows.value
p = float(len(left)) / (len(left) + len(right))
return current_uncertainty - p * \
gini_r(leftRows) - (1 - p) * gini_r(rightRows)
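# Worked example for orientation (numbers are hypothetical): a node with predictions
# {'Popular': 2, 'Norm': 2} has gini() = 1 - 0.5**2 - 0.5**2 = 0.5, while a pure node
# such as {'Zero': 4} has gini() = 0.0; info_gain() rewards splits whose children sit
# closer to the pure end of that range.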
def print_tree(
node,
height=0,
spacing="",
sourceFile=open(
'../mytree.txt',
'a+'), last_node=0, branch_type=''):
'''
print the tree from the root node to the sourceFile
Parameters
----------
node: Decision_Node, start node for printing the tree
height: int , current height of the tree, default = 0
spacing: str , the spacing between
sourceFile: output file default '../mytree.txt'
last_node : int, index number of the last number
branch_type: in our example, for the root and leaves, it's '', for the branching nodes, it's either 'True' or 'False'
Returns: None
-------
'''
# Base case: we've reached a leaf
if isinstance(node, Leaf):
this_node = generate_ind()
print(str(last_node) + ' -> ' + str(this_node) +
' [headlabel=' + branch_type + '] ;', file=sourceFile)
print(
str(this_node) + ' [label="' + spacing + spacing, node.value, "\\n\\n", str(
node.predictions).replace(
',', ',\n'), " \\nGINI:{0:.2f}".format(
gini(
node.predictions)), '"] ;', file=sourceFile)
return
if isinstance(node.real, Leaf):
this_node = generate_ind()
print(str(last_node) + ' -> ' + str(this_node) +
' [headlabel=' + branch_type + '] ;', file=sourceFile)
print(
str(this_node) +
' [label="' +
spacing +
spacing,
node.real.value,
"\\n\\n",
str(
node.real.predictions).replace(
',',
',\n'),
" \\nGINI:{0:.2f}".format(
gini(
node.real.predictions)),
'"] ;',
file=sourceFile)
return
# Print the question at this node
this_node = generate_ind()
if last_node != 0:
print(str(last_node) + ' -> ' + str(this_node) +
' [headlabel=' + branch_type + '] ;', file=sourceFile)
print(
str(this_node) + ' [label="' + spacing + str(
node.question) + spacing + "\\n", str(
node.predictions).replace(
',', ',\n'), " \\nGINI:{0:.2f}".format(
gini(
node.predictions)), '"] ;', file=sourceFile)
# Call this function recursively on the true branch
#print(str(height) + spacing + '--> True:', file=sourceFile)
print_tree(
node.true_branch,
height + 1,
spacing + " ",
sourceFile,
this_node,
'True')
# Call this function recursively on the false branch
#print(str(height) + spacing + '--> False:', file=sourceFile)
print_tree(
node.false_branch,
height + 1,
spacing + " ",
sourceFile,
this_node,
'False')
node_num = 0
def generate_ind():
global node_num
node_num = node_num + 1
return node_num
def TreePruning(node):
if isinstance(node, Leaf):
return
if isinstance(node.real, Leaf):
return
original_leaf_check = isinstance(
node.true_branch,
Leaf) and isinstance(
node.false_branch,
Leaf) # if both children are leaves
real_leaf_check = isinstance( # if both children have already been pruned into leaves
node.true_branch.real,
Leaf) and isinstance(
node.false_branch.real,
Leaf)
half_blood_t_leaf_check = isinstance( # if only the true-branch child has been pruned into a leaf
node.true_branch.real,
Leaf) and isinstance(
node.false_branch,
Leaf)
half_blood_f_leaf_check = isinstance( # if only the false-branch child has been pruned into a leaf
node.true_branch,
Leaf) and isinstance(
node.false_branch.real,
Leaf)
if original_leaf_check:
if node.true_branch.value == node.false_branch.value:
node.real = node.true_branch
node.real.predictions = dict(
Counter(
node.true_branch.predictions) +
Counter(
node.false_branch.predictions))
return
if real_leaf_check:
if node.true_branch.real.value == node.false_branch.real.value:
node.real = node.true_branch.real
node.real.predictions = dict(
Counter(
node.true_branch.real.predictions) +
Counter(
node.false_branch.real.predictions))
return
# if both children are the same, then delete the leaves and turn the parent
# node into a leaf
if half_blood_t_leaf_check:
if node.true_branch.real.value == node.false_branch.value:
node.real = node.true_branch.real
node.real.predictions = dict(
Counter(
node.true_branch.real.predictions) +
Counter(
node.false_branch.predictions))
return
if half_blood_f_leaf_check:
if node.true_branch.value == node.false_branch.real.value:
node.real = node.false_branch.real
node.real.predictions = dict(
Counter(
node.true_branch.predictions) +
Counter(
node.false_branch.real.predictions))
return
TreePruning(node.true_branch)
TreePruning(node.false_branch)
def _main():
import argparse
parser = argparse.ArgumentParser(
description='A script for causal decision tree for continuous varible')
parser.add_argument(
"--sp",
default=6,
type=int,
help="Number of split for continuous ")
parser.add_argument(
"--hmax",
default=5,
type=int,
help="Maximum height of the tree")
parser.add_argument(
"--random",
default=False,
type=bool,
help="Whether we random pick samples from original data (for testing)")
parser.add_argument(
"--pick",
default=2000,
type=int,
help="Number of random pick from the original data")
parser.add_argument(
"--alpha",
default=0.1,
type=float,
help="Significance level for correlation check")
parser.add_argument(
"--dep",
default = 'Rank_Turn',
type = str,
help = 'Dependent value of the tree: \n'
'Rank_Turn: turnover daily ranking ; \n'
'DayNetTurnover: Net turnover daily ranking ; \n'
'DayOut: Day change of out contracts; \n'
'PopRef: 1.0 if turnover rank<0.1 \n'
'mix: using Rank_Turn to run the tree construction, classification show PopRef'
)
global args
args = parser.parse_args()
h_max = args.hmax
random_flag = args.random
random_pick = args.pick
hsi_df = | pd.read_csv('../data/input.csv') | pandas.read_csv |
import os
import numpy as np
import pandas as pd
from .BaseEmbed import BaseEmbed
pd.set_option('display.max_rows', 500)
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import pandas as pd
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
import argparse
rate = "0.5" # 默认为6:4的正负样本比例,若要改为1:1则取rate=“0.5”
class SGD:
def __init__(self, trainfile, validfile, testfile):
super(SGD, self).__init__()
train: pd.DataFrame = pd.read_csv(trainfile)
train: pd.DataFrame = train[train['label'].notna()]
valid: pd.DataFrame = pd.read_csv(validfile)
valid: pd.DataFrame = valid[valid['label'].notna()]
test: pd.DataFrame = | pd.read_csv(testfile) | pandas.read_csv |
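# A minimal sketch of the scaled SGD pipeline implied by the imports above; the toy
# data, step names and hyper-parameters are assumptions, not the project's settings.
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import SGDClassifier
X = np.random.randn(100, 5)
y = (X[:, 0] > 0).astype(int)
clf = Pipeline([("scale", StandardScaler()), ("sgd", SGDClassifier(random_state=0))])
clf.fit(X, y)
print(clf.score(X, y))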
from data.data_utils import ensure_dir_exist
import pandas as pd,numpy as np,logging,os
def my_logger(logging_path):
# create the logger
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
logger.handlers = []
assert len(logger.handlers) == 0
handler = logging.FileHandler(logging_path)
handler.setLevel(logging.INFO)
handler.setFormatter(formatter)
console = logging.StreamHandler()
console.setLevel(logging.INFO)
logger.addHandler(handler)
logger.addHandler(console)
return logger
def get_num_params(var_list,exclude_embedding_matrix=True):
if exclude_embedding_matrix:
return np.sum([np.prod(v.get_shape().as_list()) for v in var_list
if "embedding_matrix" not in v.name])
else:
return np.sum([np.prod(v.get_shape().as_list()) for v in var_list])
def update_history_summary(mode, history, summary, avg_l, avg_f, avg_a, avg_p, avg_r):
assert mode in ["train", "valid"]
history[mode + "_loss"].append(avg_l)
history[mode + "_f1"].append(avg_f)
history[mode + "_acc"].append(avg_a)
history[mode + "_pre"].append(avg_p)
history[mode + "_rec"].append(avg_r)
if summary is not None:
summary.value[0].simple_value = avg_l
summary.value[1].simple_value = avg_f
summary.value[2].simple_value = avg_a
summary.value[3].simple_value = avg_p
summary.value[4].simple_value = avg_r
return history, summary
def WriteToSubmission(res,fileName):
ensure_dir_exist(os.path.dirname(fileName))
if isinstance(res,dict):
res = [[int(key), int(value)] for (key, value) in res.items()]
tmp= | pd.DataFrame(res,columns=["id","label"]) | pandas.DataFrame |
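# Expected usage of the helper above (the path is a placeholder):
# logger = my_logger("logs/train.log")
# logger.info("epoch finished")  # written to both the log file and the console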
# imports
#region
import os
import pyreadstat
import pandas as pd
import numpy as np
from statsmodels.stats.weightstats import DescrStatsW
from sklearn.linear_model import LinearRegression
import statsmodels.api as sm
import statsmodels.formula.api as smf
import seaborn as sns
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from libs.utils import *
from libs.plots import *
from libs.extensions import *
plt.ioff()
#endregion
# -----------------------------
# --- FOOD CARBON FOOTPRINT ---
# Can I parse the LCA meta model database? ---
root = 'D:\\projects\\fakta-o-klimatu\\work\\389-food-carbon-footprint'
path = root + '\\LCA+Meta-Analysis_database-static.xlsx'
df = pd.read_excel(path, header=2)
df.columns
labels = df[np.isfinite(df['Unnamed: 0'])][['Unnamed: 0', 'Reference']]
df['Unnamed: 0'] = df['Unnamed: 0'].fillna(method='ffill')
ghg_cols = {
'GHG Emis \n(kg CO2 eq)': 'ghg_total',
'LUC Burn': 'luc_burn',
'LUC C Stock': 'luc_stock',
'Feed': 'feed',
'Farm': 'farm',
'Prcssing': 'processing',
'Tran & Str': 'transport',
'Packging': 'packaging',
'Ret\nail': 'retail',
'Loss.1': 'loss'
}
w_col = 'Weight'
g_col = 'Unnamed: 0'
data = df[[g_col, w_col, *ghg_cols]].rename(columns={w_col: 'weight', g_col: 'product', **ghg_cols})
data['processing'] = data['processing'].replace(to_replace='-', value=0)
data.show()
data.dtypes
data['luc'] = data['luc_burn'] + data['luc_stock']
ghgs = list(ghg_cols.values())
df.dtypes.reset_index().show()
df.columns[65:76]
data
data.dtypes
data.groupby('product')['weight'].sum()
data.groupby('product')['weight'].sum()
for c in ghgs:
data[f'{c}_w'] = data[c] * data['weight']
avgs = data.groupby('product')[[f'{c}_w' for c in ghgs]].sum().reset_index().rename(columns={f'{c}_w':c for c in ghgs})
labels.columns = ['product', 'label']
labels = labels.iloc[:-1]
avgs = pd.merge(avgs, labels)
avgs = avgs[['label', *ghgs]].copy()
avgs['luc'] = avgs.luc_burn + avgs.luc_stock
avgs.show()
plain = data.groupby('product')[ghgs].mean().reset_index()
plain = | pd.merge(plain, labels) | pandas.merge |
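# A minimal, self-contained sketch of the weight-and-sum aggregation used above
# (per-product weighted totals); the toy products and numbers are assumptions,
# not values from the LCA database.
import pandas as pd
_toy = pd.DataFrame({"product": ["beef", "beef", "peas"], "weight": [0.25, 0.75, 1.0], "ghg_total": [60.0, 100.0, 1.0]})
_toy["ghg_total_w"] = _toy["ghg_total"] * _toy["weight"]
print(_toy.groupby("product")["ghg_total_w"].sum())  # beef -> 90.0, peas -> 1.0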
import pandas_datareader.data as web
import pandas as pd
def fetch_from_web(symbol, start_date, end_date):
return web.DataReader(symbol, 'yahoo', start_date, end_date)
def get_data(symbols, start_date, end_date):
"""Load daily data from web"""
dates = | pd.date_range(start_date, end_date) | pandas.date_range |
import pandas as pd
import numpy as np
import sys
# DS TODO: Comment this please :)
"""
Function to convert the parameter dataframe to a scaled and fitted array.
Inputs:
1) The Parameters dataframe
2) The fix/float/constrain constraints dictionary (taken from runprops)
Outputs:
1) The fitted array of floating parameters
2) The array of names for each floating parameter
3) The dataframe of fixed values
4) The list of all column names in order for recombination
5) The original fit scale which will be needed later during recombination
"""
def from_param_df_to_fit_array(dataframe, runprops):
fix_float_dict = runprops.get("float_dict")
total_df_names = np.array([])
for i in dataframe.columns:
if 'period' in i[0]:
for j in range(runprops.get('numobjects')):
if str(j+1) in i[0]:
total_df_names = np.append(total_df_names, 'sprate_'+str(j+1))
else:
total_df_names = np.append(total_df_names, i[0])
fit_names = []
for i in range(0,runprops.get('numobjects')):
if runprops.get('lockspinanglesflag') == True:
if int(runprops.get('dynamicstoincludeflags')[i]) != 0:
if int(runprops.get('dynamicstoincludeflags')[i]) == 2:
dataframe[['spaop_'+str(i+1)]] = dataframe[['aop_2']].values
dataframe[['spinc_'+str(i+1)]] = dataframe[['inc_2']].values
dataframe[['splan_'+str(i+1)]] = dataframe[['lan_2']].values
if fix_float_dict.get('spaop_'+str(i+1)) == 1:
print('Since you have chosen to lock the spin angles, please change the spaop_'+str(i+1)+' variable in the float_dict to be fixed.')
sys.exit()
if fix_float_dict.get('spinc_'+str(i+1)) == 1:
print('Since you have chosen to lock the spin angles, please change the spinc_'+str(i+1)+' variable in the float_dict to be fixed.')
sys.exit()
if fix_float_dict.get('splan_'+str(i+1)) == 1:
print('Since you have chosen to lock the spin angles, please change the splan_'+str(i+1)+' variable in the float_dict to be fixed.')
sys.exit()
if runprops.get('transform'):
if runprops.get('numobjects') > 3:
print('Warning: Only masses 1-3 will be used in the transformations for now. Future work can be done later to increase this')
if fix_float_dict.get('mass_1') == 1 and fix_float_dict.get('mass_2') == 1:
if fix_float_dict.get('mass_3') == 1:
dataframe[['mass_2']] = np.array(dataframe[['mass_1']])+np.array(dataframe[['mass_2']])
dataframe[['mass_3']] = np.array(dataframe[['mass_3']])+np.array(dataframe[['mass_2']])
fit_names.append('mass1+2')
fit_names.append('mass1+2+3')
else:
dataframe[['mass_2']] = np.array(dataframe[['mass_1']])+np.array(dataframe[['mass_2']])
fit_names.append('mass1+2')
for i in range(runprops.get('numobjects')-1):
pomega = np.array(dataframe[['aop_'+str(i+2)]])+np.array(dataframe[['lan_'+str(i+2)]])
Lambda = pomega + np.array(dataframe[['mea_'+str(i+2)]])
fit_names.append('lambda_'+str(i+2))
fit_names.append('pomega_'+str(i+2))
if fix_float_dict.get('lan_'+str(i+2)) == 1 and fix_float_dict.get('aop_'+str(i+2)) == 1:
dataframe[['aop_'+str(i+2)]] = pomega
if fix_float_dict.get('mea_'+str(i+2)) == 1 and fix_float_dict.get('aop_'+str(i+2)) == 1:
dataframe[['mea_'+str(i+2)]] = Lambda
if fix_float_dict.get('ecc_'+str(i+2)) == 1 and fix_float_dict.get('aop_'+str(i+2)) == 1:
ecc = np.array(dataframe[['ecc_'+str(i+2)]])
pomega_rad = np.array(dataframe[['aop_'+str(i+2)]])*np.pi/180
dataframe[['ecc_'+str(i+2)]] = np.array(ecc)*np.sin(pomega_rad)
dataframe[['aop_'+str(i+2)]] = np.array(ecc)*np.cos(pomega_rad)
if fix_float_dict.get('inc_'+str(i+2)) == 1 and fix_float_dict.get('lan_'+str(i+2)) == 1:
inc = np.array(dataframe[['inc_'+str(i+2)]])*np.pi/180
lan = np.array(dataframe[['lan_'+str(i+2)]])*np.pi/180
dataframe[['inc_'+str(i+2)]] = np.tan(inc/2)*np.sin(lan)
dataframe[['lan_'+str(i+2)]] = np.tan(inc/2)*np.cos(lan)
for i in range(runprops.get('numobjects')):
if fix_float_dict.get('spinc_'+str(i+1)) == 1 and fix_float_dict.get('splan_'+str(i+1)) == 1:
spinc = np.array(dataframe[['spinc_'+str(i+1)]])*np.pi/180
splan = np.array(dataframe[['splan_'+str(i+1)]])*np.pi/180
a = np.cos(spinc/2)*np.sin(splan)
b = np.cos(spinc/2)*np.cos(splan)
dataframe[['spinc_'+str(i+1)]] = a
dataframe[['splan_'+str(i+1)]] = b
num = 0
fit_scale = dataframe.iloc[0]
fit_scale = fit_scale.to_frame().transpose()
#Scale every column down by the values in the first row.
for col in dataframe.columns:
if fit_scale[col][0] != 0.0:
dataframe[col] = dataframe[col]/fit_scale[col][0]
num = num+1
key_list = list(fix_float_dict.keys())
val_list = list(fix_float_dict.values())
fixed_df = pd.DataFrame(index = range(len(dataframe.index)))
float_df = pd.DataFrame()
float_names = []
num = 0
float_array = np.array([])
if len(key_list) == 0:
float_array = dataframe.to_numpy()
else:
#Split the fixed and floating values into seperate dataframes
for col in dataframe.columns:
#If the value is fixed
name = col[0]
if "period" in name:
for i in range(runprops.get('numobjects')):
if str(i+1) in name:
name = "sprate_"+str(i+1)
if fix_float_dict.get(name) == 0:
fixed_df[name] = dataframe[col]
#If the value is floating
elif fix_float_dict.get(col[0]) == 1:
float_df[name] = dataframe[col]
float_names.append(name)
num = num+1
float_arr = float_df.to_numpy()
for col in fit_scale.columns:
if "period" in col[0]:
for i in range(runprops.get('numobjects')):
if str(i+1) in col[0]:
newcol = ('sprate_'+str(i+1),)
fit_scale.rename(columns={col[0]: newcol[0]}, inplace=True)
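# Append the names of derived quantities to fit_names: a spin period for each object whose dynamics flag is set,
# and satellite spin inclinations when the first dynamicstoincludeflags entry is set.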
j = 1
for i in runprops.get('dynamicstoincludeflags'):
if int(i) > 0:
fit_names.append('period_'+str(j))
j = j+1
if int(runprops.get('dynamicstoincludeflags')[0]) > 0:
for i in range(runprops.get('numobjects')-1):
fit_names.append('sat_spin_inc_'+str(i+2))
return float_arr, float_names, fixed_df, total_df_names, fit_scale, fit_names
"""
Function to convert a fitted array into the parameter dataframe
Inputs:
1) The fitted float array
2) names of each column in the float_array
3) Fixed dataframe of values
4) All names of parameters in order
5) The scale of fit variables
Outputs:
1) Dataframe in parameter format
"""
def from_fit_array_to_param_df(float_array, float_names, fixed_df, total_df_names, fit_scale, names_dict, runprops):
#First, turn the float_array back into a dataframe with the given column names
#if runprops.get('includesun') == 1:
# np.delete(float_array, np.s_[0:5:1],0)
# np.delete(float_names, np.s_[0:5:1],0)
Index = range(len(fixed_df.index))
float_df = pd.DataFrame(data = [float_array],index = Index, columns = float_names)
param_df = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import pytest
import re
from numpy import nan as NA
import numpy as np
from numpy.random import randint
from pandas.compat import range, u
import pandas.compat as compat
from pandas import Index, Series, DataFrame, isna, MultiIndex, notna
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
import pandas.core.strings as strings
class TestStringMethods(object):
def test_api(self):
# GH 6106, GH 9322
assert Series.str is strings.StringMethods
assert isinstance(Series(['']).str, strings.StringMethods)
# GH 9184
invalid = Series([1])
with tm.assert_raises_regex(AttributeError,
"only use .str accessor"):
invalid.str
assert not hasattr(invalid, 'str')
def test_iter(self):
# GH3638
strs = 'google', 'wikimedia', 'wikipedia', 'wikitravel'
ds = Series(strs)
for s in ds.str:
# iter must yield a Series
assert isinstance(s, Series)
# indices of each yielded Series should be equal to the index of
# the original Series
tm.assert_index_equal(s.index, ds.index)
for el in s:
# each element of the series is either a basestring/str or nan
assert isinstance(el, compat.string_types) or isna(el)
# desired behavior is to iterate until everything would be nan on the
# next iter so make sure the last element of the iterator was 'l' in
# this case since 'wikitravel' is the longest string
assert s.dropna().values.item() == 'l'
def test_iter_empty(self):
ds = Series([], dtype=object)
i, s = 100, 1
for i, s in enumerate(ds.str):
pass
# nothing to iterate over, so nothing gets defined and the values should
# remain unchanged
assert i == 100
assert s == 1
def test_iter_single_element(self):
ds = Series(['a'])
for i, s in enumerate(ds.str):
pass
assert not i
assert_series_equal(ds, s)
def test_iter_object_try_string(self):
ds = Series([slice(None, randint(10), randint(10, 20)) for _ in range(
4)])
i, s = 100, 'h'
for i, s in enumerate(ds.str):
pass
assert i == 100
assert s == 'h'
def test_cat(self):
one = np.array(['a', 'a', 'b', 'b', 'c', NA], dtype=np.object_)
two = np.array(['a', NA, 'b', 'd', 'foo', NA], dtype=np.object_)
# single array
result = strings.str_cat(one)
exp = 'aabbc'
assert result == exp
result = strings.str_cat(one, na_rep='NA')
exp = 'aabbcNA'
assert result == exp
result = strings.str_cat(one, na_rep='-')
exp = 'aabbc-'
assert result == exp
result = strings.str_cat(one, sep='_', na_rep='NA')
exp = 'a_a_b_b_c_NA'
assert result == exp
result = strings.str_cat(two, sep='-')
exp = 'a-b-d-foo'
assert result == exp
# Multiple arrays
result = strings.str_cat(one, [two], na_rep='NA')
exp = np.array(['aa', 'aNA', 'bb', 'bd', 'cfoo', 'NANA'],
dtype=np.object_)
tm.assert_numpy_array_equal(result, exp)
result = strings.str_cat(one, two)
exp = np.array(['aa', NA, 'bb', 'bd', 'cfoo', NA], dtype=np.object_)
tm.assert_almost_equal(result, exp)
def test_count(self):
values = np.array(['foo', 'foofoo', NA, 'foooofooofommmfoo'],
dtype=np.object_)
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_count(mixed, 'a')
xp = np.array([1, NA, 0, NA, NA, 0, NA, NA, NA])
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.count('a')
xp = Series([1, NA, 0, NA, NA, 0, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = [u('foo'), u('foofoo'), NA, u('foooofooofommmfoo')]
result = strings.str_count(values, 'f[o]+')
exp = np.array([1, 2, NA, 4])
tm.assert_numpy_array_equal(result, exp)
result = Series(values).str.count('f[o]+')
exp = Series([1, 2, NA, 4])
assert isinstance(result, Series)
tm.assert_series_equal(result, exp)
def test_contains(self):
values = np.array(['foo', NA, 'fooommm__foo',
'mmm_', 'foommm[_]+bar'], dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, NA, True, True, False], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, regex=False)
expected = np.array([False, NA, False, False, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
values = ['foo', 'xyz', 'fooommm__foo', 'mmm_']
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# case insensitive using regex
values = ['Foo', 'xYz', 'fOOomMm__fOo', 'MMM_']
result = strings.str_contains(values, 'FOO|mmm', case=False)
expected = np.array([True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
# case insensitive without regex
result = strings.str_contains(values, 'foo', regex=False, case=False)
expected = np.array([True, False, True, False])
tm.assert_numpy_array_equal(result, expected)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_contains(mixed, 'o')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.contains('o')
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = np.array([u'foo', NA, u'fooommm__foo', u'mmm_'],
dtype=np.object_)
pat = 'mmm[_]+'
result = strings.str_contains(values, pat)
expected = np.array([False, np.nan, True, True], dtype=np.object_)
tm.assert_numpy_array_equal(result, expected)
result = strings.str_contains(values, pat, na=False)
expected = np.array([False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
values = np.array(['foo', 'xyz', 'fooommm__foo', 'mmm_'],
dtype=np.object_)
result = strings.str_contains(values, pat)
expected = np.array([False, False, True, True])
assert result.dtype == np.bool_
tm.assert_numpy_array_equal(result, expected)
# na
values = Series(['om', 'foo', np.nan])
res = values.str.contains('foo', na="foo")
assert res.loc[2] == "foo"
def test_startswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = np.array(['a', NA, 'b', True, datetime.today(),
'foo', None, 1, 2.], dtype=np.object_)
rs = strings.str_startswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, True, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.startswith('f')
assert isinstance(rs, Series)
xp = Series([False, NA, False, NA, NA, True, NA, NA, NA])
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.startswith('foo')
exp = Series([False, NA, True, False, False, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.startswith('foo', na=True)
tm.assert_series_equal(result, exp.fillna(True).astype(bool))
def test_endswith(self):
values = Series(['om', NA, 'foo_nom', 'nom', 'bar_foo', NA, 'foo'])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
# mixed
mixed = ['a', NA, 'b', True, datetime.today(), 'foo', None, 1, 2.]
rs = strings.str_endswith(mixed, 'f')
xp = np.array([False, NA, False, NA, NA, False, NA, NA, NA],
dtype=np.object_)
tm.assert_numpy_array_equal(rs, xp)
rs = Series(mixed).str.endswith('f')
xp = Series([False, NA, False, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('foo_nom'), u('nom'), u('bar_foo'), NA,
u('foo')])
result = values.str.endswith('foo')
exp = Series([False, NA, False, False, True, NA, True])
tm.assert_series_equal(result, exp)
result = values.str.endswith('foo', na=False)
tm.assert_series_equal(result, exp.fillna(False).astype(bool))
def test_title(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.title()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.title()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.title()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_lower_upper(self):
values = Series(['om', NA, 'nom', 'nom'])
result = values.str.upper()
exp = Series(['OM', NA, 'NOM', 'NOM'])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
mixed = mixed.str.upper()
rs = Series(mixed).str.lower()
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('om'), NA, u('nom'), u('nom')])
result = values.str.upper()
exp = Series([u('OM'), NA, u('NOM'), u('NOM')])
tm.assert_series_equal(result, exp)
result = result.str.lower()
tm.assert_series_equal(result, values)
def test_capitalize(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.capitalize()
exp = Series(["Foo", "Bar", NA, "Blah", "Blurg"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "blah", None,
1, 2.])
mixed = mixed.str.capitalize()
exp = Series(["Foo", NA, "Bar", NA, NA, "Blah", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.capitalize()
exp = Series([u("Foo"), NA, u("Bar"), u("Blurg")])
tm.assert_series_equal(results, exp)
def test_swapcase(self):
values = Series(["FOO", "BAR", NA, "Blah", "blurg"])
result = values.str.swapcase()
exp = Series(["foo", "bar", NA, "bLAH", "BLURG"])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(["FOO", NA, "bar", True, datetime.today(), "Blah", None,
1, 2.])
mixed = mixed.str.swapcase()
exp = Series(["foo", NA, "BAR", NA, NA, "bLAH", NA, NA, NA])
tm.assert_almost_equal(mixed, exp)
# unicode
values = Series([u("FOO"), NA, u("bar"), u("Blurg")])
results = values.str.swapcase()
exp = Series([u("foo"), NA, u("BAR"), u("bLURG")])
tm.assert_series_equal(results, exp)
def test_casemethods(self):
values = ['aaa', 'bbb', 'CCC', 'Dddd', 'eEEE']
s = Series(values)
assert s.str.lower().tolist() == [v.lower() for v in values]
assert s.str.upper().tolist() == [v.upper() for v in values]
assert s.str.title().tolist() == [v.title() for v in values]
assert s.str.capitalize().tolist() == [v.capitalize() for v in values]
assert s.str.swapcase().tolist() == [v.swapcase() for v in values]
def test_replace(self):
values = Series(['fooBAD__barBAD', NA])
result = values.str.replace('BAD[_]*', '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series(['foobarBAD', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace('BAD[_]*', '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace('BAD[_]*', '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace('BAD[_]*', '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
result = values.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE)
tm.assert_series_equal(result, exp)
# GH 13438
for klass in (Series, Index):
for repl in (None, 3, {'a': 'b'}):
for data in (['a', 'b', None], ['a', 'b', 'c', 'ad']):
values = klass(data)
pytest.raises(TypeError, values.str.replace, 'a', repl)
def test_replace_callable(self):
# GH 15055
values = Series(['fooBAD__barBAD', NA])
# test with callable
repl = lambda m: m.group(0).swapcase()
result = values.str.replace('[a-z][A-Z]{2}', repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
# test with wrong number of arguments, raising an error
if compat.PY2:
p_err = r'takes (no|(exactly|at (least|most)) ?\d+) arguments?'
else:
p_err = (r'((takes)|(missing)) (?(2)from \d+ to )?\d+ '
r'(?(3)required )positional arguments?')
repl = lambda: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
repl = lambda m, x, y=None: None
with tm.assert_raises_regex(TypeError, p_err):
values.str.replace('a', repl)
# test regex named groups
values = Series(['Foo Bar Baz', NA])
pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"
repl = lambda m: m.group('middle').swapcase()
result = values.str.replace(pat, repl)
exp = Series(['bAR', NA])
tm.assert_series_equal(result, exp)
def test_replace_compiled_regex(self):
# GH 15446
values = Series(['fooBAD__barBAD', NA])
# test with compiled regex
pat = re.compile(r'BAD[_]*')
result = values.str.replace(pat, '')
exp = Series(['foobar', NA])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['aBAD', NA, 'bBAD', True, datetime.today(), 'fooBAD',
None, 1, 2.])
rs = Series(mixed).str.replace(pat, '')
xp = Series(['a', NA, 'b', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA])
result = values.str.replace(pat, '')
exp = Series([u('foobar'), NA])
tm.assert_series_equal(result, exp)
result = values.str.replace(pat, '', n=1)
exp = Series([u('foobarBAD'), NA])
tm.assert_series_equal(result, exp)
# flags + unicode
values = Series([b"abcd,\xc3\xa0".decode("utf-8")])
exp = Series([b"abcd, \xc3\xa0".decode("utf-8")])
pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)
result = values.str.replace(pat, ", ")
tm.assert_series_equal(result, exp)
# case and flags provided to str.replace will have no effect
# and will produce warnings
values = Series(['fooBAD__barBAD__bad', NA])
pat = re.compile(r'BAD[_]*')
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', flags=re.IGNORECASE)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=False)
with tm.assert_raises_regex(ValueError,
"case and flags cannot be"):
result = values.str.replace(pat, '', case=True)
# test with callable
values = Series(['fooBAD__barBAD', NA])
repl = lambda m: m.group(0).swapcase()
pat = re.compile('[a-z][A-Z]{2}')
result = values.str.replace(pat, repl, n=2)
exp = Series(['foObaD__baRbaD', NA])
tm.assert_series_equal(result, exp)
def test_repeat(self):
values = Series(['a', 'b', NA, 'c', NA, 'd'])
result = values.str.repeat(3)
exp = Series(['aaa', 'bbb', NA, 'ccc', NA, 'ddd'])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series(['a', 'bb', NA, 'cccc', NA, 'dddddd'])
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'foo', None, 1,
2.])
rs = Series(mixed).str.repeat(3)
xp = Series(['aaa', NA, 'bbb', NA, NA, 'foofoofoo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('d')])
result = values.str.repeat(3)
exp = Series([u('aaa'), u('bbb'), NA, u('ccc'), NA, u('ddd')])
tm.assert_series_equal(result, exp)
result = values.str.repeat([1, 2, 3, 4, 5, 6])
exp = Series([u('a'), u('bb'), NA, u('cccc'), NA, u('dddddd')])
tm.assert_series_equal(result, exp)
def test_match(self):
# New match behavior introduced in 0.13
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
values = Series(['fooBAD__barBAD', NA, 'foo'])
result = values.str.match('.*BAD[_]+.*BAD')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# test passing as_indexer still works but is ignored
values = Series(['fooBAD__barBAD', NA, 'foo'])
exp = Series([True, NA, False])
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=True)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*BAD[_]+.*BAD', as_indexer=False)
tm.assert_series_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = values.str.match('.*(BAD[_]+).*(BAD)', as_indexer=True)
tm.assert_series_equal(result, exp)
pytest.raises(ValueError, values.str.match, '.*(BAD[_]+).*(BAD)',
as_indexer=False)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.match('.*(BAD[_]+).*(BAD)')
xp = Series([True, NA, True, NA, NA, False, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_series_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.match('.*(BAD[_]+).*(BAD)')
exp = Series([True, NA, False])
tm.assert_series_equal(result, exp)
# na GH #6609
res = Series(['a', 0, np.nan]).str.match('a', na=False)
exp = Series([True, False, False])
assert_series_equal(exp, res)
res = Series(['a', 0, np.nan]).str.match('a')
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
def test_extract_expand_None(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
def test_extract_expand_unspecified(self):
values = Series(['fooBAD__barBAD', NA, 'foo'])
with tm.assert_produces_warning(FutureWarning):
values.str.extract('.*(BAD[_]+).*(BAD)')
def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assert_raises_regex(ValueError, "supported"):
idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
assert result.name == 'uno'
exp = klass(['A', 'A'], name='uno')
if klass == Series:
tm.assert_series_equal(result, exp)
else:
tm.assert_index_equal(result, exp)
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
s = Series(data, index=index)
result = s.str.extract(r'(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=False)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
# single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
assert r.name == e.name
def test_extract_expand_True(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
# mixed
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
er, er, er, er])
tm.assert_frame_equal(rs, exp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
pytest.raises(ValueError, f)
# only non-capturing groups
f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
pytest.raises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
assert isinstance(result_df, DataFrame)
result_series = result_df['uno']
assert_series_equal(result_series, Series(['A', 'A'], name='uno'))
def test_extract_series(self):
# extract should give the same result whether or not the
# series has a name.
for series_name in None, "series_name":
s = Series(['A1', 'B2', 'C3'], name=series_name)
# one group, no matches
result = s.str.extract('(_)', expand=True)
exp = DataFrame([NA, NA, NA], dtype=object)
tm.assert_frame_equal(result, exp)
# two groups, no matches
result = s.str.extract('(_)(_)', expand=True)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
result = s.str.extract('([AB])[123]', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
# two groups, some matches
result = s.str.extract('([AB])([123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
result = s.str.extract('(?P<letter>[AB])', expand=True)
exp = DataFrame({"letter": ['A', 'B', NA]})
tm.assert_frame_equal(result, exp)
# two named groups
result = s.str.extract(
'(?P<letter>[AB])(?P<number>[123])',
expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
result = s.str.extract('([AB])(?P<number>[123])', expand=True)
exp = DataFrame(e_list, columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
result = s.str.extract('([AB])(?:[123])', expand=True)
exp = DataFrame(['A', 'B', NA])
tm.assert_frame_equal(result, exp)
def test_extract_optional_groups(self):
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
'([AB])([123])(?:[123])', expand=True)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
'(?P<letter>[AB])?(?P<number>[123])', expand=True)
e_list = [
['A', '1'],
['B', '2'],
[NA, '3']
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
'(?P<letter>[ABC])(?P<number>[123])?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# GH6348
# not passing index to the extractor
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
result = Series(data, index=index).str.extract(
r'(\d)', expand=True)
exp = DataFrame(['1', '2', NA], index=index)
tm.assert_frame_equal(result, exp)
result = Series(data, index=index).str.extract(
r'(?P<letter>\D)(?P<number>\d)?', expand=True)
e_list = [
['A', '1'],
['B', '2'],
['C', NA]
]
exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
i_funs = [
tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
]
for index in i_funs:
check_index(index())
def test_extract_single_group_returns_frame(self):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(['a3', 'b3', 'c2'], name='series_name')
r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
e = DataFrame({"letter": ['a', 'b', 'c']})
tm.assert_frame_equal(r, e)
def test_extractall(self):
subject_list = [
'dave@google.com',
'tdhock5@gmail.com',
'maudelaperriere@gmail.com',
'rob@gmail.com some text steve@gmail.com',
'a@b.com some text c@d.com and e@f.com',
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"), ("steve", "gmail", "com"),
("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
]
named_pattern = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
S = Series(subject_list)
# extractall should return a DataFrame with one row for each
# match, indexed by the subject from which the match came.
expected_index = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(3, 0),
(3, 1),
(4, 0),
(4, 1),
(4, 2),
], names=(None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = S.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# The index of the input Series should be used to construct
# the index of the output DataFrame:
series_index = MultiIndex.from_tuples([
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
])
Si = Series(subject_list, series_index)
expected_index = MultiIndex.from_tuples([
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
], names=(None, None, "match"))
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# MultiIndexed subject with names.
Sn = Series(subject_list, series_index)
Sn.index.names = ("matches", "description")
expected_index.names = ("matches", "description", "match")
expected_df = DataFrame(
expected_tuples, expected_index, expected_columns)
computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
tm.assert_frame_equal(computed_df, expected_df)
# optional groups.
subject_list = ['', 'A1', '32']
named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(named_pattern)
expected_index = MultiIndex.from_tuples([
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=['letter', 'number'])
tm.assert_frame_equal(computed_df, expected_df)
# only one of two groups has a name.
pattern = '([AB])?(?P<number>[123])'
computed_df = Series(subject_list).str.extractall(pattern)
expected_df = DataFrame([
('A', '1'),
(NA, '3'),
(NA, '2'),
], expected_index, columns=[0, 'number'])
tm.assert_frame_equal(computed_df, expected_df)
def test_extractall_single_group(self):
# extractall(one named group) returns DataFrame with one named
# column.
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
r = s.str.extractall(r'(?P<letter>[a-z])')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
tm.assert_frame_equal(r, e)
# extractall(one un-named group) returns DataFrame with one
# un-named column.
r = s.str.extractall(r'([a-z])')
e = DataFrame(['a', 'b', 'd', 'c'], i)
tm.assert_frame_equal(r, e)
def test_extractall_single_group_with_quantifier(self):
# extractall(one un-named group with quantifier) returns
# DataFrame with one un-named column (GH13382).
s = Series(['ab3', 'abc3', 'd4cd2'], name='series_name')
r = s.str.extractall(r'([a-z]+)')
i = MultiIndex.from_tuples([
(0, 0),
(1, 0),
(2, 0),
(2, 1),
], names=(None, "match"))
e = DataFrame(['ab', 'abc', 'd', 'cd'], i)
tm.assert_frame_equal(r, e)
def test_extractall_no_matches(self):
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
# one un-named group.
r = s.str.extractall('(z)')
e = DataFrame(columns=[0])
tm.assert_frame_equal(r, e)
# two un-named groups.
r = s.str.extractall('(z)(z)')
e = DataFrame(columns=[0, 1])
tm.assert_frame_equal(r, e)
# one named group.
r = s.str.extractall('(?P<first>z)')
e = DataFrame(columns=["first"])
tm.assert_frame_equal(r, e)
# two named groups.
r = s.str.extractall('(?P<first>z)(?P<second>z)')
e = DataFrame(columns=["first", "second"])
tm.assert_frame_equal(r, e)
# one named, one un-named.
r = s.str.extractall('(z)(?P<second>z)')
e = DataFrame(columns=[0,
"second"])
tm.assert_frame_equal(r, e)
def test_extractall_stringindex(self):
s = Series(["a1a2", "b1", "c1"], name='xxx')
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)],
names=[None, 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
# index should return the same result as the default index without name
# thus index.name doesn't affect the result
for idx in [Index(["a1a2", "b1", "c1"]),
Index(["a1a2", "b1", "c1"], name='xxx')]:
res = idx.str.extractall(r"[ab](?P<digit>\d)")
tm.assert_frame_equal(res, exp)
s = Series(["a1a2", "b1", "c1"], name='s_name',
index=Index(["XX", "yy", "zz"], name='idx_name'))
res = s.str.extractall(r"[ab](?P<digit>\d)")
exp_idx = MultiIndex.from_tuples([("XX", 0), ("XX", 1), ("yy", 0)],
names=["idx_name", 'match'])
exp = DataFrame({'digit': ["1", "2", "1"]}, index=exp_idx)
tm.assert_frame_equal(res, exp)
def test_extractall_errors(self):
# Does not make sense to use extractall with a regex that has
# no capture groups. (it returns DataFrame with one column for
# each capture group)
s = Series(['a3', 'b3', 'd4c2'], name='series_name')
with tm.assert_raises_regex(ValueError, "no capture groups"):
s.str.extractall(r'[a-z]')
def test_extract_index_one_two_groups(self):
s = Series(['a3', 'b3', 'd4c2'], index=["A3", "B3", "D4"],
name='series_name')
r = s.index.str.extract(r'([A-Z])', expand=True)
e = DataFrame(['A', "B", "D"])
tm.assert_frame_equal(r, e)
# Prior to v0.18.0, index.str.extract(regex with one group)
# returned Index. With more than one group, extract raised an
# error (GH9980). Now extract always returns DataFrame.
r = s.index.str.extract(
r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
e_list = [
("A", "3"),
("B", "3"),
("D", "4"),
]
e = DataFrame(e_list, columns=["letter", "digit"])
tm.assert_frame_equal(r, e)
def test_extractall_same_as_extract(self):
s = Series(['a3', 'b3', 'c2'], name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_multi_index = s.str.extractall(pattern_two_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_multi_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_multi_index = s.str.extractall(pattern_two_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_multi_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_multi_index = s.str.extractall(pattern_one_named)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_multi_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_multi_index = s.str.extractall(pattern_one_noname)
no_multi_index = has_multi_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_multi_index)
def test_extractall_same_as_extract_subject_index(self):
# same as above tests, but s has an MultiIndex.
i = MultiIndex.from_tuples([
("A", "first"),
("B", "second"),
("C", "third"),
], names=("capital", "ordinal"))
s = Series(['a3', 'b3', 'c2'], i, name='series_name')
pattern_two_noname = r'([a-z])([0-9])'
extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
has_match_index = s.str.extractall(pattern_two_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_noname, no_match_index)
pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
extract_two_named = s.str.extract(pattern_two_named, expand=True)
has_match_index = s.str.extractall(pattern_two_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_two_named, no_match_index)
pattern_one_named = r'(?P<group_name>[a-z])'
extract_one_named = s.str.extract(pattern_one_named, expand=True)
has_match_index = s.str.extractall(pattern_one_named)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_named, no_match_index)
pattern_one_noname = r'([a-z])'
extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
has_match_index = s.str.extractall(pattern_one_noname)
no_match_index = has_match_index.xs(0, level="match")
tm.assert_frame_equal(extract_one_noname, no_match_index)
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=object)
empty_int = Series(dtype=int)
empty_bool = Series(dtype=bool)
empty_bytes = Series(dtype=object)
# GH7241
# (extract) on empty series
tm.assert_series_equal(empty_str, empty.str.cat(empty))
assert '' == empty.str.cat()
tm.assert_series_equal(empty_str, empty.str.title())
tm.assert_series_equal(empty_int, empty.str.count('a'))
tm.assert_series_equal(empty_bool, empty.str.contains('a'))
tm.assert_series_equal(empty_bool, empty.str.startswith('a'))
tm.assert_series_equal(empty_bool, empty.str.endswith('a'))
tm.assert_series_equal(empty_str, empty.str.lower())
tm.assert_series_equal(empty_str, empty.str.upper())
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
tm.assert_frame_equal(
DataFrame(columns=[0], dtype=str),
empty.str.extract('()', expand=True))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=True))
tm.assert_series_equal(
empty_str,
empty.str.extract('()', expand=False))
tm.assert_frame_equal(
DataFrame(columns=[0, 1], dtype=str),
empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_str.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
tm.assert_series_equal(empty_str, empty_str.str.findall('a'))
tm.assert_series_equal(empty_int, empty.str.find('a'))
tm.assert_series_equal(empty_int, empty.str.rfind('a'))
tm.assert_series_equal(empty_str, empty.str.pad(42))
tm.assert_series_equal(empty_str, empty.str.center(42))
tm.assert_series_equal(empty_str, empty.str.split('a'))
tm.assert_series_equal(empty_str, empty.str.rsplit('a'))
tm.assert_series_equal(empty_str,
empty.str.partition('a', expand=False))
tm.assert_series_equal(empty_str,
empty.str.rpartition('a', expand=False))
tm.assert_series_equal(empty_str, empty.str.slice(stop=1))
tm.assert_series_equal(empty_str, empty.str.slice(step=1))
tm.assert_series_equal(empty_str, empty.str.strip())
tm.assert_series_equal(empty_str, empty.str.lstrip())
tm.assert_series_equal(empty_str, empty.str.rstrip())
tm.assert_series_equal(empty_str, empty.str.wrap(42))
tm.assert_series_equal(empty_str, empty.str.get(0))
tm.assert_series_equal(empty_str, empty_bytes.str.decode('ascii'))
tm.assert_series_equal(empty_bytes, empty.str.encode('ascii'))
tm.assert_series_equal(empty_str, empty.str.isalnum())
tm.assert_series_equal(empty_str, empty.str.isalpha())
tm.assert_series_equal(empty_str, empty.str.isdigit())
tm.assert_series_equal(empty_str, empty.str.isspace())
tm.assert_series_equal(empty_str, empty.str.islower())
tm.assert_series_equal(empty_str, empty.str.isupper())
tm.assert_series_equal(empty_str, empty.str.istitle())
tm.assert_series_equal(empty_str, empty.str.isnumeric())
tm.assert_series_equal(empty_str, empty.str.isdecimal())
tm.assert_series_equal(empty_str, empty.str.capitalize())
tm.assert_series_equal(empty_str, empty.str.swapcase())
tm.assert_series_equal(empty_str, empty.str.normalize('NFC'))
if compat.PY3:
table = str.maketrans('a', 'b')
else:
import string
table = string.maketrans('a', 'b')
tm.assert_series_equal(empty_str, empty.str.translate(table))
def test_empty_str_methods_to_frame(self):
empty = Series(dtype=str)
empty_df = DataFrame([])
tm.assert_frame_equal(empty_df, empty.str.partition('a'))
tm.assert_frame_equal(empty_df, empty.str.rpartition('a'))
def test_ismethods(self):
values = ['A', 'b', 'Xy', '4', '3A', '', 'TT', '55', '-', ' ']
str_s = Series(values)
alnum_e = [True, True, True, True, True, False, True, True, False,
False]
alpha_e = [True, True, True, False, False, False, True, False, False,
False]
digit_e = [False, False, False, True, False, False, False, True, False,
False]
# TODO: unused
num_e = [False, False, False, True, False, False, # noqa
False, True, False, False]
space_e = [False, False, False, False, False, False, False, False,
False, True]
lower_e = [False, True, False, False, False, False, False, False,
False, False]
upper_e = [True, False, False, False, True, False, True, False, False,
False]
title_e = [True, False, True, False, True, False, False, False, False,
False]
tm.assert_series_equal(str_s.str.isalnum(), Series(alnum_e))
tm.assert_series_equal(str_s.str.isalpha(), Series(alpha_e))
tm.assert_series_equal(str_s.str.isdigit(), Series(digit_e))
tm.assert_series_equal(str_s.str.isspace(), Series(space_e))
tm.assert_series_equal(str_s.str.islower(), Series(lower_e))
tm.assert_series_equal(str_s.str.isupper(), Series(upper_e))
tm.assert_series_equal(str_s.str.istitle(), Series(title_e))
assert str_s.str.isalnum().tolist() == [v.isalnum() for v in values]
assert str_s.str.isalpha().tolist() == [v.isalpha() for v in values]
assert str_s.str.isdigit().tolist() == [v.isdigit() for v in values]
assert str_s.str.isspace().tolist() == [v.isspace() for v in values]
assert str_s.str.islower().tolist() == [v.islower() for v in values]
assert str_s.str.isupper().tolist() == [v.isupper() for v in values]
assert str_s.str.istitle().tolist() == [v.istitle() for v in values]
def test_isnumeric(self):
# 0x00bc: ¼ VULGAR FRACTION ONE QUARTER
# 0x2605: ★ not number
# 0x1378: ፸ ETHIOPIC NUMBER SEVENTY
# 0xFF13: 3 FULLWIDTH DIGIT THREE
values = ['A', '3', u'¼', u'★', u'፸', u'3', 'four']
s = Series(values)
numeric_e = [False, True, True, False, True, True, False]
decimal_e = [False, True, False, False, False, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
unicodes = [u'A', u'3', u'¼', u'★', u'፸', u'3', u'four']
assert s.str.isnumeric().tolist() == [v.isnumeric() for v in unicodes]
assert s.str.isdecimal().tolist() == [v.isdecimal() for v in unicodes]
values = ['A', np.nan, u'¼', u'★', np.nan, u'3', 'four']
s = Series(values)
numeric_e = [False, np.nan, True, False, np.nan, True, False]
decimal_e = [False, np.nan, False, False, np.nan, True, False]
tm.assert_series_equal(s.str.isnumeric(), Series(numeric_e))
tm.assert_series_equal(s.str.isdecimal(), Series(decimal_e))
def test_get_dummies(self):
s = Series(['a|b', 'a|c', np.nan])
result = s.str.get_dummies('|')
expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]],
columns=list('abc'))
tm.assert_frame_equal(result, expected)
s = Series(['a;b', 'a', 7])
result = s.str.get_dummies(';')
expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]],
columns=list('7ab'))
tm.assert_frame_equal(result, expected)
# GH9980, GH8028
idx = Index(['a|b', 'a|c', 'b|c'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0), (1, 0, 1),
(0, 1, 1)], names=('a', 'b', 'c'))
tm.assert_index_equal(result, expected)
def test_get_dummies_with_name_dummy(self):
# GH 12180
# Dummies named 'name' should work as expected
s = Series(['a', 'b,name', 'b'])
result = s.str.get_dummies(',')
expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]],
columns=['a', 'b', 'name'])
tm.assert_frame_equal(result, expected)
idx = Index(['a|b', 'name|c', 'b|name'])
result = idx.str.get_dummies('|')
expected = MultiIndex.from_tuples([(1, 1, 0, 0), (0, 0, 1, 1),
(0, 1, 0, 1)],
names=('a', 'b', 'c', 'name'))
tm.assert_index_equal(result, expected)
def test_join(self):
values = Series(['a_b_c', 'c_d_e', np.nan, 'f_g_h'])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.split('_').str.join('_')
xp = Series(['a_b', NA, 'asdf_cas_asdf', NA, NA, 'foo', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), np.nan, u('f_g_h')])
result = values.str.split('_').str.join('_')
tm.assert_series_equal(values, result)
def test_len(self):
values = Series(['foo', 'fooo', 'fooooo', np.nan, 'fooooooo'])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b', NA, 'asdf_cas_asdf', True, datetime.today(),
'foo', None, 1, 2.])
rs = Series(mixed).str.len()
xp = Series([3, NA, 13, NA, NA, 3, NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('foo'), u('fooo'), u('fooooo'), np.nan, u(
'fooooooo')])
result = values.str.len()
exp = values.map(lambda x: len(x) if notna(x) else NA)
tm.assert_series_equal(result, exp)
def test_findall(self):
values = Series(['fooBAD__barBAD', NA, 'foo', 'BAD'])
result = values.str.findall('BAD[_]*')
exp = Series([['BAD__', 'BAD'], NA, [], ['BAD']])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['fooBAD__barBAD', NA, 'foo', True, datetime.today(),
'BAD', None, 1, 2.])
rs = Series(mixed).str.findall('BAD[_]*')
xp = Series([['BAD__', 'BAD'], NA, [], NA, NA, ['BAD'], NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo'), u('BAD')])
result = values.str.findall('BAD[_]*')
exp = Series([[u('BAD__'), u('BAD')], NA, [], [u('BAD')]])
tm.assert_almost_equal(result, exp)
def test_find(self):
values = Series(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF', 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, 3, 1, 0, -1]))
expected = np.array([v.find('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF') for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, 3, 7, 4, -1]))
expected = np.array([v.find('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, 5, 7, 4, -1]))
expected = np.array([v.rfind('EF', 3) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.find('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, 3, -1, 4, -1]))
expected = np.array([v.rfind('EF', 3, 6) for v in values.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.find(0)
with tm.assert_raises_regex(TypeError,
"expected a string object, not int"):
result = values.str.rfind(0)
def test_find_nan(self):
values = Series(['ABCDEFG', np.nan, 'DEFGHIJEF', np.nan, 'XXXX'])
result = values.str.find('EF')
tm.assert_series_equal(result, Series([4, np.nan, 1, np.nan, -1]))
result = values.str.rfind('EF')
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.rfind('EF', 3)
tm.assert_series_equal(result, Series([4, np.nan, 7, np.nan, -1]))
result = values.str.find('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
result = values.str.rfind('EF', 3, 6)
tm.assert_series_equal(result, Series([4, np.nan, -1, np.nan, -1]))
def test_index(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['ABCDEFG', 'BCDEFEF', 'DEFGHIJEF', 'EFGHEF'])
result = s.str.index('EF')
_check(result, klass([4, 3, 1, 0]))
expected = np.array([v.index('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF')
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF') for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('EF', 3)
_check(result, klass([4, 3, 7, 4]))
expected = np.array([v.index('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('EF', 3)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.rindex('EF', 3) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.index('E', 4, 8)
_check(result, klass([4, 5, 7, 4]))
expected = np.array([v.index('E', 4, 8) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
result = s.str.rindex('E', 0, 5)
_check(result, klass([4, 3, 1, 4]))
expected = np.array([v.rindex('E', 0, 5) for v in s.values],
dtype=np.int64)
tm.assert_numpy_array_equal(result.values, expected)
with tm.assert_raises_regex(ValueError,
"substring not found"):
result = s.str.index('DE')
with tm.assert_raises_regex(TypeError,
"expected a string "
"object, not int"):
result = s.str.index(0)
# test with nan
s = Series(['abcb', 'ab', 'bcbe', np.nan])
result = s.str.index('b')
tm.assert_series_equal(result, Series([1, 1, 0, np.nan]))
result = s.str.rindex('b')
tm.assert_series_equal(result, Series([3, 1, 2, np.nan]))
def test_pad(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left')
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='left')
xp = Series([' a', NA, ' b', NA, NA, ' ee', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='right')
xp = Series(['a ', NA, 'b ', NA, NA, 'ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
mixed = Series(['a', NA, 'b', True, datetime.today(), 'ee', None, 1, 2.
])
rs = Series(mixed).str.pad(5, side='both')
xp = Series([' a ', NA, ' b ', NA, NA, ' ee ', NA, NA, NA])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.pad(5, side='left')
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right')
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both')
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_pad_fillchar(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.pad(5, side='left', fillchar='X')
exp = Series(['XXXXa', 'XXXXb', NA, 'XXXXc', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='right', fillchar='X')
exp = Series(['aXXXX', 'bXXXX', NA, 'cXXXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.pad(5, side='both', fillchar='X')
exp = Series(['XXaXX', 'XXbXX', NA, 'XXcXX', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.pad(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.pad(5, fillchar=5)
def test_pad_width(self):
# GH 13598
s = Series(['1', '22', 'a', 'bb'])
for f in ['center', 'ljust', 'rjust', 'zfill', 'pad']:
with tm.assert_raises_regex(TypeError,
"width must be of "
"integer type, not*"):
getattr(s.str, f)('f')
def test_translate(self):
def _check(result, expected):
if isinstance(result, Series):
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
for klass in [Series, Index]:
s = klass(['abcdefg', 'abcc', 'cdddfg', 'cdefggg'])
if not compat.PY3:
import string
table = string.maketrans('abc', 'cde')
else:
table = str.maketrans('abc', 'cde')
result = s.str.translate(table)
expected = klass(['cdedefg', 'cdee', 'edddfg', 'edefggg'])
_check(result, expected)
# use of deletechars is python 2 only
if not compat.PY3:
result = s.str.translate(table, deletechars='fg')
expected = klass(['cdede', 'cdee', 'eddd', 'ede'])
_check(result, expected)
result = s.str.translate(None, deletechars='fg')
expected = klass(['abcde', 'abcc', 'cddd', 'cde'])
_check(result, expected)
else:
with tm.assert_raises_regex(
ValueError, "deletechars is not a valid argument"):
result = s.str.translate(table, deletechars='fg')
# Series with non-string values
s = Series(['a', 'b', 'c', 1.2])
expected = Series(['c', 'd', 'e', np.nan])
result = s.str.translate(table)
tm.assert_series_equal(result, expected)
def test_center_ljust_rjust(self):
values = Series(['a', 'b', NA, 'c', NA, 'eeeeee'])
result = values.str.center(5)
exp = Series([' a ', ' b ', NA, ' c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series(['a ', 'b ', NA, 'c ', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([' a', ' b', NA, ' c', NA, 'eeeeee'])
tm.assert_almost_equal(result, exp)
# mixed
mixed = Series(['a', NA, 'b', True, datetime.today(), 'c', 'eee', None,
1, 2.])
rs = Series(mixed).str.center(5)
xp = Series([' a ', NA, ' b ', NA, NA, ' c ', ' eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.ljust(5)
xp = Series(['a ', NA, 'b ', NA, NA, 'c ', 'eee ', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
rs = Series(mixed).str.rjust(5)
xp = Series([' a', NA, ' b', NA, NA, ' c', ' eee', NA, NA, NA
])
assert isinstance(rs, Series)
tm.assert_almost_equal(rs, xp)
# unicode
values = Series([u('a'), u('b'), NA, u('c'), NA, u('eeeeee')])
result = values.str.center(5)
exp = Series([u(' a '), u(' b '), NA, u(' c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.ljust(5)
exp = Series([u('a '), u('b '), NA, u('c '), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
result = values.str.rjust(5)
exp = Series([u(' a'), u(' b'), NA, u(' c'), NA, u('eeeeee')])
tm.assert_almost_equal(result, exp)
def test_center_ljust_rjust_fillchar(self):
values = Series(['a', 'bb', 'cccc', 'ddddd', 'eeeeee'])
result = values.str.center(5, fillchar='X')
expected = Series(['XXaXX', 'XXbbX', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.center(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.ljust(5, fillchar='X')
expected = Series(['aXXXX', 'bbXXX', 'ccccX', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.ljust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.rjust(5, fillchar='X')
expected = Series(['XXXXa', 'XXXbb', 'Xcccc', 'ddddd', 'eeeeee'])
tm.assert_series_equal(result, expected)
expected = np.array([v.rjust(5, 'X') for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
        # If fillchar is not a character, normal str raises TypeError
# 'aaa'.ljust(5, 'XY')
# TypeError: must be char, not str
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.center(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.ljust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not str"):
result = values.str.rjust(5, fillchar='XY')
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.center(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.ljust(5, fillchar=1)
with tm.assert_raises_regex(TypeError,
"fillchar must be a "
"character, not int"):
result = values.str.rjust(5, fillchar=1)
def test_zfill(self):
values = Series(['1', '22', 'aaa', '333', '45678'])
result = values.str.zfill(5)
expected = Series(['00001', '00022', '00aaa', '00333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(5) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
result = values.str.zfill(3)
expected = Series(['001', '022', 'aaa', '333', '45678'])
tm.assert_series_equal(result, expected)
expected = np.array([v.zfill(3) for v in values.values],
dtype=np.object_)
tm.assert_numpy_array_equal(result.values, expected)
values = Series(['1', np.nan, 'aaa', np.nan, '45678'])
result = values.str.zfill(5)
expected = Series(['00001', np.nan, '00aaa', np.nan, '45678'])
tm.assert_series_equal(result, expected)
def test_split(self):
values = Series(['a_b_c', 'c_d_e', NA, 'f_g_h'])
result = values.str.split('_')
exp = Series([['a', 'b', 'c'], ['c', 'd', 'e'], NA, ['f', 'g', 'h']])
tm.assert_series_equal(result, exp)
# more than one char
values = Series(['a__b__c', 'c__d__e', NA, 'f__g__h'])
result = values.str.split('__')
tm.assert_series_equal(result, exp)
result = values.str.split('__', expand=False)
tm.assert_series_equal(result, exp)
# mixed
mixed = Series(['a_b_c', NA, 'd_e_f', True, datetime.today(), None, 1,
2.])
result = mixed.str.split('_')
exp = Series([['a', 'b', 'c'], NA, ['d', 'e', 'f'], NA, NA, NA, NA, NA
])
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
result = mixed.str.split('_', expand=False)
assert isinstance(result, Series)
tm.assert_almost_equal(result, exp)
# unicode
values = Series([u('a_b_c'), u('c_d_e'), NA, u('f_g_h')])
result = values.str.split('_')
exp = Series([[u('a'), u('b'), u('c')], [u('c'), u('d'), u('e')], NA,
[u('f'), u('g'), u('h')]])
| tm.assert_series_equal(result, exp) | pandas.util.testing.assert_series_equal |
# notes
# ask michael if we can get the locations of the different cells
# this thing (LSE) but on the whole brain
# compare to the omni one
# bic curves for both
# compute ARI
# slides for tomorrow
# when we present (seems like it should be obvious)
# then show the result, know whether it is what they would have expected
# ARI curve
# best ARI
# BIC Curve
# best bic
# at least one where we get cliques (across cliques)
#%% Imports
import math
import os
from operator import itemgetter
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from graspy.cluster import GaussianCluster
from graspy.embed import AdjacencySpectralEmbed, OmnibusEmbed
from graspy.models import SBMEstimator
from graspy.plot import heatmap, pairplot
from graspy.utils import binarize, cartprod, pass_to_ranks
from joblib.parallel import Parallel, delayed
from matplotlib.colors import LogNorm
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from spherecluster import SphericalKMeans
from src.data import load_everything
from src.utils import savefig, export_skeleton_json
from src.visualization import sankey
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
# %% [markdown]
# # Parameters
MB_VERSION = "mb_2019-09-23"
BRAIN_VERSION = "2019-09-18-v2"
GRAPH_TYPES = ["Gad", "Gaa", "Gdd", "Gda"]
GRAPH_TYPE_LABELS = [r"A $\to$ D", r"A $\to$ A", r"D $\to$ D", r"D $\to$ A"]
N_GRAPH_TYPES = len(GRAPH_TYPES)
SAVEFIGS = False
DEFAULT_FMT = "png"
DEFAULT_DPI = 150
MAX_CLUSTERS = 6
MIN_CLUSTERS = 6
N_INIT = 1
PTR = True
# Functions
def stashfig(name, **kws):
if SAVEFIGS:
        savefig(name, foldername=FNAME, fmt=DEFAULT_FMT, dpi=DEFAULT_DPI, **kws)
def annotate_arrow(ax, coords=(0.061, 0.93)):
arrow_args = dict(
arrowstyle="-|>",
color="k",
connectionstyle="arc3,rad=-0.4", # "angle3,angleA=90,angleB=90"
)
t = ax.annotate("Target", xy=coords, xycoords="figure fraction")
ax.annotate(
"Source", xy=(0, 0.5), xycoords=t, xytext=(-1.4, -2.1), arrowprops=arrow_args
)
def ase(adj, n_components):
if PTR:
adj = pass_to_ranks(adj)
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(adj)
latent = np.concatenate(latent, axis=-1)
return latent
def to_laplace(graph, form="DAD", regularizer=None):
r"""
A function to convert graph adjacency matrix to graph laplacian.
Currently supports I-DAD, DAD, and R-DAD laplacians, where D is the diagonal
matrix of degrees of each node raised to the -1/2 power, I is the
identity matrix, and A is the adjacency matrix.
R-DAD is regularized laplacian: where :math:`D_t = D + regularizer*I`.
Parameters
----------
graph: object
Either array-like, (n_vertices, n_vertices) numpy array,
or an object of type networkx.Graph.
form: {'I-DAD' (default), 'DAD', 'R-DAD'}, string, optional
- 'I-DAD'
Computes :math:`L = I - D*A*D`
- 'DAD'
Computes :math:`L = D*A*D`
- 'R-DAD'
Computes :math:`L = D_t*A*D_t` where :math:`D_t = D + regularizer*I`
regularizer: int, float or None, optional (default=None)
Constant to be added to the diagonal of degree matrix. If None, average
node degree is added. If int or float, must be >= 0. Only used when
``form`` == 'R-DAD'.
Returns
-------
L: numpy.ndarray
2D (n_vertices, n_vertices) array representing graph
laplacian of specified form
References
----------
.. [1] <NAME>, and <NAME>. "Regularized spectral clustering
under the degree-corrected stochastic blockmodel." In Advances
in Neural Information Processing Systems, pp. 3120-3128. 2013
"""
valid_inputs = ["I-DAD", "DAD", "R-DAD"]
if form not in valid_inputs:
raise TypeError("Unsuported Laplacian normalization")
A = graph
in_degree = np.sum(A, axis=0)
out_degree = np.sum(A, axis=1)
# regularize laplacian with parameter
# set to average degree
if form == "R-DAD":
if regularizer is None:
regularizer = 1
elif not isinstance(regularizer, (int, float)):
raise TypeError(
"Regularizer must be a int or float, not {}".format(type(regularizer))
)
elif regularizer < 0:
raise ValueError("Regularizer must be greater than or equal to 0")
regularizer = regularizer * np.mean(out_degree)
in_degree += regularizer
out_degree += regularizer
with np.errstate(divide="ignore"):
in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5
out_root = 1 / np.sqrt(out_degree)
in_root[np.isinf(in_root)] = 0
out_root[np.isinf(out_root)] = 0
in_root = np.diag(in_root) # just change to sparse diag for sparse support
out_root = np.diag(out_root)
if form == "I-DAD":
L = np.diag(in_degree) - A
L = in_root @ L @ in_root
elif form == "DAD" or form == "R-DAD":
L = out_root @ A @ in_root
# return symmetrize(L, method="avg") # sometimes machine prec. makes this necessary
return L
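# A minimal usage sketch of to_laplace (added for illustration, not part of the
# original analysis). The 3x3 adjacency matrix is made up; it just shows that the
# "DAD" form returns A[i, j] / sqrt(out_degree[i] * in_degree[j]).
_demo_adj = np.array([[0.0, 1.0, 0.0],
                      [1.0, 0.0, 2.0],
                      [0.0, 1.0, 0.0]])
_demo_lap = to_laplace(_demo_adj, form="DAD")
# e.g. _demo_lap[1, 2] == 2 / np.sqrt(3 * 2), since out_degree[1] = 3 and in_degree[2] = 2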
def lse(adj, n_components, regularizer=None):
if PTR:
adj = pass_to_ranks(adj)
lap = to_laplace(adj, form="R-DAD")
ase = AdjacencySpectralEmbed(n_components=n_components)
latent = ase.fit_transform(lap)
latent = np.concatenate(latent, axis=-1)
return latent
def omni(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
omni = OmnibusEmbed(n_components=n_components // len(adjs))
latent = omni.fit_transform(adjs)
latent = np.concatenate(latent, axis=-1) # first is for in/out
latent = np.concatenate(latent, axis=-1) # second is for concat. each graph
return latent
def ase_concatenate(adjs, n_components):
if PTR:
adjs = [pass_to_ranks(a) for a in adjs]
ase = AdjacencySpectralEmbed(n_components=n_components // len(adjs))
graph_latents = []
for a in adjs:
latent = ase.fit_transform(a)
latent = np.concatenate(latent, axis=-1)
graph_latents.append(latent)
latent = np.concatenate(graph_latents, axis=-1)
return latent
def degree(adjs, *args):
deg_mat = np.zeros((n_verts, 2 * N_GRAPH_TYPES))
for i, g in enumerate(adjs):
deg_mat[:, i] = g.sum(axis=0)
deg_mat[:, i + N_GRAPH_TYPES] = g.sum(axis=1)
return deg_mat
def get_sbm_prob(adj, labels):
sbm = SBMEstimator(directed=True, loops=True)
sbm.fit(binarize(adj), y=labels)
data = sbm.block_p_
uni_labels, counts = np.unique(labels, return_counts=True)
sort_inds = np.argsort(counts)[::-1]
uni_labels = uni_labels[sort_inds]
data = data[np.ix_(sort_inds, sort_inds)]
prob_df = | pd.DataFrame(columns=uni_labels, index=uni_labels, data=data) | pandas.DataFrame |
#!/usr/bin/env python
# coding: utf-8
# # EDA + CenterNet Baseline
#
# References:
# * Took 3D visualization code from https://www.kaggle.com/zstusnoopy/visualize-the-location-and-3d-bounding-box-of-car
# * CenterNet paper https://arxiv.org/pdf/1904.07850.pdf
# * CenterNet repository https://github.com/xingyizhou/CenterNet
# # What is this competition about?
# 1. You are given the images taken from the roof of a car
# * ~4k training images
# * Always the same car and the same camera
# 2. You are asked to detect other cars on that image
# * There can be many cars
# * You need to predict their positions
# 
#
# ## What is in this notebook?
# * Data distributions: 1D, 2D and 3D
# * Functions to transform between camera coordinates and road coordinates
# * Simple CenterNet baseline
#
# ## CenterNet
# This architecture predicts centers of objects as a heatmap.
# It predicts sizes of the boxes as a regression task.
# 
#
# It is also used for pose estimation:
# 
# *(images from the [original repository](https://github.com/xingyizhou/CenterNet))*
# Coordinates of human joints are also predicted using regression.
#
# I use this idea to predict `x, y, z` coordinates of the vehicle and also `yaw, pitch_cos, pitch_sin, roll` angles.
# For `pitch` I predict sin and cos, because, as we will see, this angle can be both near 0 and near 3.14.
# These 7 parameters are my regression target variables instead of `shift_x, shift_y, size_x, size_y`.
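#
# A small illustrative sketch (added for clarity, not part of the original notebook):
# encoding an angle as (sin, cos) and decoding it with atan2 sidesteps the wrap-around
# problem near 0 / 3.14 described above. The variable names below are purely
# illustrative; the notebook's own encode/decode lives in `_regr_preprocess` /
# `_regr_back` further down, which uses arccos and the sign of the sine instead.
from math import sin, cos, atan2
for _pitch in (0.05, 3.10, -3.10):
    _s, _c = sin(_pitch), cos(_pitch)   # the two regression targets
    _decoded = atan2(_s, _c)            # recovers the original angle
    assert abs(_decoded - _pitch) < 1e-9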
# In[2]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sns
from functools import reduce
import os
from sklearn.model_selection import train_test_split
from scipy.optimize import minimize
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.utils.data import Dataset, DataLoader
from torchvision import models
from torchvision import transforms, utils
PATH = '/home/hy/pkuad/'
os.listdir(PATH)
# # Load data
# In[3]:
train = pd.read_csv(PATH + 'train.csv')
test = pd.read_csv(PATH + 'sample_submission.csv')
bad_list = ['ID_1a5a10365',
'ID_1db0533c7',
'ID_53c3fe91a',
'ID_408f58e9f',
'ID_4445ae041',
'ID_bb1d991f6',
'ID_c44983aeb',
'ID_f30ebe4d4',
'ID_1a5a10365',
'ID_4d238ae90',
'ID_408f58e9f',
'ID_bb1d991f6',
'ID_c44983aeb']
train = train.loc[~train['ImageId'].isin(bad_list)]
# From camera.zip
camera_matrix = np.array([[2304.5479, 0, 1686.2379],
[0, 2305.8757, 1354.9849],
[0, 0, 1]], dtype=np.float32)
camera_matrix_inv = np.linalg.inv(camera_matrix)
train.head()
# **ImageId** column contains names of images:
# In[4]:
def str2coords(s, names=['id', 'yaw', 'pitch', 'roll', 'x', 'y', 'z']):
'''
Input:
s: PredictionString (e.g. from train dataframe)
names: array of what to extract from the string
Output:
list of dicts with keys from `names`
'''
coords = []
for l in np.array(s.split()).reshape([-1, 7]):
coords.append(dict(zip(names, l.astype('float'))))
if 'id' in coords[-1]:
coords[-1]['id'] = int(coords[-1]['id'])
return coords
# In[5]:
inp = train['PredictionString'][0]
print('Example input:\n', inp)
print()
print('Output:\n', str2coords(inp))
# In[6]:
def imread(path, fast_mode=False):
img = cv2.imread(path)
if not fast_mode and img is not None and len(img.shape) == 3:
img = np.array(img[:, :, ::-1])
return img
img = imread(PATH + 'train_images/ID_8a6e65317' + '.jpg')
IMG_SHAPE = img.shape
# In[7]:
def get_img_coords(s):
'''
Input is a PredictionString (e.g. from train dataframe)
Output is two arrays:
xs: x coordinates in the image
ys: y coordinates in the image
'''
coords = str2coords(s)
xs = [c['x'] for c in coords]
ys = [c['y'] for c in coords]
zs = [c['z'] for c in coords]
P = np.array(list(zip(xs, ys, zs))).T
img_p = np.dot(camera_matrix, P).T
img_p[:, 0] /= img_p[:, 2]
img_p[:, 1] /= img_p[:, 2]
img_xs = img_p[:, 0]
img_ys = img_p[:, 1]
img_zs = img_p[:, 2] # z = Distance from the camera
return img_xs, img_ys
# In[8]:
def rotate(x, angle):
x = x + angle
x = x - (x + np.pi) // (2 * np.pi) * 2 * np.pi
return x
# One point is out of image!
# Let's look at the distribution of all points. Image is here just for reference.
# In[9]:
from math import sin, cos
# convert euler angle to rotation matrix
def euler_to_Rot(yaw, pitch, roll):
Y = np.array([[cos(yaw), 0, sin(yaw)],
[0, 1, 0],
[-sin(yaw), 0, cos(yaw)]])
P = np.array([[1, 0, 0],
[0, cos(pitch), -sin(pitch)],
[0, sin(pitch), cos(pitch)]])
R = np.array([[cos(roll), -sin(roll), 0],
[sin(roll), cos(roll), 0],
[0, 0, 1]])
return np.dot(Y, np.dot(P, R))
# # Image preprocessing
# In[10]:
IMG_WIDTH = 2052
IMG_HEIGHT = 1026
MODEL_SCALE = 8
def _regr_preprocess(regr_dict):
for name in ['x', 'y', 'z']:
regr_dict[name] = regr_dict[name] / 100
regr_dict['roll'] = rotate(regr_dict['roll'], np.pi)
regr_dict['pitch_sin'] = sin(regr_dict['pitch'])
regr_dict['pitch_cos'] = cos(regr_dict['pitch'])
regr_dict.pop('pitch')
regr_dict.pop('id')
return regr_dict
def _regr_back(regr_dict):
for name in ['x', 'y', 'z']:
regr_dict[name] = regr_dict[name] * 100
regr_dict['roll'] = rotate(regr_dict['roll'], -np.pi)
pitch_sin = regr_dict['pitch_sin'] / np.sqrt(regr_dict['pitch_sin']**2 + regr_dict['pitch_cos']**2)
pitch_cos = regr_dict['pitch_cos'] / np.sqrt(regr_dict['pitch_sin']**2 + regr_dict['pitch_cos']**2)
regr_dict['pitch'] = np.arccos(pitch_cos) * np.sign(pitch_sin)
return regr_dict
def preprocess_image(img):
img = img[img.shape[0] // 2:]
bg = np.ones_like(img) * img.mean(1, keepdims=True).astype(img.dtype)
bg = bg[:, :img.shape[1] // 4]
img = np.concatenate([bg, img, bg], 1)
img = cv2.resize(img, (IMG_WIDTH, IMG_HEIGHT))
return (img / 255).astype('float32')
def get_mask_and_regr(img, labels):
mask = np.zeros([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE], dtype='float32')
regr_names = ['x', 'y', 'z', 'yaw', 'pitch', 'roll']
regr = np.zeros([IMG_HEIGHT // MODEL_SCALE, IMG_WIDTH // MODEL_SCALE, 7], dtype='float32')
coords = str2coords(labels)
xs, ys = get_img_coords(labels)
for x, y, regr_dict in zip(xs, ys, coords):
x, y = y, x
x = (x - img.shape[0] // 2) * IMG_HEIGHT / (img.shape[0] // 2) / MODEL_SCALE
x = np.round(x).astype('int')
y = (y + img.shape[1] // 4) * IMG_WIDTH / (img.shape[1] * 1.5) / MODEL_SCALE
y = np.round(y).astype('int')
if x >= 0 and x < IMG_HEIGHT // MODEL_SCALE and y >= 0 and y < IMG_WIDTH // MODEL_SCALE:
mask[x, y] = 1
regr_dict = _regr_preprocess(regr_dict)
regr[x, y] = [regr_dict[n] for n in sorted(regr_dict)]
return mask, regr
# In[11]:
img0 = imread(PATH + 'train_images/' + train['ImageId'][0] + '.jpg')
img = preprocess_image(img0)
mask, regr = get_mask_and_regr(img0, train['PredictionString'][0])
print('img.shape', img.shape, 'std:', np.std(img))
print('mask.shape', mask.shape, 'std:', np.std(mask))
print('regr.shape', regr.shape, 'std:', np.std(regr))
# # PyTorch Dataset
# In[12]:
class CarDataset(Dataset):
"""Car dataset."""
def __init__(self, dataframe, root_dir, training=True, transform=None):
self.df = dataframe
self.root_dir = root_dir
self.transform = transform
self.training = training
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
# Get image name
idx, labels = self.df.values[idx]
img_name = self.root_dir.format(idx)
# Read image
img0 = imread(img_name, True)
img = preprocess_image(img0)
img = np.rollaxis(img, 2, 0)
# Get mask and regression maps
if self.training:
mask, regr = get_mask_and_regr(img0, labels)
regr = np.rollaxis(regr, 2, 0)
else:
mask, regr = 0, 0
return [img, mask, regr]
# In[13]:
train_images_dir = PATH + 'train_images/{}.jpg'
test_images_dir = PATH + 'test_images/{}.jpg'
df_train, df_dev = train_test_split(train, test_size=0.1, random_state=42)
df_test = test
# Create dataset objects
train_dataset = CarDataset(df_train, train_images_dir)
dev_dataset = CarDataset(df_dev, train_images_dir)
test_dataset = CarDataset(df_test, test_images_dir)
# Show some generated examples
# In[ ]:
BATCH_SIZE = 2
# Create data generators - they will produce batches
train_loader = DataLoader(dataset=train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4)
dev_loader = DataLoader(dataset=dev_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
test_loader = DataLoader(dataset=test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
# # PyTorch Model
# In[ ]:
#!pip install efficientnet-pytorch
# In[ ]:
from efficientnet_pytorch import EfficientNet
# In[ ]:
class double_conv(nn.Module):
'''(conv => BN => ReLU) * 2'''
def __init__(self, in_ch, out_ch):
super(double_conv, self).__init__()
self.conv = nn.Sequential(
nn.Conv2d(in_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True),
nn.Conv2d(out_ch, out_ch, 3, padding=1),
nn.BatchNorm2d(out_ch),
nn.ReLU(inplace=True)
)
def forward(self, x):
x = self.conv(x)
return x
class up(nn.Module):
def __init__(self, in_ch, out_ch, bilinear=True):
super(up, self).__init__()
        # it would be a nice idea if the upsampling could be learned too,
        # but my machine does not have enough memory to handle all those weights
if bilinear:
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
else:
self.up = nn.ConvTranspose2d(in_ch//2, in_ch//2, 2, stride=2)
self.conv = double_conv(in_ch, out_ch)
def forward(self, x1, x2=None):
x1 = self.up(x1)
# input is CHW
diffY = x2.size()[2] - x1.size()[2]
diffX = x2.size()[3] - x1.size()[3]
x1 = F.pad(x1, (diffX // 2, diffX - diffX//2,
diffY // 2, diffY - diffY//2))
# for padding issues, see
# https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a
# https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd
if x2 is not None:
x = torch.cat([x2, x1], dim=1)
else:
x = x1
x = self.conv(x)
return x
def get_mesh(batch_size, shape_x, shape_y):
mg_x, mg_y = np.meshgrid(np.linspace(0, 1, shape_y), np.linspace(0, 1, shape_x))
mg_x = np.tile(mg_x[None, None, :, :], [batch_size, 1, 1, 1]).astype('float32')
mg_y = np.tile(mg_y[None, None, :, :], [batch_size, 1, 1, 1]).astype('float32')
mesh = torch.cat([torch.tensor(mg_x).to(device), torch.tensor(mg_y).to(device)], 1)
return mesh
# In[ ]:
class MyUNet(nn.Module):
'''Mixture of previous classes'''
def __init__(self, n_classes):
super(MyUNet, self).__init__()
self.base_model = EfficientNet.from_pretrained('efficientnet-b0')
self.conv0 = double_conv(5, 64)
self.conv1 = double_conv(64, 128)
self.conv2 = double_conv(128, 512)
self.conv3 = double_conv(512, 1024)
self.mp = nn.MaxPool2d(2)
#self.up1 = up(3074, 512)
self.up1 = up(1282 + 1024, 512)
self.up2 = up(512 + 512, 256)
self.outc = nn.Conv2d(256, n_classes, 1)
def forward(self, x):
batch_size = x.shape[0]
mesh1 = get_mesh(batch_size, x.shape[2], x.shape[3])
x0 = torch.cat([x, mesh1], 1)
x1 = self.mp(self.conv0(x0))
x2 = self.mp(self.conv1(x1))
x3 = self.mp(self.conv2(x2))
x4 = self.mp(self.conv3(x3))
feats = self.base_model.extract_features(x)
# Add positional info
mesh2 = get_mesh(batch_size, feats.shape[2], feats.shape[3])
feats = torch.cat([feats, mesh2], 1)
x = self.up1(feats, x4)
x = self.up2(x, x3)
x = self.outc(x)
return x
# In[ ]:
# Gets the GPU if there is one, otherwise the cpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
n_epochs = 16
model = MyUNet(8).to(device)
optimizer = optim.AdamW(model.parameters(), lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.01, amsgrad=False)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=max(n_epochs, 10) * len(train_loader) // 3, gamma=0.1)
# # Training
# In[ ]:
def criterion(prediction, mask, regr, size_average=True):
# Binary mask loss
pred_mask = torch.sigmoid(prediction[:, 0])
# mask_loss = mask * (1 - pred_mask)**2 * torch.log(pred_mask + 1e-12) + (1 - mask) * pred_mask**2 * torch.log(1 - pred_mask + 1e-12)
mask_loss = mask * torch.log(pred_mask + 1e-12) + (1 - mask) * torch.log(1 - pred_mask + 1e-12)
mask_loss = -mask_loss.mean(0).sum()
# Regression L1 loss
pred_regr = prediction[:, 1:]
regr_loss = (torch.abs(pred_regr - regr).sum(1) * mask).sum(1).sum(1) / mask.sum(1).sum(1)
regr_loss = regr_loss.mean(0)
# Sum
loss = mask_loss + regr_loss
if not size_average:
loss *= prediction.shape[0]
return loss
# In[ ]:
def train(epoch, history=None):
model.train()
for batch_idx, (img_batch, mask_batch, regr_batch) in enumerate(tqdm(train_loader)):
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
optimizer.zero_grad()
output = model(img_batch)
loss = criterion(output, mask_batch, regr_batch)
if history is not None:
history.loc[epoch + batch_idx / len(train_loader), 'train_loss'] = loss.data.cpu().numpy()
loss.backward()
optimizer.step()
exp_lr_scheduler.step()
print('Train Epoch: {} \tLR: {:.6f}\tLoss: {:.6f}'.format(
epoch,
optimizer.state_dict()['param_groups'][0]['lr'],
loss.data))
def evaluate(epoch, history=None):
model.eval()
loss = 0
with torch.no_grad():
for img_batch, mask_batch, regr_batch in dev_loader:
img_batch = img_batch.to(device)
mask_batch = mask_batch.to(device)
regr_batch = regr_batch.to(device)
output = model(img_batch)
loss += criterion(output, mask_batch, regr_batch, size_average=False).data
loss /= len(dev_loader.dataset)
if history is not None:
history.loc[epoch, 'dev_loss'] = loss.cpu().numpy()
print('Dev loss: {:.4f}'.format(loss))
# In[ ]:
#get_ipython().run_cell_magic('time', '', "import gc\n\nhistory = pd.DataFrame()\n\nfor epoch in range(n_epochs):\n torch.cuda.empty_cache()\n gc.collect()\n train(epoch, history)\n torch.save(model.state_dict(), './b0_sz-2052-1026_ep-%s.pth'%(epoch))\n evaluate(epoch, history)")
import gc
history = pd.DataFrame()
for epoch in range(n_epochs):
torch.cuda.empty_cache()
gc.collect()
train(epoch, history)
torch.save(model.state_dict(), './b0_sz-2052-1026_ep-%s.pth'%(epoch))
evaluate(epoch, history)
# In[ ]:
torch.save(model.state_dict(), './b0_sz-2052-1026_final.pth')
# In[ ]:
#history['train_loss'].iloc[100:].plot();
# In[ ]:
#series = history.dropna()['dev_loss']
#plt.scatter(series.index, series);
# # Visualize predictions
# In[ ]:
DISTANCE_THRESH_CLEAR = 2
def convert_3d_to_2d(x, y, z, fx = 2304.5479, fy = 2305.8757, cx = 1686.2379, cy = 1354.9849):
# stolen from https://www.kaggle.com/theshockwaverider/eda-visualization-baseline
return x * fx / z + cx, y * fy / z + cy
def optimize_xy(r, c, x0, y0, z0):
def distance_fn(xyz):
x, y, z = xyz
x, y = convert_3d_to_2d(x, y, z0)
y, x = x, y
x = (x - IMG_SHAPE[0] // 2) * IMG_HEIGHT / (IMG_SHAPE[0] // 2) / MODEL_SCALE
x = np.round(x).astype('int')
y = (y + IMG_SHAPE[1] // 4) * IMG_WIDTH / (IMG_SHAPE[1] * 1.5) / MODEL_SCALE
y = np.round(y).astype('int')
return (x-r)**2 + (y-c)**2
res = minimize(distance_fn, [x0, y0, z0], method='Powell')
x_new, y_new, z_new = res.x
return x_new, y_new, z0
def clear_duplicates(coords):
for c1 in coords:
xyz1 = np.array([c1['x'], c1['y'], c1['z']])
for c2 in coords:
xyz2 = np.array([c2['x'], c2['y'], c2['z']])
distance = np.sqrt(((xyz1 - xyz2)**2).sum())
if distance < DISTANCE_THRESH_CLEAR:
if c1['confidence'] < c2['confidence']:
c1['confidence'] = -1
return [c for c in coords if c['confidence'] > 0]
def extract_coords(prediction):
logits = prediction[0]
regr_output = prediction[1:]
points = np.argwhere(logits > 0)
col_names = sorted(['x', 'y', 'z', 'yaw', 'pitch_sin', 'pitch_cos', 'roll'])
coords = []
for r, c in points:
regr_dict = dict(zip(col_names, regr_output[:, r, c]))
coords.append(_regr_back(regr_dict))
coords[-1]['confidence'] = 1 / (1 + np.exp(-logits[r, c]))
coords[-1]['x'], coords[-1]['y'], coords[-1]['z'] = optimize_xy(r, c, coords[-1]['x'], coords[-1]['y'], coords[-1]['z'])
coords = clear_duplicates(coords)
return coords
def coords2str(coords, names=['yaw', 'pitch', 'roll', 'x', 'y', 'z', 'confidence']):
s = []
for c in coords:
for n in names:
s.append(str(c.get(n, 0)))
return ' '.join(s)
# In[ ]:
torch.cuda.empty_cache()
gc.collect()
# # Make submission
# In[ ]:
predictions = []
test_loader = DataLoader(dataset=test_dataset, batch_size=16, shuffle=False, num_workers=4)
model.eval()
print('In prediction Stage')
for img, _, _ in tqdm(test_loader):
with torch.no_grad():
output = model(img.to(device))
output = output.data.cpu().numpy()
for out in output:
coords = extract_coords(out)
s = coords2str(coords)
predictions.append(s)
# In[ ]:
test = | pd.read_csv(PATH + 'sample_submission.csv') | pandas.read_csv |
import os
import streamlit as st
import pandas as pd
import altair as alt
import sqlite3
from sqlite3 import Connection
import requests
import json
import plotly.express as px
# spotify stuff
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
def get_spotify_token():
url='https://accounts.spotify.com/api/token'
grant_type = 'client_credentials'
body_params = {'grant_type' : grant_type}
r = requests.post(url, data=body_params, auth = (SPOTIFY_CLIENT_ID, SPOTIFY_CLIENT_SECRET))
r.raise_for_status()
token_raw = json.loads(r.text)
token = token_raw["access_token"]
return token
def spotify_search(song):
token = get_spotify_token()
url = f'https://api.spotify.com/v1/search?q={song}&type=track&limit=1'
headers = {
'Accept': 'application/json',
'Content-type': 'application/json',
'Authorization': f'Bearer {token}'
}
r = requests.get(url, headers=headers)
r.raise_for_status()
if r.status_code == 200:
data = r.json()
result = data['tracks']['items'][0]
thirty_sec_preview_url = result['preview_url']
return thirty_sec_preview_url
else:
raise Exception('Failed to get Spotify data.')
@st.cache(hash_funcs={Connection: id}) # add caching so we load the data only once
def get_connection(path_to_db):
# connect to db
try:
conn = sqlite3.connect(path_to_db, check_same_thread=False)
return conn
except Exception as e:
print(e)
def get_data(conn: Connection):
sql_query = """
SELECT
song, artist, album, date, energy, valence, danceability, instrumentalness, tempo
FROM
acoustic_features
WHERE
artist LIKE '%<NAME>%'
ORDER BY date DESC
"""
df = | pd.read_sql(sql_query, con=conn) | pandas.read_sql |
"""
Test the datasets module
"""
# Author: <NAME>
# License: simplified BSD
import os
import uuid
from pathlib import Path
import re
import gzip
from collections import OrderedDict
import numpy as np
import json
import nibabel
import pandas as pd
import pytest
from nibabel.tmpdirs import TemporaryDirectory
from sklearn.utils import check_random_state
from nilearn.datasets import func
from nilearn.datasets._testing import list_to_archive, dict_to_archive
from nilearn.datasets.utils import _get_dataset_dir
from nilearn._utils.testing import check_deprecation
def _load_localizer_index():
data_dir = Path(__file__).parent / "data"
with (data_dir / "localizer_index.json").open() as of:
localizer_template = json.load(of)
localizer_index = {}
for idx in range(1, 95):
sid = 'S{:02}'.format(idx)
localizer_index.update(dict(
(key.format(sid), uuid.uuid4().hex)
for key in localizer_template))
localizer_index['/localizer/phenotype/behavioural.tsv'] = uuid.uuid4().hex
localizer_index['/localizer/participants.tsv'] = uuid.uuid4().hex
tsv_files = {}
tsv_files['/localizer/phenotype/behavioural.tsv'] = pd.read_csv(
str(data_dir / 'localizer_behavioural.tsv'), sep='\t')
tsv_files['/localizer/participants.tsv'] = pd.read_csv(
str(data_dir / 'localizer_participants.tsv'), sep='\t')
return localizer_index, tsv_files
@pytest.fixture()
def localizer_mocker(request_mocker):
""" Mocks the index for localizer dataset.
"""
index, tsv_files = _load_localizer_index()
request_mocker.url_mapping["https://osf.io/hwbm2/download"] = json.dumps(
index)
for k, v in tsv_files.items():
request_mocker.url_mapping[
"*{}?".format(index[k][1:])] = v.to_csv(index=False, sep="\t")
def _make_haxby_subject_data(match, response):
sub_files = ['bold.nii.gz', 'labels.txt',
'mask4_vt.nii.gz', 'mask8b_face_vt.nii.gz',
'mask8b_house_vt.nii.gz', 'mask8_face_vt.nii.gz',
'mask8_house_vt.nii.gz', 'anat.nii.gz']
return list_to_archive(Path(match.group(1), f) for f in sub_files)
def test_fetch_haxby(tmp_path, request_mocker):
request_mocker.url_mapping[
re.compile(r".*(subj\d).*\.tar\.gz")] = _make_haxby_subject_data
for i in range(1, 6):
haxby = func.fetch_haxby(data_dir=tmp_path, subjects=[i],
verbose=0)
# subject_data + (md5 + mask if first subj)
assert request_mocker.url_count == i + 2
assert len(haxby.func) == 1
assert len(haxby.anat) == 1
assert len(haxby.session_target) == 1
assert haxby.mask is not None
assert len(haxby.mask_vt) == 1
assert len(haxby.mask_face) == 1
assert len(haxby.mask_house) == 1
assert len(haxby.mask_face_little) == 1
assert len(haxby.mask_house_little) == 1
assert haxby.description != ''
# subjects with list
subjects = [1, 2, 6]
haxby = func.fetch_haxby(data_dir=tmp_path, subjects=subjects,
verbose=0)
assert len(haxby.func) == len(subjects)
assert len(haxby.mask_house_little) == len(subjects)
assert len(haxby.anat) == len(subjects)
assert haxby.anat[2] is None
assert isinstance(haxby.mask, str)
assert len(haxby.mask_face) == len(subjects)
assert len(haxby.session_target) == len(subjects)
assert len(haxby.mask_vt) == len(subjects)
assert len(haxby.mask_face_little) == len(subjects)
subjects = ['a', 8]
message = "You provided invalid subject id {0} in a list"
for sub_id in subjects:
with pytest.raises(ValueError, match=message.format(sub_id)):
func.fetch_haxby(data_dir=tmp_path, subjects=[sub_id])
def _adhd_example_subject(match, request):
contents = [
Path("data", match.group(1), match.expand(r"\1_regressors.csv")),
Path("data", match.group(1),
match.expand(r"\1_rest_tshift_RPI_voreg_mni.nii.gz"))
]
return list_to_archive(contents)
def _adhd_metadata():
sub1 = [3902469, 7774305, 3699991]
sub2 = [2014113, 4275075, 1019436,
3154996, 3884955, 27034,
4134561, 27018, 6115230,
27037, 8409791, 27011]
sub3 = [3007585, 8697774, 9750701,
10064, 21019, 10042,
10128, 2497695, 4164316,
1552181, 4046678, 23012]
sub4 = [1679142, 1206380, 23008,
4016887, 1418396, 2950754,
3994098, 3520880, 1517058,
9744150, 1562298, 3205761, 3624598]
subs = | pd.DataFrame({"Subject": sub1 + sub2 + sub3 + sub4}) | pandas.DataFrame |
# Copyright WillianFuks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tests for module main.py. Fixtures come from the conftest.py file located in the same
directory as this file.
"""
import mock
import numpy as np
import pandas as pd
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
from causalimpact import CausalImpact
from causalimpact.misc import standardize
@pytest.mark.slow
def test_default_causal_cto(rand_data, pre_int_period, post_int_period):
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
pd.concat([normed_pre_data, normed_post_data]).astype(np.float32).iloc[:, 1:]
)
assert ci.inferences is not None
assert ci.inferences.index.dtype == rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_with_date_index(date_rand_data, pre_str_period,
post_str_period):
ci = CausalImpact(date_rand_data, pre_str_period, post_str_period)
assert_frame_equal(ci.data, date_rand_data)
assert ci.pre_period == pre_str_period
assert ci.post_period == post_str_period
pre_data = date_rand_data.loc[pre_str_period[0]: pre_str_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = date_rand_data.loc[post_str_period[0]: post_str_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.Sum)
design_matrix = ci.model.components[1].design_matrix.to_dense()
assert_array_equal(
design_matrix,
pd.concat([normed_pre_data, normed_post_data]).astype(np.float32).iloc[:, 1:]
)
assert ci.inferences is not None
assert ci.inferences.index.dtype == date_rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_no_covariates(rand_data, pre_int_period, post_int_period):
rand_data = pd.DataFrame(rand_data.iloc[:, 0])
ci = CausalImpact(rand_data, pre_int_period, post_int_period)
assert_frame_equal(ci.data, rand_data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
pre_data = rand_data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = rand_data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
assert_frame_equal(ci.normed_post_data, normed_post_data)
assert ci.mu_sig == (mu[0], sig[0])
assert ci.model_args == {'fit_method': 'hmc', 'niter': 1000, 'prior_level_sd': 0.01,
'season_duration': 1, 'nseasons': 1, 'standardize': True}
assert isinstance(ci.model, tfp.sts.LocalLevel)
assert ci.inferences is not None
assert ci.inferences.index.dtype == rand_data.index.dtype
assert ci.summary_data is not None
assert ci.p_value > 0 and ci.p_value < 1
assert ci.model_args['niter'] == 1000
assert ci.model_samples is not None
@pytest.mark.slow
def test_default_causal_cto_with_np_array(rand_data, pre_int_period, post_int_period):
data = rand_data.values
ci = CausalImpact(data, pre_int_period, post_int_period)
assert_array_equal(ci.data, data)
assert ci.pre_period == pre_int_period
assert ci.post_period == post_int_period
data = pd.DataFrame(data)
pre_data = data.loc[pre_int_period[0]: pre_int_period[1], :]
assert_frame_equal(ci.pre_data, pre_data)
post_data = data.loc[post_int_period[0]: post_int_period[1], :]
assert_frame_equal(ci.post_data, post_data)
assert ci.alpha == 0.05
normed_pre_data, (mu, sig) = standardize(pre_data)
assert_frame_equal(ci.normed_pre_data, normed_pre_data)
normed_post_data = (post_data - mu) / sig
| assert_frame_equal(ci.normed_post_data, normed_post_data) | pandas.util.testing.assert_frame_equal |
# CONTINUED IN https://github.com/rcsmit/COVIDcases/blob/main/covid_dashboard_rcsmit.py
# 27/28 feb 2021
# Calculate the relation between gliding R and mobility (Apple and Google)
# Calculate the correlation with hospital admissions and the factors mentioned above
# Plotting a heatmap with correlations
# Plotting a scatter map
# Plotting a graph in time, with an adjusted x-axis
# 1 March 2021
# Merging files on date in different date formats
# Remove outliers (doesn't work)
# Calculating moving averages
# Make different statistics for weekdays and weekends
# Scraping statistics from RIVM
# 2 March
# R of hospital admissions
# week graph
# correcting the merge function
# 3 March
# added restrictions (file from @HK_nien, MIT licence)
# download and merge hospital admissions
# download and merge the RIVM R-number
# everything converted into functions
# 4 March
# more subdivision into functions. Everything callable from main() with parameters
# 5 March
# custom colors
# weekend different color in barplot
# annoying problem with a join (from outer to inner to outer and then it worked again)
# R value (14 days back due to smoothing)
# 6 March
# last row in bar-graph was omitted due to ["date of statistics"] instead of ["date"] in addwalkingR
# Bug with a reset_index() somewhere. Took a long time to find out
# Tried to first calculate SMA and R, and then cut off FROM/UNTIL. Doesn't
# work. Took a huge amount of time as well. Reversed everything afterwards
# 7 March
# week graph function with parameters
# 8 March
# find columns with max correlation
# find the time lag between two columns
# added a second way to calculate and display R
# 9-11 March: graphs from Van Dissel: bed occupancy vs R
# 12 March
# Normalised graph (max = 1 or start = 100)
# Various Tg for the R-number curves
# I used iloc. Iterating through pandas objects is generally slow.
# In many cases, iterating manually over the rows is not needed and
# can be avoided with one of the following approaches:
# http://pandas-docs.github.io/pandas-docs-travis/getting_started/basics.html#iteration
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.dates as mdates
import seaborn as sn
from scipy import stats
import datetime as dt
from datetime import datetime, timedelta
from matplotlib.backends.backend_agg import RendererAgg
from matplotlib.font_manager import FontProperties
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter,
AutoMinorLocator)
import matplotlib.ticker as ticker
import math
_lock = RendererAgg.lock
from scipy.signal import savgol_filter
import urllib
import urllib.request
from pathlib import Path
from inspect import currentframe, getframeinfo
# R-numbers from 'https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json'
# Google mobilty from https://www.google.com/covid19/mobility/?hl=nl
# Apple mobility from https://covid19.apple.com/mobility
# # Merged in one file in Excel and saved to CSV
# Hospitals from RIVM 'https://data.rivm.nl/covid-19/COVID-19_ziekenhuisopnames.csv
def download_mob_r():
""" _ _ _ """
df_mob_r = pd.read_csv(
r'covid19_seir_models\input\mobility.csv',
delimiter=';',
low_memory=False
)
    # date format is 16-2-2020
df_mob_r['date']=pd.to_datetime(df_mob_r['date'], format="%d-%m-%Y")
df_mob_r.set_index('date')
return df_mob_r
def download_hospital_admissions():
""" _ _ _ """
    # THESE ARE THE SAME NUMBERS AS ON THE DASHBOARD
if download :
# Code by <NAME> - MIT License
url='https://data.rivm.nl/covid-19/COVID-19_ziekenhuisopnames.csv'
#url="https://lcps.nu/wp-content/uploads/covid-19.csv"
fpath = Path('covid19_seir_models\input\COVID-19_ziekenhuisopnames.csv')
print(f'Getting new daily case statistics file ziekenhuisopnames. ..')
with urllib.request.urlopen(url) as response:
data_bytes = response.read()
fpath.write_bytes(data_bytes)
print(f'Wrote {fpath} .')
df_hospital = pd.read_csv(
r'covid19_seir_models\input\COVID-19_ziekenhuisopnames.csv',
delimiter=';',
#delimiter=',',
low_memory=False
)
    # date format is 2020-02-27
df_hospital['Date_of_statistics'] = df_hospital['Date_of_statistics'].astype('datetime64[D]')
df_hospital = df_hospital.groupby(['Date_of_statistics'] , sort=True).sum().reset_index()
#print ("Last hospital admissions :
# " (df_hospital.iloc[len (df_hospital)]['Date_of_statistics'])
# compression_opts = dict(method=None,
# archive_name='out.csv')
# df_hospital.to_csv('outhospital.csv', index=False,
# compression=compression_opts)
#print (df_hospital)
#save_df(df_hospital,"ziekenhuisopnames_RIVM")
return df_hospital
def download_lcps():
"""Download data from LCPS"""
if download :
# Code by <NAME> - MIT License
url='https://lcps.nu/wp-content/uploads/covid-19.csv'
#url="https://lcps.nu/wp-content/uploads/covid-19.csv"
fpath = Path('covid19_seir_models\input\LCPS.csv')
print(f'Getting new daily case statistics file LCPS...')
with urllib.request.urlopen(url) as response:
data_bytes = response.read()
fpath.write_bytes(data_bytes)
print(f'Wrote {fpath} .')
df_lcps = pd.read_csv(
r'covid19_seir_models\input\LCPS.csv',
delimiter=',',
#delimiter=',',
low_memory=False
)
# print (df_lcps)
# print (df_lcps.dtypes)
# Datum,IC_Bedden_COVID,IC_Bedden_Non_COVID,Kliniek_Bedden,IC_Nieuwe_Opnames_COVID,
# Kliniek_Nieuwe_Opnames_COVID
    # date format is 2020-02-27
df_lcps['Datum']=pd.to_datetime(df_lcps['Datum'], format="%d-%m-%Y")
#df_lcps = df_lcps.groupby(['Datum'] , sort=True).sum().reset_index()
# compression_opts = dict(method=None,
# archive_name='out.csv')
# df_hospital.to_csv('outhospital.csv', index=False,
# compression=compression_opts)
return df_lcps
def download_reproductiegetal():
""" _ _ _ """
#https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json
if download == True:
print ("Download reproductiegetal-file")
#df_reprogetal = pd.read_json (r'covid19_seir_models\input\COVID-19_reproductiegetal.json')
df_reprogetal = pd.read_json (r'https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json')
# url = 'https://data.rivm.nl/covid-19/COVID-19_reproductiegetal.json'
compression_opts = dict(method=None,
archive_name='reprogetal.csv')
df_reprogetal.to_csv('covid19_seir_models\\input\\reprogetal.csv', index=False,
compression=compression_opts)
df_reprogetal.set_index("Date")
else:
df_reprogetal = pd.read_csv(
r'covid19_seir_models\input\reprogetal.csv',
delimiter=',',
#delimiter=',',
low_memory=False)
df_reprogetal['Date']=pd.to_datetime(df_reprogetal['Date'], format="%Y-%m-%d")
    # if no reproduction number has been calculated yet, take the average of low and up
    # from mid-June onwards this has always been the case (0.990 and 1.000)
#df_reprogetal.loc[df_reprogetal["Rt_avg"].isnull(),'Rt_avg'] = round(((df_reprogetal["Rt_low"] + df_reprogetal["Rt_up"])/2),2)
#print (df_reprogetal)
return df_reprogetal
def download_gemeente_per_dag():
""" _ _ _ """
if download :
# Code by <NAME> - MIT License
url='https://data.rivm.nl/covid-19/COVID-19_aantallen_gemeente_per_dag.csv'
    # The actual number of COVID-19 patients admitted to hospital is higher than the
    # number of admitted patients reported in the surveillance, because the GGD is not
    # always aware of a hospital admission when it takes place after notification.
    # That is why, since 6 October, the RIVM actively reports the registered hospital
    # admissions from Stichting NICE
#url="https://lcps.nu/wp-content/uploads/covid-19.csv"
fpath = Path('covid19_seir_models\input\COVID-19_aantallen_gemeente_per_dag.csv')
print(f'Getting new daily case statistics file - aantallen-gemeente-per-dag - ...')
with urllib.request.urlopen(url) as response:
data_bytes = response.read()
fpath.write_bytes(data_bytes)
print(f'Wrote {fpath} .')
df_gemeente_per_dag = pd.read_csv(
r'covid19_seir_models\input\COVID-19_aantallen_gemeente_per_dag.csv',
delimiter=';',
#delimiter=',',
low_memory=False)
df_gemeente_per_dag['Date_of_publication'] = df_gemeente_per_dag['Date_of_publication'].astype('datetime64[D]')
df_gemeente_per_dag = df_gemeente_per_dag.groupby(['Date_of_publication'] , sort=True).sum().reset_index()
#save_df(df_gemeente_per_dag,"COVID-19_aantallen_per_dag")
return df_gemeente_per_dag
def download_uitgevoerde_testen():
""" _ _ _ """
# Version;Date_of_report;Date_of_statistics;Security_region_code;
# Security_region_name;Tested_with_result;Tested_positive
if download :
# Code by <NAME> - MIT License
url='https://data.rivm.nl/covid-19/COVID-19_uitgevoerde_testen.csv'
fpath = Path('covid19_seir_models\input\COVID-19_uitgevoerde_testen.csv')
print(f'Getting new daily case statistics file - testen - ...')
with urllib.request.urlopen(url) as response:
data_bytes = response.read()
fpath.write_bytes(data_bytes)
print(f'Wrote {fpath} .')
df_uitgevoerde_testen = pd.read_csv(
r'covid19_seir_models\input\COVID-19_uitgevoerde_testen.csv',
delimiter=';',
#delimiter=',',
low_memory=False)
#df_uitgevoerde_testen['Date_of_publication'] = df_uitgevoerde_testen['Date_of_publication'].astype('datetime64[D]')
df_uitgevoerde_testen['Date_of_statistics'] = df_uitgevoerde_testen['Date_of_statistics'].astype('datetime64[D]')
df_uitgevoerde_testen = df_uitgevoerde_testen.groupby(['Date_of_statistics'] , sort=True).sum().reset_index()
df_uitgevoerde_testen['Percentage_positive'] = round((df_uitgevoerde_testen['Tested_positive'] /
df_uitgevoerde_testen['Tested_with_result'] * 100),2 )
#save_df(df_uitgevoerde_testen,"COVID-19_uitgevoerde_testen")
return df_uitgevoerde_testen
###################################################################
def get_data():
""" _ _ _ """
df_hospital = download_hospital_admissions()
#sliding_r_df = walkingR(df_hospital, "Hospital_admission")
df_lcps = download_lcps()
df_mob_r = download_mob_r()
df_gemeente_per_dag = download_gemeente_per_dag()
df_reprogetal = download_reproductiegetal()
df_uitgevoerde_testen = download_uitgevoerde_testen()
type_of_join = "outer"
df = pd.merge(df_mob_r, df_hospital, how=type_of_join, left_on = 'date',
right_on="Date_of_statistics")
#df = df_hospital
df.loc[df['date'].isnull(),'date'] = df['Date_of_statistics']
df = pd.merge(df, df_lcps, how=type_of_join, left_on = 'date', right_on="Datum")
df.loc[df['date'].isnull(),'date'] = df['Datum']
#df = pd.merge(df, sliding_r_df, how=type_of_join, left_on = 'date', right_on="date_sR", left_index=True )
df = pd.merge(df, df_gemeente_per_dag, how=type_of_join, left_on = 'date', right_on="Date_of_publication",
left_index=True )
df = pd.merge(df, df_reprogetal, how=type_of_join, left_on = 'date', right_on="Date",
left_index=True )
df = pd.merge(df, df_uitgevoerde_testen, how=type_of_join, left_on = 'date', right_on="Date_of_statistics",
left_index=True )
df = df.sort_values(by=['date'])
df = splitupweekweekend(df)
df, werkdagen, weekend_ = last_manipulations(df, None, None)
df.set_index('date')
return df, werkdagen, weekend_
###################################################
def calculate_cases(df):
column = df["date"]
b_= column.max().date()
    #fr = '2021-1-10'  # doesn't work
fr = FROM
a_ = dt.datetime.strptime(fr,'%Y-%m-%d').date()
#b_ = dt.datetime.strptime(UNTIL,'%Y-%m-%d').date()
datediff = ( abs((a_ - b_).days))+1+30
f = 1
ry1 = 0.8 * f
ry2 = 1.15 * f
total_cases_0 = 7500
sec_variant = 10
population = 17_500_000
immune_day_zero = 2_500_000
suspectible_0 = population - immune_day_zero
cumm_cases = 0
cases_1 = ((100-sec_variant)/100)* total_cases_0
cases_2 = (sec_variant/100)* total_cases_0
temp_1 = cases_1
temp_2 = cases_2
r_temp_1 = ry1
r_temp_2 = ry2
immeratio = 1
df_calculated = pd.DataFrame({'date_calc': a_,
'variant_1': cases_1,'variant_2' :cases_2, 'variant_12' : int(cases_1+cases_2)}, index=[0])
Tg = 4
#print (df_calculated.dtypes)
#a_ = dt.datetime.strptime(a,'%m/%d/%Y').date()
column = df["date"]
max_value = column. max()
for day in range (1, datediff):
thalf1 = Tg * math.log(0.5) / math.log(immeratio*ry1)
thalf2 = Tg * math.log(0.5) / math.log(immeratio*ry2)
day = a_ + timedelta(days=day)
pt1 = (temp_1 * (0.5**(1/thalf1)))
pt2 = (temp_2* (0.5**(1/thalf2)))
day_ = day.strftime("%Y-%m-%d") # FROM object TO string
day__ = dt.datetime.strptime(day_,'%Y-%m-%d') # from string to daytime
df_calculated =df_calculated .append({'date_calc':day_,
'variant_1' : int(pt1),
'variant_2' : int(pt2) , 'variant_12' : int(pt1+pt2) },ignore_index=True)
temp_1 = pt1
temp_2 = pt2
cumm_cases += pt1 + pt2
immeratio = (1-(cumm_cases/suspectible_0 ))
df_calculated['date_calc'] = pd.to_datetime( df_calculated['date_calc'])
df = pd.merge(df, df_calculated, how='outer', left_on = 'date', right_on="date_calc",
left_index=True )
print (df.dtypes)
df.loc[df['date'].isnull(),'date'] = df['date_calc']
return df, ry1, ry2
def splitupweekweekend(df):
""" _ _ _ """
# SPLIT UP IN WEEKDAY AND WEEKEND
# https://stackoverflow.com/posts/56336718/revisions
df['WEEKDAY'] = pd.to_datetime(df['date']).dt.dayofweek # monday = 0, sunday = 6
df['weekend'] = 0 # Initialize the column with default value of 0
df.loc[df['WEEKDAY'].isin([5, 6]), 'weekend'] = 1 # 5 and 6 correspond to Sat and Sun
return df
# remove outliers - doesnt work
# df = df[(np.abs(stats.zscore(df['retail_and_recreation'])) < 3)]
# df = df[(np.abs(stats.zscore(df['transit_stations'])) < 3)]
# df = df[(np.abs(stats.zscore(df['workplaces'])) < 3)]
# df = df[(np.abs(stats.zscore(df['grocery_and_pharmacy'])) < 3)]
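# A possible replacement for the z-score filter above (my sketch, not from the
# original author). scipy.stats.zscore with its default settings returns NaN for the
# whole column as soon as one value is missing, which makes the comparison False
# everywhere and silently drops every row; that is likely why the commented lines
# "don't work". Computing the z-score per column and keeping rows with missing data
# is one way around that. The column names are simply the ones used above.
def drop_outliers(frame, columns, z_max=3):
    """Return frame without rows whose z-score in any of `columns` exceeds z_max."""
    keep = pd.Series(True, index=frame.index)
    for col in columns:
        series = frame[col]
        zscores = (series - series.mean()) / series.std()
        # rows with missing data get a NaN z-score and are kept, not dropped
        keep &= ~(zscores.abs() > z_max)
    return frame[keep]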
def add_walking_r(df, smoothed_columns, how_to_smooth, tg):
""" _ _ _ """
#print(df)
    # Calculate walking R from a certain base. Included a second method to calculate R
    # the calculation steps: (1) n = rolling average over 7 days; (2) Rt = exp(Tc*d(ln(n))/dt)
    # with Tc = 4 days, (3) shift the data by the reporting delay (10 d) plus the delay
    # of the rolling average (3 d).
# https://twitter.com/hk_nien/status/1320671955796844546
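    # Worked example (added for clarity; the numbers are made up): with Tg = 4 days and
    # a 7-day rolling average that grows from 100 to 119 admissions,
    # d(ln n)/dt ~ ln(119/100) / 7 ~ 0.0249 per day, so
    # R = exp(Tg * d(ln n)/dt) = exp(4 * 0.0249) ~ 1.10.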
column_list_r_smoothened = []
column_list_r_sec_smoothened = []
d= 1
d2=2
r_sec = []
for base in smoothed_columns:
column_name_R = 'R_value_from_'+ base +'_tg'+str(tg)
column_name_R_sec = 'R_value_(hk)_from_'+ base
#df, new_column = smooth_columnlist(df,[base],how_to_smooth)
column_name_r_smoothened = 'R_value_from_'+ base +'_tg'+str(tg) + '_'+ how_to_smooth + '_' + str(WDW3)
column_name_r_sec_smoothened = 'R_value_sec_from_'+ base +'_tg'+str(tg) + '_'+ how_to_smooth + '_' + str(WDW3)
#df[SMA1] = df.iloc[:,df.columns.get_loc(base)].rolling(window=WDW2).mean()
sliding_r_df= pd.DataFrame({'date_sR': [],
column_name_R: [],column_name_R_sec: []})
for i in range(len(df)):
if df.iloc[i][base] != None:
date_ = | pd.to_datetime(df.iloc[i]['date'], format="%Y-%m-%d") | pandas.to_datetime |
# %%
import pandas as pd
import numpy as np
import os,sys
import re
from sklearn.preprocessing import OneHotEncoder
from sklearn.decomposition import PCA
#%% initial path
cur_path = sys.path[0].split(os.path.sep)
workspace_path = os.path.sep.join(cur_path[:cur_path.index("bestpaycup2020")+1])
os.chdir(workspace_path)  # force the working directory to the workspace
print(f"Changed the working directory to the workspace: {os.getcwd()}")
#%%
TRAIN_BASE_PATH = "./dataset/raw_dataset/trainset/train_base.csv"
TRAIN_OP_PATH = "./dataset/raw_dataset/trainset/train_op.csv"
TRAIN_TRANS_PATH = "./dataset/raw_dataset/trainset/train_trans.csv"
TEST_A_BASE_PATH = "./dataset/raw_dataset/testset/test_a_base.csv"
TEST_A_OP_PATH = "./dataset/raw_dataset/testset/test_a_op.csv"
TEST_A_TRANS_PATH = "./dataset/raw_dataset/testset/test_a_trans.csv"
TEST_B_BASE_PATH = "./dataset/raw_dataset/testset/test_b_base.csv"
TEST_B_OP_PATH = "./dataset/raw_dataset/testset/test_b_op.csv"
TEST_B_TRANS_PATH = "./dataset/raw_dataset/testset/test_b_trans.csv"
SAMPLE_BASE_PATH = "./dataset/raw_dataset/sample_trainset/sample_base.csv"
SAMPLE_OP_PATH = "./dataset/raw_dataset/sample_trainset/sample_op.csv"
SAMPLE_TRANS_PATH = "./dataset/raw_dataset/sample_trainset/sample_trans.csv"
PROCESSED_TRAIN_BASE_PATH = "./dataset/dataset1/trainset/train_base.csv"
PROCESSED_TEST_A_BASE_PATH = "./dataset/dataset1/testset/test_a_base.csv"
PROCESSED_TEST_B_BASE_PATH = "./dataset/dataset1/testset/test_b_base.csv"
# %%
def process_base(base_path,verbose=False):
    # TODO: provider, province and city each have one missing value; fill them with the mode
def to_int(entry):
if type(entry) is str:
level = re.search("^(category |level |Train_|TestA_|TestB_)([0-9]+)",entry)
if level:
return int(level.group(2))
return entry
    # read the data, make a backup copy, convert types
base = pd.read_csv(base_path)
base2 = base.copy()
for e in base2.columns:
base2[e] = base[e].apply(to_int)
    # handle missing values
# base2["sex"][base2["sex"].isna()] = 3
base2["sex"].fillna(base2["sex"].mode()[0], inplace=True)
base2["balance_avg"].fillna(base2["balance_avg"].mode()[0],inplace=True)
base2["balance1_avg"].fillna(base2["balance1_avg"].mode()[0],inplace=True)
# base2["balance1_avg"][base2["balance1_avg"].isna()]=base2["balance1_avg"].mode()[0]
    # merge the service3 columns
base2["service3"][base2["service3"]==0] = -1
base2["service3"][base2["service3"] != -1] = base2["service3_level"][base2["service3_level"].notna()]
base2.drop("service3_level",axis=1,inplace=True) # 删除service3_level列
print(f"{base_path} has shape {base2.shape} after processing")
if verbose:
print(base2.info())
        print(base2.describe())
return base2
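# Sketch for the TODO at the top of process_base (my addition): fill the single
# missing value that provider, province and city are said to have with the column
# mode. The column names come from that TODO comment and are assumptions, not
# verified against the raw csv here.
def fill_categorical_na_with_mode(base_df, columns=("provider", "province", "city")):
    for col in columns:
        if col in base_df.columns and base_df[col].isna().any():
            base_df[col].fillna(base_df[col].mode()[0], inplace=True)
    return base_df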
#%%
for base_path,processed_base_path in [(TRAIN_BASE_PATH,PROCESSED_TRAIN_BASE_PATH),
(TEST_A_BASE_PATH,PROCESSED_TEST_A_BASE_PATH),
(TEST_B_BASE_PATH,PROCESSED_TEST_B_BASE_PATH)]:
base2 = process_base(base_path)
if not os.path.exists(os.path.split(processed_base_path)[0]):
os.makedirs(os.path.split(processed_base_path)[0])
with open(processed_base_path,"w") as f:
base2.to_csv(f,index=False,line_terminator='\n')
# %%
# base_path = TEST_B_BASE_PATH
# processed_base_path = PROCESSED_TEST_B_BASE_PATH
# base_df = process_base(base_path)
# if not os.path.exists(os.path.split(processed_base_path)[0]):
# os.makedirs(os.path.split(processed_base_path)[0])
# with open(processed_base_path, "w") as f:
# base_df.to_csv(f, index=False, line_terminator='\n')
# %%
def process_base_onehot(base_dir, dim):
train_df = pd.read_csv(base_dir + '/dataset/dataset1/trainset/train_base.csv')
test_a_df = pd.read_csv(base_dir + '/dataset/dataset1/testset/test_a_base.csv')
test_b_df = pd.read_csv(base_dir + '/dataset/dataset1/testset/test_b_base.csv')
province = | pd.concat([train_df['province'], test_a_df['province'], test_b_df['province']]) | pandas.concat |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 18 21:33:10 2021
@author: Devineni
"""
import pandas as pd
import numpy as np
from statistics import mean
import time
import datetime as dt
import matplotlib.pyplot as plt
from tabulate import tabulate
# import mysql.connector
import os
import pymysql
from sqlalchemy import create_engine
from openpyxl import load_workbook
import statistics
from easygui import *
import sys
def prRed(skk): print("\033[31;1;m {}\033[00m" .format(skk))
from uncertainties import ufloat
engine = create_engine("mysql+pymysql://root:Password123@localhost/",pool_pre_ping=True)
#%%%
import datetime
import matplotlib.dates as mdates
import matplotlib.units as munits
from pylab import rcParams
rcParams['figure.figsize'] = 7,4.5
# rcParams['figure.figsize'] = 19,15 # word
plt.rcParams["font.family"] = "calibri"
plt.rcParams["font.weight"] = "normal"
plt.rcParams["font.size"] = 10
#%% Automated Outdoor results
'''
This section handles selecting which experiment to evaluate.
The easygui module was used to create the dialogue boxes for easy input;
this is just a more visual way of selecting an experiment.
'''
i=0
result1 = pd.DataFrame(["Outdoor Summary"])
databases = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
for database in databases:
times = pd.read_excel('C:/Users/Raghavakrishna/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/excel_files/Times_thesis.xlsx', sheet_name= database)
choices = list(times['short name'])
for experiment in choices:
z = int(times[times['short name'] == experiment].index.values)
Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
t0 = times.loc[z,"Start"]
tn = times.loc[z,"End"]
#%%%
result = pd.DataFrame(["Outdoor Results"]) ;result.loc[0,1] = experiment
r = 1
adf = pd.read_sql_query("SELECT * FROM weather.außen WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
result.loc[r,0] = "parameter" ; result.loc[r,1] = "min" ; result.loc[r,2] = "max"; result.loc[r,3] = "mean" ; result.loc[r,4] = "std"
r += 1
result.loc[r,0] = "temp_°C" ; result.loc[r,1] = adf["temp_°C"].min() ; result.loc[r,2] = adf["temp_°C"].max(); result.loc[r,3] = adf["temp_°C"].mean() ; result.loc[r,4] = adf["temp_°C"].std()
r += 1
result.loc[r,0] = "RH_%rH" ; result.loc[r,1] = adf["RH_%rH"].min() ; result.loc[r,2] = adf["RH_%rH"].max(); result.loc[r,3] = adf["RH_%rH"].mean() ; result.loc[r,4] = adf["RH_%rH"].std()
r += 1
result.loc[r,0] = "CO2_ppm" ; result.loc[r,1] = adf["CO2_ppm"].min() ; result.loc[r,2] = adf["CO2_ppm"].max(); result.loc[r,3] = adf["CO2_ppm"].mean() ; result.loc[r,4] = adf["CO2_ppm"].std()
wdf = pd.read_sql_query("SELECT * FROM weather.weather_all WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).set_index("datetime")
r += 1
result.loc[r,0] = "Wind Speed, m/s" ; result.loc[r,1] = wdf["Wind Speed, m/s"].min() ; result.loc[r,2] = wdf["Wind Speed, m/s"].max(); result.loc[r,3] = wdf["Wind Speed, m/s"].mean() ; result.loc[r,4] = wdf["Wind Speed, m/s"].std()
r += 1
result.loc[r,0] = "Gust Speed, m/s" ; result.loc[r,1] = wdf["Gust Speed, m/s"].min() ; result.loc[r,2] = wdf["Gust Speed, m/s"].max(); result.loc[r,3] = wdf["Gust Speed, m/s"].mean() ; result.loc[r,4] = wdf["Gust Speed, m/s"].std()
#%%%writer
# path = "C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/results/outdoor_results.xlsx"
# book = load_workbook(path)
# writer = pd.ExcelWriter(path, engine = 'openpyxl')
# writer.book = book
# result.to_excel(writer, index = False , sheet_name = experiment[:10])
# writer.save()
# writer.close()
#%%%
result1.loc[i+1,0] = experiment; result1.loc[i+1,1] = adf["temp_°C"].mean(); result1.loc[i+1,2] = adf["temp_°C"].std()
result1.loc[i+1,3] = adf["RH_%rH"].mean();result1.loc[i+1,4] = adf["RH_%rH"].std()
result1.loc[i+1,5] = adf["CO2_ppm"].mean();result1.loc[i+1,6] = adf["CO2_ppm"].std()
result1.loc[i+1,7] = wdf["Wind Speed, m/s"].mean();result1.loc[i+1,8] = wdf["Wind Speed, m/s"].std()
result1.loc[i+1,9] = wdf["Gust Speed, m/s"].mean();result1.loc[i+1,10] = wdf["Gust Speed, m/s"].std();
result1.loc[i+1,11] = wdf["Wind Direction"].mean();result1.loc[i+1,12] = wdf["Wind Direction"].std()
result1.loc[i+1,13] = wdf["Temperature °C"].mean();result1.loc[i+1,14] = wdf["Temperature °C"].std();
result1.loc[i+1,15] = wdf["RH %"].mean();result1.loc[i+1,16] = wdf["RH %"].std()
i = i+1
result1.columns = ["experiment", "temp_°C","std","RH_%rH","std","CO2_ppm" ,"std","Wind Speed, m/s","std","Gust Speed, m/s","std","Wind Direction","std", "Temperature °C","std", "RH %","std"]
#%%%writer
# path = "C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/results/outdoor_results.xlsx"
# book = load_workbook(path)
# writer = pd.ExcelWriter(path, engine = 'openpyxl')
# writer.book = book
# result1.to_excel(writer, index = False , sheet_name = "summary")
# writer.save()
# writer.close()
#%%% Manual selection
# '''
# This section deals with taking input selection of the experiment
# easygui module was used to create the dialogue boxes for easy input
# this is just a more visual way for experiment selection
# '''
# msg ="Please select a Location/Season you like to analyze"
# title = "Season selection"
# choices = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
# database = choicebox(msg, title, choices)
# times = pd.read_excel('C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/excel_files/Times_thesis.xlsx', sheet_name= database)
# msg ="Please select an experiment you would like to analyse in {database}".format(database = str(database))
# title = "Experiment selection"
# choices = list(times['short name'])
# experiment = choicebox(msg, title, choices)
# z = int(times[times['short name'] == experiment].index.values)
# Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
# t0 = times.loc[z,"Start"]
# tn = times.loc[z,"End"]
# #%%
# result = pd.DataFrame(["Outdoor Results"]) ;result.loc[0,1] = experiment
# r = 1
# adf = pd.read_sql_query("SELECT * FROM weather.außen WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).drop("index", axis = 1).set_index("datetime")
# result.loc[r,0] = "parameter" ; result.loc[r,1] = "min" ; result.loc[r,2] = "max"; result.loc[r,3] = "mean" ; result.loc[r,4] = "std"
# r += 1
# result.loc[r,0] = "temp_°C" ; result.loc[r,1] = adf["temp_°C"].min() ; result.loc[r,2] = adf["temp_°C"].max(); result.loc[r,3] = adf["temp_°C"].mean() ; result.loc[r,4] = adf["temp_°C"].std()
# r += 1
# result.loc[r,0] = "RH_%rH" ; result.loc[r,1] = adf["RH_%rH"].min() ; result.loc[r,2] = adf["RH_%rH"].max(); result.loc[r,3] = adf["RH_%rH"].mean() ; result.loc[r,4] = adf["RH_%rH"].std()
# r += 1
# result.loc[r,0] = "CO2_ppm" ; result.loc[r,1] = adf["CO2_ppm"].min() ; result.loc[r,2] = adf["CO2_ppm"].max(); result.loc[r,3] = adf["CO2_ppm"].mean() ; result.loc[r,4] = adf["CO2_ppm"].std()
# wdf = pd.read_sql_query("SELECT * FROM weather.weather_all WHERE datetime BETWEEN '{}' AND '{}'".format(t0,tn), con = engine).set_index("datetime")
# r += 1
# result.loc[r,0] = "Wind Speed, m/s" ; result.loc[r,1] = wdf["Wind Speed, m/s"].min() ; result.loc[r,2] = wdf["Wind Speed, m/s"].max(); result.loc[r,3] = wdf["Wind Speed, m/s"].mean() ; result.loc[r,4] = wdf["Wind Speed, m/s"].std()
# r += 1
# result.loc[r,0] = "Gust Speed, m/s" ; result.loc[r,1] = wdf["Gust Speed, m/s"].min() ; result.loc[r,2] = wdf["Gust Speed, m/s"].max(); result.loc[r,3] = wdf["Gust Speed, m/s"].mean() ; result.loc[r,4] = wdf["Gust Speed, m/s"].std()
# #%%
# path = "C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/results/outdoor_results.xlsx"
# book = load_workbook(path)
# writer = pd.ExcelWriter(path, engine = 'openpyxl')
# writer.book = book
# result.to_excel(writer, index = False , sheet_name = experiment[:10])
# writer.save()
# writer.close()
#%% Wall temperature Indoor
i=0
wall_dict = {"ESHL_summer":"eshl_summer_wall", "ESHL_winter":"eshl_winter_wall", "CBo_summer":"cbo_summer_wall", "CBo_winter":"cbo_winter_wall"}
result_wall = pd.DataFrame(["Wall Summary"])
databases = ["ESHL_summer", "ESHL_winter", "CBo_summer", "CBo_winter"]
for database in databases:
times = pd.read_excel('C:/Users/Raghavakrishna/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/excel_files/Times_thesis.xlsx', sheet_name= database)
choices = list(times['short name'])
for experiment in choices:
z = int(times[times['short name'] == experiment].index.values)
Vdot_sheets = {"ESHL_summer":"ESHL_Vdot", "ESHL_winter":"ESHL_Vdot", "CBo_summer":"CBo_Vdot", "CBo_winter":"CBo_Vdot"}
t0 = times.loc[z,"Start"]
tn = times.loc[z,"End"]
#%%
schema = "weather"
''' this engine is used where ever connection is required to database'''
engine = create_engine("mysql+pymysql://root:Password123@localhost/{}".format(schema),pool_pre_ping=True)
wadf = pd.read_sql_query("SELECT * FROM weather.{} WHERE datetime BETWEEN '{}' AND '{}'".format(wall_dict[database],t0,tn), con = engine).set_index("datetime")
result_wall.loc[i+1,0] = experiment; result_wall.loc[i+1,1] = wadf.mean().mean();result_wall.loc[i+1,2] = wadf.mean().std()
i = i+1
result_wall.columns = ["experiment", "temp_°C", "std"]
#%%writer
# path = "C:/Users/Devineni/OneDrive - bwedu/MA_Raghavakrishna/0_Evaluation/results/indoor_results.xlsx"
# book = load_workbook(path)
# writer = pd.ExcelWriter(path, engine = 'openpyxl')
# writer.book = book
# result_wall.to_excel(writer, index = False , sheet_name = "wall_temp")
# writer.save()
# writer.close()
#%% Humidity Indoor
result_humidity = | pd.DataFrame(["Humidity Summary"]) | pandas.DataFrame |
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_generation.elementary_arithmethics import ElementaryArithmetics
@pytest.fixture
def data_add():
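    # With operator "+" and coef=-2.0 the transformer computes A + (-2.0) * B (i.e. A - 2B);
    # the expected frame uses the generated column names "A__-__B" and "A__-__C".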
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div():
X = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), coef=-2.0, operator="+"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"))
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A__-__B", "A__-__C"],
).astype(np.float32)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
dtype=np.float32,
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_name_add_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, -2.0, -4.0],
[3.0, 4.0, 5.0, -5.0, -7.0],
[6.0, 7.0, 8.0, -8.0, -10.0],
]
),
columns=["A", "B", "C", "A+B", "A+C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"),
columns_b=list("BC"),
coef=-2.0,
operator="+",
column_names=["A+B", "A+C"],
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_mult_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0.0],
[3.0, 4.0, 5.0, 12.0, 15.0],
[6.0, 7.0, 8.0, 42.0, 48.0],
]
),
columns=["A", "B", "C", "A__*__B", "A__*__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="*"
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_div_ks():
X = ks.DataFrame(np.arange(9).reshape(3, 3), columns=list("ABC"), dtype=np.float64)
X_expected = pd.DataFrame(
np.array(
[
[0.0, 1.0, 2.0, 0.0, 0],
[3.0, 4.0, 5.0, 0.75, 0.59999988],
[6.0, 7.0, 8.0, 0.85714286, 0.7499999],
]
),
columns=["A", "B", "C", "A__/__B", "A__/__C"],
)
obj = ElementaryArithmetics(
columns_a=list("AA"), columns_b=list("BC"), operator="/"
).fit(X)
return obj, X, X_expected
def test_add_pd(data_add):
obj, X, X_expected = data_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks(data_add_ks):
obj, X, X_expected = data_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_add_pd_np(data_add):
obj, X, X_expected = data_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_add_ks_np(data_add_ks):
obj, X, X_expected = data_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_float32_add_pd(data_float32_add):
obj, X, X_expected = data_float32_add
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_add_ks_ks(data_float32_add_ks):
obj, X, X_expected = data_float32_add_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_float32_add_pd_np(data_float32_add):
obj, X, X_expected = data_float32_add
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_float32_add_ks_np_ks(data_float32_add_ks):
obj, X, X_expected = data_float32_add_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
def test_mult_pd(data_mult):
obj, X, X_expected = data_mult
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_mult_ks(data_mult_ks):
obj, X, X_expected = data_mult_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_mult_pd_np(data_mult):
obj, X, X_expected = data_mult
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_mult_ks_np(data_mult_ks):
obj, X, X_expected = data_mult_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
X_expected = pd.DataFrame(X_expected.values)
| assert_frame_equal(X_new, X_expected) | pandas.testing.assert_frame_equal |
import pandas as pd
import sys
# Node columns
nodes = ['车系1', '车型', '基本参数', '车身', '变速箱', '底盘转向', '车轮制动', '发动机']
# Basic parameters
base_parameters = ['厂商', '级别', '能源类型', '环保标准', '上市时间', '最大功率(kW)',
'发动机', '变速箱', '车身结构', '最高车速(km/h)', '工信部综合油耗(L/100km)', '整车质保']
# Car body
car_body = ['高度(mm)', '轴距(mm)', '前轮距(mm)', '后轮距(mm)', '最小离地间隙(mm)', '整备质量(kg)', '车身结构',
'车门数(个)', '座位数(个)', '后排车门开启方式', '油箱容积(L)', '货箱尺寸(mm)']
# Gearbox
bian_su_xiang = ['简称', '变速箱类型']
# Chassis and steering
di_pan_zhuan_xiang = ['前悬架类型', '后悬架类型', '助力类型', '车体结构']
# Wheel braking
che_lun_zhi_dong = ['前轮胎规格', '后轮胎规格']
# Engine
fa_dong_ji = [ '排量(L)', '进气形式', '气缸排列形式', '气缸数(个)', '每缸气门数(个)', '压缩比', '配气机构', '缸径(mm)',
'行程(mm)', '最大马力(Ps)', '最大功率(kW)', '最大功率转速(rpm)', '最大扭矩(N·m)', '最大扭矩转速(rpm)',
'燃料形式', '燃油标号', '供油方式', '缸盖材料', '缸体材料', '环保标准']
# Filename mapping
filenames_mapping = {'车系1':'che_xi', '车型':'che_xing', '基本参数':'base_parameters', '车身':'car_body', '变速箱':'bian_su_xiang',
'底盘转向':'di_pan_zhuan_xiang', '车轮制动':'che_lun_zhi_dong', '发动机':'fa_dong_ji'}
if __name__ == '__main__':
data = pd.read_excel('./data/test3.xlsx', index_col='Unnamed: 0')
    # Car series and car model nodes
current_index = 0
for node in nodes[:2]:
result = pd.DataFrame(columns=['id:ID', 'name'])
temp = list(set(data[node]))
result['name'] = temp
result['id:ID'] = [i for i in range(current_index, current_index + len(temp))]
current_index += len(temp)
# to csv
result.to_csv('./data/entities/' + filenames_mapping[node] + '.csv', encoding='utf-8', index=False)
che_xi = | pd.read_csv('./data/entities/che_xi.csv') | pandas.read_csv |
""" Titanic: Machine Learning from Disaster
Predict which passengers survived the Titanic shipwreck.
https://www.kaggle.com/c/titanic/overview """
import pickle
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, classification_report, f1_score
from sklearn.linear_model import LogisticRegressionCV
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import ComplementNB
from xgboost import XGBClassifier
# ------------------------ Helper Functions ------------------------
def scale_feature(data):
scaler = preprocessing.MinMaxScaler()
dataScaled = scaler.fit_transform(data)
return dataScaled
def encode_words(data):
encoder = preprocessing.OrdinalEncoder().fit(data)
dataEnc = encoder.transform(data)
return dataEnc
def count_survivors(dataY):
""" DEPRECATED """
survived = np.count_nonzero(dataY)
died = len(dataY) - survived
print('Survived/Died: {}/{}'.format(survived, died))
return survived, died
def balance_data(data):
""" DEPRECATED """
survived, died = count_survivors(data['Survived'])
remove_n = abs(survived - died)
dropId = []
if survived > died:
        dropId = np.random.choice(data[data['Survived'] == 1].index, remove_n, replace=False)
elif survived < died:
dropId = np.random.choice(data[data['Survived'] == 0].index, remove_n, replace=False)
dataBalanced = data.drop(dropId)
return dataBalanced
# ------------------------ Features Settings ------------------------
def get_features(data):
# scale numerical features
data['Age'] = scale_feature(data['Age'].fillna(data['Age'].median()).values.reshape(-1, 1))
data['Fare'] = scale_feature(data['Fare'].fillna(data['Fare'].median()).values.reshape(-1, 1))
# encode cathegorical features
data['Sex'] = encode_words(data['Sex'].values.reshape(-1, 1))
data['Embarked'] = encode_words(data['Embarked'].fillna('A').values.reshape(-1, 1))
# select features
xlabels = ['Pclass', 'Age', 'SibSp', 'Fare', 'Parch', 'Sex', 'Embarked']
dataX = data[xlabels].values.reshape(-1, len(xlabels))
return dataX
# ------------------------ Model settings ------------------------
def log_regression(trainX, trainY):
""" Logistic regression with CV """
model = LogisticRegressionCV(penalty='l2', solver='liblinear')
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def naive_bayes(trainX, trainY):
""" The Complement Naive Bayes classifier """
model = ComplementNB()
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def svm_rbf(trainX, trainY):
""" SVM model. Kernel: linear, poly, rbf, sigmoid """
model = svm.SVC(kernel='rbf')
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def random_forest(trainX, trainY):
""" A random forest classifier. """
model = RandomForestClassifier(n_estimators=20, max_depth=5)
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def decision_tree(trainX, trainY):
""" A decision tree classifier. Criterion: gini, entropy """
model = DecisionTreeClassifier(criterion='gini', max_depth=3)
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def mlp_classifier(trainX, trainY):
model = MLPClassifier(
hidden_layer_sizes=(128, ),
activation='relu',
solver='adam',
alpha=0.0001,
batch_size=20,
learning_rate='adaptive',
learning_rate_init=0.001,
early_stopping=True)
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
def xgboost_classifier(trainX, trainY):
model = XGBClassifier(max_depth=2, learning_rate=0.001, reg_alpha=0.0001, reg_lambda=0.0001)
model.fit(trainX, trainY)
pickle.dump(model, open('model.pickle', 'wb'))
# ------------------------ Test settings ------------------------
def predict(dataX):
# load model
model = pickle.load(open('model.pickle', 'rb'))
# predict
predY = model.predict(dataX)
return predY
def run_test(testX, testY):
predY = predict(testX)
# model metrics
confMatrix = confusion_matrix(testY, predY)
report = classification_report(testY, predY)
f1 = f1_score(testY, predY)
# print(confMatrix)
# print(report)
return f1
def train_test_all(trainX, testX, trainY, testY):
log_regression(trainX, trainY)
print('log_regression_f1 = {:.2f}'.format(run_test(testX, testY)))
naive_bayes(trainX, trainY)
print('naive_bayes_f1 = {:.2f}'.format(run_test(testX, testY)))
svm_rbf(trainX, trainY)
print('svm_rbf_f1 = {:.2f}'.format(run_test(testX, testY)))
random_forest(trainX, trainY)
print('random_forest_f1 = {:.2f}'.format(run_test(testX, testY)))
decision_tree(trainX, trainY)
print('decision_tree_f1 = {:.2f}'.format(run_test(testX, testY)))
mlp_classifier(trainX, trainY)
print('mlp_classifier_f1 = {:.2f}'.format(run_test(testX, testY)))
xgboost_classifier(trainX, trainY)
print('xgboost_classifier_f1 = {:.2f}'.format(run_test(testX, testY)))
def get_submission(data):
dataX = get_features(data)
predY = predict(dataX)
# write to csv
result = pd.DataFrame({'Survived': predY}, index=data.index)
result.to_csv('submission.csv', index=True)
print('Submission saved.')
def main():
# load data
dataTrain = pd.read_csv('train.csv', index_col='PassengerId', header=0)
dataTest = | pd.read_csv('test.csv', index_col='PassengerId', header=0) | pandas.read_csv |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
import matplotlib.lines as mlines
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
snv_categories = ["sample",
"A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
"C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
"G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
"T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
"A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
"C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
"G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
"T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
"A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
"C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
"G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
"T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
"A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
"C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
"G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
"T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
"A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
"C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
"G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
"T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
"A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
"C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
"G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
"T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
indel_categories = ["sample",
"1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
"1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
"1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
"1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
"2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
"3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
"4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
"5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
"2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
"3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
"4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
"5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
"2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
"5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
cnv_categories = ["sample",
"BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
"CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
"CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
"BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
"SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
"SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
"CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
"CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
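    # Read the SNV, indel and CNV feature tables, enforce the expected column order,
    # and left-merge them on "sample" into a single signature dataframe.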
df_snv = pd.read_csv(snv_counts_path, sep='\t', low_memory=False)
df_snv = df_snv[snv_categories]
df_snv["sample"] = df_snv["sample"].astype(str)
df_indel = pd.read_csv(indel_counts_path, sep='\t', low_memory=False)
df_indel = df_indel[indel_categories]
df_indel["sample"] = df_indel["sample"].astype(str)
df_cnv = pd.read_csv(cnv_counts_path, sep='\t', low_memory=False)
df_cnv = df_cnv[cnv_categories]
df_cnv["sample"] = df_cnv["sample"].astype(str)
df_sigs = pd.merge(df_snv, df_indel, on="sample", how='left').fillna(0)
df_sigs = pd.merge(df_sigs, df_cnv, on="sample", how='left').reset_index(drop=True)
return df_sigs
#%% ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
# file paths relative to this script
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "sup_fig1")
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = | pd.read_csv(cohort_data, sep='\t', low_memory=False) | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# # Vatsal's Code
# This notebook shows you how to build a model for predicting degradation at various locations along RNA sequence.
# * We will first pre-process and tokenize the sequence, secondary structure and loop type.
# * Then, we will use all the information to train a model on degradations recorded by the researchers from OpenVaccine.
# * Finally, we run our model on the public test set (shorter sequences) and the private test set (longer sequences), and submit the predictions.
#
# In[1]:
# %%capture
# !pip install forgi
# !yes Y |conda install -c bioconda viennarna
# In[2]:
import json,os, math
import subprocess
# from forgi.graph import bulge_graph
# import forgi.visual.mplotlib as fvm
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow.keras.backend as K
import plotly.express as px
import tensorflow.keras.layers as L
import tensorflow as tf
import warnings
warnings.filterwarnings('ignore')
import tensorflow_addons as tfa
from itertools import combinations_with_replacement
from sklearn.model_selection import train_test_split, KFold, StratifiedKFold,GroupKFold
from keras.utils import plot_model
from colorama import Fore, Back, Style
# ### Configuration
# In[3]:
###### USE DIFFERENT SEED FOR DIFFERENT STRATIFIED KFOLD
SEED = 53
###### NUMBER OF FOLDS. USE 3, 5, 7,...
n_folds=5
###### TRAIN DEBUG
debug=True
###### APPLY WINDOW FEATURES
Window_features = True
###### Number of Feature Given to Model
# cat_feature = 3 ## ( Categorical Features Only)
# num_features = 1 ## ( Numerical Features Only)
###### Model Configuration ######
model_name="GG" ## MODEL NAME (Files will save according to this )
epochs=100 ## NUMBER OF EPOCHS MODEL TRAIN IN EACH FOLD. USE 3, 5, 7,...
BATCH_SIZE = 32 ## NUMBER OF BATCH_SIZE USE 16, 32, 64, 128,...
n_layers = 2 ## Number of Layers Present in model # ex. 3 Layer of GRU Model
layers = ["GRU","GRU"] ## Stacking sequence of GRU and LSTM (list of length == n_layers)
hidden_dim = [128, 128] ## Hidden Dimension in Model (Default : [128,128]) (list of length == n_layers)
dropout = [0.5, 0.5] ## 1.0 means no dropout, and 0.0 means no outputs from the layer.
sp_dropout = 0.2 ## SpatialDropout1D (Fraction of the input units to drop) [https://stackoverflow.com/a/55244985]
embed_dim = 250 ## Output Dimention of Embedding Layer (Default : 75)
num_hidden_units = 8 ## Number of GRU units after num_input layer
###### LR Schedular ######
Cosine_Schedule = True ## cosine_schedule Rate
Rampup_decy_lr = False ## Rampup decy lr Schedule
# ### Set Seed
# In[4]:
def seed_everything(seed=1234):
np.random.seed(seed)
tf.random.set_seed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
os.environ['TF_DETERMINISTIC_OPS'] = '1'
seed_everything(SEED)
# ### Used Columns
#
# In[5]:
target_cols = ['reactivity', 'deg_Mg_pH10', 'deg_Mg_50C', 'deg_pH10', 'deg_50C']
window_columns = ['sequence','structure','predicted_loop_type']
categorical_features = ['sequence', 'structure', 'predicted_loop_type',]
# 'predicted_loop_index']
cat_feature = len(categorical_features)
if Window_features:
cat_feature += len(window_columns)
numerical_features = ['BPPS_Max','BPPS_nb', 'BPPS_sum',
'positional_entropy',
'stems', 'interior_loops', 'multiloops',#'hairpin loops', 'fiveprimes', 'threeprimes',
'A_percent', 'G_percent','C_percent', 'U_percent',
'U-G', 'C-G', 'U-A', 'G-C', 'A-U', 'G-U',
# 'E', 'S', 'H', 'B', 'X', 'I', 'M',
'pair_map', 'pair_distance', ]
num_features = len(numerical_features) ## ( Numerical Features Only)
feature_cols = categorical_features + numerical_features
pred_col_names = ["pred_"+c_name for c_name in target_cols]
target_eval_col = ['reactivity','deg_Mg_pH10','deg_Mg_50C']
pred_eval_col = ["pred_"+c_name for c_name in target_eval_col]
# ### Load and preprocess data
# In[6]:
data_dir = '/kaggle/input/stanford-covid-vaccine/'
fearure_data_path = '../input/openvaccine/'
# train = pd.read_csv(fearure_data_path+'train.csv')
# test = pd.read_csv(fearure_data_path+'test.csv')
train = pd.read_json(fearure_data_path+'train.json')
test = pd.read_json(fearure_data_path+'test.json')
# train_j = pd.read_json(data_dir + 'train.json', lines=True)
# test_j = pd.read_json(data_dir + 'test.json', lines=True)
sample_sub = pd.read_csv(data_dir + 'sample_submission.csv')
# In[7]:
train[target_cols] = train[target_cols].applymap(lambda x: x[1:-1].split(", "))
# In[8]:
# train = train[train['SN_filter'] == 1]
train = train[train['signal_to_noise'] >= 0.5]
# In[9]:
def pair_feature(row):
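    # For each position, join its flanking characters (left and right neighbours) into a
    # two-character token, padding the sequence ends with '_'; output length equals input length.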
arr = list(row)
its = [iter(['_']+arr[:]) ,iter(arr[1:]+['_'])]
list_touple = list(zip(*its))
return list(map("".join,list_touple))
# In[10]:
def preprocess_categorical_inputs(df, cols=categorical_features,Window_features=Window_features):
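    # Optionally add the paired-window columns, then map every character (or pair token) to
    # its integer id via token2int, returning an array of shape (n_samples, seq_len, n_features).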
if Window_features:
for c in window_columns:
df["pair_"+c] = df[c].apply(pair_feature)
cols.append("pair_"+c)
cols = list(set(cols))
return np.transpose(
np.array(
df[cols]
.applymap(lambda seq: [token2int[x] for x in seq])
.values
.tolist()
),
(0, 2, 1)
)
# In[11]:
def preprocess_numerical_inputs(df, cols=numerical_features):
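    # Stack the per-position numerical feature lists into an array of shape
    # (n_samples, seq_len, n_numerical_features).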
return np.transpose(
np.array(
df[cols].values.tolist()
),
(0, 2, 1)
)
# In[12]:
# We will use this dictionary to map each character to an integer
# so that it can be used as an input in keras
# ().ACGUBEHIMSXshftim0123456789[]{}'_,
token_list = list("().ACGUBEHIMSXshftim0123456789[]{}'_,")
if Window_features:
    comb = combinations_with_replacement(list("_().ACGUBEHIMSXshftim0123456789[]{}'_,"*2), 2)
token_list += list(set(list(map("".join,comb))))
token2int = {x:i for i, x in enumerate(list(set(token_list)))}
print("token_list Size :",len(token_list))
train_inputs_all_cat = preprocess_categorical_inputs(train,cols=categorical_features)
train_inputs_all_num = preprocess_numerical_inputs(train,cols=numerical_features)
train_labels_all = np.array(train[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
print("Train numerical Features Shape : ",train_inputs_all_num.shape)
print("Train labels Shape : ",train_labels_all.shape)
# ### Reduce Train Data
# In[13]:
# train_inputs_all_cat = train_inputs_all_cat[:,:68,:]
# train_inputs_all_num = train_inputs_all_num[:,:68,:]
# train_labels_all = train_labels_all[:,:68,:]
# print("Train categorical Features Shape : ",train_inputs_all_cat.shape)
# print("Train numerical Features Shape : ",train_inputs_all_num.shape)
# print("Train labels Shape : ",train_labels_all.shape)
# #### Public and private sets have different sequence lengths, so we will preprocess them separately and load models of different tensor shapes.
# In[14]:
public_df = test.query("seq_length == 107")
private_df = test.query("seq_length == 130")
print("public_df : ",public_df.shape)
print("private_df : ",private_df.shape)
public_inputs_cat = preprocess_categorical_inputs(public_df)
private_inputs_cat = preprocess_categorical_inputs(private_df)
public_inputs_num = preprocess_numerical_inputs(public_df,cols=numerical_features)
private_inputs_num = preprocess_numerical_inputs(private_df,cols=numerical_features)
print("Public categorical Features Shape : ",public_inputs_cat.shape)
print("Public numerical Features Shape : ",public_inputs_num.shape)
print("Private categorical Features Shape : ",private_inputs_cat.shape)
print("Private numerical Features Shape : ",private_inputs_num.shape)
# ### loss Function
# In[15]:
### Custom Loss Function for ['reactivity','deg_Mg_pH10','deg_Mg_50C'] target Columns
# def rmse(y_actual, y_pred):
# mse = tf.keras.losses.mean_squared_error(y_actual, y_pred)
# return K.sqrt(mse)
# def MCRMSE(y_actual, y_pred, num_scored=3):
# score = 0
# for i in range(num_scored):
# score += rmse(y_actual[:,:, i], y_pred[:,:, i]) / num_scored
# return score
def MCRMSE(y_true, y_pred):
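    # Mean column-wise RMSE over the three scored targets (reactivity, deg_Mg_pH10, deg_Mg_50C).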
colwise_mse = tf.reduce_mean(tf.square(y_true[:,:,:3] - y_pred[:,:,:3]), axis=1)
return tf.reduce_mean(tf.sqrt(colwise_mse), axis=1)
# ### Learning Rate Schedulars
# ### Rampup decy lr Schedule
# In[16]:
def get_lr_callback(batch_size=8):
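    # Linear ramp-up to lr_max, a short plateau, then exponential decay towards lr_min.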
lr_start = 0.00001
lr_max = 0.004
lr_min = 0.00005
lr_ramp_ep = 45
lr_sus_ep = 2
lr_decay = 0.8
def lrfn(epoch):
if epoch < lr_ramp_ep:
lr = (lr_max - lr_start) / lr_ramp_ep * epoch + lr_start
elif epoch < lr_ramp_ep + lr_sus_ep:
lr = lr_max
else:
lr = (lr_max - lr_min) * lr_decay**(epoch - lr_ramp_ep - lr_sus_ep) + lr_min
return lr
lr_callback = tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
return lr_callback
# ### Cosine schedule with warmup
# In[17]:
def get_cosine_schedule_with_warmup(lr,num_warmup_steps, num_training_steps, num_cycles=3.5):
"""
Modified version of the get_cosine_schedule_with_warmup from huggingface.
(https://huggingface.co/transformers/_modules/transformers/optimization.html#get_cosine_schedule_with_warmup)
Create a schedule with a learning rate that decreases following the
values of the cosine function between 0 and `pi * cycles` after a warmup
period during which it increases linearly between 0 and 1.
"""
def lrfn(epoch):
if epoch < num_warmup_steps:
return (float(epoch) / float(max(1, num_warmup_steps))) * lr
progress = float(epoch - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) * lr
return tf.keras.callbacks.LearningRateScheduler(lrfn, verbose=False)
# ### Different Layers
# In[18]:
def lstm_layer(hidden_dim, dropout):
return tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(hidden_dim,
dropout=dropout,
return_sequences=True,
kernel_initializer = 'orthogonal'))
# In[19]:
def gru_layer(hidden_dim, dropout):
return L.Bidirectional(
L.GRU(hidden_dim, dropout=dropout, return_sequences=True, kernel_initializer='orthogonal')
)
# ### Model Building
# In[20]:
# def build_model(embed_size,
# seq_len = 107,
# pred_len = 68,
# dropout = dropout,
# sp_dropout = sp_dropout,
# num_features = num_features,
# num_hidden_units = num_hidden_units,
# embed_dim = embed_dim,
# layers = layers,
# hidden_dim = hidden_dim,
# n_layers = n_layers,
# cat_feature = cat_feature):
# inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
# embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
# reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
# reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
# numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
# hidden = L.concatenate([reshaped_conv, numerical_conv])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
# for x in range(n_layers):
# if layers[x] == "GRU":
# hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
# else:
# hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
# # Since we are only making predictions on the first part of each sequence,
# # we have to truncate it
# truncated = hidden[:, :pred_len]
# out = L.Dense(5)(truncated)
# model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
# adam = tf.optimizers.Adam()
# radam = tfa.optimizers.RectifiedAdam()
# lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
# ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
# model.compile(optimizer=radam, loss=MCRMSE)
# return model
# In[21]:
def build_model(embed_size,
seq_len = 107,
pred_len = 68,
dropout = dropout,
sp_dropout = sp_dropout,
num_features = num_features,
num_hidden_units = num_hidden_units,
embed_dim = embed_dim,
layers = layers,
hidden_dim = hidden_dim,
n_layers = n_layers,
cat_feature = cat_feature):
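    # Embed the categorical tokens, pass them through a Conv1D, concatenate the numerical
    # features, then stack bidirectional GRU/LSTM layers with skip connections back to the
    # convolutional features before the Dense(5) head; only the first pred_len positions are kept.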
inputs = L.Input(shape=(seq_len, cat_feature),name='category_input')
embed = L.Embedding(input_dim=embed_size, output_dim=embed_dim)(inputs)
reshaped = tf.reshape(embed, shape=(-1, embed.shape[1], embed.shape[2] * embed.shape[3]))
reshaped = L.SpatialDropout1D(sp_dropout)(reshaped)
reshaped_conv = tf.keras.layers.Conv1D(filters=512, kernel_size=3,strides=1, padding='same', activation='elu')(reshaped)
numerical_input = L.Input(shape=(seq_len, num_features), name='numeric_input')
# n_Dense_1 = L.Dense(64)(numerical_input)
# n_Dense_2 = L.Dense(128)(n_Dense_1)
# numerical_conv = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(n_Dense_2)
hidden = L.concatenate([reshaped_conv, numerical_input])
hidden_1 = tf.keras.layers.Conv1D(filters=256, kernel_size=4,strides=1, padding='same', activation='elu')(hidden)
hidden = gru_layer(128, 0.5)(hidden_1)
hidden = L.concatenate([hidden, hidden_1])
# hidden = L.SpatialDropout1D(sp_dropout)(hidden)
for x in range(n_layers):
if layers[x] == "GRU":
hidden = gru_layer(hidden_dim[x], dropout[x])(hidden)
else:
hidden = lstm_layer(hidden_dim[x], dropout[x])(hidden)
hidden = L.concatenate([hidden, hidden_1])
# Since we are only making predictions on the first part of each sequence,
# we have to truncate it
truncated = hidden[:, :pred_len]
out = L.Dense(5)(truncated)
model = tf.keras.Model(inputs=[inputs] + [numerical_input], outputs=out)
adam = tf.optimizers.Adam()
radam = tfa.optimizers.RectifiedAdam()
lookahead = tfa.optimizers.Lookahead(adam, sync_period=6)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6)
model.compile(optimizer=radam, loss=MCRMSE)
return model
# ### Build and train model
#
# We will train a bi-directional GRU model. It has three layers and uses dropout. To learn more about RNNs, LSTM and GRU, please see [this blog post](https://colah.github.io/posts/2015-08-Understanding-LSTMs/).
# In[22]:
model = build_model(embed_size=len(token_list))
plot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)
# ### Add Augmentation Data
# ### stratify_group Based on structure and SN_Filter
# In[23]:
def get_stratify_group(row):
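    # Build a grouping key from the sequence id plus a signal_to_noise bucket (binned
    # separately for SN_filter 0/1); the key is category-encoded and used as the GroupKFold group.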
snf = row['SN_filter']
snr = row['signal_to_noise']
cnt = row['cnt']
id_ = row['id']
structure = row['structure']
if snf == 0:
if snr<0:
snr_c = 0
elif 0<= snr < 2:
snr_c = 1
elif 2<= snr < 4:
snr_c = 2
elif 4<= snr < 5.5:
snr_c = 3
elif 5.5<= snr < 10:
snr_c = 4
elif snr >= 10:
snr_c = 5
else: # snf == 1
if snr<0:
snr_c = 6
elif 0<= snr < 1:
snr_c = 7
elif 1<= snr < 2:
snr_c = 8
elif 2<= snr < 3:
snr_c = 9
elif 3<= snr < 4:
snr_c = 10
elif 4<= snr < 5:
snr_c = 11
elif 5<= snr < 6:
snr_c = 12
elif 6<= snr < 7:
snr_c = 13
elif 7<= snr < 8:
snr_c = 14
elif 8<= snr < 9:
snr_c = 15
elif 9<= snr < 10:
snr_c = 15
elif snr >= 10:
snr_c = 16
return '{}_{}'.format(id_,snr_c)
train['stratify_group'] = train.apply(get_stratify_group, axis=1)
train['stratify_group'] = train['stratify_group'].astype('category').cat.codes
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
fig, ax = plt.subplots(n_folds,3,figsize=(20,5*n_folds))
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
val_data = val_data[val_data['cnt'] == 1]
print("Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
# print(train_data['stratify_group'].unique(),val_data['stratify_group'].unique())
print("number of Train Data points : ",len(train_data))
print("number of val_data Data points : ",len(val_data))
print("number of unique Structure in Train data : ", len(train_data.structure.unique()))
print("number of unique Structure in val data : ",len(val_data.structure.unique()), val_data.structure.value_counts()[:5].values)
print("Train SN_Filter == 1 : ", len(train_data[train_data['SN_filter']==1]))
print("val_data SN_Filter == 1 : ", len(val_data[val_data['SN_filter']==1]))
print("Train SN_Filter == 0 : ", len(train_data[train_data['SN_filter']==0]))
print("val_data SN_Filter == 0 : ", len(val_data[val_data['SN_filter']==0]))
print("Unique ID :",len(train_data.id.unique()))
sns.kdeplot(train[train['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==0]['signal_to_noise'],ax=ax[Fold][0],color="Green",label='Validation')
ax[Fold][0].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 0')
sns.kdeplot(train[train['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Red",label='Train All')
sns.kdeplot(train_data[train_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Blue",label='Train')
sns.kdeplot(val_data[val_data['SN_filter']==1]['signal_to_noise'],ax=ax[Fold][1],color="Green",label='Validation')
ax[Fold][1].set_title(f'Fold : {Fold+1} Signal/Noise & SN_filter == 1')
sns.kdeplot(train['signal_to_noise'],ax=ax[Fold][2],color="Red",label='Train All')
sns.kdeplot(train_data['signal_to_noise'],ax=ax[Fold][2],color="Blue",label='Train')
sns.kdeplot(val_data['signal_to_noise'],ax=ax[Fold][2],color="Green",label='Validation')
ax[Fold][2].set_title(f'Fold : {Fold+1} Signal/Noise')
plt.show()
# In[24]:
submission = pd.DataFrame(index=sample_sub.index, columns=target_cols).fillna(0) # test dataframe with 0 values
val_losses = []
historys = []
oof_preds_all = []
stacking_pred_all = []
kf = KFold(n_folds, shuffle=True, random_state=SEED)
skf = StratifiedKFold(n_folds, shuffle=True, random_state=SEED)
gkf = GroupKFold(n_splits=n_folds)
for Fold, (train_index, val_index) in enumerate(gkf.split(train_inputs_all_cat, groups=train['stratify_group'])):
print(Fore.YELLOW);print('#'*45);print("### Fold : ", str(Fold+1));print('#'*45);print(Style.RESET_ALL)
print(f"|| Batch_size: {BATCH_SIZE} \n|| n_layers: {n_layers} \n|| embed_dim: {embed_dim}")
print(f"|| cat_feature: {cat_feature} \n|| num_features: {num_features}")
print(f"|| layers : {layers} \n|| hidden_dim: {hidden_dim} \n|| dropout: {dropout} \n|| sp_dropout: {sp_dropout}")
train_data = train.iloc[train_index]
val_data = train.iloc[val_index]
print("|| number Augmented data Present in Val Data : ",len(val_data[val_data['cnt'] != 1]))
print("|| number Augmented data Present in Train Data : ",len(train_data[train_data['cnt'] != 1]))
print("|| Data Lekage : ",len(val_data[val_data['id'].isin(train_data['id'])]))
val_data = val_data[val_data['cnt'] == 1]
model_train = build_model(embed_size=len(token_list))
model_short = build_model(embed_size=len(token_list),seq_len=107, pred_len=107)
model_long = build_model(embed_size=len(token_list),seq_len=130, pred_len=130)
train_inputs_cat = preprocess_categorical_inputs(train_data,cols=categorical_features)
train_inputs_num = preprocess_numerical_inputs(train_data,cols=numerical_features)
train_labels = np.array(train_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
val_inputs_cat = preprocess_categorical_inputs(val_data,cols=categorical_features)
val_inputs_num = preprocess_numerical_inputs(val_data,cols=numerical_features)
val_labels = np.array(val_data[target_cols].values.tolist(),dtype =np.float32).transpose((0, 2, 1))
# train_inputs_cat, train_labels = train_inputs_all_cat[train_index], train_labels_all[train_index]
# val_inputs_cat, val_labels = train_inputs_all_cat[val_index], train_labels_all[val_index]
# train_inputs_num, val_inputs_num = train_inputs_all_num[train_index],train_inputs_all_num[val_index]
# csv_logger
csv_logger = tf.keras.callbacks.CSVLogger(f'Fold_{Fold}_log.csv', separator=',', append=False)
# SAVE BEST MODEL EACH FOLD
checkpoint = tf.keras.callbacks.ModelCheckpoint(f'{model_name}_Fold_{Fold}.h5',
monitor='val_loss',
verbose=0,
mode='min',
save_freq='epoch')
if Cosine_Schedule:
#cosine Callback
lr_schedule= get_cosine_schedule_with_warmup(lr=0.001, num_warmup_steps=20, num_training_steps=epochs)
elif Rampup_decy_lr :
# Rampup decy lr
lr_schedule = get_lr_callback(BATCH_SIZE)
else:
lr_schedule = tf.keras.callbacks.ReduceLROnPlateau()
history = model_train.fit(
{'numeric_input': train_inputs_num,
'category_input': train_inputs_cat} , train_labels,
validation_data=({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat}
,val_labels),
batch_size=BATCH_SIZE,
epochs=epochs,
        callbacks=[lr_schedule, checkpoint, csv_logger],
verbose=1 if debug else 0
)
print("Min Validation Loss : ", min(history.history['val_loss']))
print("Min Validation Epoch : ",np.argmin( history.history['val_loss'] )+1)
val_losses.append(min(history.history['val_loss']))
historys.append(history)
model_short.load_weights(f'{model_name}_Fold_{Fold}.h5')
model_long.load_weights(f'{model_name}_Fold_{Fold}.h5')
public_preds = model_short.predict({'numeric_input': public_inputs_num,
'category_input': public_inputs_cat})
private_preds = model_long.predict({'numeric_input': private_inputs_num,
'category_input': private_inputs_cat})
oof_preds = model_train.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
stacking_pred = model_short.predict({'numeric_input': val_inputs_num,
'category_input': val_inputs_cat})
preds_model = []
for df, preds in [(public_df, public_preds), (private_df, private_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_df = pd.DataFrame(single_pred, columns=target_cols)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
preds_model.append(single_df)
preds_model_df = pd.concat(preds_model)
preds_model_df = preds_model_df.groupby(['id_seqpos'],as_index=True).mean()
submission[target_cols] += preds_model_df[target_cols].values / n_folds
for df, preds in [(val_data, oof_preds)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
single_label = val_labels[i]
single_label_df = pd.DataFrame(single_label, columns=target_cols)
single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])]
single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])]
single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])]
single_df = pd.DataFrame(single_pred, columns=pred_col_names)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
single_df = pd.merge(single_label_df,single_df, on="id_seqpos", how="left")
oof_preds_all.append(single_df)
for df, preds in [(val_data, stacking_pred)]:
for i, uid in enumerate(df.id):
single_pred = preds[i]
# single_label = val_labels[i]
# single_label_df = pd.DataFrame(single_label, columns=target_cols)
# single_label_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_label_df.shape[0])]
# single_label_df['id'] = [f'{uid}' for x in range(single_label_df.shape[0])]
# single_label_df['s_id'] = [x for x in range(single_label_df.shape[0])]
single_df = pd.DataFrame(single_pred, columns=pred_col_names)
single_df['id_seqpos'] = [f'{uid}_{x}' for x in range(single_df.shape[0])]
single_df['id'] = [uid for x in range(single_df.shape[0])]
stacking_pred_all.append(single_df)
# PLOT TRAINING
history_data = pd.read_csv(f'Fold_{Fold}_log.csv')
EPOCHS = len(history_data['epoch'])
history = pd.DataFrame({'history':history_data.to_dict('list')})
fig = plt.figure(figsize=(15,5))
plt.plot(np.arange(EPOCHS),history.history['lr'],'-',label='Learning Rate',color='#ff7f0e')
x = np.argmax( history.history['lr'] ); y = np.max( history.history['lr'] )
xdist = plt.xlim()[1] - plt.xlim()[0]; ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#1f77b4'); plt.text(x-0.03*xdist,y-0.13*ydist,f'Max Learning Rate : {y}' ,size=12)
plt.ylabel('Learning Rate',size=14); plt.xlabel('Epoch',size=14)
plt.legend(loc=1)
plt2 = plt.gca().twinx()
plt2.plot(np.arange(EPOCHS),history.history['loss'],'-o',label='Train Loss',color='#2ca02c')
plt2.plot(np.arange(EPOCHS),history.history['val_loss'],'-o',label='Val Loss',color='#d62728')
x = np.argmin( history.history['val_loss'] ); y = np.min( history.history['val_loss'] )
ydist = plt.ylim()[1] - plt.ylim()[0]
plt.scatter(x,y,s=200,color='#d62728'); plt.text(x-0.03*xdist,y+0.05*ydist,'min loss',size=14)
plt.ylabel('Loss',size=14)
fig.text(s=f"Model Name : {model_name}" , x=0.5, y=1.08, fontsize=18, ha='center', va='center',color="green")
fig.text(s=f"|| Fold : {Fold+1} | Batch_size: {BATCH_SIZE} | num_features: {num_features} | cat_feature: {cat_feature} |n_layers: {n_layers} | embed_dim: {embed_dim} ||", x=0.5, y=1.0, fontsize=15, ha='center', va='center',color="red")
fig.text(s=f"|| layers : {layers} | hidden_dim: {hidden_dim} | dropout: {dropout} | sp_dropout: {sp_dropout} ||", x=0.5, y=0.92, fontsize=15, ha='center', va='center',color="blue")
plt.legend(loc=3)
plt.savefig(f'Fold_{Fold+1}.png', bbox_inches='tight')
plt.show()
submission["id_seqpos"] = preds_model_df.index
submission = | pd.merge(sample_sub["id_seqpos"], submission, on="id_seqpos", how="left") | pandas.merge |
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 10:42:22 2021
@author: ali.raza
"""
import socket
import pickle
import threading
import time
from mlsocket import MLSocket
import pygad
import pygad.nn
import pygad.gann
import numpy
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix
from keras.utils.np_utils import to_categorical
from sklearn.utils import class_weight
import warnings
from keras.callbacks import TensorBoard
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
from keras.models import Model
from keras.models import Sequential
from keras.layers import Convolution1D, ZeroPadding1D, MaxPooling1D, BatchNormalization, Activation, Dropout, Flatten, Dense
from keras.layers import Conv1D, Dense, MaxPool1D, Flatten, Input
import tensorflow as tf
from sklearn.utils import resample
from sklearn.model_selection import train_test_split
from tensorflow.keras import datasets, layers, models
from sklearn.metrics import precision_recall_curve, roc_curve
from sklearn.preprocessing import label_binarize
from tensorflow.keras.models import Sequential, load_model
import cv2
import h5py
from sklearn.model_selection import train_test_split
#-----------------------------------------Data Preparation--------------------------------------------------------------------------
warnings.filterwarnings('ignore')
train_df=pd.read_csv('mitbih_train.csv',header=None)
test_df=pd.read_csv('mitbih_test.csv',header=None)
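# Balance the five heartbeat classes: upsample each minority class to 20,000 train
# (10,000 test) rows and downsample class 0 to the same size.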
df_1=train_df[train_df[188]==1]
df_2=train_df[train_df[188]==2]
df_3=train_df[train_df[188]==3]
df_4=train_df[train_df[188]==4]
df_0=(train_df[train_df[188]==0]).sample(n=20000,random_state=42)
df_1_upsample=resample(df_1,replace=True,n_samples=20000,random_state=123)
df_2_upsample=resample(df_2,replace=True,n_samples=20000,random_state=124)
df_3_upsample=resample(df_3,replace=True,n_samples=20000,random_state=125)
df_4_upsample=resample(df_4,replace=True,n_samples=20000,random_state=126)
df=pd.concat([df_0,df_1_upsample,df_2_upsample,df_3_upsample,df_4_upsample])
dft_1=test_df[test_df[188]==1]
dft_2=test_df[test_df[188]==2]
dft_3=test_df[test_df[188]==3]
dft_4=test_df[test_df[188]==4]
dft_0=(test_df[test_df[188]==0]).sample(n=10000,random_state=42)
dft_1_upsample=resample(dft_1,replace=True,n_samples=10000,random_state=123)
dft_2_upsample=resample(dft_2,replace=True,n_samples=10000,random_state=124)
dft_3_upsample=resample(dft_3,replace=True,n_samples=10000,random_state=125)
dft_4_upsample=resample(dft_4,replace=True,n_samples=10000,random_state=126)
dft= | pd.concat([dft_0,dft_1_upsample,dft_2_upsample,dft_3_upsample,dft_4_upsample]) | pandas.concat |
import nose
import unittest
import os
import sys
import warnings
from datetime import datetime
import numpy as np
from pandas import (Series, DataFrame, Panel, MultiIndex, bdate_range,
date_range, Index)
from pandas.io.pytables import HDFStore, get_store, Term, IncompatibilityWarning
import pandas.util.testing as tm
from pandas.tests.test_series import assert_series_equal
from pandas.tests.test_frame import assert_frame_equal
from pandas import concat, Timestamp
try:
import tables
except ImportError:
raise nose.SkipTest('no pytables')
from distutils.version import LooseVersion
_default_compressor = LooseVersion(tables.__version__) >= '2.2' \
and 'blosc' or 'zlib'
_multiprocess_can_split_ = False
class TestHDFStore(unittest.TestCase):
path = '__test__.h5'
scratchpath = '__scratch__.h5'
def setUp(self):
self.store = HDFStore(self.path)
def tearDown(self):
self.store.close()
os.remove(self.path)
def test_factory_fun(self):
try:
with get_store(self.scratchpath) as tbl:
raise ValueError('blah')
except ValueError:
pass
with get_store(self.scratchpath) as tbl:
tbl['a'] = tm.makeDataFrame()
with get_store(self.scratchpath) as tbl:
self.assertEquals(len(tbl), 1)
self.assertEquals(type(tbl['a']), DataFrame)
os.remove(self.scratchpath)
def test_keys(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.assertEquals(len(self.store), 5)
self.assert_(set(self.store.keys()) == set(['/a', '/b', '/c', '/d', '/foo/bar']))
def test_repr(self):
repr(self.store)
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeStringSeries()
self.store['c'] = tm.makeDataFrame()
self.store['d'] = tm.makePanel()
self.store['foo/bar'] = tm.makePanel()
self.store.append('e', tm.makePanel())
repr(self.store)
str(self.store)
def test_contains(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
self.store['foo/bar'] = tm.makeDataFrame()
self.assert_('a' in self.store)
self.assert_('b' in self.store)
self.assert_('c' not in self.store)
self.assert_('foo/bar' in self.store)
self.assert_('/foo/bar' in self.store)
self.assert_('/foo/b' not in self.store)
self.assert_('bar' not in self.store)
def test_versioning(self):
self.store['a'] = tm.makeTimeSeries()
self.store['b'] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
self.assert_(self.store.root.a._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.b._v_attrs.pandas_version == '0.10')
self.assert_(self.store.root.df1._v_attrs.pandas_version == '0.10')
# write a file and wipe its versioning
self.store.remove('df2')
self.store.append('df2', df)
self.store.get_node('df2')._v_attrs.pandas_version = None
self.store.select('df2')
self.store.select('df2', [ Term('index','>',df.index[2]) ])
def test_meta(self):
raise nose.SkipTest('no meta')
meta = { 'foo' : [ 'I love pandas ' ] }
s = tm.makeTimeSeries()
s.meta = meta
self.store['a'] = s
self.assert_(self.store['a'].meta == meta)
df = tm.makeDataFrame()
df.meta = meta
self.store['b'] = df
self.assert_(self.store['b'].meta == meta)
        # this should work, but because slicing doesn't propagate meta it doesn't
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
results = self.store['df1']
#self.assert_(getattr(results,'meta',None) == meta)
# no meta
df = tm.makeDataFrame()
self.store['b'] = df
self.assert_(hasattr(self.store['b'],'meta') == False)
def test_reopen_handle(self):
self.store['a'] = tm.makeTimeSeries()
self.store.open('w', warn=False)
self.assert_(self.store.handle.isopen)
self.assertEquals(len(self.store), 0)
def test_flush(self):
self.store['a'] = tm.makeTimeSeries()
self.store.flush()
def test_get(self):
self.store['a'] = tm.makeTimeSeries()
left = self.store.get('a')
right = self.store['a']
tm.assert_series_equal(left, right)
left = self.store.get('/a')
right = self.store['/a']
tm.assert_series_equal(left, right)
self.assertRaises(KeyError, self.store.get, 'b')
def test_put(self):
ts = tm.makeTimeSeries()
df = tm.makeTimeDataFrame()
self.store['a'] = ts
self.store['b'] = df[:10]
self.store['foo/bar/bah'] = df[:10]
self.store['foo'] = df[:10]
self.store['/foo'] = df[:10]
self.store.put('c', df[:10], table=True)
# not OK, not a table
self.assertRaises(ValueError, self.store.put, 'b', df[10:], append=True)
# node does not currently exist, test _is_table_type returns False in
# this case
self.assertRaises(ValueError, self.store.put, 'f', df[10:], append=True)
# OK
self.store.put('c', df[10:], append=True)
# overwrite table
self.store.put('c', df[:10], table=True, append=False)
tm.assert_frame_equal(df[:10], self.store['c'])
def test_put_string_index(self):
index = Index([ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(20), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
# mixed length
index = Index(['abcdefghijklmnopqrstuvwxyz1234567890'] + [ "I am a very long string index: %s" % i for i in range(20) ])
s = Series(np.arange(21), index = index)
df = DataFrame({ 'A' : s, 'B' : s })
self.store['a'] = s
tm.assert_series_equal(self.store['a'], s)
self.store['b'] = df
tm.assert_frame_equal(self.store['b'], df)
def test_put_compression(self):
df = tm.makeTimeDataFrame()
self.store.put('c', df, table=True, compression='zlib')
tm.assert_frame_equal(self.store['c'], df)
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='zlib')
def test_put_compression_blosc(self):
tm.skip_if_no_package('tables', '2.2', app='blosc support')
df = tm.makeTimeDataFrame()
# can't compress if table=False
self.assertRaises(ValueError, self.store.put, 'b', df,
table=False, compression='blosc')
self.store.put('c', df, table=True, compression='blosc')
tm.assert_frame_equal(self.store['c'], df)
def test_put_integer(self):
# non-date, non-string index
df = DataFrame(np.random.randn(50, 100))
self._check_roundtrip(df, tm.assert_frame_equal)
def test_append(self):
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df[:10])
self.store.append('df1', df[10:])
tm.assert_frame_equal(self.store['df1'], df)
self.store.remove('df2')
self.store.put('df2', df[:10], table=True)
self.store.append('df2', df[10:])
tm.assert_frame_equal(self.store['df2'], df)
self.store.remove('df3')
self.store.append('/df3', df[:10])
self.store.append('/df3', df[10:])
tm.assert_frame_equal(self.store['df3'], df)
        # this is allowed, but you almost always don't want to do it
warnings.filterwarnings('ignore', category=tables.NaturalNameWarning)
self.store.remove('/df3 foo')
self.store.append('/df3 foo', df[:10])
self.store.append('/df3 foo', df[10:])
tm.assert_frame_equal(self.store['df3 foo'], df)
warnings.filterwarnings('always', category=tables.NaturalNameWarning)
# panel
wp = tm.makePanel()
self.store.remove('wp1')
self.store.append('wp1', wp.ix[:,:10,:])
self.store.append('wp1', wp.ix[:,10:,:])
tm.assert_panel_equal(self.store['wp1'], wp)
# ndim
p4d = tm.makePanel4D()
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:])
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
# test using axis labels
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=['items','major_axis','minor_axis'])
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
        # test using different number of items on each axis
p4d2 = p4d.copy()
p4d2['l4'] = p4d['l1']
p4d2['l5'] = p4d['l1']
self.store.remove('p4d2')
self.store.append('p4d2', p4d2, axes=['items','major_axis','minor_axis'])
tm.assert_panel4d_equal(self.store['p4d2'], p4d2)
        # test using different order of items on the non-index axes
self.store.remove('wp1')
wp_append1 = wp.ix[:,:10,:]
self.store.append('wp1', wp_append1)
wp_append2 = wp.ix[:,10:,:].reindex(items = wp.items[::-1])
self.store.append('wp1', wp_append2)
tm.assert_panel_equal(self.store['wp1'], wp)
def test_append_frame_column_oriented(self):
# column oriented
df = tm.makeTimeDataFrame()
self.store.remove('df1')
self.store.append('df1', df.ix[:,:2], axes = ['columns'])
self.store.append('df1', df.ix[:,2:])
tm.assert_frame_equal(self.store['df1'], df)
result = self.store.select('df1', 'columns=A')
expected = df.reindex(columns=['A'])
tm.assert_frame_equal(expected, result)
# this isn't supported
self.assertRaises(Exception, self.store.select, 'df1', ('columns=A', Term('index','>',df.index[4])))
# selection on the non-indexable
result = self.store.select('df1', ('columns=A', Term('index','=',df.index[0:4])))
expected = df.reindex(columns=['A'],index=df.index[0:4])
tm.assert_frame_equal(expected, result)
def test_ndim_indexables(self):
""" test using ndim tables in new ways"""
p4d = tm.makePanel4D()
def check_indexers(key, indexers):
for i,idx in enumerate(indexers):
self.assert_(getattr(getattr(self.store.root,key).table.description,idx)._v_pos == i)
# append then change (will take existing schema)
indexers = ['items','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
        # same as above, but try to append with different axes
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:], axes=['labels','items','major_axis'])
tm.assert_panel4d_equal(self.store.select('p4d'),p4d)
check_indexers('p4d',indexers)
# pass incorrect number of axes
self.store.remove('p4d')
self.assertRaises(Exception, self.store.append, 'p4d', p4d.ix[:,:,:10,:], axes=['major_axis','minor_axis'])
# different than default indexables #1
indexers = ['labels','major_axis','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# different than default indexables #2
indexers = ['major_axis','labels','minor_axis']
self.store.remove('p4d')
self.store.append('p4d', p4d.ix[:,:,:10,:], axes=indexers)
self.store.append('p4d', p4d.ix[:,:,10:,:])
tm.assert_panel4d_equal(self.store['p4d'], p4d)
check_indexers('p4d',indexers)
# partial selection
result = self.store.select('p4d',['labels=l1'])
expected = p4d.reindex(labels = ['l1'])
tm.assert_panel4d_equal(result, expected)
# partial selection2
result = self.store.select('p4d',[Term('labels=l1'), Term('items=ItemA'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = ['ItemA'], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
        # non-existent partial selection
result = self.store.select('p4d',[Term('labels=l1'), Term('items=Item1'), Term('minor_axis=B')])
expected = p4d.reindex(labels = ['l1'], items = [], minor_axis = ['B'])
tm.assert_panel4d_equal(result, expected)
def test_append_with_strings(self):
wp = tm.makePanel()
wp2 = wp.rename_axis(dict([ (x,"%s_extra" % x) for x in wp.minor_axis ]), axis = 2)
self.store.append('s1', wp, min_itemsize = 20)
self.store.append('s1', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s1'], expected)
# test dict format
self.store.append('s2', wp, min_itemsize = { 'minor_axis' : 20 })
self.store.append('s2', wp2)
expected = concat([ wp, wp2], axis = 2)
expected = expected.reindex(minor_axis = sorted(expected.minor_axis))
tm.assert_panel_equal(self.store['s2'], expected)
# apply the wrong field (similar to #1)
self.store.append('s3', wp, min_itemsize = { 'major_axis' : 20 })
self.assertRaises(Exception, self.store.append, 's3')
# test truncation of bigger strings
self.store.append('s4', wp)
self.assertRaises(Exception, self.store.append, 's4', wp2)
# avoid truncation on elements
df = DataFrame([[123,'asdqwerty'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big',df, min_itemsize = { 'values' : 1024 })
tm.assert_frame_equal(self.store.select('df_big'), df)
# appending smaller string ok
df2 = DataFrame([[124,'asdqy'], [346,'dggnhefbdfb']])
self.store.append('df_big',df2)
expected = concat([ df, df2 ])
tm.assert_frame_equal(self.store.select('df_big'), expected)
# avoid truncation on elements
df = DataFrame([[123,'as<PASSWORD>'], [345,'dggnhebbsdfbdfb']])
self.store.append('df_big2',df, min_itemsize = { 'values' : 10 })
tm.assert_frame_equal(self.store.select('df_big2'), df)
# bigger string on next append
self.store.append('df_new',df, min_itemsize = { 'values' : 16 })
df_new = DataFrame([[124,'abcdefqhij'], [346, 'abcdefghijklmnopqrtsuvwxyz']])
self.assertRaises(Exception, self.store.append, 'df_new',df_new)
def test_create_table_index(self):
wp = tm.makePanel()
self.store.append('p5', wp)
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.is_indexed == True)
assert(self.store.handle.root.p5.table.cols.minor_axis.is_indexed == False)
# default optlevels
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
# let's change the indexing scheme
self.store.create_table_index('p5')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 6)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', optlevel=9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'medium')
self.store.create_table_index('p5', kind='full')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 9)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'full')
self.store.create_table_index('p5', optlevel=1, kind='light')
assert(self.store.handle.root.p5.table.cols.major_axis.index.optlevel == 1)
assert(self.store.handle.root.p5.table.cols.major_axis.index.kind == 'light')
df = tm.makeTimeDataFrame()
self.store.append('f', df[:10])
self.store.append('f', df[10:])
self.store.create_table_index('f')
# try to index a non-table
self.store.put('f2', df)
self.assertRaises(Exception, self.store.create_table_index, 'f2')
# try to change the version supports flag
from pandas.io import pytables
pytables._table_supports_index = False
self.assertRaises(Exception, self.store.create_table_index, 'f')
# test out some versions
original = tables.__version__
for v in ['2.2','2.2b']:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.assertRaises(Exception, self.store.create_table_index, 'f')
for v in ['2.3.1','2.3.1b','2.4dev','2.4',original]:
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = v
self.store.create_table_index('f')
pytables._table_mod = None
pytables._table_supports_index = False
tables.__version__ = original
def test_big_table(self):
raise nose.SkipTest('no big table')
# create and write a big table
wp = Panel(np.random.randn(20, 1000, 1000), items= [ 'Item%s' % i for i in xrange(20) ],
major_axis=date_range('1/1/2000', periods=1000), minor_axis = [ 'E%s' % i for i in xrange(1000) ])
wp.ix[:,100:200,300:400] = np.nan
try:
store = HDFStore(self.scratchpath)
store._debug_memory = True
store.append('wp',wp)
recons = store.select('wp')
finally:
store.close()
os.remove(self.scratchpath)
def test_append_diff_item_order(self):
raise nose.SkipTest('append diff item order')
wp = tm.makePanel()
wp1 = wp.ix[:, :10, :]
wp2 = wp.ix[['ItemC', 'ItemB', 'ItemA'], 10:, :]
self.store.put('panel', wp1, table=True)
self.assertRaises(Exception, self.store.put, 'panel', wp2,
append=True)
def test_table_index_incompatible_dtypes(self):
df1 = DataFrame({'a': [1, 2, 3]})
df2 = DataFrame({'a': [4, 5, 6]},
index=date_range('1/1/2000', periods=3))
self.store.put('frame', df1, table=True)
self.assertRaises(Exception, self.store.put, 'frame', df2,
table=True, append=True)
def test_table_values_dtypes_roundtrip(self):
df1 = DataFrame({'a': [1, 2, 3]}, dtype = 'f8')
self.store.append('df1', df1)
assert df1.dtypes == self.store['df1'].dtypes
df2 = DataFrame({'a': [1, 2, 3]}, dtype = 'i8')
self.store.append('df2', df2)
assert df2.dtypes == self.store['df2'].dtypes
# incompatible dtype
self.assertRaises(Exception, self.store.append, 'df2', df1)
def test_table_mixed_dtypes(self):
# frame
def _make_one_df():
df = tm.makeDataFrame()
df['obj1'] = 'foo'
df['obj2'] = 'bar'
df['bool1'] = df['A'] > 0
df['bool2'] = df['B'] > 0
df['bool3'] = True
df['int1'] = 1
df['int2'] = 2
return df.consolidate()
df1 = _make_one_df()
self.store.append('df1_mixed', df1)
tm.assert_frame_equal(self.store.select('df1_mixed'), df1)
# panel
def _make_one_panel():
wp = tm.makePanel()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['ItemA'] > 0
wp['bool2'] = wp['ItemB'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p1 = _make_one_panel()
self.store.append('p1_mixed', p1)
tm.assert_panel_equal(self.store.select('p1_mixed'), p1)
# ndim
def _make_one_p4d():
wp = tm.makePanel4D()
wp['obj1'] = 'foo'
wp['obj2'] = 'bar'
wp['bool1'] = wp['l1'] > 0
wp['bool2'] = wp['l2'] > 0
wp['int1'] = 1
wp['int2'] = 2
return wp.consolidate()
p4d = _make_one_p4d()
self.store.append('p4d_mixed', p4d)
tm.assert_panel4d_equal(self.store.select('p4d_mixed'), p4d)
def test_remove(self):
ts = tm.makeTimeSeries()
df = tm.makeDataFrame()
self.store['a'] = ts
self.store['b'] = df
self.store.remove('a')
self.assertEquals(len(self.store), 1)
tm.assert_frame_equal(df, self.store['b'])
self.store.remove('b')
self.assertEquals(len(self.store), 0)
# pathing
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('foo')
self.store.remove('b/foo')
self.assertEquals(len(self.store), 1)
self.store['a'] = ts
self.store['b/foo'] = df
self.store.remove('b')
self.assertEquals(len(self.store), 1)
# __delitem__
self.store['a'] = ts
self.store['b'] = df
del self.store['a']
del self.store['b']
self.assertEquals(len(self.store), 0)
def test_remove_where(self):
        # non-existent node
crit1 = Term('index','>','foo')
self.store.remove('a', where=[crit1])
# try to remove non-table (with crit)
# non-table ok (where = None)
wp = tm.makePanel()
self.store.put('wp', wp, table=True)
self.store.remove('wp', [('minor_axis', ['A', 'D'])])
rs = self.store.select('wp')
expected = wp.reindex(minor_axis = ['B','C'])
tm.assert_panel_equal(rs,expected)
# empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
# deleted number (entire table)
n = self.store.remove('wp', [])
assert(n == 120)
# non - empty where
self.store.remove('wp')
self.store.put('wp', wp, table=True)
self.assertRaises(Exception, self.store.remove,
'wp', ['foo'])
        # selecting non-table with a where
#self.store.put('wp2', wp, table=False)
#self.assertRaises(Exception, self.store.remove,
# 'wp2', [('column', ['A', 'D'])])
def test_remove_crit(self):
wp = tm.makePanel()
# group row removal
date4 = wp.major_axis.take([ 0,1,2,4,5,6,8,9,10 ])
crit4 = Term('major_axis',date4)
self.store.put('wp3', wp, table=True)
n = self.store.remove('wp3', where=[crit4])
assert(n == 36)
result = self.store.select('wp3')
expected = wp.reindex(major_axis = wp.major_axis-date4)
tm.assert_panel_equal(result, expected)
# upper half
self.store.put('wp', wp, table=True)
date = wp.major_axis[len(wp.major_axis) // 2]
crit1 = Term('major_axis','>',date)
crit2 = Term('minor_axis',['A', 'D'])
n = self.store.remove('wp', where=[crit1])
assert(n == 56)
n = self.store.remove('wp', where=[crit2])
assert(n == 32)
result = self.store['wp']
expected = wp.truncate(after=date).reindex(minor=['B', 'C'])
tm.assert_panel_equal(result, expected)
# individual row elements
self.store.put('wp2', wp, table=True)
date1 = wp.major_axis[1:3]
crit1 = Term('major_axis',date1)
self.store.remove('wp2', where=[crit1])
result = self.store.select('wp2')
expected = wp.reindex(major_axis=wp.major_axis-date1)
tm.assert_panel_equal(result, expected)
date2 = wp.major_axis[5]
crit2 = Term('major_axis',date2)
self.store.remove('wp2', where=[crit2])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2]))
tm.assert_panel_equal(result, expected)
date3 = [wp.major_axis[7],wp.major_axis[9]]
crit3 = Term('major_axis',date3)
self.store.remove('wp2', where=[crit3])
result = self.store['wp2']
expected = wp.reindex(major_axis=wp.major_axis-date1-Index([date2])-Index(date3))
tm.assert_panel_equal(result, expected)
# corners
self.store.put('wp4', wp, table=True)
n = self.store.remove('wp4', where=[Term('major_axis','>',wp.major_axis[-1])])
result = self.store.select('wp4')
tm.assert_panel_equal(result, wp)
def test_terms(self):
wp = tm.makePanel()
p4d = tm.makePanel4D()
self.store.put('wp', wp, table=True)
self.store.put('p4d', p4d, table=True)
# some invalid terms
terms = [
[ 'minor', ['A','B'] ],
[ 'index', ['20121114'] ],
[ 'index', ['20121114', '20121114'] ],
]
for t in terms:
self.assertRaises(Exception, self.store.select, 'wp', t)
self.assertRaises(Exception, Term.__init__)
self.assertRaises(Exception, Term.__init__, 'blah')
self.assertRaises(Exception, Term.__init__, 'index')
self.assertRaises(Exception, Term.__init__, 'index', '==')
self.assertRaises(Exception, Term.__init__, 'index', '>', 5)
# panel
result = self.store.select('wp',[ | Term('major_axis<20000108') | pandas.io.pytables.Term |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import math
from sklearn.preprocessing import MinMaxScaler
from sklearn.cluster import DBSCAN
class ClusterBound:
def __init__(self, x1, y1, w, h):
self.x1 = x1
self.x2 = x1 + w
self.y1 = y1
self.y2 = y1 + h
def contains(self, xp, yp):
return self.x1 <= xp <= self.x2 and self.y1 <= yp <= self.y2
class ManualClusterModel():
def __init__(self, cluster_bounds):
self.cluster_bounds = cluster_bounds
def fit(self, X):
def find_cluster(x, cluster_bounds):
for i, c in enumerate(cluster_bounds):
if c.contains(x[0], x[1]):
return i
return -1
self.labels_ = X.apply(lambda x: find_cluster(x, self.cluster_bounds), axis=1)
MirageCalloutClusteringModel = ManualClusterModel([
ClusterBound(162, 169, 64, 65), # van
ClusterBound(227, 173, 32, 41), # b plat
ClusterBound(259, 173, 89, 40), # b front apt
ClusterBound(112, 231, 49, 93), # bench
ClusterBound(162, 214, 167, 41), # b default out of site
ClusterBound(203, 254, 68, 75), # b site
ClusterBound(170, 395, 32, 90), # kitchen door
ClusterBound(207, 396, 133, 90), # kitchen
ClusterBound(342, 234, 54, 46), # side cat
ClusterBound(342, 280, 160, 45), # cat site
ClusterBound(430, 328, 28, 119), # underpass
ClusterBound(463, 409, 218, 38), # cat
ClusterBound(396, 435, 32, 62), # window
ClusterBound(433, 446, 60, 59), # bottom mid
ClusterBound(495, 448, 59, 56), # mid mid
ClusterBound(556, 447, 131, 56), # top mid
ClusterBound(682, 313, 69, 124), # top top mid
ClusterBound(712, 440, 39, 59), # boxes
ClusterBound(383, 571, 84, 79), # jungle
ClusterBound(482, 508, 65, 91), # connector
ClusterBound(573, 504, 179, 28), # mid chair
ClusterBound(469, 601, 66, 54), # connector by stairs
ClusterBound(538, 601, 29, 69), # stairs
ClusterBound(643, 696, 42, 86), # palace deck/shadow
ClusterBound(382, 498, 45, 71), # mid window hidden
ClusterBound(648, 783, 50, 40), # front palace
ClusterBound(441, 827, 43, 49), # ticket booth
ClusterBound(319, 772, 149, 56), # ct
ClusterBound(164, 332, 175, 60), # b market side
ClusterBound(692, 627, 127, 57), # A ramp
ClusterBound(568, 646, 30, 20), # sandwich
ClusterBound(617, 624, 37, 29), # tetris
ClusterBound(480, 741, 42, 47), # triple box
ClusterBound(579, 791, 51, 35), # firebox
ClusterBound(521, 737, 93, 51), # front a site
ClusterBound(479, 671, 158, 65), # open a site
ClusterBound(463, 329, 52, 79) # b short
])
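# Illustrative check (added; the two sample points are made up): ManualClusterModel.fit takes a
# two-column frame of x/y image coordinates and stores one label per row in .labels_, using -1 for
# points that fall outside every ClusterBound.
_demo_pts = pd.DataFrame({'pos_x': [180.0, 5.0], 'pos_y': [200.0, 5.0]})
MirageCalloutClusteringModel.fit(_demo_pts)
# -> labels_ is [0, -1]: the first point sits in ClusterBound(162, 169, 64, 65) ("van"), the second in none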
#Convert map coordinates to image coordinates, from <NAME>'s analysis
def pointx_to_resolutionx(xinput,startX=-3217,endX=1912,resX=1024):
sizeX = endX - startX
if startX < 0:
xinput += startX * (-1.0)
else:
xinput += startX
    xoutput = float((xinput / abs(sizeX)) * resX)
return xoutput
def pointy_to_resolutiony(yinput,startY=-3401,endY=1682,resY=1024):
sizeY=endY-startY
if startY < 0:
yinput += startY *(-1.0)
else:
yinput += startY
    youtput = float((yinput / abs(sizeY)) * resY)
return resY-youtput
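# Worked example (added for illustration): with the default extents above, the in-game origin (0, 0)
# lands a little right of and below the centre of a 1024x1024 radar image:
#   x: (0 + 3217) / 5129 * 1024 ≈ 642.3      y: 1024 - (0 + 3401) / 5083 * 1024 ≈ 338.8
assert abs(pointx_to_resolutionx(0.0) - 642.3) < 0.5
assert abs(pointy_to_resolutiony(0.0) - 338.8) < 0.5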
def cluster_positions(firefight_df, cluster_map, verbose=False, scale=True):
"""
Clusters the dataframe spatially into common positions by type of position, map, and team. Clusters DMG_VIC and DMG_ATT together.
Input:
        firefight_df: result of DataLoader.load_firefight_df, with columns ['file_round', 'seconds', 'pos_x', 'pos_y', 'hp_dmg']
        cluster_map: a mapping from each pos_type (plus a 'DMG' key for the combined DMG_ATT/DMG_VIC points) to the clustering model to fit for that type
    Output:
        the input firefight_df, with new column ['pos_cluster']
"""
min_max_scaler = MinMaxScaler()
cluster_df = firefight_df.copy()
if scale:
cluster_df[["pos_x", "pos_y"]] = min_max_scaler.fit_transform(cluster_df[["pos_x", "pos_y"]])
cluster_df['pos_cluster'] = None
for map_name in cluster_df['map'].unique():
for team in cluster_df['att_side'].unique():
# Cluster nade positions
for pos_type in [t for t in cluster_df['pos_type'].unique() if t not in ['DMG_VIC', 'DMG_ATT']]:
mask = (cluster_df['map'] == map_name) & (cluster_df['pos_type'] == pos_type) & (cluster_df['att_side'] == team)
group = cluster_df[mask]
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = cluster_map[pos_type]
#cluster_model = DBSCAN(eps=0.05, min_samples=min_samples)
pts = pd.concat([group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
firefight_df.loc[mask, 'pos_cluster'] = cluster_model.labels_
if verbose:
print(f"{team}, {pos_type}, {map_name}: {np.unique(cluster_model.labels_, return_counts=True)}")
# Cluster attack/victim positions
print(cluster_df['pos_type'].unique())
mask = ((cluster_df['pos_type'] == 'DMG_VIC') | (cluster_df['pos_type'] == 'DMG_ATT')) & (cluster_df['att_side'] == team) & (cluster_df['map'] == map_name)
group = cluster_df[mask]
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = cluster_map['DMG']
#cluster_model = DBSCAN(eps=0.05, min_samples=min_samples)
pts = pd.concat([group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
firefight_df.loc[mask, 'pos_cluster'] = cluster_model.labels_
if verbose:
print(f"{team}, DMG, {map_name}: {np.unique(cluster_model.labels_, return_counts=True)}")
return firefight_df
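# Usage sketch (added): the notebook's real cluster_map is not shown here, so the keys below are
# placeholders. cluster_positions expects one clustering model per pos_type plus a 'DMG' entry for
# the combined DMG_ATT/DMG_VIC points; the manual callout model works on raw image coordinates, so
# it should be paired with scale=False.
#
# example_cluster_map = {
#     'SMOKE': DBSCAN(eps=0.05, min_samples=6),   # eps taken from the commented-out default above
#     'DMG': MirageCalloutClusteringModel,
# }
# firefight_df = cluster_positions(firefight_df, example_cluster_map, verbose=True, scale=False)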
def cluster_firefights(firefight_df, eps=0.08, min_samples=6, n_seconds_equiv_to_quarter_map=20, verbose=False, return_scaled=False):
"""
Clusters the dataframe spatio-temporally into "firefights" - groups of points within a round that
are within a similar space and time. Also calculates the net damage taken by either team within each
firefight.
Input:
        firefight_df: result of DataLoader.load_firefight_df, with columns ['file_round', 'seconds', 'pos_x', 'pos_y', 'hp_dmg']
eps: the eps to use for DBSCAN
min_samples: the min_samples to use for DBSCAN
n_seconds_equiv_to_quarter_map: The number of seconds considered equivalent to a quarter of the map when clustering.
Output:
        the input firefight_df, with new columns ['firefight_cluster', 'firefight_net_t_dmg', 'firefight_net_ct_dmg']
"""
max_round_length = firefight_df['seconds'].max()
min_max_scaler = MinMaxScaler()
cluster_df = firefight_df.copy()
cluster_df[["seconds", "pos_x", "pos_y"]] = min_max_scaler.fit_transform(cluster_df[["seconds", "pos_x", "pos_y"]])
# scale time so that 20 seconds is roughly equivalent to one quarter of the map
cluster_df['seconds'] *= (max_round_length/n_seconds_equiv_to_quarter_map) * (1/4)
# cluster firefights spatio-temporally
firefight_df['firefight_cluster'] = None
cluster_df['firefight_cluster'] = None
firefight_df['firefight_net_t_dmg'] = None
firefight_df['firefight_net_ct_dmg'] = None
num_filerounds = len(cluster_df['file_round'].unique())
for i, (name, group) in enumerate(cluster_df.groupby('file_round')):
# https://medium.com/@tarammullin/dbscan-parameter-estimation-ff8330e3a3bd
cluster_model = DBSCAN(eps=eps, min_samples=min_samples)
pts = pd.concat([group['seconds'], group['pos_x'], group['pos_y']], axis=1)
cluster_model.fit(pts)
cluster_df.loc[(firefight_df['file_round'] == name), 'firefight_cluster'] = cluster_model.labels_
firefight_df.loc[(firefight_df['file_round'] == name), 'firefight_cluster'] = cluster_model.labels_
if verbose:
print(f"{i}/{num_filerounds}, {name}: {np.unique(cluster_model.labels_)}")
# Find net damage for each firefight
for name, group in cluster_df.groupby(['file_round', 'firefight_cluster']):
ct_att_pts = group[(group['pos_type'] == 'DMG_VIC') & (group['att_side'] == 'CounterTerrorist')]
t_att_pts = group[(group['pos_type'] == 'DMG_VIC') & (group['att_side'] == 'Terrorist')]
t_net_dmg = np.sum(t_att_pts['hp_dmg'])
ct_net_dmg = np.sum(ct_att_pts['hp_dmg'])
mask = (firefight_df['file_round'] == name[0]) & (firefight_df['firefight_cluster'] == name[1])
firefight_df.loc[mask, 'firefight_net_t_dmg'] = t_net_dmg
firefight_df.loc[mask, 'firefight_net_ct_dmg'] = ct_net_dmg
cluster_df.loc[mask, 'firefight_net_t_dmg'] = t_net_dmg
cluster_df.loc[mask, 'firefight_net_ct_dmg'] = ct_net_dmg
if verbose:
print(f"{name}: t_dmg={t_net_dmg}, ct_dmg={ct_net_dmg}")
if return_scaled:
return cluster_df
return firefight_df
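# Usage sketch (added): firefight_df stands for the output of DataLoader.load_firefight_df, which is
# not shown in this file. A single call labels the spatio-temporal clusters and attaches the
# per-firefight net damage columns described in the docstring:
#
# firefight_df = cluster_firefights(firefight_df, eps=0.08, min_samples=6, verbose=True)
# firefight_df[['file_round', 'firefight_cluster', 'firefight_net_t_dmg', 'firefight_net_ct_dmg']].head()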
class DataLoader:
def __init__(self, use_data_pt2=False):
self.use_data_pt2 = use_data_pt2
def load_map_df(self):
map_df = pd.read_csv('../data/map_data.csv')
map_df = map_df.rename( columns={'Unnamed: 0':'map_name'}).set_index('map_name')
return map_df
def load_meta_df(self):
meta_df = pd.read_csv('../data/esea_meta_demos.part1.csv')
if self.use_data_pt2:
meta_df = meta_df.append(pd.read_csv('../data/esea_meta_demos.part2.csv'))
meta_df = meta_df[['file', 'map', 'round', 'start_seconds', 'winner_side', 'round_type', 'ct_eq_val', 't_eq_val']]
return meta_df
def load_dmg_df(self, nrows=None, scale_to_map=True, dropna=True, map_name=None):
dmg_df = pd.read_csv('../data/esea_master_dmg_demos.part1.csv', nrows=nrows)
if self.use_data_pt2:
            dmg_df = dmg_df.append(pd.read_csv('../data/esea_master_dmg_demos.part2.csv', nrows=None if nrows is None else nrows - len(dmg_df)))
dmg_df = dmg_df[['file', 'round', 'seconds', 'att_side', 'vic_side', 'is_bomb_planted', 'bomb_site', 'hp_dmg', 'arm_dmg', 'hitbox', 'wp', 'wp_type', 'att_id', 'vic_id', 'att_pos_x', 'att_pos_y', 'vic_pos_x', 'vic_pos_y']]
meta_df = self.load_meta_df()
dmg_df = pd.merge(dmg_df, meta_df, how='left', left_on=['file','round'], right_on = ['file','round'])
if map_name is not None:
dmg_df = dmg_df[dmg_df['map'] == map_name]
dmg_df['seconds'] -= dmg_df['start_seconds']
dmg_df = dmg_df.drop(columns=['start_seconds'])
if scale_to_map:
map_df = self.load_map_df()
for map_info in map_df.iterrows():
map_name = map_info[0]
map_data = map_info[1]
mask = (dmg_df['map'] == map_name)
map_df = dmg_df[mask]
dmg_df.loc[mask, 'att_pos_y'] = map_df['att_pos_y'].apply(pointy_to_resolutiony, args=(map_data['StartY'], map_data['EndY'], map_data['ResY']))
dmg_df.loc[mask, 'att_pos_x'] = map_df['att_pos_x'].apply(pointx_to_resolutionx, args=(map_data['StartX'], map_data['EndX'], map_data['ResX']))
dmg_df.loc[mask, 'vic_pos_y'] = map_df['vic_pos_y'].apply(pointy_to_resolutiony, args=(map_data['StartY'], map_data['EndY'], map_data['ResY']))
dmg_df.loc[mask, 'vic_pos_x'] = map_df['vic_pos_x'].apply(pointx_to_resolutionx, args=(map_data['StartX'], map_data['EndX'], map_data['ResX']))
if dropna:
dmg_df = dmg_df.dropna()
return dmg_df
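    # Usage sketch (added; the map name and row limit are placeholder arguments):
    # loader = DataLoader(use_data_pt2=False)
    # dmg_df = loader.load_dmg_df(nrows=100000, scale_to_map=True, map_name='de_mirage')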
def load_kill_df(self, nrows=None):
        kill_df = pd.read_csv('../data/esea_master_kills_demos.part1.csv', nrows=nrows)
if self.use_data_pt2:
            kill_df = kill_df.append(pd.read_csv('../data/esea_master_kills_demos.part2.csv'))
kill_df = kill_df[['file', 'round', 'seconds', 'att_side', 'vic_side', 'is_bomb_planted', 'wp', 'wp_type']]
meta_df = self.load_meta_df()
kill_df = | pd.merge(kill_df, meta_df, how='left', left_on=['file','round'], right_on = ['file','round']) | pandas.merge |
import numpy as np
import pandas as pd
from numba import njit, typeof
from numba.typed import List
from datetime import datetime, timedelta
import pytest
from copy import deepcopy
import vectorbt as vbt
from vectorbt.portfolio.enums import *
from vectorbt.generic.enums import drawdown_dt
from vectorbt.utils.random_ import set_seed
from vectorbt.portfolio import nb
from tests.utils import record_arrays_close
seed = 42
day_dt = np.timedelta64(86400000000000)
price = pd.Series([1., 2., 3., 4., 5.], index=pd.Index([
datetime(2020, 1, 1),
datetime(2020, 1, 2),
datetime(2020, 1, 3),
datetime(2020, 1, 4),
datetime(2020, 1, 5)
]))
price_wide = price.vbt.tile(3, keys=['a', 'b', 'c'])
big_price = pd.DataFrame(np.random.uniform(size=(1000,)))
big_price.index = [datetime(2018, 1, 1) + timedelta(days=i) for i in range(1000)]
big_price_wide = big_price.vbt.tile(1000)
# ############# Global ############# #
def setup_module():
vbt.settings.numba['check_func_suffix'] = True
vbt.settings.portfolio['attach_call_seq'] = True
vbt.settings.caching.enabled = False
vbt.settings.caching.whitelist = []
vbt.settings.caching.blacklist = []
def teardown_module():
vbt.settings.reset()
# ############# nb ############# #
def assert_same_tuple(tup1, tup2):
for i in range(len(tup1)):
assert tup1[i] == tup2[i] or np.isnan(tup1[i]) and np.isnan(tup2[i])
def test_execute_order_nb():
# Errors, ignored and rejected orders
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(-100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.nan, 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.inf, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., np.nan, 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., np.nan, 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., -10., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., np.nan, 10., 1100., 0, 0),
nb.order_nb(10, 10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, size_type=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=-2))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=20))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., -100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, -10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, slippage=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=np.inf))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=0))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, max_size=-10))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=np.nan))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=-1))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., np.nan, 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=3))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., -10., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=4))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.inf, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., -10., 1100, 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., np.nan, 1100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=2))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=200.0, position=-20.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 10., 0., 100., 10., 1100., 0, 0),
nb.order_nb(0, 10))
assert exec_state == ExecuteOrderState(cash=100.0, position=10.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=1, status_info=5))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(15, 10, max_size=10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=9))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, reject_prob=1.))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=10))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 100., 0., 0., 10., 1100., 0, 0),
nb.order_nb(10, 10, direction=Direction.Both))
assert exec_state == ExecuteOrderState(cash=0.0, position=100.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=7))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100, 0., np.inf, np.nan, 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.LongOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.ShortOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.ShortOnly))
with pytest.raises(Exception):
_ = nb.execute_order_nb(
ProcessOrderState(np.inf, 100., 0., np.inf, 10., 1100., 0, 0),
nb.order_nb(-np.inf, 10, direction=Direction.Both))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, direction=Direction.LongOnly))
assert exec_state == ExecuteOrderState(cash=100.0, position=0.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=8))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, fixed_fees=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(100, 10, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, min_size=100))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=12))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-200, 10, direction=Direction.LongOnly, allow_partial=False))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=13))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 100., 0., 100., 10., 1100., 0, 0),
nb.order_nb(-10, 10, fixed_fees=1000))
assert exec_state == ExecuteOrderState(cash=100.0, position=100.0, debt=0.0, free_cash=100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=11))
# Calculations
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=0.0, position=8.18181818181818, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.18181818181818, price=11.0, fees=10.000000000000014, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=180.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, fees=0.1, fixed_fees=1, slippage=0.1))
assert exec_state == ExecuteOrderState(cash=909.0, position=-100.0, debt=900.0, free_cash=-891.0)
assert_same_tuple(order_result, OrderResult(
size=100.0, price=9.0, fees=91.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-10, 10, size_type=SizeType.TargetAmount))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.Value))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-100, 10, size_type=SizeType.TargetValue))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.TargetPercent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=7.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=125.0, position=-2.5, debt=25.0, free_cash=75.0)
assert_same_tuple(order_result, OrderResult(
size=7.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-2.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=25.0, position=-2.5, debt=0.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-0.5, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=75.0, position=-7.5, debt=25.0, free_cash=25.0)
assert_same_tuple(order_result, OrderResult(
size=2.5, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(50., -5., 0., 50., 10., 100., 0, 0),
nb.order_nb(-1, 10, size_type=SizeType.Percent))
assert exec_state == ExecuteOrderState(cash=100.0, position=-10.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=10.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 0., 100., 10., 100., 0, 0),
nb.order_nb(np.inf, 10))
assert exec_state == ExecuteOrderState(cash=0.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=200.0, position=-10.0, debt=100.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10., price=10.0, fees=0., side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(150., -5., 0., 150., 10., 100., 0, 0),
nb.order_nb(-np.inf, 10))
assert exec_state == ExecuteOrderState(cash=300.0, position=-20.0, debt=150.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., 0., 0., 50., 10., 100., 0, 0),
nb.order_nb(10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=50.0, position=5.0, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=5.0, price=10.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(1000., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 17.5, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=850.0, position=3.571428571428571, debt=0.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=8.571428571428571, price=17.5, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(100., -5., 50., 50., 10., 100., 0, 0),
nb.order_nb(10, 100, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=37.5, position=-4.375, debt=43.75, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=0.625, price=100.0, fees=0.0, side=0, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 10., 0., -50., 10., 100., 0, 0),
nb.order_nb(-20, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=150.0, position=-5.0, debt=50.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=15.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 1., 0., -50., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=10.0, position=0.0, debt=0.0, free_cash=-40.0)
assert_same_tuple(order_result, OrderResult(
size=1.0, price=10.0, fees=0.0, side=1, status=0, status_info=-1))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., -100., 10., 100., 0, 0),
nb.order_nb(-10, 10, lock_cash=True))
assert exec_state == ExecuteOrderState(cash=0.0, position=0.0, debt=0.0, free_cash=-100.0)
assert_same_tuple(order_result, OrderResult(
size=np.nan, price=np.nan, fees=np.nan, side=-1, status=2, status_info=6))
exec_state, order_result = nb.execute_order_nb(
ProcessOrderState(0., 0., 0., 100., 10., 100., 0, 0),
nb.order_nb(-20, 10, fees=0.1, slippage=0.1, fixed_fees=1., lock_cash=True))
assert exec_state == ExecuteOrderState(cash=80.0, position=-10.0, debt=90.0, free_cash=0.0)
assert_same_tuple(order_result, OrderResult(
size=10.0, price=9.0, fees=10.0, side=1, status=0, status_info=-1))
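# The Numba call-sequence builder must match the pure-Python reference for every CallSeqType, including the seeded random variant.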
def test_build_call_seq_nb():
group_lens = np.array([1, 2, 3, 4])
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Default),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Default)
)
np.testing.assert_array_equal(
nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Reversed),
nb.build_call_seq((10, 10), group_lens, CallSeqType.Reversed)
)
set_seed(seed)
out1 = nb.build_call_seq_nb((10, 10), group_lens, CallSeqType.Random)
set_seed(seed)
out2 = nb.build_call_seq((10, 10), group_lens, CallSeqType.Random)
np.testing.assert_array_equal(out1, out2)
# ############# from_orders ############# #
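# Shared fixtures for the from_orders tests: per-bar order sizes plus thin wrappers that fix the direction argument.
# `price` (defined earlier in the file) is assumed to be a 5-bar close series 1..5 indexed 2020-01-01 through 2020-01-05.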
order_size = pd.Series([np.inf, -np.inf, np.nan, np.inf, -np.inf], index=price.index)
order_size_wide = order_size.vbt.tile(3, keys=['a', 'b', 'c'])
order_size_one = pd.Series([1, -1, np.nan, 1, -1], index=price.index)
def from_orders_both(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='both', **kwargs)
def from_orders_longonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='longonly', **kwargs)
def from_orders_shortonly(close=price, size=order_size, **kwargs):
return vbt.Portfolio.from_orders(close, size, direction='shortonly', **kwargs)
class TestFromOrders:
def test_one_column(self):
record_arrays_close(
from_orders_both().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly().order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
def test_multiple_columns(self):
record_arrays_close(
from_orders_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1), (8, 2, 0, 100.0, 1.0, 0.0, 0),
(9, 2, 1, 100.0, 2.0, 0.0, 1), (10, 2, 3, 50.0, 4.0, 0.0, 0), (11, 2, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0), (4, 2, 0, 100.0, 1.0, 0.0, 1), (5, 2, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
pf = from_orders_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
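    # A signed infinite size orders as much as the chosen direction allows on the first bar.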
def test_size_inf(self):
record_arrays_close(
from_orders_both(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[np.inf, -np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
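    # price=np.inf fills at the current close, while price=-np.inf fills at the previous close (no open is supplied here), so the first bar produces no order.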
def test_price(self):
record_arrays_close(
from_orders_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 198.01980198019803, 2.02, 0.0, 1),
(2, 0, 3, 99.00990099009901, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 1),
(2, 0, 3, 49.504950495049506, 4.04, 0.0, 0), (3, 0, 4, 49.504950495049506, 5.05, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 1, 99.00990099009901, 2.02, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1),
(2, 0, 3, 50.0, 4.0, 0.0, 0), (3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 0), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 3, 33.333333333333336, 3.0, 0.0, 1), (1, 0, 4, 33.333333333333336, 4.0, 0.0, 0)
], dtype=order_dt)
)
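    # val_price=np.inf should be equivalent to valuing at the close itself, and -np.inf to the previous (forward-filled) close; ffill_val_price=False keeps the NaNs.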
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value').order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_orders_both(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_both(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_longonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_longonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_orders_shortonly(close=price_nan, size=order_size_one, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.2, 1),
(6, 1, 3, 1.0, 4.0, 0.4, 0), (7, 1, 4, 1.0, 5.0, 0.5, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 2.0, 1), (10, 2, 3, 1.0, 4.0, 4.0, 0), (11, 2, 4, 1.0, 5.0, 5.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.2, 0),
(6, 1, 3, 1.0, 4.0, 0.4, 1), (7, 1, 4, 1.0, 5.0, 0.5, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 2.0, 0), (10, 2, 3, 1.0, 4.0, 4.0, 1), (11, 2, 4, 1.0, 5.0, 5.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_orders_both(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.1, 0), (5, 1, 1, 1.0, 2.0, 0.1, 1),
(6, 1, 3, 1.0, 4.0, 0.1, 0), (7, 1, 4, 1.0, 5.0, 0.1, 1), (8, 2, 0, 1.0, 1.0, 1.0, 0),
(9, 2, 1, 1.0, 2.0, 1.0, 1), (10, 2, 3, 1.0, 4.0, 1.0, 0), (11, 2, 4, 1.0, 5.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.1, 1), (5, 1, 1, 1.0, 2.0, 0.1, 0),
(6, 1, 3, 1.0, 4.0, 0.1, 1), (7, 1, 4, 1.0, 5.0, 0.1, 0), (8, 2, 0, 1.0, 1.0, 1.0, 1),
(9, 2, 1, 1.0, 2.0, 1.0, 0), (10, 2, 3, 1.0, 4.0, 1.0, 1), (11, 2, 4, 1.0, 5.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_orders_both(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.1, 0.0, 0), (5, 1, 1, 1.0, 1.8, 0.0, 1),
(6, 1, 3, 1.0, 4.4, 0.0, 0), (7, 1, 4, 1.0, 4.5, 0.0, 1), (8, 2, 0, 1.0, 2.0, 0.0, 0),
(9, 2, 1, 1.0, 0.0, 0.0, 1), (10, 2, 3, 1.0, 8.0, 0.0, 0), (11, 2, 4, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 0.9, 0.0, 1), (5, 1, 1, 1.0, 2.2, 0.0, 0),
(6, 1, 3, 1.0, 3.6, 0.0, 1), (7, 1, 4, 1.0, 5.5, 0.0, 0), (8, 2, 0, 1.0, 0.0, 0.0, 1),
(9, 2, 1, 1.0, 4.0, 0.0, 0), (10, 2, 3, 1.0, 0.0, 0.0, 1), (11, 2, 4, 1.0, 10.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_orders_both(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1), (2, 0, 3, 0.5, 4.0, 0.0, 0),
(3, 0, 4, 0.5, 5.0, 0.0, 1), (4, 1, 0, 1.0, 1.0, 0.0, 0), (5, 1, 1, 1.0, 2.0, 0.0, 1),
(6, 1, 3, 1.0, 4.0, 0.0, 0), (7, 1, 4, 1.0, 5.0, 0.0, 1), (8, 2, 0, 1.0, 1.0, 0.0, 0),
(9, 2, 1, 1.0, 2.0, 0.0, 1), (10, 2, 3, 1.0, 4.0, 0.0, 0), (11, 2, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0), (2, 0, 3, 0.5, 4.0, 0.0, 1),
(3, 0, 4, 0.5, 5.0, 0.0, 0), (4, 1, 0, 1.0, 1.0, 0.0, 1), (5, 1, 1, 1.0, 2.0, 0.0, 0),
(6, 1, 3, 1.0, 4.0, 0.0, 1), (7, 1, 4, 1.0, 5.0, 0.0, 0), (8, 2, 0, 1.0, 1.0, 0.0, 1),
(9, 2, 1, 1.0, 2.0, 0.0, 0), (10, 2, 3, 1.0, 4.0, 0.0, 1), (11, 2, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_orders_both(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 1, 1.0, 2.0, 0.0, 1), (5, 1, 3, 1.0, 4.0, 0.0, 0),
(6, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 1.0, 2.0, 0.0, 1), (2, 0, 3, 1.0, 4.0, 0.0, 0),
(3, 0, 4, 1.0, 5.0, 0.0, 1), (4, 1, 3, 1.0, 4.0, 0.0, 0), (5, 1, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 1.0, 2.0, 0.0, 0), (2, 0, 3, 1.0, 4.0, 0.0, 1),
(3, 0, 4, 1.0, 5.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_lock_cash(self):
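        # Two columns share one capital: short 25 in each, then buy with infinite size.
        # Without lock_cash the proceeds of the shorts are spent as well, so free cash ends negative.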
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[143.12812469365747, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-49.5, -49.5]
])
)
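        # Same setup with lock_cash=True: buying is capped at the free cash, which therefore ends at zero.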
pf = vbt.Portfolio.from_orders(
pd.Series([1, 1]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[94.6034702480149, 47.54435839623566]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[49.5, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
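        # Same trade but the close jumps to 100 on the second bar; without lock_cash free cash goes deeply negative.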
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=False, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[1.4312812469365748, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[0.0, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[-96.16606313106556, -96.16606313106556]
])
)
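        # With lock_cash=True the jump only consumes the free cash, which again ends at zero.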
pf = vbt.Portfolio.from_orders(
pd.Series([1, 100]),
pd.DataFrame([[-25, -25], [np.inf, np.inf]]),
group_by=True, cash_sharing=True,
lock_cash=True, fees=0.01, fixed_fees=1., slippage=0.01)
np.testing.assert_array_equal(
pf.asset_flow().values,
np.array([
[-25.0, -25.0],
[0.4699090272918124, 0.0]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True).values,
np.array([
[123.5025, 147.005],
[98.06958012596222, 98.06958012596222]
])
)
np.testing.assert_array_equal(
pf.cash(group_by=False, in_sim_order=True, free=True).values,
np.array([
[74.0025, 48.004999999999995],
[0.0, 0.0]
])
)
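        # Column-wise comparison of lock_cash=False (left) vs lock_cash=True (right) using deliberately oversized orders.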
pf = from_orders_both(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 1000., 2., 0., 1),
(2, 0, 3, 500., 4., 0., 0), (3, 0, 4, 1000., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 200., 2., 0., 1),
(6, 1, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-1600.0, 0.0],
[-6600.0, 0.0]
])
)
pf = from_orders_longonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 100., 2., 0., 1),
(2, 0, 3, 50., 4., 0., 0), (3, 0, 4, 50., 5., 0., 1),
(4, 1, 0, 100., 1., 0., 0), (5, 1, 1, 100., 2., 0., 1),
(6, 1, 3, 50., 4., 0., 0), (7, 1, 4, 50., 5., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[0.0, 0.0],
[200.0, 200.0],
[200.0, 200.0],
[0.0, 0.0],
[250.0, 250.0]
])
)
pf = from_orders_shortonly(size=order_size_one * 1000, lock_cash=[[False, True]])
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 1000., 1., 0., 1), (1, 0, 1, 550., 2., 0., 0),
(2, 0, 3, 1000., 4., 0., 1), (3, 0, 4, 800., 5., 0., 0),
(4, 1, 0, 100., 1., 0., 1), (5, 1, 1, 100., 2., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.cash(free=True).values,
np.array([
[-900.0, 0.0],
[-900.0, 0.0],
[-900.0, 0.0],
[-4900.0, 0.0],
[-3989.6551724137926, 0.0]
])
)
def test_allow_partial(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1), (4, 1, 1, 1000.0, 2.0, 0.0, 1), (5, 1, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0), (4, 1, 0, 1000.0, 1.0, 0.0, 1), (5, 1, 3, 1000.0, 4.0, 0.0, 1),
(6, 1, 4, 1000.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1), (4, 1, 0, 100.0, 1.0, 0.0, 0), (5, 1, 1, 100.0, 2.0, 0.0, 1),
(6, 1, 3, 50.0, 4.0, 0.0, 0), (7, 1, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 1, 100.0, 2.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1),
(3, 1, 1, 100.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_orders_both(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 1000.0, 2.0, 0.0, 1), (2, 0, 3, 500.0, 4.0, 0.0, 0),
(3, 0, 4, 1000.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 100.0, 2.0, 0.0, 1), (2, 0, 3, 50.0, 4.0, 0.0, 0),
(3, 0, 4, 50.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one * 1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 1, 550.0, 2.0, 0.0, 0), (2, 0, 3, 1000.0, 4.0, 0.0, 1),
(3, 0, 4, 800.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_orders_both(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_longonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_orders_shortonly(size=order_size_one * 1000, allow_partial=False, raise_reject=True).order_records
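    # log=True records the full pre- and post-order state of every bar in log_records.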
def test_log(self):
record_arrays_close(
from_orders_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0,
100.0, 0.0, 0.0, 1.0, 100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 1, 0.0, 100.0, 0.0, 0.0, 2.0, 200.0, -np.inf, 2.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 2.0, 200.0, 200.0, 2.0, 0.0, 1, 0, -1, 1),
(2, 0, 0, 2, 400.0, -100.0, 200.0, 0.0, 3.0, 100.0, np.nan, 3.0, 0,
2, 0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 400.0,
-100.0, 200.0, 0.0, 3.0, 100.0, np.nan, np.nan, np.nan, -1, 1, 0, -1),
(3, 0, 0, 3, 400.0, -100.0, 200.0, 0.0, 4.0, 0.0, np.inf, 4.0, 0, 2,
0.0, 0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 4.0, 0.0, 100.0, 4.0, 0.0, 0, 0, -1, 2),
(4, 0, 0, 4, 0.0, 0.0, 0.0, 0.0, 5.0, 0.0, -np.inf, 5.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 0.0,
0.0, 0.0, 5.0, 0.0, np.nan, np.nan, np.nan, -1, 2, 6, -1)
], dtype=log_dt)
)
def test_group_by(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 1, 200.0, 2.0, 0.0, 1), (2, 0, 3, 100.0, 4.0, 0.0, 0),
(3, 1, 0, 100.0, 1.0, 0.0, 0), (4, 1, 1, 200.0, 2.0, 0.0, 1), (5, 1, 3, 100.0, 4.0, 0.0, 0),
(6, 2, 0, 100.0, 1.0, 0.0, 0), (7, 2, 1, 200.0, 2.0, 0.0, 1), (8, 2, 3, 100.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
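    # call_seq defines the order in which columns of a group are processed; 'auto' appears to place sells before buys so that freed-up cash can fund them.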
def test_call_seq(self):
pf = from_orders_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 1, 200., 2., 0., 1),
(2, 0, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_orders_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 1, 200., 2., 0., 1),
(2, 1, 3, 100., 4., 0., 0), (3, 2, 0, 100., 1., 0., 0),
(4, 2, 1, 200., 2., 0., 1), (5, 2, 3, 100., 4., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
size=pd.DataFrame([
[0., 0., np.inf],
[0., np.inf, -np.inf],
[np.inf, -np.inf, 0.],
[-np.inf, 0., np.inf],
[0., np.inf, -np.inf],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_orders_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_orders_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 2, 1],
[2, 1, 0],
[1, 0, 2]
])
)
def test_value(self):
record_arrays_close(
from_orders_both(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 1, 0.5, 2.0, 0.0, 1),
(2, 0, 3, 0.25, 4.0, 0.0, 0), (3, 0, 4, 0.2, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=order_size_one, size_type='value').order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 1, 0.5, 2.0, 0.0, 0),
(2, 0, 3, 0.25, 4.0, 0.0, 1), (3, 0, 4, 0.2, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_amount(self):
record_arrays_close(
from_orders_both(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[75., -75.]], size_type='targetamount').order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=75., size_type='targetamount',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 75.0, 1.0, 0.0, 0), (1, 1, 0, 25.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_target_value(self):
record_arrays_close(
from_orders_both(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 25.0, 2.0, 0.0, 0), (7, 1, 2, 8.333333333333332, 3.0, 0.0, 0),
(8, 1, 3, 4.166666666666668, 4.0, 0.0, 0), (9, 1, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 25.0, 2.0, 0.0, 1),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 1), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 1),
(4, 0, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[50., -50.]], size_type='targetvalue').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 25.0, 2.0, 0.0, 0),
(2, 0, 2, 8.333333333333332, 3.0, 0.0, 0), (3, 0, 3, 4.166666666666668, 4.0, 0.0, 0),
(4, 0, 4, 2.5, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=50., size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0),
(2, 0, 1, 25.0, 2.0, 0.0, 1), (3, 1, 1, 25.0, 2.0, 0.0, 1),
(4, 2, 1, 25.0, 2.0, 0.0, 0), (5, 0, 2, 8.333333333333332, 3.0, 0.0, 1),
(6, 1, 2, 8.333333333333332, 3.0, 0.0, 1), (7, 2, 2, 8.333333333333332, 3.0, 0.0, 1),
(8, 0, 3, 4.166666666666668, 4.0, 0.0, 1), (9, 1, 3, 4.166666666666668, 4.0, 0.0, 1),
(10, 2, 3, 4.166666666666668, 4.0, 0.0, 1), (11, 0, 4, 2.5, 5.0, 0.0, 1),
(12, 1, 4, 2.5, 5.0, 0.0, 1), (13, 2, 4, 2.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
def test_target_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1), (5, 1, 0, 50.0, 1.0, 0.0, 1),
(6, 1, 1, 37.5, 2.0, 0.0, 0), (7, 1, 2, 6.25, 3.0, 0.0, 0), (8, 1, 3, 2.34375, 4.0, 0.0, 0),
(9, 1, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 1), (2, 0, 2, 6.25, 3.0, 0.0, 1),
(3, 0, 3, 3.90625, 4.0, 0.0, 1), (4, 0, 4, 2.734375, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='targetpercent').order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 1), (1, 0, 1, 37.5, 2.0, 0.0, 0), (2, 0, 2, 6.25, 3.0, 0.0, 0),
(3, 0, 3, 2.34375, 4.0, 0.0, 0), (4, 0, 4, 1.171875, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 1, 0, 50.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
def test_update_value(self):
record_arrays_close(
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=False).order_records,
from_orders_both(size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
update_value=True).order_records
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=False).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.9465661198057499, 2.02, 0.019120635620076154, 0),
(4, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(5, 1, 2, 0.018558300554959377, 3.0300000000000002, 0.0005623165068152705, 0),
(6, 0, 3, 0.00037870218456959037, 3.96, 1.4996606508955778e-05, 1),
(7, 1, 3, 0.0003638525743521767, 4.04, 1.4699644003827875e-05, 0),
(8, 0, 4, 7.424805112066224e-06, 4.95, 3.675278530472781e-07, 1),
(9, 1, 4, 7.133664827307231e-06, 5.05, 3.6025007377901643e-07, 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='targetpercent', fees=0.01, slippage=0.01,
group_by=np.array([0, 0, 0]), cash_sharing=True, update_value=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.01, 0.505, 0),
(1, 1, 0, 48.02960494069208, 1.01, 0.485099009900992, 0),
(2, 0, 1, 0.9851975296539592, 1.98, 0.019506911087148394, 1),
(3, 1, 1, 0.7303208018821721, 2.02, 0.014752480198019875, 0),
(4, 2, 1, 0.21624531792357785, 2.02, 0.0043681554220562635, 0),
(5, 0, 2, 0.019315704924103727, 2.9699999999999998, 0.0005736764362458806, 1),
(6, 1, 2, 0.009608602243410758, 2.9699999999999998, 0.00028537548662929945, 1),
(7, 2, 2, 0.02779013180558861, 3.0300000000000002, 0.0008420409937093393, 0),
(8, 0, 3, 0.0005670876809631409, 3.96, 2.2456672166140378e-05, 1),
(9, 1, 3, 0.00037770350099464167, 3.96, 1.4957058639387809e-05, 1),
(10, 2, 3, 0.0009077441794302741, 4.04, 3.6672864848982974e-05, 0),
(11, 0, 4, 1.8523501267964093e-05, 4.95, 9.169133127642227e-07, 1),
(12, 1, 4, 1.2972670177191503e-05, 4.95, 6.421471737709794e-07, 1),
(13, 2, 4, 3.0261148547590434e-05, 5.05, 1.5281880016533242e-06, 0)
], dtype=order_dt)
)
def test_percent(self):
record_arrays_close(
from_orders_both(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0), (5, 1, 0, 50., 1., 0., 1),
(6, 1, 1, 12.5, 2., 0., 1), (7, 1, 2, 4.16666667, 3., 0., 1),
(8, 1, 3, 1.5625, 4., 0., 1), (9, 1, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_longonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 1, 12.5, 2., 0., 0),
(2, 0, 2, 4.16666667, 3., 0., 0), (3, 0, 3, 1.5625, 4., 0., 0),
(4, 0, 4, 0.625, 5., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_orders_shortonly(size=[[0.5, -0.5]], size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 1, 12.5, 2., 0., 1),
(2, 0, 2, 4.16666667, 3., 0., 1), (3, 0, 3, 1.5625, 4., 0., 1),
(4, 0, 4, 0.625, 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_orders_both(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 5.00000000e+01, 1., 0., 0), (1, 1, 0, 2.50000000e+01, 1., 0., 0),
(2, 2, 0, 1.25000000e+01, 1., 0., 0), (3, 0, 1, 3.12500000e+00, 2., 0., 0),
(4, 1, 1, 1.56250000e+00, 2., 0., 0), (5, 2, 1, 7.81250000e-01, 2., 0., 0),
(6, 0, 2, 2.60416667e-01, 3., 0., 0), (7, 1, 2, 1.30208333e-01, 3., 0., 0),
(8, 2, 2, 6.51041667e-02, 3., 0., 0), (9, 0, 3, 2.44140625e-02, 4., 0., 0),
(10, 1, 3, 1.22070312e-02, 4., 0., 0), (11, 2, 3, 6.10351562e-03, 4., 0., 0),
(12, 0, 4, 2.44140625e-03, 5., 0., 0), (13, 1, 4, 1.22070312e-03, 5., 0., 0),
(14, 2, 4, 6.10351562e-04, 5., 0., 0)
], dtype=order_dt)
)
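    # With call_seq='auto', rebalancing to target values or percentages should reproduce the target holdings exactly.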
def test_auto_seq(self):
target_hold_value = pd.DataFrame({
'a': [0., 70., 30., 0., 70.],
'b': [30., 0., 70., 30., 30.],
'c': [70., 30., 0., 70., 0.]
}, index=price.index)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value, size_type='targetvalue',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
pd.testing.assert_frame_equal(
from_orders_both(
close=1., size=target_hold_value / 100, size_type='targetpercent',
group_by=np.array([0, 0, 0]), cash_sharing=True,
call_seq='auto').asset_value(group_by=False),
target_hold_value
)
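    # max_orders and max_logs size the preallocated record arrays; values smaller than the number of generated records raise.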
def test_max_orders(self):
_ = from_orders_both(close=price_wide)
_ = from_orders_both(close=price_wide, max_orders=9)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, max_orders=8)
def test_max_logs(self):
_ = from_orders_both(close=price_wide, log=True)
_ = from_orders_both(close=price_wide, log=True, max_logs=15)
with pytest.raises(Exception):
_ = from_orders_both(close=price_wide, log=True, max_logs=14)
# ############# from_signals ############# #
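# Shared signal fixtures and direction-specific wrappers around Portfolio.from_signals.
# The from_ls_* variants pass long and short entries/exits as four separate arrays instead of relying on the direction argument.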
entries = pd.Series([True, True, True, False, False], index=price.index)
entries_wide = entries.vbt.tile(3, keys=['a', 'b', 'c'])
exits = pd.Series([False, False, True, True, True], index=price.index)
exits_wide = exits.vbt.tile(3, keys=['a', 'b', 'c'])
def from_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='both', **kwargs)
def from_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='longonly', **kwargs)
def from_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, direction='shortonly', **kwargs)
def from_ls_signals_both(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, False, exits, False, **kwargs)
def from_ls_signals_longonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, entries, exits, False, False, **kwargs)
def from_ls_signals_shortonly(close=price, entries=entries, exits=exits, **kwargs):
return vbt.Portfolio.from_signals(close, False, False, entries, exits, **kwargs)
class TestFromSignals:
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_one_column(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly().order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both()
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Int64Index([0], dtype='int64')
)
assert pf.wrapper.ndim == 1
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
@pytest.mark.parametrize(
"test_ls",
[False, True],
)
def test_multiple_columns(self, test_ls):
_from_signals_both = from_ls_signals_both if test_ls else from_signals_both
_from_signals_longonly = from_ls_signals_longonly if test_ls else from_signals_longonly
_from_signals_shortonly = from_ls_signals_shortonly if test_ls else from_signals_shortonly
record_arrays_close(
_from_signals_both(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 200., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_longonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 100., 4., 0., 1),
(2, 1, 0, 100., 1., 0., 0), (3, 1, 3, 100., 4., 0., 1),
(4, 2, 0, 100., 1., 0., 0), (5, 2, 3, 100., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
_from_signals_shortonly(close=price_wide).order_records,
np.array([
(0, 0, 0, 100., 1., 0., 1), (1, 0, 3, 50., 4., 0., 0),
(2, 1, 0, 100., 1., 0., 1), (3, 1, 3, 50., 4., 0., 0),
(4, 2, 0, 100., 1., 0., 1), (5, 2, 3, 50., 4., 0., 0)
], dtype=order_dt)
)
pf = _from_signals_both(close=price_wide)
pd.testing.assert_index_equal(
pf.wrapper.index,
pd.DatetimeIndex(['2020-01-01', '2020-01-02', '2020-01-03', '2020-01-04', '2020-01-05'])
)
pd.testing.assert_index_equal(
pf.wrapper.columns,
pd.Index(['a', 'b', 'c'], dtype='object')
)
assert pf.wrapper.ndim == 2
assert pf.wrapper.freq == day_dt
assert pf.wrapper.grouper.group_by is None
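    # A custom Numba signal function can produce the four signal flags on the fly; it must yield the same orders as the equivalent pre-built signal arrays.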
def test_custom_signal_func(self):
@njit
def signal_func_nb(c, long_num_arr, short_num_arr):
long_num = nb.get_elem_nb(c, long_num_arr)
short_num = nb.get_elem_nb(c, short_num_arr)
is_long_entry = long_num > 0
is_long_exit = long_num < 0
is_short_entry = short_num > 0
is_short_exit = short_num < 0
return is_long_entry, is_long_exit, is_short_entry, is_short_exit
pf_base = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
entries=pd.Series([True, False, False, False, False]),
exits=pd.Series([False, False, True, False, False]),
short_entries=pd.Series([False, True, False, True, False]),
short_exits=pd.Series([False, False, False, False, True]),
size=1,
upon_opposite_entry='ignore'
)
pf = vbt.Portfolio.from_signals(
pd.Series([1, 2, 3, 4, 5]),
signal_func_nb=signal_func_nb,
signal_args=(vbt.Rep('long_num_arr'), vbt.Rep('short_num_arr')),
broadcast_named_args=dict(
long_num_arr=pd.Series([1, 0, -1, 0, 0]),
short_num_arr=pd.Series([0, 1, 0, 1, -1])
),
size=1,
upon_opposite_entry='ignore'
)
record_arrays_close(
pf_base.order_records,
pf.order_records
)
def test_amount(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 2.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='amount').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_value(self):
record_arrays_close(
from_signals_both(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 0.3125, 4.0, 0.0, 1),
(2, 1, 4, 0.1775, 5.0, 0.0, 1), (3, 2, 0, 100.0, 1.0, 0.0, 0),
(4, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 0), (3, 2, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=[[0, 1, np.inf]], size_type='value').order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 1), (1, 1, 3, 1.0, 4.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 1), (3, 2, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
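    # size_type='percent' with direction='both' raises by default; it works once upon_opposite_entry='close' closes the position before re-entering.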
def test_percent(self):
with pytest.raises(Exception):
_ = from_signals_both(size=0.5, size_type='percent')
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1), (2, 0, 4, 25., 5., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=0.5, size_type='percent', upon_opposite_entry='close',
accumulate=True).order_records,
np.array([
(0, 0, 0, 50.0, 1.0, 0.0, 0), (1, 0, 1, 12.5, 2.0, 0.0, 0),
(2, 0, 3, 62.5, 4.0, 0.0, 1), (3, 0, 4, 27.5, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 0, 3, 50., 4., 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=0.5, size_type='percent').order_records,
np.array([
(0, 0, 0, 50., 1., 0., 1), (1, 0, 3, 37.5, 4., 0., 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=price_wide, size=0.5, size_type='percent',
group_by=np.array([0, 0, 0]), cash_sharing=True).order_records,
np.array([
(0, 0, 0, 50., 1., 0., 0), (1, 1, 0, 25., 1., 0., 0),
(2, 2, 0, 12.5, 1., 0., 0), (3, 0, 3, 50., 4., 0., 1),
(4, 1, 3, 25., 4., 0., 1), (5, 2, 3, 12.5, 4., 0., 1)
], dtype=order_dt)
)
def test_price(self):
record_arrays_close(
from_signals_both(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 0), (1, 0, 3, 198.01980198019803, 4.04, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099, 1.01, 0., 0), (1, 0, 3, 99.00990099, 4.04, 0., 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=price * 1.01).order_records,
np.array([
(0, 0, 0, 99.00990099009901, 1.01, 0.0, 1), (1, 0, 3, 49.504950495049506, 4.04, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=np.inf).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(price=-np.inf).order_records,
np.array([
(0, 0, 1, 100.0, 1.0, 0.0, 1), (1, 0, 3, 66.66666666666667, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_val_price(self):
price_nan = pd.Series([1, 2, np.nan, 4, 5], index=price.index)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price,
size_type='value').order_records
)
shift_price = price_nan.ffill().shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value').order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price,
size_type='value').order_records
)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=price_nan,
size_type='value', ffill_val_price=False).order_records
)
shift_price_nan = price_nan.shift(1)
record_arrays_close(
from_signals_both(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_both(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_longonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_longonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
record_arrays_close(
from_signals_shortonly(close=price_nan, size=1, val_price=-np.inf,
size_type='value', ffill_val_price=False).order_records,
from_signals_shortonly(close=price_nan, size=1, val_price=shift_price_nan,
size_type='value', ffill_val_price=False).order_records
)
def test_fees(self):
record_arrays_close(
from_signals_both(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.8, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 8.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.4, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 4.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.4, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 4.0, 0)
], dtype=order_dt)
)
def test_fixed_fees(self):
record_arrays_close(
from_signals_both(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 2.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 2.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.1, 0),
(3, 1, 3, 1.0, 4.0, 0.1, 1), (4, 2, 0, 1.0, 1.0, 1.0, 0), (5, 2, 3, 1.0, 4.0, 1.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, fixed_fees=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.1, 1),
(3, 1, 3, 1.0, 4.0, 0.1, 0), (4, 2, 0, 1.0, 1.0, 1.0, 1), (5, 2, 3, 1.0, 4.0, 1.0, 0)
], dtype=order_dt)
)
def test_slippage(self):
record_arrays_close(
from_signals_both(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 2.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 2.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.1, 0.0, 0),
(3, 1, 3, 1.0, 3.6, 0.0, 1), (4, 2, 0, 1.0, 2.0, 0.0, 0), (5, 2, 3, 1.0, 0.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, slippage=[[0., 0.1, 1.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 0.9, 0.0, 1),
(3, 1, 3, 1.0, 4.4, 0.0, 0), (4, 2, 0, 1.0, 0.0, 0.0, 1), (5, 2, 3, 1.0, 8.0, 0.0, 0)
], dtype=order_dt)
)
def test_min_size(self):
record_arrays_close(
from_signals_both(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, min_size=[[0., 1., 2.]]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_max_size(self):
record_arrays_close(
from_signals_both(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 0, 4, 0.5, 5.0, 0.0, 1),
(3, 1, 0, 1.0, 1.0, 0.0, 0), (4, 1, 3, 1.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 0), (1, 0, 3, 0.5, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1), (4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, max_size=[[0.5, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 0.5, 1.0, 0.0, 1), (1, 0, 3, 0.5, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_reject_prob(self):
record_arrays_close(
from_signals_both(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 0),
(3, 1, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1., reject_prob=[[0., 0.5, 1.]], seed=42).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 1),
(3, 1, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
def test_allow_partial(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1), (2, 1, 3, 1000.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1000, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 1000.0, 1.0, 0.0, 1), (1, 0, 3, 275.0, 4.0, 0.0, 0), (2, 1, 0, 1000.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=np.inf, allow_partial=[[True, False]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1), (1, 0, 3, 50.0, 4.0, 0.0, 0), (2, 1, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_raise_reject(self):
record_arrays_close(
from_signals_both(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 1100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1000, allow_partial=True, raise_reject=True).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 100.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=True, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_both(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(size=1000, allow_partial=False, raise_reject=True).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(size=1000, allow_partial=False, raise_reject=True).order_records
def test_log(self):
record_arrays_close(
from_signals_both(log=True).log_records,
np.array([
(0, 0, 0, 0, 100.0, 0.0, 0.0, 100.0, 1.0, 100.0, np.inf, 1.0, 0, 2, 0.0, 0.0,
0.0, 1e-08, np.inf, 0.0, False, True, False, True, 0.0, 100.0, 0.0, 0.0, 1.0,
100.0, 100.0, 1.0, 0.0, 0, 0, -1, 0),
(1, 0, 0, 3, 0.0, 100.0, 0.0, 0.0, 4.0, 400.0, -np.inf, 4.0, 0, 2, 0.0,
0.0, 0.0, 1e-08, np.inf, 0.0, False, True, False, True, 800.0, -100.0,
400.0, 0.0, 4.0, 400.0, 200.0, 4.0, 0.0, 1, 0, -1, 1)
], dtype=log_dt)
)
def test_accumulate(self):
record_arrays_close(
from_signals_both(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 2.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 3.0, 4.0, 0.0, 1), (5, 1, 4, 1.0, 5.0, 0.0, 1),
(6, 2, 0, 1.0, 1.0, 0.0, 0), (7, 2, 3, 1.0, 4.0, 0.0, 1), (8, 2, 4, 1.0, 5.0, 0.0, 1),
(9, 3, 0, 1.0, 1.0, 0.0, 0), (10, 3, 1, 1.0, 2.0, 0.0, 0), (11, 3, 3, 1.0, 4.0, 0.0, 1),
(12, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 3, 1.0, 4.0, 0.0, 1), (2, 1, 0, 1.0, 1.0, 0.0, 0),
(3, 1, 1, 1.0, 2.0, 0.0, 0), (4, 1, 3, 2.0, 4.0, 0.0, 1), (5, 2, 0, 1.0, 1.0, 0.0, 0),
(6, 2, 3, 1.0, 4.0, 0.0, 1), (7, 3, 0, 1.0, 1.0, 0.0, 0), (8, 3, 1, 1.0, 2.0, 0.0, 0),
(9, 3, 3, 1.0, 4.0, 0.0, 1), (10, 3, 4, 1.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(size=1, accumulate=[['disabled', 'addonly', 'removeonly', 'both']]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 1.0, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 1, 1.0, 2.0, 0.0, 1), (4, 1, 3, 2.0, 4.0, 0.0, 0), (5, 2, 0, 1.0, 1.0, 0.0, 1),
(6, 2, 3, 1.0, 4.0, 0.0, 0), (7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 1),
(9, 3, 3, 1.0, 4.0, 0.0, 0), (10, 3, 4, 1.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_long_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_long_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_longonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 1, 1.0, 2.0, 0.0, 0), (5, 2, 2, 1.0, 3.0, 0.0, 1),
(6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 0),
(8, 5, 1, 1.0, 2.0, 0.0, 0), (9, 5, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_short_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_short_conflict=[[
'ignore',
'entry',
'exit',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_shortonly(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 1),
(1, 1, 0, 1.0, 1.0, 0.0, 1), (2, 1, 1, 1.0, 2.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 1, 1.0, 2.0, 0.0, 1), (5, 2, 2, 1.0, 3.0, 0.0, 0),
(6, 3, 1, 1.0, 2.0, 0.0, 1), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 5, 1, 1.0, 2.0, 0.0, 1), (9, 5, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_dir_conflict(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, True, True, True, True, True, True],
[True, True, True, True, False, True, False],
[True, True, True, True, True, True, True]
]),
exits=pd.DataFrame([
[True, True, True, True, True, True, True],
[False, False, False, False, True, False, True],
[True, True, True, True, True, True, True]
]),
size=1.,
accumulate=True,
upon_dir_conflict=[[
'ignore',
'long',
'short',
'adjacent',
'adjacent',
'opposite',
'opposite'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 1, 1.0, 2.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 1, 1.0, 2.0, 0.0, 0), (3, 1, 2, 1.0, 3.0, 0.0, 0),
(4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 1, 1.0, 2.0, 0.0, 0), (6, 2, 2, 1.0, 3.0, 0.0, 1),
(7, 3, 1, 1.0, 2.0, 0.0, 0), (8, 3, 2, 1.0, 3.0, 0.0, 0),
(9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 1),
(11, 5, 1, 1.0, 2.0, 0.0, 0), (12, 5, 2, 1.0, 3.0, 0.0, 1),
(13, 6, 1, 1.0, 2.0, 0.0, 1), (14, 6, 2, 1.0, 3.0, 0.0, 0)
], dtype=order_dt)
)
def test_upon_opposite_entry(self):
kwargs = dict(
close=price[:3],
entries=pd.DataFrame([
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False]
]),
exits=pd.DataFrame([
[False, True, False, True, False, True, False, True, False, True],
[True, False, True, False, True, False, True, False, True, False],
[False, True, False, True, False, True, False, True, False, True]
]),
size=1.,
upon_opposite_entry=[[
'ignore',
'ignore',
'close',
'close',
'closereduce',
'closereduce',
'reverse',
'reverse',
'reversereduce',
'reversereduce'
]]
)
record_arrays_close(
from_signals_both(**kwargs).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0),
(1, 1, 0, 1.0, 1.0, 0.0, 1),
(2, 2, 0, 1.0, 1.0, 0.0, 0), (3, 2, 1, 1.0, 2.0, 0.0, 1), (4, 2, 2, 1.0, 3.0, 0.0, 0),
(5, 3, 0, 1.0, 1.0, 0.0, 1), (6, 3, 1, 1.0, 2.0, 0.0, 0), (7, 3, 2, 1.0, 3.0, 0.0, 1),
(8, 4, 0, 1.0, 1.0, 0.0, 0), (9, 4, 1, 1.0, 2.0, 0.0, 1), (10, 4, 2, 1.0, 3.0, 0.0, 0),
(11, 5, 0, 1.0, 1.0, 0.0, 1), (12, 5, 1, 1.0, 2.0, 0.0, 0), (13, 5, 2, 1.0, 3.0, 0.0, 1),
(14, 6, 0, 1.0, 1.0, 0.0, 0), (15, 6, 1, 2.0, 2.0, 0.0, 1), (16, 6, 2, 2.0, 3.0, 0.0, 0),
(17, 7, 0, 1.0, 1.0, 0.0, 1), (18, 7, 1, 2.0, 2.0, 0.0, 0), (19, 7, 2, 2.0, 3.0, 0.0, 1),
(20, 8, 0, 1.0, 1.0, 0.0, 0), (21, 8, 1, 2.0, 2.0, 0.0, 1), (22, 8, 2, 2.0, 3.0, 0.0, 0),
(23, 9, 0, 1.0, 1.0, 0.0, 1), (24, 9, 1, 2.0, 2.0, 0.0, 0), (25, 9, 2, 2.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(**kwargs, accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 0), (1, 0, 2, 1.0, 3.0, 0.0, 0),
(2, 1, 0, 1.0, 1.0, 0.0, 1), (3, 1, 2, 1.0, 3.0, 0.0, 1),
(4, 2, 0, 1.0, 1.0, 0.0, 0), (5, 2, 1, 1.0, 2.0, 0.0, 1), (6, 2, 2, 1.0, 3.0, 0.0, 0),
(7, 3, 0, 1.0, 1.0, 0.0, 1), (8, 3, 1, 1.0, 2.0, 0.0, 0), (9, 3, 2, 1.0, 3.0, 0.0, 1),
(10, 4, 0, 1.0, 1.0, 0.0, 0), (11, 4, 1, 1.0, 2.0, 0.0, 1), (12, 4, 2, 1.0, 3.0, 0.0, 0),
(13, 5, 0, 1.0, 1.0, 0.0, 1), (14, 5, 1, 1.0, 2.0, 0.0, 0), (15, 5, 2, 1.0, 3.0, 0.0, 1),
(16, 6, 0, 1.0, 1.0, 0.0, 0), (17, 6, 1, 2.0, 2.0, 0.0, 1), (18, 6, 2, 2.0, 3.0, 0.0, 0),
(19, 7, 0, 1.0, 1.0, 0.0, 1), (20, 7, 1, 2.0, 2.0, 0.0, 0), (21, 7, 2, 2.0, 3.0, 0.0, 1),
(22, 8, 0, 1.0, 1.0, 0.0, 0), (23, 8, 1, 1.0, 2.0, 0.0, 1), (24, 8, 2, 1.0, 3.0, 0.0, 0),
(25, 9, 0, 1.0, 1.0, 0.0, 1), (26, 9, 1, 1.0, 2.0, 0.0, 0), (27, 9, 2, 1.0, 3.0, 0.0, 1)
], dtype=order_dt)
)
def test_init_cash(self):
record_arrays_close(
from_signals_both(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 3, 1.0, 4.0, 0.0, 1), (1, 1, 0, 1.0, 1.0, 0.0, 0), (2, 1, 3, 2.0, 4.0, 0.0, 1),
(3, 2, 0, 1.0, 1.0, 0.0, 0), (4, 2, 3, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 1, 0, 1.0, 1.0, 0.0, 0), (1, 1, 3, 1.0, 4.0, 0.0, 1), (2, 2, 0, 1.0, 1.0, 0.0, 0),
(3, 2, 3, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(close=price_wide, size=1., init_cash=[0., 1., 100.]).order_records,
np.array([
(0, 0, 0, 1.0, 1.0, 0.0, 1), (1, 0, 3, 0.25, 4.0, 0.0, 0), (2, 1, 0, 1.0, 1.0, 0.0, 1),
(3, 1, 3, 0.5, 4.0, 0.0, 0), (4, 2, 0, 1.0, 1.0, 0.0, 1), (5, 2, 3, 1.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
with pytest.raises(Exception):
_ = from_signals_both(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_longonly(init_cash=np.inf).order_records
with pytest.raises(Exception):
_ = from_signals_shortonly(init_cash=np.inf).order_records
def test_group_by(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]))
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0), (1, 0, 3, 200.0, 4.0, 0.0, 1), (2, 1, 0, 100.0, 1.0, 0.0, 0),
(3, 1, 3, 200.0, 4.0, 0.0, 1), (4, 2, 0, 100.0, 1.0, 0.0, 0), (5, 2, 3, 200.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([200., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert not pf.cash_sharing
def test_cash_sharing(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
pd.testing.assert_index_equal(
pf.wrapper.grouper.group_by,
pd.Int64Index([0, 0, 1], dtype='int64')
)
pd.testing.assert_series_equal(
pf.init_cash,
pd.Series([100., 100.], index=pd.Int64Index([0, 1], dtype='int64')).rename('init_cash')
)
assert pf.cash_sharing
with pytest.raises(Exception):
_ = pf.regroup(group_by=False)
def test_call_seq(self):
pf = from_signals_both(close=price_wide, group_by=np.array([0, 0, 1]), cash_sharing=True)
record_arrays_close(
pf.order_records,
np.array([
(0, 0, 0, 100., 1., 0., 0), (1, 0, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0],
[0, 1, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='reversed')
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
pf = from_signals_both(
close=price_wide, group_by=np.array([0, 0, 1]),
cash_sharing=True, call_seq='random', seed=seed)
record_arrays_close(
pf.order_records,
np.array([
(0, 1, 0, 100., 1., 0., 0), (1, 1, 3, 200., 4., 0., 1),
(2, 2, 0, 100., 1., 0., 0), (3, 2, 3, 200., 4., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[1, 0, 0],
[0, 1, 0],
[1, 0, 0],
[1, 0, 0],
[1, 0, 0]
])
)
kwargs = dict(
close=1.,
entries=pd.DataFrame([
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
[False, True, False],
]),
exits=pd.DataFrame([
[False, False, False],
[False, False, True],
[False, True, False],
[True, False, False],
[False, False, True],
]),
group_by=np.array([0, 0, 0]),
cash_sharing=True,
call_seq='auto'
)
pf = from_signals_both(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 200., 1., 0., 1),
(2, 1, 1, 200., 1., 0., 0), (3, 1, 2, 200., 1., 0., 1),
(4, 0, 2, 200., 1., 0., 0), (5, 0, 3, 200., 1., 0., 1),
(6, 2, 3, 200., 1., 0., 0), (7, 2, 4, 200., 1., 0., 1),
(8, 1, 4, 200., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_longonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 0), (1, 2, 1, 100., 1., 0., 1),
(2, 1, 1, 100., 1., 0., 0), (3, 1, 2, 100., 1., 0., 1),
(4, 0, 2, 100., 1., 0., 0), (5, 0, 3, 100., 1., 0., 1),
(6, 2, 3, 100., 1., 0., 0), (7, 2, 4, 100., 1., 0., 1),
(8, 1, 4, 100., 1., 0., 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 2, 0],
[0, 1, 2],
[2, 0, 1]
])
)
pf = from_signals_shortonly(**kwargs)
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100., 1., 0., 1), (1, 2, 1, 100., 1., 0., 0),
(2, 0, 2, 100., 1., 0., 1), (3, 0, 3, 100., 1., 0., 0),
(4, 1, 4, 100., 1., 0., 1)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 1, 0],
[1, 0, 2]
])
)
pf = from_signals_longonly(**kwargs, size=1., size_type='percent')
record_arrays_close(
pf.order_records,
np.array([
(0, 2, 0, 100.0, 1.0, 0.0, 0), (1, 2, 1, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.0, 0.0, 0),
(3, 1, 2, 100.0, 1.0, 0.0, 1), (4, 0, 2, 100.0, 1.0, 0.0, 0), (5, 0, 3, 100.0, 1.0, 0.0, 1),
(6, 2, 3, 100.0, 1.0, 0.0, 0), (7, 2, 4, 100.0, 1.0, 0.0, 1), (8, 1, 4, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
np.testing.assert_array_equal(
pf.call_seq.values,
np.array([
[0, 1, 2],
[2, 0, 1],
[1, 0, 2],
[0, 1, 2],
[2, 0, 1]
])
)
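        # In the 'auto' case above, the per-row call order is apparently rearranged so that the
        # column being exited is processed before the column being entered; that is what lets the
        # freed-up shared cash (e.g. the 200 released at row 1) be reused within the same bar.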
def test_sl_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.0, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 3, 20.0, 2.0, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0), (2, 1, 1, 20.0, 4.25, 0.0, 1),
(3, 2, 0, 20.0, 5.0, 0.0, 0), (4, 2, 1, 20.0, 4.25, 0.0, 1),
(5, 3, 0, 20.0, 5.0, 0.0, 0), (6, 3, 1, 20.0, 4.0, 0.0, 1),
(7, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1),
(2, 2, 0, 20.0, 5.0, 0.0, 1),
(3, 3, 0, 20.0, 5.0, 0.0, 1),
(4, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 2.0, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 3, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0),
(2, 2, 0, 100.0, 1.0, 0.0, 0),
(3, 3, 0, 100.0, 1.0, 0.0, 0),
(4, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1), (2, 1, 1, 100.0, 1.75, 0.0, 0),
(3, 2, 0, 100.0, 1.0, 0.0, 1), (4, 2, 1, 100.0, 1.75, 0.0, 0),
(5, 3, 0, 100.0, 1.0, 0.0, 1), (6, 3, 1, 100.0, 2.0, 0.0, 0),
(7, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_ts_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(ts_stop=-0.1)
close = pd.Series([4., 5., 4., 3., 2.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.0, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 4, 25.0, 2.0, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.0, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1),
(4, 3, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.1, 0.5, np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 0),
(1, 1, 0, 25.0, 4.0, 0.0, 0), (2, 1, 2, 25.0, 4.25, 0.0, 1),
(3, 2, 0, 25.0, 4.0, 0.0, 0), (4, 2, 2, 25.0, 4.25, 0.0, 1),
(5, 3, 0, 25.0, 4.0, 0.0, 0), (6, 3, 2, 25.0, 4.125, 0.0, 1),
(7, 4, 0, 25.0, 4.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.15, 0.2, 0.25, np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 25.0, 4.0, 0.0, 1),
(1, 1, 0, 25.0, 4.0, 0.0, 1), (2, 1, 1, 25.0, 5.25, 0.0, 0),
(3, 2, 0, 25.0, 4.0, 0.0, 1), (4, 2, 1, 25.0, 5.25, 0.0, 0),
(5, 3, 0, 25.0, 4.0, 0.0, 1), (6, 3, 1, 25.0, 5.25, 0.0, 0),
(7, 4, 0, 25.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([2., 1., 2., 3., 4.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 1.0, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0),
(4, 3, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 2.0, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 4, 50.0, 4.0, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
sl_stop=[[np.nan, 0.5, 3., np.inf]], sl_trail=True).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 0),
(1, 1, 0, 50.0, 2.0, 0.0, 0), (2, 1, 1, 50.0, 0.75, 0.0, 1),
(3, 2, 0, 50.0, 2.0, 0.0, 0), (4, 2, 1, 50.0, 0.5, 0.0, 1),
(5, 3, 0, 50.0, 2.0, 0.0, 0),
(6, 4, 0, 50.0, 2.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[np.nan, 0.5, 0.75, 1., np.inf]], sl_trail=True).order_records,
np.array([
(0, 0, 0, 50.0, 2.0, 0.0, 1),
(1, 1, 0, 50.0, 2.0, 0.0, 1), (2, 1, 2, 50.0, 1.75, 0.0, 0),
(3, 2, 0, 50.0, 2.0, 0.0, 1), (4, 2, 2, 50.0, 1.75, 0.0, 0),
(5, 3, 0, 50.0, 2.0, 0.0, 1), (6, 3, 2, 50.0, 1.75, 0.0, 0),
(7, 4, 0, 50.0, 2.0, 0.0, 1)
], dtype=order_dt)
)
def test_tp_stop(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
with pytest.raises(Exception):
_ = from_signals_both(sl_stop=-0.1)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.0, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 3, 20.0, 2.0, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.1, 0.5, np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 0),
(1, 1, 0, 20.0, 5.0, 0.0, 0),
(2, 2, 0, 20.0, 5.0, 0.0, 0),
(3, 3, 0, 20.0, 5.0, 0.0, 0),
(4, 4, 0, 20.0, 5.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.1, 0.15, 0.2, np.inf]]).order_records,
np.array([
(0, 0, 0, 20.0, 5.0, 0.0, 1),
(1, 1, 0, 20.0, 5.0, 0.0, 1), (2, 1, 1, 20.0, 4.25, 0.0, 0),
(3, 2, 0, 20.0, 5.0, 0.0, 1), (4, 2, 1, 20.0, 4.25, 0.0, 0),
(5, 3, 0, 20.0, 5.0, 0.0, 1), (6, 3, 1, 20.0, 4.0, 0.0, 0),
(7, 4, 0, 20.0, 5.0, 0.0, 1)
], dtype=order_dt)
)
close = pd.Series([1., 2., 3., 4., 5.], index=price.index)
open = close - 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 2.0, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 3, 100.0, 4.0, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_longonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_both(
close=close, entries=exits, exits=entries,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records,
from_signals_shortonly(
close=close, entries=entries, exits=exits,
tp_stop=[[np.nan, 0.5, 3., np.inf]]).order_records
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 0),
(1, 1, 0, 100.0, 1.0, 0.0, 0), (2, 1, 1, 100.0, 1.75, 0.0, 1),
(3, 2, 0, 100.0, 1.0, 0.0, 0), (4, 2, 1, 100.0, 1.75, 0.0, 1),
(5, 3, 0, 100.0, 1.0, 0.0, 0), (6, 3, 1, 100.0, 2.0, 0.0, 1),
(7, 4, 0, 100.0, 1.0, 0.0, 0)
], dtype=order_dt)
)
record_arrays_close(
from_signals_shortonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
tp_stop=[[np.nan, 0.5, 0.75, 1., np.inf]]).order_records,
np.array([
(0, 0, 0, 100.0, 1.0, 0.0, 1),
(1, 1, 0, 100.0, 1.0, 0.0, 1),
(2, 2, 0, 100.0, 1.0, 0.0, 1),
(3, 3, 0, 100.0, 1.0, 0.0, 1),
(4, 4, 0, 100.0, 1.0, 0.0, 1)
], dtype=order_dt)
)
def test_stop_entry_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='val_price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.625, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='price',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.75, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='fillprice',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 3.0250000000000004, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 3, 16.52892561983471, 1.5125000000000002, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close, val_price=1.05 * close,
stop_entry_price='close',
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(1, 0, 1, 16.52892561983471, 4.25, 0.0, 1),
(2, 1, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(3, 1, 2, 16.52892561983471, 2.5, 0.0, 1),
(4, 2, 0, 16.52892561983471, 6.050000000000001, 0.0, 0),
(5, 2, 4, 16.52892561983471, 1.25, 0.0, 1)
], dtype=order_dt)
)
def test_stop_exit_price(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
open = close + 0.25
high = close + 0.5
low = close - 0.5
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stoplimit', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 4.25, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.5, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.25, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='stopmarket', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.825, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.25, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 1.125, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='close', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.6, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.7, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_longonly(
close=close, entries=entries, exits=exits,
open=open, high=high, low=low,
sl_stop=[[0.05, 0.5, 0.75]], price=1.1 * close,
stop_exit_price='price', slippage=0.1).order_records,
np.array([
(0, 0, 0, 16.528926, 6.05, 0.0, 0), (1, 0, 1, 16.528926, 3.9600000000000004, 0.0, 1),
(2, 1, 0, 16.528926, 6.05, 0.0, 0), (3, 1, 2, 16.528926, 2.97, 0.0, 1),
(4, 2, 0, 16.528926, 6.05, 0.0, 0), (5, 2, 4, 16.528926, 0.9900000000000001, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_exit(self):
entries = pd.Series([True, False, False, False, False], index=price.index)
exits = pd.Series([False, False, False, False, False], index=price.index)
close = pd.Series([5., 4., 3., 2., 1.], index=price.index)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']],
accumulate=True).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 1.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
record_arrays_close(
from_signals_both(
close=close, entries=entries, exits=exits, size=1,
sl_stop=0.1, upon_stop_exit=[['close', 'closereduce', 'reverse', 'reversereduce']]).order_records,
np.array([
(0, 0, 0, 1.0, 5.0, 0.0, 0), (1, 0, 1, 1.0, 4.0, 0.0, 1),
(2, 1, 0, 1.0, 5.0, 0.0, 0), (3, 1, 1, 1.0, 4.0, 0.0, 1),
(4, 2, 0, 1.0, 5.0, 0.0, 0), (5, 2, 1, 2.0, 4.0, 0.0, 1),
(6, 3, 0, 1.0, 5.0, 0.0, 0), (7, 3, 1, 2.0, 4.0, 0.0, 1)
], dtype=order_dt)
)
def test_upon_stop_update(self):
entries = pd.Series([True, True, False, False, False], index=price.index)
exits = | pd.Series([False, False, False, False, False], index=price.index) | pandas.Series |
__author__ = "<NAME>, University of Kansas"
__version__ = "1.3"
# Change these to your values; you will also likely have to edit the variable names (such as RH for humidity
# or AT for the Temperature) in the below code
CWOPid = "FW####"
DataFile = 'Mesonet.dat'
Lat = '####.##N'
Lon = '#####.##W'
StationHeight = 67 # in M
#----------------------------------------------------------------------------------
# Imports necessary modules
import pandas as pd
import subprocess as sp
import socket
import schedule
import time
# Unit Conversion Functions
def mps_to_mph(WindSpeed):
return WindSpeed * 2.23694
def Cel_to_F(Temperature):
return ((9.0/5.0) * Temperature) + 32
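# AltimeterAdjust below appears to implement the standard altimeter-setting reduction
# AS = (P - 0.3) * (1 + ((1013.25**0.190284 * 0.0065) / 288) * (H / (P - 0.3)**0.190284))**(1 / 0.190284),
# with station pressure P in hPa and station height H in metres.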
def AltimeterAdjust(Press):
Height = StationHeight
P1 = Press - 0.3
    Frac1 = ((1013.25**0.190284) * 0.0065) / 288
Frac2 = Height / ((Press-0.3)**(0.190284))
P2 = (1 + (Frac1*Frac2))**(1/0.190284)
return P1*P2
# Definition of Operational Code
class MesoToCWOP:
def __init__(self, file):
self.filename = file
def GetLastData(self):
'''Retrieves and saves the last line of data'''
File = self.filename
self.LastData = | pd.read_csv(File, skiprows=[0,2,3], parse_dates=[0]) | pandas.read_csv |
import pandas as pd
import numpy as np
from sklearn.datasets import make_blobs
from sklearn.decomposition import PCA
import altair as alt
def show_clusters(data, clusters, centroids = None):
"""
    This function reduces a data set to 2 dimensions using principal component analysis (PCA) and colours clusters of points.
Parameters
----------
data : DataFrame
Scaled data
    clusters : list, pandas Series
        corresponding cluster label for each row of data
    centroids : 2d array
        Coordinates of cluster centroids
Returns
-------
plot
        A 2d principal components scatter plot coloured by cluster
Examples
--------
>>> from sklearn.datasets import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2)
    >>> show_clusters(X, y)
"""
# Exception handling
try:
data = | pd.DataFrame(data) | pandas.DataFrame |
import pandas as pd
import pytest
from pybaseball.datahelpers import transform
from unittest.mock import patch
@pytest.fixture(name='fielding')
def _fielding() -> pd.DataFrame:
return pd.DataFrame([
['1', 2015, 'P', 157],
['1', 2015, 'CF', 5],
['1', 2016, 'CF', 162],
['2', 2015, 'C', 1],
],
columns=['playerID', 'yearID', 'POS', 'G'])
@pytest.fixture(name='people')
def _people() -> pd.DataFrame:
return pd.DataFrame([
['1', 1990],
['2', 1985],
],
columns=['playerID', 'birthYear'])
@pytest.fixture(name='stats')
def _stats() -> pd.DataFrame:
return pd.DataFrame([
['1', 2015],
['1', 2016],
['2', 2015],
],
columns=['playerID', 'yearID'])
def test_get_age(stats: pd.DataFrame, people: pd.DataFrame) -> None:
expected = pd.DataFrame([
['1', 2015, 25],
['1', 2016, 26],
['2', 2015, 30],
],
columns=['playerID', 'yearID', 'age']
)
result = transform.get_age(stats, people)
pd.testing.assert_frame_equal(expected, result, check_dtype=False)
def test_get_age_default_people(stats: pd.DataFrame, people: pd.DataFrame) -> None:
expected = pd.DataFrame([
['1', 2015, 25],
['1', 2016, 26],
['2', 2015, 30],
],
columns=['playerID', 'yearID', 'age']
)
with patch('pybaseball.datahelpers.transform.people', return_value=people) as people_mock:
result = transform.get_age(stats)
pd.testing.assert_frame_equal(expected, result, check_dtype=False)
people_mock.assert_called_once()
def test_get_primary_position(fielding: pd.DataFrame) -> None:
expected = pd.DataFrame([
['1', 2015, 'P'],
['1', 2016, 'CF'],
['2', 2015, 'C'],
],
columns=['playerID', 'yearID', 'primaryPos'],
index= | pd.RangeIndex(1, 4) | pandas.RangeIndex |
##set default values for all constants
##these can all be changed when calling PostClassificationModel
embeddings_path = '../Data/glove.6B.50d.txt'
max_features = 40000 #number of words to put in dictionary
maxlen=40 #number of words of title (or title+selftext combination) you will use
batch_size = 32 #batch size for training NN
epochs = 20 #number of epochs for training NN
meta_embedding_dims = 64 #dimension of the embedding for the time information
dense_layer_size = 256 #size of the final dense layer in the NN
text_cols_used=['title'] #which text columns to use
exclude_removed = True #exclude removed and deleted posts from the data set
use_year = True #whether to include the year in the calculation
split = 0.25 #percent of training set to use for validation
test_size = 0.2 #percent of data set to use for testing
optimization_quantity = ['val_main_out_accuracy','max'] #we want to maximize the accuracy on the validation set
early_stopping_patience = 5 #how soon to stop if accuracy is not improving
model_loss='binary_crossentropy' #loss function used in model
model_optimizer='adam' #optimizer used in model
model_metrics=['accuracy'] #metric used to gauge model performance
model_loss_weights=[1, 0.2] #first weight is for main loss, second is for auxiliary loss (adjusting the word embedding)
custom_seed = 123
import numpy as np
import os
import csv
from random import random, sample, seed
import pandas as pd
from datetime import datetime
from keras.preprocessing import sequence
from keras.preprocessing.text import text_to_word_sequence, Tokenizer
from keras.models import Input, Model
from keras.layers import Dense, Embedding, GlobalAveragePooling1D, concatenate, Activation
from keras.layers.core import Masking, Dropout, Reshape
from keras.layers.normalization import BatchNormalization
from keras.callbacks import EarlyStopping
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
def buildnets(subreddits):
results = []
for subreddit in subreddits:
data_path = f'../Data/subreddit_{subreddit}'
model, accuracies, word_tokenizer, cleaned_df = PostClassificationModel(data_path = data_path, use_year = True)
#save model
model.save( f'../Data/subreddit_{subreddit}/NN_model.keras')
results.append( (model, accuracies, word_tokenizer, cleaned_df ))
return results
#For predicting time series of post popularity.
def encode_text(text, word_tokenizer, maxlen=maxlen):
encoded = word_tokenizer.texts_to_sequences([text])
return sequence.pad_sequences(encoded, maxlen=maxlen)
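# Hypothetical usage sketch (the title string is made up; word_tokenizer comes from training):
# padded = encode_text("my first post title", word_tokenizer)  # shape (1, maxlen)
# which can then be fed to model.predict alongside the time inputs built in timeseries() below.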
def timeseries(df, text, model, word_tokenizer):
#get the minimum year appearing in the data set
min_year = min(np.array(df.utc.apply(lambda x : x.year), dtype=int))
df['date'] = df.utc.apply( lambda x : x.date())
all_dates_utcs = df.date.unique()
#all_dates_utc = [datetime.datetime.(x[0]+min_year,1,1) + datetime.timedelta(x[1]) for x in all_dates]
encoded_text = encode_text(text,word_tokenizer)
# Fixing a specific time for input on each day
input_hour = np.array([12])
input_minute = np.array([0])
predict_list = []
for date in all_dates_utcs:
input_dayofweek = np.array([date.weekday()])
input_dayofyear = np.array([date.timetuple().tm_yday-1])
input_year = np.array([date.year-min_year])
predict_list.append(model.predict([encoded_text, input_hour, input_dayofweek, input_minute, input_dayofyear, input_year])[0][0][0])
plt.ylim(0,1)
ax = plt.gca()
for tick in ax.get_xticklabels():
tick.set_rotation(45)
plt.xlabel("Date")
plt.ylabel("Probability of Success")
plt.scatter(all_dates_utcs, predict_list)
#returns 1 if ups>threshold, 0 otherwise
def GoodPost(ups,threshold=1):
if ups>threshold:
return 1
return 0
#take dfog, restrict to only posts with selftext, exclude posts that were removed or deleted (if exclude_removed is True), and drop columns in drop_na_cols with NaN entries
def DataSetup(dfog, exclude_removed=True, drop_na_cols=['title']):
if exclude_removed:
tempdf=dfog.loc[(((dfog.removed_by_category.isnull()))) & ((dfog.is_self==True))]
#tempdf=dfog.loc[(((dfog.removed_by_category.isnull()))) & ((dfog.is_self==True) & ~(dfog["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False)))]
else:
tempdf=dfog.loc[dfog.is_self==True]
#tempdf=dfog.loc[((dfog.is_self==True) & ~(dfog["title"].str.contains("Thread|thread|Sunday Live Chat|consolidation zone|Containment Zone|Daily Discussion|Daily discussion|Saturday Chat|What Are Your Moves Tomorrow|What Are Your Moves Today|MEGATHREAD",na=False)))]
tempdf=tempdf.dropna(subset = drop_na_cols)
tempdf['utc']=tempdf.created_utc.apply(lambda x : datetime.utcfromtimestamp(x))
return tempdf
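# make_embedding_matrix parses a GloVe-style text file in which each line is
# "<word> v1 v2 ... vd", then builds a (max_features + 1, d) weights matrix whose
# row i holds the vector for tokenizer word index i; words missing from the
# embedding file keep all-zero rows.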
def make_embedding_matrix(word_tokenizer, embeddings_path):
embedding_vectors = {}
with open(embeddings_path, 'r',encoding='latin-1') as f:
for line in f:
#print(line)
line_split = line.strip().split(" ")
vec = np.array(line_split[1:], dtype=float)
word = line_split[0]
embedding_vectors[word] = vec
embedding_dims = len(embedding_vectors['the'])
weights_matrix = np.zeros((max_features + 1, embedding_dims))
for word, i in word_tokenizer.word_index.items():
embedding_vector = embedding_vectors.get(word)
if embedding_vector is not None and i <= max_features:
weights_matrix[i] = embedding_vector
return weights_matrix, embedding_dims
#data_path should either be a string (the location of the subreddit data) or a pandas dataframe
#embeddings_path should be a string, the location of the embeddings file
#returns a pair: the model and a list [accuracy to beat, accuracy on validation set, accuracy on test set]
def PostClassificationModel(data_path, embeddings_path = embeddings_path, custom_seed=custom_seed,
max_features = max_features, maxlen = maxlen, batch_size = batch_size,
epochs = epochs, meta_embedding_dims = meta_embedding_dims,
dense_layer_size = dense_layer_size, text_cols_used = text_cols_used,
exclude_removed = exclude_removed, use_year = use_year, split = split,
test_size = test_size, optimization_quantity = optimization_quantity,
early_stopping_patience = early_stopping_patience, model_loss = model_loss,
model_optimizer = model_optimizer, model_metrics = model_metrics,
model_loss_weights = model_loss_weights):
print("Starting Post Classification Model.")
#if data_path is a string, read in the corresponding file as df. Otherwise we assume it's a pandas dataframe
if type(data_path) == str:
df = | pd.read_csv(data_path + "/full.csv") | pandas.read_csv |
"""
Created on Jun 28 10:39 2018
@author: nishit
"""
import time
import math
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import MinMaxScaler
from utils_intern.messageLogger import MessageLogger
logger = MessageLogger.get_logger_parent()
class OfflineProcessingData:
def __init__(self):
self.new_df = None
def expand_and_resample(self, raw_data, dT):
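        # raw_data is expected as [[timestamp, value], ...] in ascending time order; the loop
        # below starts from the newest sample and walks backwards, linearly interpolating values
        # onto a fixed grid of `dT` seconds, then reverses the result so it is returned oldest-first.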
step = float(dT)
j = len(raw_data) - 1
new_data = []
if j > 0:
start_time = raw_data[j][0]
start_value = raw_data[j][1]
new_data.append([start_time, start_value])
prev_time = start_time
prev_value = start_value
required_diff = step
j -= 1
while j >= 0:
end_time = raw_data[j][0]
end_value = raw_data[j][1]
diff_sec = prev_time - end_time
if diff_sec >= required_diff:
ratio = required_diff / diff_sec
inter_time = prev_time - required_diff
inter_value = prev_value - (prev_value - end_value) * ratio
new_data.append([inter_time, inter_value])
prev_time = inter_time
prev_value = inter_value
required_diff = step
else:
required_diff -= diff_sec
prev_time = end_time
prev_value = end_value
j -= 1
else:
new_data = raw_data
new_data.reverse()
return new_data
def preprocess_data_predict(self, raw_data, num_timesteps, output_size):
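        # Shape logic in brief: the raw [timestamp, value] rows are scaled to (0, 1) and sliced
        # into overlapping windows of `num_timesteps` values each, producing Xtest with shape
        # (len(data) - num_timesteps, num_timesteps, 1) for the downstream model.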
# Loading Data
# df = pd.DataFrame(raw_data, columns=col_heads)
df = pd.DataFrame(raw_data)
df = df[df.columns[:2]]
df.columns = ['Time', 'Electricity']
new_df = df
new_df.columns = ['DateTime', 'Electricity']
# Changing dtype to pandas datetime format
new_df['DateTime'] = pd.to_datetime(new_df['DateTime'], unit='s')
new_df = new_df.set_index('DateTime')
# checking for null values and if any, replacing them with last valid observation
new_df.isnull().sum()
new_df.Electricity.fillna(method='pad', inplace=True)
# scale the data to be in the range (0, 1)
data = new_df.values.reshape(-1, 1)
scaler = MinMaxScaler(feature_range=(0, 1), copy=False)
data = scaler.fit_transform(data)
look_back = num_timesteps
num_features = 1
nb_samples = data.shape[0] - num_timesteps
x_train_reshaped = np.zeros((nb_samples, look_back, num_features))
# y_train_reshaped = np.zeros((nb_samples, output_size))
logger.info("data dim = "+str(data.shape))
for i in range(nb_samples):
y_position_start = i + look_back
x_train_reshaped[i] = data[i:y_position_start]
Xtest = x_train_reshaped
logger.debug(str(Xtest.shape))
return Xtest, scaler, 0
def postprocess_data(self, prediction, startTimestamp, delta, scaler):
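        # Inverse-transforms the scaled predictions back to the original units and assigns each
        # value a timestamp, starting at startTimestamp and stepping by `delta` per point.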
data = prediction.reshape(-1, 1)
data = scaler.inverse_transform(data)
data = data.reshape(-1)
startTime = startTimestamp
result = {}
for pred in data:
result[startTime] = pred
startTime += delta
return result
def append_mock_data(self, data, num_timesteps, dT):
l = len(data)
diff = num_timesteps - l
if l == 0:
earliest_timestamp = time.time()
else:
earliest_timestamp = data[0][0]
new_data = data
for i in range(diff):
earliest_timestamp -= dT
new_data.insert(0, [earliest_timestamp, - 0.000001])
return new_data
def break_series_into_countinous_blocks(self, raw_data, dT, horizon_steps):
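        # Splits the series wherever the gap between consecutive samples exceeds 10% of the
        # horizon duration (allowed_continous_gap_percent), so each returned block is a run of
        # (near-)contiguous measurements that can be resampled independently.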
allowed_continous_gap_percent = 0.1
duration_of_one_data_set = horizon_steps * dT
required_mins = math.ceil(duration_of_one_data_set / 60.0)
allowed_continous_gap_mins = required_mins * allowed_continous_gap_percent
continous_series = []
temp_data = []
logger.info("allowed "+str(allowed_continous_gap_mins))
prev_time = raw_data[0][0]
for i in range(len(raw_data)):
curr_time = raw_data[i][0]
minute_diff = (curr_time - prev_time) / 60.0
if minute_diff > allowed_continous_gap_mins:
continous_series.append(temp_data.copy())
temp_data = []
temp_data.append(raw_data[i])
prev_time = curr_time
if len(temp_data) > 0:
continous_series.append(temp_data.copy())
return continous_series
def expand_and_resample_into_blocks(self, raw_data, dT, horizon_steps, num_timesteps, output_size):
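        # Resamples every continuous block onto the dT grid; if no block is long enough to form
        # at least one training window (num_timesteps + output_size points), trailing blocks are
        # merged and re-expanded so that a single usable block can still be produced.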
if len(raw_data) > 0:
blocks = self.break_series_into_countinous_blocks(raw_data, dT, horizon_steps)
logger.info("num blocks = "+str(len(blocks)))
resampled_blocks = []
block_has_min_length = []
merged = False
min_length = num_timesteps + output_size
for block in blocks:
resampled_block = self.expand_and_resample(block, dT)
if len(resampled_block) > 0:
resampled_blocks.append(resampled_block)
logger.info("block size = "+str(len(resampled_block)))
if len(resampled_block) >= min_length:
block_has_min_length.append(True)
else:
block_has_min_length.append(False)
if len(block_has_min_length) > 0 and not any(block_has_min_length):
logger.info("merging block because insufficient data")
new_block = []
end_time = resampled_blocks[-1][-1][0]
# TODO : check logic
for i in reversed(range(len(resampled_blocks))):
rsb = resampled_blocks[i]
start_time = rsb[0][0]
if end_time - start_time < min_length * dT:
rsb.extend(new_block)
new_block = rsb
merged = True
else:
rsb.extend(new_block)
new_block = rsb
merged = True
break
logger.info(new_block)
if merged:
new_block = self.expand_and_resample(new_block, dT)
logger.info(new_block)
logger.info("length of merged blocks after expand = "+str(len(new_block)))
new_blocks = [new_block]
resampled_blocks = new_blocks
return resampled_blocks, merged
else:
return [], False
def preprocess_data_train(self, blocks, num_timesteps, output_size):
x_list = []
y_list = []
look_back = num_timesteps
num_features = 1
count = 0
for raw_data in blocks:
# Loading Data
if len(raw_data) >= num_timesteps + output_size + 5:
raw_data = raw_data[-7200:]
latest_timestamp = raw_data[-1:][0][0]
logger.debug(latest_timestamp)
# df = pd.DataFrame(raw_data, columns=col_heads)
df = pd.DataFrame(raw_data)
df = df[df.columns[:2]]
df.columns = ['Time', 'Electricity']
new_df = df
new_df.columns = ['DateTime', 'Electricity']
# Changing dtype to pandas datetime format
new_df['DateTime'] = | pd.to_datetime(new_df['DateTime'], unit='s') | pandas.to_datetime |
import pandas as pd
import matplotlib.pyplot as plt
def read_dataframe():
# Read input file
df = | pd.read_csv('../data/epa_raw/Alabama/AQDM_907365160.txt', low_memory=False) | pandas.read_csv |
'''
Script for preprocessing in single machine
'''
import os
from typing import Dict, List, Set, Tuple, Union
import re
import json
import datetime as dt
from collections import Counter
from tqdm.auto import tqdm
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
RAW_BASE_PATH = '../data/raw/{fname}'
ADMISSIONS_FNAME = 'ADMISSIONS.csv.gz'
DIAGNOSES_FNAME = 'DIAGNOSES_ICD.csv.gz'
LABEVENTS_FNAME = 'LABEVENTS.csv.gz'
PRESCRIPTIONS_FNAME = 'PRESCRIPTIONS.csv.gz'
PATIENTS_FNAME = 'PATIENTS.csv.gz'
NOTES_FNAME = 'NOTEEVENTS.csv.gz'
PATH_PROCESSED = '../data/processed/'
DIAG_PATH = RAW_BASE_PATH.format(fname=DIAGNOSES_FNAME)
PATIENTS_PATH = RAW_BASE_PATH.format(fname=PATIENTS_FNAME)
PATIENT_SAMPLE_SIZE = 1000 # total is 46520
TRAIN_SIZE = 0.8
# We need to take into account only the events that happened during the observation window. The end of the observation window is N days before death for deceased patients and the date of the last event for alive patients. We can have several sets of events (e.g. labs, diags, meds), so we need to choose the latest date out of those.
# OBSERVATION_WINDOW = 2000
OBSERVATION_WINDOW = 365*2
PREDICTION_WINDOW = 50
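# Illustrative reading of the windows above (hypothetical dates, assuming N = PREDICTION_WINDOW):
# with OBSERVATION_WINDOW = 730 and PREDICTION_WINDOW = 50, a patient who died on 2150-06-01
# would have an observation window ending 50 days earlier, and only events from the ~2 years
# before that cut-off would be used as model inputs.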
RANDOM_SEED = 1
# Respiratory illnesses ICD9 diag codes. Sources:
# https://en.wikipedia.org/wiki/List_of_ICD-9_codes_460–519:_diseases_of_the_respiratory_system
# https://basicmedicalkey.com/diseases-of-the-respiratory-system-icd-9-cm-chapter-8-codes-460-519-and-icd-10-cm-chapter-10-codes-j00-j99/
acute_diag_codes = [
460, 461, 462, 463, 464, 465, 466
]
other_resp_tract_diag_codes = [
470, 471, 472, 473, 474, 475, 476, 477, 478
]
pneumonia_and_influenza_diag_codes = [
480, 481, 482, 483, 484, 485, 486, 487, 488
]
grp4 = [
490, 491, 492, 493, 494, 495, 496
]
grp5 = [
500, 501, 502, 503, 504, 505, 506, 507, 508
]
grp6 = [
510, 511, 512, 513, 514, 515, 516, 517, 518, 519
]
relevant_diag_codes: List[int] = [*acute_diag_codes, *other_resp_tract_diag_codes, *pneumonia_and_influenza_diag_codes, *grp4, *grp5, *grp6]
RELEVANT_DIAG_CODES = [str(e) for e in relevant_diag_codes]
def get_patient_sample() -> Tuple[set, pd.Series, Dict[int, dt.date]]:
patients = pd.read_csv(PATIENTS_PATH)
#sampling random patients
patients_sample = patients.sample(n=PATIENT_SAMPLE_SIZE, random_state=RANDOM_SEED)
sample_ids = set(patients_sample.SUBJECT_ID)
patients_sample = patients[patients.SUBJECT_ID.isin(sample_ids)]
    # Mortality map: SUBJECT_ID -> date of death
deceased_to_date: Dict[int, dt.date] = patients_sample[patients_sample.EXPIRE_FLAG == 1] \
.set_index('SUBJECT_ID').DOD.map(lambda x: pd.to_datetime(x).date()).to_dict()
return sample_ids, patients_sample, deceased_to_date
def _get_data_for_sample(patient_ids: set,
file_name: str, chunksize: int = 10_000) -> pd.DataFrame:
'''Get the data only relevant for the sample.'''
full_path = RAW_BASE_PATH.format(fname=file_name)
iterator = | pd.read_csv(full_path, iterator=True, chunksize=chunksize) | pandas.read_csv |
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def mssql_url() -> str:
conn = os.environ["MSSQL_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(mssql_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(mssql_url, query)
def test_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_float) as sum FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(mssql_url: str) -> None:
query = (
"SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
)
df = read_sql(mssql_url, query, partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64"),
},
)
df.sort_values(by="test_int", inplace=True, ignore_index=True)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(mssql_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(mssql_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(mssql_url: str) -> None:
query = "select MAX(test_int) as max, MIN(test_int) as min from test_table"
df = read_sql(mssql_url, query, partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": | pd.Series([1314], dtype="Int64") | pandas.Series |
#
# Copyright (C) 2019 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from distutils.version import LooseVersion
import inspect
import numpy as np
import pandas as pd
import pyspark
import databricks.koalas as ks
from databricks.koalas.exceptions import PandasNotImplementedError
from databricks.koalas.missing.indexes import _MissingPandasLikeIndex, _MissingPandasLikeMultiIndex
from databricks.koalas.testing.utils import ReusedSQLTestCase, TestUtils
class IndexesTest(ReusedSQLTestCase, TestUtils):
@property
def pdf(self):
return pd.DataFrame({
'a': [1, 2, 3, 4, 5, 6, 7, 8, 9],
'b': [4, 5, 6, 3, 2, 1, 0, 0, 0],
}, index=[0, 1, 3, 5, 6, 8, 9, 9, 9])
@property
def kdf(self):
return ks.from_pandas(self.pdf)
def test_index(self):
for pdf in [pd.DataFrame(np.random.randn(10, 5), index=list('abcdefghij')),
pd.DataFrame(np.random.randn(10, 5),
index=pd.date_range('2011-01-01', freq='D', periods=10)),
pd.DataFrame(np.random.randn(10, 5),
columns=list('abcde')).set_index(['a', 'b'])]:
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index, pdf.index)
def test_index_getattr(self):
kidx = self.kdf.index
item = 'databricks'
expected_error_message = ("'Index' object has no attribute '{}'".format(item))
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_multi_index_getattr(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
kidx = kdf.index
item = 'databricks'
expected_error_message = ("'MultiIndex' object has no attribute '{}'".format(item))
with self.assertRaisesRegex(AttributeError, expected_error_message):
kidx.__getattr__(item)
def test_to_series(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
        # FIXME: the index values do not reflect the change. (#1190)
# self.assert_eq((kidx + 1).to_series(), (pidx + 1).to_series())
pidx = self.pdf.set_index('b', append=True).index
kidx = self.kdf.set_index('b', append=True).index
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
self.assert_eq(kidx.to_series(), pidx.to_series())
self.assert_eq(kidx.to_series(name='a'), pidx.to_series(name='a'))
def test_to_frame(self):
pidx = self.pdf.index
kidx = self.kdf.index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
pidx.name = 'a'
kidx.name = 'a'
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name='x')), repr(pidx.to_frame(name='x')))
self.assert_eq(repr(kidx.to_frame(index=False, name='x')),
repr(pidx.to_frame(index=False, name='x')))
pidx = self.pdf.set_index('b', append=True).index
kidx = self.kdf.set_index('b', append=True).index
self.assert_eq(repr(kidx.to_frame()), repr(pidx.to_frame()))
self.assert_eq(repr(kidx.to_frame(index=False)), repr(pidx.to_frame(index=False)))
if LooseVersion(pd.__version__) >= LooseVersion('0.24'):
# The `name` argument is added in pandas 0.24.
self.assert_eq(repr(kidx.to_frame(name=['x', 'y'])),
repr(pidx.to_frame(name=['x', 'y'])))
self.assert_eq(repr(kidx.to_frame(index=False, name=['x', 'y'])),
repr(pidx.to_frame(index=False, name=['x', 'y'])))
def test_index_names(self):
kdf = self.kdf
self.assertIsNone(kdf.index.name)
idx = pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x')
pdf = pd.DataFrame(np.random.randn(10, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.name, pdf.index.name)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.name = 'renamed'
kidx.name = 'renamed'
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
pidx.name = None
kidx.name = None
self.assertEqual(kidx.name, pidx.name)
self.assertEqual(kidx.names, pidx.names)
self.assert_eq(kidx, pidx)
with self.assertRaisesRegex(ValueError, "Names must be a list-like"):
kidx.names = 'hi'
expected_error_message = ("Length of new names must be {}, got {}"
.format(len(kdf._internal.index_map), len(['0', '1'])))
with self.assertRaisesRegex(ValueError, expected_error_message):
kidx.names = ['0', '1']
def test_multi_index_names(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.names, pdf.index.names)
pidx = pdf.index
kidx = kdf.index
pidx.names = ['renamed_number', 'renamed_color']
kidx.names = ['renamed_number', 'renamed_color']
self.assertEqual(kidx.names, pidx.names)
pidx.names = ['renamed_number', None]
kidx.names = ['renamed_number', None]
self.assertEqual(kidx.names, pidx.names)
if LooseVersion(pyspark.__version__) < LooseVersion('2.4'):
# PySpark < 2.4 does not support struct type with arrow enabled.
with self.sql_conf({'spark.sql.execution.arrow.enabled': False}):
self.assert_eq(kidx, pidx)
else:
self.assert_eq(kidx, pidx)
with self.assertRaises(PandasNotImplementedError):
kidx.name
with self.assertRaises(PandasNotImplementedError):
kidx.name = 'renamed'
def test_index_rename(self):
pdf = pd.DataFrame(np.random.randn(10, 5),
index=pd.Index([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], name='x'))
kdf = ks.from_pandas(pdf)
pidx = pdf.index
kidx = kdf.index
self.assert_eq(kidx.rename('y'), pidx.rename('y'))
self.assert_eq(kdf.index.names, pdf.index.names)
kidx.rename('z', inplace=True)
pidx.rename('z', inplace=True)
self.assert_eq(kidx, pidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kidx.rename(None), pidx.rename(None))
self.assert_eq(kdf.index.names, pdf.index.names)
def test_multi_index_rename(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
pmidx = pdf.index
kmidx = kdf.index
self.assert_eq(kmidx.rename(['n', 'c']), pmidx.rename(['n', 'c']))
self.assert_eq(kdf.index.names, pdf.index.names)
kmidx.rename(['num', 'col'], inplace=True)
pmidx.rename(['num', 'col'], inplace=True)
self.assert_eq(kmidx, pmidx)
self.assert_eq(kdf.index.names, pdf.index.names)
self.assert_eq(kmidx.rename([None, None]), pmidx.rename([None, None]))
self.assert_eq(kdf.index.names, pdf.index.names)
self.assertRaises(TypeError, lambda: kmidx.rename('number'))
self.assertRaises(ValueError, lambda: kmidx.rename(['number']))
def test_multi_index_levshape(self):
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
self.assertEqual(pidx.levshape, kidx.levshape)
def test_index_unique(self):
kidx = self.kdf.index
        # here the output order differs from pandas
expected = [0, 1, 3, 5, 6, 8, 9]
self.assert_eq(expected, sorted(kidx.unique().to_pandas()))
self.assert_eq(expected, sorted(kidx.unique(level=0).to_pandas()))
expected = [1, 2, 4, 6, 7, 9, 10]
self.assert_eq(expected, sorted((kidx + 1).unique().to_pandas()))
with self.assertRaisesRegexp(IndexError, "Too many levels*"):
kidx.unique(level=1)
with self.assertRaisesRegexp(KeyError, "Requested level (hi)*"):
kidx.unique(level='hi')
def test_multi_index_copy(self):
arrays = [[1, 1, 2, 2], ['red', 'blue', 'red', 'blue']]
idx = pd.MultiIndex.from_arrays(arrays, names=('number', 'color'))
pdf = pd.DataFrame(np.random.randn(4, 5), idx)
kdf = ks.from_pandas(pdf)
self.assert_eq(kdf.index.copy(), pdf.index.copy())
def test_index_symmetric_difference(self):
idx = ks.Index(['a', 'b', 'c'])
midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"):
idx.symmetric_difference(midx)
def test_multi_index_symmetric_difference(self):
idx = ks.Index(['a', 'b', 'c'])
midx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
midx_ = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')])
self.assert_eq(
midx.symmetric_difference(midx_),
midx.to_pandas().symmetric_difference(midx_.to_pandas()))
with self.assertRaisesRegexp(NotImplementedError, "Doesn't support*"):
midx.symmetric_difference(idx)
def test_missing(self):
kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
# Index functions
missing_functions = inspect.getmembers(_MissingPandasLikeIndex, inspect.isfunction)
unsupported_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'unsupported_function']
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index('a').index, name)()
deprecated_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'deprecated_function']
for name in deprecated_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"method.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index('a').index, name)()
# MultiIndex functions
missing_functions = inspect.getmembers(_MissingPandasLikeMultiIndex, inspect.isfunction)
unsupported_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'unsupported_function']
for name in unsupported_functions:
with self.assertRaisesRegex(
PandasNotImplementedError,
"method.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)()
deprecated_functions = [name for (name, type_) in missing_functions
if type_.__name__ == 'deprecated_function']
for name in deprecated_functions:
with self.assertRaisesRegex(PandasNotImplementedError,
"method.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)()
# Index properties
missing_properties = inspect.getmembers(_MissingPandasLikeIndex,
lambda o: isinstance(o, property))
unsupported_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'unsupported_property']
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index('a').index, name)
deprecated_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'deprecated_property']
for name in deprecated_properties:
with self.assertRaisesRegex(PandasNotImplementedError,
"property.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index('a').index, name)
# MultiIndex properties
missing_properties = inspect.getmembers(_MissingPandasLikeMultiIndex,
lambda o: isinstance(o, property))
unsupported_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'unsupported_property']
for name in unsupported_properties:
with self.assertRaisesRegex(
PandasNotImplementedError,
"property.*Index.*{}.*not implemented( yet\\.|\\. .+)".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)
deprecated_properties = [name for (name, type_) in missing_properties
if type_.fget.__name__ == 'deprecated_property']
for name in deprecated_properties:
with self.assertRaisesRegex(PandasNotImplementedError,
"property.*Index.*{}.*is deprecated".format(name)):
getattr(kdf.set_index(['a', 'b']).index, name)
def test_index_has_duplicates(self):
indexes = [("a", "b", "c"), ("a", "a", "c"), (1, 3, 3), (1, 2, 3)]
names = [None, 'ks', 'ks', None]
has_dup = [False, True, True, False]
for idx, name, expected in zip(indexes, names, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(idx, name=name))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multiindex_has_duplicates(self):
indexes = [[list("abc"), list("edf")], [list("aac"), list("edf")],
[list("aac"), list("eef")], [[1, 4, 4], [4, 6, 6]]]
has_dup = [False, False, True, True]
for idx, expected in zip(indexes, has_dup):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=idx)
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.has_duplicates, expected)
def test_multi_index_not_supported(self):
kdf = ks.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6], 'c': [7, 8, 9]})
with self.assertRaisesRegex(TypeError,
"cannot perform any with this index type"):
kdf.set_index(['a', 'b']).index.any()
with self.assertRaisesRegex(TypeError,
"cannot perform all with this index type"):
kdf.set_index(['a', 'b']).index.all()
def test_index_nlevels(self):
pdf = pd.DataFrame({"a": [1, 2, 3]}, index=pd.Index(['a', 'b', 'c']))
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 1)
def test_multiindex_nlevel(self):
pdf = pd.DataFrame({'a': [1, 2, 3]}, index=[list('abc'), list('def')])
kdf = ks.from_pandas(pdf)
self.assertEqual(kdf.index.nlevels, 2)
def test_multiindex_from_arrays(self):
arrays = [['a', 'a', 'b', 'b'], ['red', 'blue', 'red', 'blue']]
pidx = pd.MultiIndex.from_arrays(arrays)
kidx = ks.MultiIndex.from_arrays(arrays)
self.assert_eq(pidx, kidx)
def test_multiindex_swaplevel(self):
pidx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]])
kidx = ks.MultiIndex.from_arrays([['a', 'b'], [1, 2]])
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names=['word', 'number'])
kidx = ks.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names=['word', 'number'])
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
pidx = pd.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names=['word', None])
kidx = ks.MultiIndex.from_arrays([['a', 'b'], [1, 2]], names=['word', None])
self.assert_eq(pidx.swaplevel(-2, -1), kidx.swaplevel(-2, -1))
self.assert_eq(pidx.swaplevel(0, 1), kidx.swaplevel(0, 1))
self.assert_eq(pidx.swaplevel('word', 1), kidx.swaplevel('word', 1))
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(-3, 'word')
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, 2)
with self.assertRaisesRegex(IndexError, "Too many levels: Index"):
kidx.swaplevel(0, -3)
with self.assertRaisesRegex(KeyError, "Level work not found"):
kidx.swaplevel(0, 'work')
def test_index_fillna(self):
pidx = pd.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, None]).index
kidx = ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, None]).index
self.assert_eq(pidx.fillna(0), kidx.fillna(0))
self.assert_eq(pidx.rename('name').fillna(0), kidx.rename('name').fillna(0))
with self.assertRaisesRegex(TypeError, "Unsupported type <class 'list'>"):
kidx.fillna([1, 2])
def test_index_drop(self):
pidx = pd.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index
kidx = ks.DataFrame({'a': ['a', 'b', 'c']}, index=[1, 2, 3]).index
self.assert_eq(pidx.drop(1), kidx.drop(1))
self.assert_eq(pidx.drop([1, 2]), kidx.drop([1, 2]))
def test_multiindex_drop(self):
pidx = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')],
names=['level1', 'level2'])
kidx = ks.MultiIndex.from_tuples([('a', 'x'), ('b', 'y'), ('c', 'z')],
names=['level1', 'level2'])
self.assert_eq(pidx.drop('a'), kidx.drop('a'))
self.assert_eq(pidx.drop(['a', 'b']), kidx.drop(['a', 'b']))
self.assert_eq(pidx.drop(['x', 'y'], level='level2'),
kidx.drop(['x', 'y'], level='level2'))
def test_sort_values(self):
pidx = pd.Index([-10, -100, 200, 100])
kidx = ks.Index([-10, -100, 200, 100])
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx.name = 'koalas'
kidx.name = 'koalas'
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
pidx.names = ['hello', 'koalas', 'goodbye']
kidx.names = ['hello', 'koalas', 'goodbye']
self.assert_eq(pidx.sort_values(), kidx.sort_values())
self.assert_eq(pidx.sort_values(ascending=False), kidx.sort_values(ascending=False))
def test_index_drop_duplicates(self):
pidx = pd.Index([1, 1, 2])
kidx = ks.Index([1, 1, 2])
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
pidx = pd.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=['level1', 'level2'])
kidx = ks.MultiIndex.from_tuples([(1, 1), (1, 1), (2, 2)], names=['level1', 'level2'])
self.assert_eq(pidx.drop_duplicates().sort_values(), kidx.drop_duplicates().sort_values())
def test_index_sort(self):
idx = ks.Index([1, 2, 3, 4, 5])
midx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2)])
with self.assertRaisesRegex(
TypeError,
"cannot sort an Index object in-place, use sort_values instead"):
idx.sort()
with self.assertRaisesRegex(
TypeError,
"cannot sort an Index object in-place, use sort_values instead"):
midx.sort()
def test_multiindex_isna(self):
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
with self.assertRaisesRegex(
NotImplementedError,
"isna is not defined for MultiIndex"):
kidx.isna()
with self.assertRaisesRegex(
NotImplementedError,
"isna is not defined for MultiIndex"):
kidx.isnull()
with self.assertRaisesRegex(
NotImplementedError,
"notna is not defined for MultiIndex"):
kidx.notna()
with self.assertRaisesRegex(
NotImplementedError,
"notna is not defined for MultiIndex"):
kidx.notnull()
def test_index_nunique(self):
pidx = pd.Index([1, 1, 2, None])
kidx = ks.Index([1, 1, 2, None])
self.assert_eq(pidx.nunique(), kidx.nunique())
self.assert_eq(pidx.nunique(dropna=True), kidx.nunique(dropna=True))
def test_multiindex_nunique(self):
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
with self.assertRaisesRegex(
NotImplementedError,
"notna is not defined for MultiIndex"):
kidx.notnull()
def test_multiindex_rename(self):
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
pidx = pidx.rename(list('ABC'))
kidx = kidx.rename(list('ABC'))
self.assert_eq(pidx, kidx)
pidx = pidx.rename(['my', 'name', 'is'])
kidx = kidx.rename(['my', 'name', 'is'])
self.assert_eq(pidx, kidx)
def test_multiindex_set_names(self):
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
pidx = pidx.set_names(['set', 'new', 'names'])
kidx = kidx.set_names(['set', 'new', 'names'])
self.assert_eq(pidx, kidx)
pidx.set_names(['set', 'new', 'names'], inplace=True)
kidx.set_names(['set', 'new', 'names'], inplace=True)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names('first', level=0)
kidx = kidx.set_names('first', level=0)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names('second', level=1)
kidx = kidx.set_names('second', level=1)
self.assert_eq(pidx, kidx)
pidx = pidx.set_names('third', level=2)
kidx = kidx.set_names('third', level=2)
self.assert_eq(pidx, kidx)
pidx.set_names('first', level=0, inplace=True)
kidx.set_names('first', level=0, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names('second', level=1, inplace=True)
kidx.set_names('second', level=1, inplace=True)
self.assert_eq(pidx, kidx)
pidx.set_names('third', level=2, inplace=True)
kidx.set_names('third', level=2, inplace=True)
self.assert_eq(pidx, kidx)
def test_multiindex_from_product(self):
iterables = [[0, 1, 2], ['green', 'purple']]
pidx = pd.MultiIndex.from_product(iterables)
kidx = ks.MultiIndex.from_product(iterables)
self.assert_eq(pidx, kidx)
def test_multiindex_tuple_column_name(self):
column_index = pd.MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])
pdf = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=column_index)
pdf.set_index(('a', 'x'), append=True, inplace=True)
kdf = ks.from_pandas(pdf)
self.assert_eq(pdf, kdf)
def test_len(self):
pidx = pd.Index(range(10000))
kidx = ks.Index(range(10000))
self.assert_eq(len(pidx), len(kidx))
pidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
self.assert_eq(len(pidx), len(kidx))
def test_append(self):
# Index
pidx = pd.Index(range(10000))
kidx = ks.Index(range(10000))
self.assert_eq(
pidx.append(pidx),
kidx.append(kidx))
# Index with name
pidx1 = pd.Index(range(10000), name='a')
pidx2 = pd.Index(range(10000), name='b')
kidx1 = ks.Index(range(10000), name='a')
kidx2 = ks.Index(range(10000), name='b')
self.assert_eq(
pidx1.append(pidx2),
kidx1.append(kidx2))
self.assert_eq(
pidx2.append(pidx1),
kidx2.append(kidx1))
# Index from DataFrame
pdf1 = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['a', 'b', 'c'])
pdf2 = pd.DataFrame({
'a': [7, 8, 9],
'd': [10, 11, 12]},
index=['x', 'y', 'z'])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index('a').index
pidx2 = pdf2.set_index('d').index
kidx1 = kdf1.set_index('a').index
kidx2 = kdf2.set_index('d').index
self.assert_eq(
pidx1.append(pidx2),
kidx1.append(kidx2))
self.assert_eq(
pidx2.append(pidx1),
kidx2.append(kidx1))
# Index from DataFrame with MultiIndex columns
pdf1 = pd.DataFrame({
'a': [1, 2, 3],
'b': [4, 5, 6]})
pdf2 = pd.DataFrame({
'a': [7, 8, 9],
'd': [10, 11, 12]})
pdf1.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('b', 'y')])
pdf2.columns = pd.MultiIndex.from_tuples([('a', 'x'), ('d', 'y')])
kdf1 = ks.from_pandas(pdf1)
kdf2 = ks.from_pandas(pdf2)
pidx1 = pdf1.set_index(('a', 'x')).index
pidx2 = pdf2.set_index(('d', 'y')).index
kidx1 = kdf1.set_index(('a', 'x')).index
kidx2 = kdf2.set_index(('d', 'y')).index
self.assert_eq(
pidx1.append(pidx2),
kidx1.append(kidx2))
self.assert_eq(
pidx2.append(pidx1),
kidx2.append(kidx1))
# MultiIndex
pmidx = pd.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
kmidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
self.assert_eq(pmidx.append(pmidx), kmidx.append(kmidx))
# MultiIndex with names
pmidx1 = pd.MultiIndex.from_tuples(
[('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)],
names=['x', 'y', 'z'])
pmidx2 = pd.MultiIndex.from_tuples(
[('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)],
names=['p', 'q', 'r'])
kmidx1 = ks.MultiIndex.from_tuples(
[('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)],
names=['x', 'y', 'z'])
kmidx2 = ks.MultiIndex.from_tuples(
[('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)],
names=['p', 'q', 'r'])
self.assert_eq(
pmidx1.append(pmidx2),
kmidx1.append(kmidx2))
self.assert_eq(
pmidx2.append(pmidx1),
kmidx2.append(kmidx1))
self.assert_eq(
pmidx1.append(pmidx2).names,
kmidx1.append(kmidx2).names)
self.assert_eq(
pmidx1.append(pmidx2).names,
kmidx1.append(kmidx2).names)
# Index & MultiIndex currently is not supported
expected_error_message = r"append\(\) between Index & MultiIndex currently is not supported"
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kidx.append(kmidx)
with self.assertRaisesRegex(NotImplementedError, expected_error_message):
kmidx.append(kidx)
def test_argmin(self):
pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
kidx = ks.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
self.assert_eq(pidx.argmin(), kidx.argmin())
# MultiIndex
kidx = ks.MultiIndex.from_tuples([('a', 'x', 1), ('b', 'y', 2), ('c', 'z', 3)])
with self.assertRaisesRegex(
TypeError,
"reduction operation 'argmin' not allowed for this dtype"):
kidx.argmin()
def test_argmax(self):
        pidx = pd.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
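        # (Assumed continuation - the original file is truncated here; this
        # mirrors test_argmin above.)
        kidx = ks.Index([100, 50, 10, 20, 30, 60, 0, 50, 0, 100, 100, 100, 20, 0, 0])
        self.assert_eq(pidx.argmax(), kidx.argmax())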
import json
import networkx as nx
import numpy as np
import os
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from tqdm import tqdm
from config import logger, config
def read_profile_data():
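    # Append an all-zero profile row with pid == -1; it acts as the placeholder
    # profile for queries whose pid is missing (see gen_profile_feas, where
    # missing pids are filled with -1 before the merge).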
profile_na = np.zeros(67)
profile_na[0] = -1
profile_na = pd.DataFrame(profile_na.reshape(1, -1))
profile_df = pd.read_csv(config.profile_file)
profile_na.columns = profile_df.columns
profile_df = profile_df.append(profile_na)
return profile_df
def merge_raw_data():
tr_queries = pd.read_csv(config.train_query_file, parse_dates=['req_time'])
te_queries = pd.read_csv(config.test_query_file, parse_dates=['req_time'])
tr_plans = pd.read_csv(config.train_plan_file, parse_dates=['plan_time'])
te_plans = pd.read_csv(config.test_plan_file, parse_dates=['plan_time'])
tr_click = pd.read_csv(config.train_click_file)
trn = tr_queries.merge(tr_click, on='sid', how='left')
trn = trn.merge(tr_plans, on='sid', how='left')
trn = trn.drop(['click_time'], axis=1)
trn['click_mode'] = trn['click_mode'].fillna(0)
tst = te_queries.merge(te_plans, on='sid', how='left')
tst['click_mode'] = -1
df = pd.concat([trn, tst], axis=0, sort=False)
df = df.drop(['plan_time'], axis=1)
df = df.reset_index(drop=True)
df['weekday'] = df['req_time'].dt.weekday
df['day'] = df['req_time'].dt.day
df['hour'] = df['req_time'].dt.hour
df = df.drop(['req_time'], axis=1)
logger.info('total data size: {}'.format(df.shape))
logger.info('data columns: {}'.format(', '.join(df.columns)))
return df
def extract_plans(df):
plans = []
for sid, plan in tqdm(zip(df['sid'].values, df['plans'].values)):
try:
p = json.loads(plan)
for x in p:
x['sid'] = sid
plans.extend(p)
except:
pass
return pd.DataFrame(plans)
def generate_od_features(df):
feat = df[['o','d']].drop_duplicates()
feat = feat.merge(df.groupby('o')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='o')
feat.rename(columns={'day': 'o_nunique_day',
'hour': 'o_nunique_hour',
'pid': 'o_nunique_pid',
'click_mode': 'o_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby('d')[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on='d')
feat.rename(columns={'day': 'd_nunique_day',
'hour': 'd_nunique_hour',
'pid': 'd_nunique_pid',
'click_mode': 'd_nunique_click'}, inplace=True)
feat = feat.merge(df.groupby(['o', 'd'])[['day', 'hour', 'pid', 'click_mode']].nunique().reset_index(), how='left', on=['o', 'd'])
feat.rename(columns={'day': 'od_nunique_day',
'hour': 'od_nunique_hour',
'pid': 'od_nunique_pid',
'click_mode': 'od_nunique_click'}, inplace=True)
return feat
def generate_pid_features(df):
feat = df.groupby('pid')[['hour', 'day']].nunique().reset_index()
feat.rename(columns={'hour': 'pid_nunique_hour', 'day': 'pid_nunique_day'}, inplace=True)
feat['nunique_hour_d_nunique_day'] = feat['pid_nunique_hour'] / feat['pid_nunique_day']
feat = feat.merge(df.groupby('pid')[['o', 'd']].nunique().reset_index(), how='left', on='pid')
feat.rename(columns={'o': 'pid_nunique_o', 'd': 'pid_nunique_d'}, inplace=True)
feat['nunique_o_d_nunique_d'] = feat['pid_nunique_o'] / feat['pid_nunique_d']
return feat
def generate_od_cluster_features(df):
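    # Build an undirected graph whose nodes are the origins/destinations and
    # whose edges are the observed o-d pairs, then use each node's clustering
    # coefficient (networkx) as a feature.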
G = nx.Graph()
G.add_nodes_from(df['o'].unique().tolist())
G.add_nodes_from(df['d'].unique().tolist())
edges = df[['o','d']].apply(lambda x: (x[0],x[1]), axis=1).tolist()
G.add_edges_from(edges)
cluster = nx.clustering(G)
cluster_df = pd.DataFrame([{'od': key, 'cluster': cluster[key]} for key in cluster.keys()])
return cluster_df
def gen_od_feas(data):
data['o1'] = data['o'].apply(lambda x: float(x.split(',')[0]))
data['o2'] = data['o'].apply(lambda x: float(x.split(',')[1]))
data['d1'] = data['d'].apply(lambda x: float(x.split(',')[0]))
data['d2'] = data['d'].apply(lambda x: float(x.split(',')[1]))
data = data.drop(['o', 'd'], axis=1)
return data
def gen_plan_feas(data):
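    # Parse the JSON plan list of every query and derive: per-mode indicator
    # columns, distance/price/eta summary statistics, the modes of the extreme
    # plans, the first recommended mode, and a bag-of-modes text that is fed to
    # tf-idf + TruncatedSVD below.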
n = data.shape[0]
mode_list_feas = np.zeros((n, 12))
max_dist, min_dist, mean_dist, std_dist = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_price, min_price, mean_price, std_price = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
max_eta, min_eta, mean_eta, std_eta = np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
min_dist_mode, max_dist_mode, min_price_mode, max_price_mode, min_eta_mode, max_eta_mode, first_mode = np.zeros(
(n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,)), np.zeros((n,))
mode_texts = []
for i, plan in tqdm(enumerate(data['plans'].values)):
try:
cur_plan_list = json.loads(plan)
except:
cur_plan_list = []
if len(cur_plan_list) == 0:
mode_list_feas[i, 0] = 1
first_mode[i] = 0
max_dist[i] = -1
min_dist[i] = -1
mean_dist[i] = -1
std_dist[i] = -1
max_price[i] = -1
min_price[i] = -1
mean_price[i] = -1
std_price[i] = -1
max_eta[i] = -1
min_eta[i] = -1
mean_eta[i] = -1
std_eta[i] = -1
min_dist_mode[i] = -1
max_dist_mode[i] = -1
min_price_mode[i] = -1
max_price_mode[i] = -1
min_eta_mode[i] = -1
max_eta_mode[i] = -1
mode_texts.append('word_null')
else:
distance_list = []
price_list = []
eta_list = []
mode_list = []
for tmp_dit in cur_plan_list:
distance_list.append(int(tmp_dit['distance']))
if tmp_dit['price'] == '':
price_list.append(0)
else:
price_list.append(int(tmp_dit['price']))
eta_list.append(int(tmp_dit['eta']))
mode_list.append(int(tmp_dit['transport_mode']))
mode_texts.append(
' '.join(['word_{}'.format(mode) for mode in mode_list]))
distance_list = np.array(distance_list)
price_list = np.array(price_list)
eta_list = np.array(eta_list)
mode_list = np.array(mode_list, dtype='int')
mode_list_feas[i, mode_list] = 1
distance_sort_idx = np.argsort(distance_list)
price_sort_idx = np.argsort(price_list)
eta_sort_idx = np.argsort(eta_list)
max_dist[i] = distance_list[distance_sort_idx[-1]]
min_dist[i] = distance_list[distance_sort_idx[0]]
mean_dist[i] = np.mean(distance_list)
std_dist[i] = np.std(distance_list)
max_price[i] = price_list[price_sort_idx[-1]]
min_price[i] = price_list[price_sort_idx[0]]
mean_price[i] = np.mean(price_list)
std_price[i] = np.std(price_list)
max_eta[i] = eta_list[eta_sort_idx[-1]]
min_eta[i] = eta_list[eta_sort_idx[0]]
mean_eta[i] = np.mean(eta_list)
std_eta[i] = np.std(eta_list)
first_mode[i] = mode_list[0]
max_dist_mode[i] = mode_list[distance_sort_idx[-1]]
min_dist_mode[i] = mode_list[distance_sort_idx[0]]
max_price_mode[i] = mode_list[price_sort_idx[-1]]
min_price_mode[i] = mode_list[price_sort_idx[0]]
max_eta_mode[i] = mode_list[eta_sort_idx[-1]]
min_eta_mode[i] = mode_list[eta_sort_idx[0]]
feature_data = pd.DataFrame(mode_list_feas)
feature_data.columns = ['mode_feas_{}'.format(i) for i in range(12)]
feature_data['max_dist'] = max_dist
feature_data['min_dist'] = min_dist
feature_data['mean_dist'] = mean_dist
feature_data['std_dist'] = std_dist
feature_data['max_price'] = max_price
feature_data['min_price'] = min_price
feature_data['mean_price'] = mean_price
feature_data['std_price'] = std_price
feature_data['max_eta'] = max_eta
feature_data['min_eta'] = min_eta
feature_data['mean_eta'] = mean_eta
feature_data['std_eta'] = std_eta
feature_data['max_dist_mode'] = max_dist_mode
feature_data['min_dist_mode'] = min_dist_mode
feature_data['max_price_mode'] = max_price_mode
feature_data['min_price_mode'] = min_price_mode
feature_data['max_eta_mode'] = max_eta_mode
feature_data['min_eta_mode'] = min_eta_mode
feature_data['first_mode'] = first_mode
logger.info('mode tfidf...')
tfidf_enc = TfidfVectorizer(ngram_range=(1, 2))
tfidf_vec = tfidf_enc.fit_transform(mode_texts)
svd_enc = TruncatedSVD(n_components=10, n_iter=20, random_state=2019)
mode_svd = svd_enc.fit_transform(tfidf_vec)
mode_svd = pd.DataFrame(mode_svd)
mode_svd.columns = ['svd_mode_{}'.format(i) for i in range(10)]
data = pd.concat([data, feature_data, mode_svd], axis=1)
data = data.drop(['plans'], axis=1)
return data
def gen_profile_feas(data):
profile_data = read_profile_data()
x = profile_data.drop(['pid'], axis=1).values
svd = TruncatedSVD(n_components=20, n_iter=20, random_state=2019)
svd_x = svd.fit_transform(x)
svd_feas = pd.DataFrame(svd_x)
svd_feas.columns = ['svd_fea_{}'.format(i) for i in range(20)]
svd_feas['pid'] = profile_data['pid'].values
data['pid'] = data['pid'].fillna(-1)
data = data.merge(svd_feas, on='pid', how='left')
return data
def group_weekday_and_hour(row):
if row['weekday'] == 0 or row['weekday'] == 6:
w = 0
else:
w = row['weekday']
    if row['hour'] > 7 and row['hour'] < 18: # 8:00 - 17:59
h = row['hour']
elif row['hour'] >= 18 and row['hour'] < 21: # 18:00 - 21:00
h = 1
elif row['hour'] >= 21 or row['hour'] < 6: # 21:00 - 6:00
h = 0
    else: # 6:00 - 7:59
h = 2
return str(w) + '_' + str(h)
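# Illustrative usage (an assumption - the original pipeline may wire this up
# differently): attach the weekday/hour bucket as a label-encoded feature.
def gen_weekday_hour_feas(data):
    data['weekday_hour'] = data.apply(group_weekday_and_hour, axis=1)
    data['weekday_hour'] = LabelEncoder().fit_transform(data['weekday_hour'])
    return data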
def gen_ratio_feas(data):
data['dist-d-eta'] = data['mean_dist'] / data['mean_eta']
data['price-d-dist'] = data['mean_price'] / data['mean_dist']
data['price-d-eta'] = data['mean_price'] / data['mean_eta']
data['o1-d-d1'] = data['o1'] / data['d1']
data['o2-d-d2'] = data['o2'] / data['d2']
return data
def gen_fly_dist_feas(data):
data['fly-dist'] = ((data['d1'] - data['o1'])**2 + (data['d2'] - data['o2'])**2)**0.5
data['fly-dist-d-dist'] = data['fly-dist'] / data['mean_dist']
data['fly-dist-d-eta'] = data['fly-dist'] / data['mean_eta']
data['price-d-fly-dist'] = data['mean_price'] / data['fly-dist']
return data
def gen_aggregate_profile_feas(data):
aggr = data.groupby('pid')['sid'].agg(['count'])
aggr.columns = ['%s_%s' % ('sid', col) for col in aggr.columns.values]
aggr = aggr.reset_index()
aggr.loc[aggr['pid'] == -1.0,'sid_count'] = 0 # reset in case pid == -1
data = data.merge(aggr, how='left', on=['pid'])
return data
def gen_pid_feat(data):
feat = pd.read_csv(config.pid_feature_file)
data = data.merge(feat, how='left', on='pid')
return data
def gen_od_feat(data):
feat = pd.read_csv(config.od_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
logger.info('sid shape={}'.format(sid.shape))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
logger.info('feature shape={}'.format(feat.shape))
logger.info('feature columns={}'.format(feat.columns))
data = data.merge(feat, how='left', on='sid')
click_cols = [c for c in feat.columns if c.endswith('click')]
data.drop(click_cols, axis=1, inplace=True)
return data
def gen_od_cluster_feat(data):
feat = pd.read_csv(config.od_cluster_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
f = feat.copy()
feat = sid.merge(feat, how='left', left_on='o', right_on='od').drop(['od','o'], axis=1)
feat.rename(columns={'cluster': 'o_cluster'}, inplace=True)
feat = feat.merge(f, how='left', left_on='d', right_on='od').drop(['od','d'], axis=1)
feat.rename(columns={'cluster': 'd_cluster'}, inplace=True)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_eq_feat(data):
data['o1-eq-d1'] = (data['o1'] == data['d1']).astype(int)
data['o2-eq-d2'] = (data['o2'] == data['d2']).astype(int)
data['o-eq-d'] = data['o1-eq-d1']*data['o2-eq-d2']
data['o1-m-o2'] = np.abs(data['o1'] - data['o2'])
data['d1-m-d2'] = np.abs(data['d1'] - data['d2'])
data['od_area'] = data['o1-m-o2']*data['d1-m-d2']
data['od_ratio'] = data['o1-m-o2']/data['d1-m-d2']
return data
def gen_od_mode_cnt_feat(data):
feat = pd.read_csv(config.od_mode_cnt_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d'])
sid = pd.concat((tr_sid, te_sid))
feat = sid.merge(feat, how='left', on=['o','d']).drop(['o','d'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_weekday_hour_cnt_feat(data):
feat = pd.read_csv(config.weekday_hour_feature_file)
tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','req_time'])
te_sid = pd.read_csv(config.test_query_file, usecols=['sid','req_time'])
sid = pd.concat((tr_sid, te_sid))
sid['req_time'] = pd.to_datetime(sid['req_time'])
sid['hour'] = sid['req_time'].map(lambda x: x.hour)
sid['weekday'] = sid['req_time'].map(lambda x: x.weekday())
feat = sid.merge(feat, how='left', on=['hour','weekday']).drop(['hour','weekday','req_time'], axis=1)
data = data.merge(feat, how='left', on='sid')
return data
def gen_od_plan_agg_feat(data):
#feat = pd.read_csv(config.od_plan_agg_feature_file)
#tr_sid = pd.read_csv(config.train_query_file, usecols=['sid','o','d','req_time'])
#te_sid = pd.read_csv(config.test_query_file, usecols=['sid','o','d', 'req_time'])
#sid = pd.concat((tr_sid, te_sid))
#sid['req_time'] = pd.to_datetime(sid['req_time'])
#sid['hour'] = sid['req_time'].map(lambda x: x.hour)
#feat = sid.merge(feat, how='left', on=['o','d','hour']).drop(['o','d','hour','req_time'], axis=1)
feat = pd.read_csv(config.od_plan_agg_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_feat(data):
feat = pd.read_csv(config.mode_feature_file)
data = data.merge(feat, how='left', on='sid')
return data
def gen_mode_stats_feat(data):
    feat = pd.read_csv(config.od_stats_file)
"""
Methods to edit existing protocols to add:
- new traces
- reversal ramp (activating prepulse + deactivating ramp)
- interleaved pulse trains
"""
import pyabf
import numpy as np
import pandas as pd
import glob
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
plt.style.use("dark_background")
cmap = plt.cm.get_cmap("coolwarm")
# where output will be saved, if saving is enabled
out_dir = "./data/protocols/"
# whether to save output
save_output = False
def find_abf_epochs(fname):
data_dir = r"C:/Users/delbe/Downloads/wut/wut/Post_grad/UBC/Research/lab/data_files/delbert/"
fname += ".abf"
for root, dirs, files in os.walk(data_dir):
for name in files:
if fname in name:
path = os.path.join(root, name)
break
# open abf file
abf = pyabf.ABF(path)
# return epochs
p1s = []
levels = []
for i in abf.sweepList:
abf.setSweep(i)
p1s.append(abf.sweepEpochs.p1s)
levels.append(abf.sweepEpochs.levels)
return p1s, levels
class edit_existing_protocol():
def __init__(self, fname, out_name, csv_path=None):
if csv_path is None:
csv_path = r"C:/Users/delbe/Downloads/wut/wut/Post_grad/UBC/Research/lab/Github_repos/hcn-gating-kinetics/data/current_time_course/Pooled_2020/"
try:
df = pd.read_csv(csv_path + fname + ".csv", header=None, index_col=0)
# extract voltage command
N = int(df.shape[1]/2)
# check that there are equal number of current and voltage sweeps
if df.shape[1] != 2*N:
print(" Uneven number of current/voltage sweeps. Deleting last voltage sweep.")
df = df.iloc[:,:-1]
N = int(df.shape[1]/2)
# voltage protocol
self.df = df.iloc[:,N:]
except:
print(" %s not found in csv_path." % fname)
try:
# CellML csv export
df = pd.read_csv(fname, header=0, index_col=None)
df.index *= 1/2
self.df = df
except:
print(" Could not open `fname` as file.")
exit()
# sampling frequency
self.khz = int( 1/(df.index[1] - df.index[0]))
self.fname = fname
self.out_name = out_name
def create_reversal_ramp(self, Vact=-120, Tact=3000, Vramp=[-50, 10], Tramp=150, Tcap=10):
"""
Create steps for reversal ramp: maximally activating prepulse, followed by deactivating ramp.
`Vact` = prepulse voltage \\
`Tact` = duration of activation prepulse \\
`Vramp` = start and end voltages of deactivating ramp \\
`Tramp` = duration of reversal ramp
`Tcap` = short pulse of same voltage as `Vramp[0]` to cancel capacitive currents
Returns `ramp`, array containing prepulse and ramp command
"""
        # duration of capacitive step (hard-coded to 20 ms here, overriding the Tcap argument)
Tcap = 20
# slope of reversal ramp
dvdt = (Vramp[1] - Vramp[0])/Tramp
# times of reversal ramp
Tramp = np.arange(0, Tramp+Tcap, 1/self.khz)
# convert Tcap to samples
Tcap = int(Tcap * self.khz)
ramp = Tramp.copy()
ramp[:Tcap] = Vramp[0]
ramp[Tcap:] = [(dvdt*(t-Tramp[Tcap]) + Vramp[0]) for t in Tramp[Tcap:]]
return ramp
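    # Example usage (a sketch with an assumed recording name, not from the
    # original script):
    #   editor = edit_existing_protocol('20o08009', '20o08009_revramp')
    #   rev_ramp = editor.create_reversal_ramp(Vramp=[-50, 10], Tramp=150)
    #   t = np.arange(rev_ramp.size) / editor.khz   # time in ms
    #   plt.plot(t, rev_ramp)
    #   plt.show()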
def create_leak_ramp(self, volts=[-35, 35], thalf=500, khz=2, add_MT_step=True):
"""
Create array of voltages for leak ramp
"""
if add_MT_step:
out = np.zeros((thalf*2*khz + 2000*khz,))
else:
out = np.zeros((thalf*2*khz,))
out[:1000*khz] = -35
ts = np.arange(0, thalf, 1/khz)
out[1000*khz:(thalf+1000)*khz] = (ts*(volts[1]-volts[0])/(thalf)) + volts[0]
if add_MT_step:
out[(thalf+1000)*khz:-1000*khz] = (ts*(volts[0]-volts[1])/thalf) + volts[1]
t = 1000
while t > 400:
out[-t*khz:-(t-200)*khz] = -35
out[-(t-200)*khz:-(t-400)*khz] = 20
t -= 400
out[-t*khz:] = -35
else:
out[(thalf+1000)*khz:] = (ts*(volts[0]-volts[1])/thalf) + volts[1]
return out
def add_leak_ramp(self):
leak_ramp = self.create_leak_ramp(khz=int(1/self.df.index[1]))
out = pd.DataFrame(np.array([leak_ramp,]*self.df.shape[1])).T
try:
            out = pd.concat([out, self.df], axis=0, ignore_index=True)
from typing import List
from datetime import datetime
from pandas import DataFrame, Series
import pandas as pd
from pydantic import BaseModel, Field
from .measurements import IntensityForecast
from .mixes import GenerationMixDetails, MixComponent
class Region(BaseModel):
region_id: int = Field(..., alias="regionid")
dno_region: str = Field(..., alias="dnoregion")
short_name: str = Field(..., alias="shortname")
def to_series(self):
record = {
"region_id": self.region_id,
"dno_region": self.dno_region,
"short_name": self.short_name,
}
return Series(record)
class RegionGenerationMix(Region):
intensity: IntensityForecast
generation_mix: List[MixComponent] = Field(..., alias="generationmix")
def to_series(self):
record = Series(
{
**self.intensity.dict(),
**{item.fuel: item.percentage for item in self.generation_mix},
}
)
return super().to_series().append(record)
class RegionListDetails(BaseModel):
from_: datetime = Field(..., alias="from")
to: datetime
regions: List[RegionGenerationMix]
def to_dataframe(self) -> DataFrame:
record = Series({"from": self.from_, "to": self.to})
regions = [region.to_series().append(record) for region in self.regions]
        return DataFrame(regions)
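# Illustrative usage (a sketch, not part of the module): a bare Region is built
# from the upstream JSON field names (the pydantic aliases) and flattened with
# to_series(); RegionListDetails.to_dataframe() does the same for a full payload.
if __name__ == "__main__":
    region = Region(regionid=1, dnoregion="Example DNO Region", shortname="Example")
    print(region.to_series())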
"""
Tests for SARIMAX models
Author: <NAME>
License: Simplified-BSD
"""
import os
import warnings
from statsmodels.compat.platform import PLATFORM_WIN
import numpy as np
import pandas as pd
import pytest
from statsmodels.tsa.statespace import sarimax, tools
from .results import results_sarimax
from statsmodels.tools import add_constant
from statsmodels.tools.tools import Bunch
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_raises, assert_allclose
)
current_path = os.path.dirname(os.path.abspath(__file__))
realgdp_path = os.path.join('results', 'results_realgdpar_stata.csv')
realgdp_results = pd.read_csv(current_path + os.sep + realgdp_path)
coverage_path = os.path.join('results', 'results_sarimax_coverage.csv')
coverage_results = pd.read_csv(os.path.join(current_path, coverage_path))
class TestSARIMAXStatsmodels(object):
"""
Test ARIMA model using SARIMAX class against statsmodels ARIMA class
Notes
-----
Standard errors are quite good for the OPG case.
"""
@classmethod
def setup_class(cls):
cls.true = results_sarimax.wpi1_stationary
endog = cls.true['data']
        # Old results from statsmodels.arima.ARIMA, recorded before it was
        # removed so that this test can keep running. On old statsmodels these
        # could be reproduced with:
        # result_a = arima.ARIMA(endog, order=(1, 1, 1)).fit(disp=-1)
result_a = Bunch()
result_a.llf = -135.3513139733829
result_a.aic = 278.7026279467658
result_a.bic = 289.9513653682555
result_a.hqic = 283.27183681851653
result_a.params = np.array([0.74982449, 0.87421135, -0.41202195])
result_a.bse = np.array([0.29207409, 0.06377779, 0.12208469])
cls.result_a = result_a
cls.model_b = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
simple_differencing=True,
hamilton_representation=True)
cls.result_b = cls.model_b.fit(disp=-1)
def test_loglike(self):
assert_allclose(self.result_b.llf, self.result_a.llf)
def test_aic(self):
assert_allclose(self.result_b.aic, self.result_a.aic)
def test_bic(self):
assert_allclose(self.result_b.bic, self.result_a.bic)
def test_hqic(self):
assert_allclose(self.result_b.hqic, self.result_a.hqic)
def test_mle(self):
# ARIMA estimates the mean of the process, whereas SARIMAX estimates
# the intercept. Convert the mean to intercept to compare
params_a = self.result_a.params.copy()
params_a[0] = (1 - params_a[1]) * params_a[0]
assert_allclose(self.result_b.params[:-1], params_a, atol=5e-5)
def test_bse(self):
# Test the complex step approximated BSE values
cpa = self.result_b._cov_params_approx(approx_complex_step=True)
bse = cpa.diagonal()**0.5
assert_allclose(bse[1:-1], self.result_a.bse[1:], atol=1e-5)
def test_t_test(self):
import statsmodels.tools._testing as smt
# to trigger failure, un-comment the following:
# self.result_b._cache['pvalues'] += 1
smt.check_ttest_tvalues(self.result_b)
smt.check_ftest_pvalues(self.result_b)
class TestRealGDPARStata(object):
"""
Includes tests of filtered states and standardized forecast errors.
Notes
-----
Could also test the usual things like standard errors, etc. but those are
well-tested elsewhere.
"""
@classmethod
def setup_class(cls):
dlgdp = np.log(realgdp_results['value']).diff()[1:].values
cls.model = sarimax.SARIMAX(dlgdp, order=(12, 0, 0), trend='n',
hamilton_representation=True)
# Estimated by Stata
params = [
.40725515, .18782621, -.01514009, -.01027267, -.03642297,
.11576416, .02573029, -.00766572, .13506498, .08649569, .06942822,
-.10685783, .00007999607
]
cls.results = cls.model.filter(params)
def test_filtered_state(self):
for i in range(12):
assert_allclose(
realgdp_results.iloc[1:]['u%d' % (i+1)],
self.results.filter_results.filtered_state[i],
atol=1e-6
)
def test_standardized_forecasts_error(self):
assert_allclose(
realgdp_results.iloc[1:]['rstd'],
self.results.filter_results.standardized_forecasts_error[0],
atol=1e-3
)
class SARIMAXStataTests(object):
def test_loglike(self):
assert_almost_equal(
self.result.llf,
self.true['loglike'], 4
)
def test_aic(self):
assert_almost_equal(
self.result.aic,
self.true['aic'], 3
)
def test_bic(self):
assert_almost_equal(
self.result.bic,
self.true['bic'], 3
)
def test_hqic(self):
hqic = (
-2*self.result.llf +
2*np.log(np.log(self.result.nobs_effective)) *
self.result.params.shape[0]
)
assert_almost_equal(
self.result.hqic,
hqic, 3
)
def test_standardized_forecasts_error(self):
cython_sfe = self.result.standardized_forecasts_error
self.result._standardized_forecasts_error = None
python_sfe = self.result.standardized_forecasts_error
assert_allclose(cython_sfe, python_sfe)
class ARIMA(SARIMAXStataTests):
"""
ARIMA model
Stata arima documentation, Example 1
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = true['data']
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(endog, order=(1, 1, 1), trend='c',
*args, **kwargs)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestARIMAStationary(ARIMA):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestARIMAStationary, cls).setup_class(
results_sarimax.wpi1_stationary
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-7)
assert_allclose(self.result.bse[2], self.true['se_ma_opg'], atol=1e-7)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-7)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-7)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# finite difference, non-centered
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-1)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[1], self.true['se_ar_oim'], atol=1e-3)
assert_allclose(oim_bse[2], self.true['se_ma_oim'], atol=1e-2)
def test_bse_robust(self):
robust_oim_bse = self.result.cov_params_robust_oim.diagonal()**0.5
cpra = self.result.cov_params_robust_approx
robust_approx_bse = cpra.diagonal()**0.5
true_robust_bse = np.r_[
self.true['se_ar_robust'], self.true['se_ma_robust']
]
assert_allclose(robust_oim_bse[1:3], true_robust_bse, atol=1e-2)
assert_allclose(robust_approx_bse[1:3], true_robust_bse, atol=1e-3)
class TestARIMADiffuse(ARIMA):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls, **kwargs):
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = (
results_sarimax.wpi1_diffuse['initial_variance']
)
super(TestARIMADiffuse, cls).setup_class(results_sarimax.wpi1_diffuse,
**kwargs)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-7)
assert_allclose(self.result.bse[2], self.true['se_ma_opg'], atol=1e-7)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered : failure
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
# # finite difference, centered : failure
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
# assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[2], self.true['se_ma_oim'], atol=1e-1)
class AdditiveSeasonal(SARIMAXStataTests):
"""
ARIMA model with additive seasonal effects
Stata arima documentation, Example 2
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, order=(1, 1, (1, 0, 0, 1)), trend='c', *args, **kwargs
)
# Stata estimates the mean of the process, whereas SARIMAX estimates
# the intercept of the process. Get the intercept.
intercept = (1 - true['params_ar'][0]) * true['params_mean'][0]
params = np.r_[intercept, true['params_ar'], true['params_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-3
)
class TestAdditiveSeasonal(AdditiveSeasonal):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls):
super(TestAdditiveSeasonal, cls).setup_class(
results_sarimax.wpi1_seasonal
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[1], self.true['se_ar_opg'], atol=1e-6)
assert_allclose(self.result.bse[2:4], self.true['se_ma_opg'],
atol=1e-5)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-4)
assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-4)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[1], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[2:4], self.true['se_ma_oim'], atol=1e-1)
class Airline(SARIMAXStataTests):
"""
Multiplicative SARIMA model: "Airline" model
Stata arima documentation, Example 3
"""
@classmethod
def setup_class(cls, true, *args, **kwargs):
cls.true = true
endog = np.log(true['data'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, order=(0, 1, 1), seasonal_order=(0, 1, 1, 12),
trend='n', *args, **kwargs
)
params = np.r_[true['params_ma'], true['params_seasonal_ma'],
true['params_variance']]
cls.result = cls.model.filter(params)
def test_mle(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-4
)
class TestAirlineHamilton(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineHamilton, cls).setup_class(
results_sarimax.air2_stationary
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-6)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class TestAirlineHarvey(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineHarvey, cls).setup_class(
results_sarimax.air2_stationary, hamilton_representation=False
)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-6)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class TestAirlineStateDifferencing(Airline):
"""
Notes
-----
Standard errors are very good for the OPG and quite good for the complex
step approximation cases.
"""
@classmethod
def setup_class(cls):
super(TestAirlineStateDifferencing, cls).setup_class(
results_sarimax.air2_stationary, simple_differencing=False,
hamilton_representation=False
)
def test_bic(self):
# Due to the diffuse component of the state (which technically changes the
# BIC calculation - see Durbin and Koopman section 7.4), this is the
# best we can do for BIC
assert_almost_equal(
self.result.bic,
self.true['bic'], 0
)
def test_mle(self):
result = self.model.fit(method='nm', maxiter=1000, disp=0)
assert_allclose(
result.params, self.result.params,
atol=1e-3)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ma_opg'], atol=1e-6)
assert_allclose(self.result.bse[1], self.true['se_seasonal_ma_opg'],
atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
assert_allclose(bse[1], self.true['se_seasonal_ma_oim'], atol=1e-4)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered : failure with NaNs
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-2)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-2)
# # finite difference, centered : failure with NaNs
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ma_oim'], atol=1e-4)
# assert_allclose(bse[1], self.true['se_seasonal_ma_oim'],
# atol=1e-4)
def test_bse_oim(self):
# OIM covariance type
oim_bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(oim_bse[0], self.true['se_ma_oim'], atol=1e-1)
assert_allclose(oim_bse[1], self.true['se_seasonal_ma_oim'], atol=1e-1)
class Friedman(SARIMAXStataTests):
"""
ARMAX model: Friedman quantity theory of money
Stata arima documentation, Example 4
"""
@classmethod
def setup_class(cls, true, exog=None, *args, **kwargs):
cls.true = true
endog = np.r_[true['data']['consump']]
if exog is None:
exog = add_constant(true['data']['m2'])
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(
endog, exog=exog, order=(1, 0, 1), *args, **kwargs
)
params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
cls.result = cls.model.filter(params)
class TestFriedmanMLERegression(Friedman):
"""
Notes
-----
Standard errors are very good for the OPG and complex step approximation
cases.
"""
@classmethod
def setup_class(cls):
super(TestFriedmanMLERegression, cls).setup_class(
results_sarimax.friedman2_mle
)
def test_mle(self):
result = self.model.fit(disp=-1)
# Use ratio to make atol more meaningful since the parameter scales differ
ratio = result.params / self.result.params
assert_allclose(ratio, np.ones(5), atol=1e-2, rtol=1e-3)
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0:2], self.true['se_exog_opg'],
atol=1e-4)
assert_allclose(self.result.bse[2], self.true['se_ar_opg'], atol=1e-6)
assert_allclose(self.result.bse[3], self.true['se_ma_opg'], atol=1e-6)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0:2], self.true['se_exog_oim'], atol=1e-4)
assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-6)
assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-6)
# The below tests pass irregularly; they give a sense of the precision
# available with finite differencing
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
# assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
# assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
# assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
# assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
def test_bse_oim(self):
# OIM covariance type
bse = self.result.cov_params_oim.diagonal()**0.5
assert_allclose(bse[0], self.true['se_exog_oim'][0], rtol=1)
assert_allclose(bse[1], self.true['se_exog_oim'][1], atol=1e-2)
assert_allclose(bse[2], self.true['se_ar_oim'], atol=1e-2)
assert_allclose(bse[3], self.true['se_ma_oim'], atol=1e-2)
class TestFriedmanStateRegression(Friedman):
"""
Notes
-----
MLE is not very close and standard errors are not very close for any set of
parameters.
This is likely because we're comparing against the model where the
regression coefficients are also estimated by MLE. So this test should be
considered just a very basic "sanity" test.
"""
@classmethod
def setup_class(cls):
# Remove the regression coefficients from the parameters, since they
# will be estimated as part of the state vector
true = dict(results_sarimax.friedman2_mle)
exog = add_constant(true['data']['m2']) / 10.
true['mle_params_exog'] = true['params_exog'][:]
true['mle_se_exog'] = true['se_exog_opg'][:]
true['params_exog'] = []
true['se_exog'] = []
super(TestFriedmanStateRegression, cls).setup_class(
true, exog=exog, mle_regression=False
)
cls.true_params = np.r_[true['params_exog'], true['params_ar'],
true['params_ma'], true['params_variance']]
cls.result = cls.model.filter(cls.true_params)
def test_mle(self):
result = self.model.fit(disp=-1)
assert_allclose(
result.params, self.result.params,
atol=1e-1, rtol=2e-1
)
def test_regression_parameters(self):
# The regression effects are integrated into the state vector as
# the last two states (thus the index [-2:]). The filtered
# estimates of the state vector produced by the Kalman filter and
# stored in `filtered_state` for these state elements give the
# recursive least squares estimates of the regression coefficients
# at each time period. To get the estimates conditional on the
# entire dataset, use the filtered states from the last time
# period (thus the index [-1]).
assert_almost_equal(
self.result.filter_results.filtered_state[-2:, -1] / 10.,
self.true['mle_params_exog'], 1
)
# Loglikelihood (and so aic, bic) is slightly different when states are
# integrated into the state vector
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_bse(self):
# test defaults
assert_equal(self.result.cov_type, 'opg')
assert_equal(self.result._cov_approx_complex_step, True)
assert_equal(self.result._cov_approx_centered, False)
# default covariance type (opg)
assert_allclose(self.result.bse[0], self.true['se_ar_opg'], atol=1e-2)
assert_allclose(self.result.bse[1], self.true['se_ma_opg'], atol=1e-2)
def test_bse_approx(self):
# complex step
bse = self.result._cov_params_approx(
approx_complex_step=True).diagonal()**0.5
assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-1)
assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-1)
# The below tests do not pass
# with warnings.catch_warnings():
# warnings.simplefilter("ignore")
# # finite difference, non-centered :
# # failure (catastrophic cancellation)
# bse = self.result._cov_params_approx(
# approx_complex_step=False).diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-2)
# # finite difference, centered : failure (nan)
# cpa = self.result._cov_params_approx(
# approx_complex_step=False, approx_centered=True)
# bse = cpa.diagonal()**0.5
# assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-3)
# assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-3)
def test_bse_oim(self):
# OIM covariance type
bse = self.result._cov_params_oim().diagonal()**0.5
assert_allclose(bse[0], self.true['se_ar_oim'], atol=1e-1)
assert_allclose(bse[1], self.true['se_ma_oim'], atol=1e-1)
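# The comments in TestFriedmanStateRegression.test_regression_parameters above
# describe how, when mle_regression=False, the regression coefficients are
# carried in the state vector and their filtered estimates at the final time
# point are the recursive least squares estimates conditional on the full
# sample. The helper below is a minimal illustrative sketch of that extraction;
# it is not used by the tests, and it assumes the regression states occupy the
# last k_exog positions of the state vector (as they do for the model above).
def _example_state_regression_coefficients(results, k_exog):
    # The last `k_exog` rows of `filtered_state` hold the filtered regression
    # states; column -1 is the estimate conditional on all observations.
    return results.filter_results.filtered_state[-k_exog:, -1]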
class TestFriedmanPredict(Friedman):
"""
ARMAX model: Friedman quantity theory of money, prediction
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This follows the given Stata example, although it is not truly forecasting
because it compares prediction using the actual data (which is available in
the example but not used in the MLE parameter estimation) against dynamic
prediction of that data. Here `test_predict` matches the first case, and
`test_dynamic_predict` matches the second.
"""
@classmethod
def setup_class(cls):
super(TestFriedmanPredict, cls).setup_class(
results_sarimax.friedman2_predict
)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_predict(self):
assert_almost_equal(
self.result.predict(),
self.true['predict'], 3
)
def test_dynamic_predict(self):
dynamic = len(self.true['data']['consump'])-15-1
assert_almost_equal(
self.result.predict(dynamic=dynamic),
self.true['dynamic_predict'], 3
)
class TestFriedmanForecast(Friedman):
"""
ARMAX model: Friedman quantity theory of money, forecasts
Variation on:
Stata arima postestimation documentation, Example 1 - Dynamic forecasts
This is a variation of the Stata example, in which the endogenous data is
actually made to be missing so that the predict command must forecast.
As another unit test, we also compare against the case in Stata when
predict is used against missing data (so forecasting) with the dynamic
option also included. Note, however, that forecasting in state space models
amounts to running the Kalman filter against missing datapoints, so it is
not clear whether "dynamic" forecasting (where instead of missing
datapoints for lags, we plug in previous forecasted endog values) is
meaningful.
"""
@classmethod
def setup_class(cls):
true = dict(results_sarimax.friedman2_predict)
true['forecast_data'] = {
'consump': true['data']['consump'][-15:],
'm2': true['data']['m2'][-15:]
}
true['data'] = {
'consump': true['data']['consump'][:-15],
'm2': true['data']['m2'][:-15]
}
super(TestFriedmanForecast, cls).setup_class(true)
cls.result = cls.model.filter(cls.result.params)
# loglike, aic, bic are not the point of this test (they could pass, but we
# would have to modify the data so that they were calculated to
# exclude the last 15 observations)
def test_loglike(self):
pass
def test_aic(self):
pass
def test_bic(self):
pass
def test_forecast(self):
end = len(self.true['data']['consump'])+15-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, exog=exog),
self.true['forecast'], 3
)
def test_dynamic_forecast(self):
end = len(self.true['data']['consump'])+15-1
dynamic = len(self.true['data']['consump'])-1
exog = add_constant(self.true['forecast_data']['m2'])
assert_almost_equal(
self.result.predict(end=end, dynamic=dynamic, exog=exog),
self.true['dynamic_forecast'], 3
)
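# The TestFriedmanForecast docstring above notes that forecasting in a state
# space model amounts to running the Kalman filter over missing observations.
# The sketch below illustrates that equivalence on a simple AR(1): forecasting
# from a 100-observation sample matches in-sample prediction over ten trailing
# NaN values appended to the same data. It is illustrative only (not part of
# the Stata comparisons) and relies on the numpy / sarimax / assert_allclose
# imports at the top of this module.
def _example_forecast_equals_filtering_missing():
    np.random.seed(1234)
    endog = np.random.normal(size=100)
    params = [0.5, 1.0]  # AR(1) coefficient and innovation variance
    # Forecast ten steps beyond the sample
    res1 = sarimax.SARIMAX(endog, order=(1, 0, 0)).filter(params)
    forecast = res1.forecast(10)
    # Filter the same data with ten trailing missing values; prediction over
    # the missing span performs the same calculation
    endog_missing = np.r_[endog, [np.nan] * 10]
    res2 = sarimax.SARIMAX(endog_missing, order=(1, 0, 0)).filter(params)
    predicted = res2.predict(start=100, end=109)
    assert_allclose(predicted, forecast)
    return forecast, predicted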
class SARIMAXCoverageTest(object):
@classmethod
def setup_class(cls, i, decimal=4, endog=None, *args, **kwargs):
# Dataset
if endog is None:
endog = results_sarimax.wpi1_data
# Loglikelihood, parameters
cls.true_loglike = coverage_results.loc[i]['llf']
cls.true_params = np.array([
float(x) for x in coverage_results.loc[i]['parameters'].split(',')]
)
# Stata reports the standard deviation; make it the variance
cls.true_params[-1] = cls.true_params[-1]**2
# Test parameters
cls.decimal = decimal
# Compare using the Hamilton representation and simple differencing
kwargs.setdefault('simple_differencing', True)
kwargs.setdefault('hamilton_representation', True)
cls.model = sarimax.SARIMAX(endog, *args, **kwargs)
def test_loglike(self):
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=0.7 * 10**(-self.decimal)
)
def test_start_params(self):
# just a quick test that start_params is not throwing an exception
# (other than related to invertibility)
stat = self.model.enforce_stationarity
inv = self.model.enforce_invertibility
self.model.enforce_stationarity = False
self.model.enforce_invertibility = False
self.model.start_params
self.model.enforce_stationarity = stat
self.model.enforce_invertibility = inv
def test_transform_untransform(self):
model = self.model
stat, inv = model.enforce_stationarity, model.enforce_invertibility
true_constrained = self.true_params
# Sometimes the parameters given by Stata are not stationary and / or
# invertible, so we need to skip those transformations for those
# parameter sets
model.update(self.true_params)
par = model.polynomial_ar
psar = model.polynomial_seasonal_ar
contracted_psar = psar[psar.nonzero()]
model.enforce_stationarity = (
(model.k_ar == 0 or tools.is_invertible(np.r_[1, -par[1:]])) and
(len(contracted_psar) <= 1 or
tools.is_invertible(np.r_[1, -contracted_psar[1:]]))
)
pma = model.polynomial_ma
psma = model.polynomial_seasonal_ma
contracted_psma = psma[psma.nonzero()]
model.enforce_invertibility = (
(model.k_ma == 0 or tools.is_invertible(np.r_[1, pma[1:]])) and
(len(contracted_psma) <= 1 or
tools.is_invertible(np.r_[1, contracted_psma[1:]]))
)
unconstrained = model.untransform_params(true_constrained)
constrained = model.transform_params(unconstrained)
assert_almost_equal(constrained, true_constrained, 4)
model.enforce_stationarity = stat
model.enforce_invertibility = inv
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no exceptions are thrown when calculating any of the
# covariance matrix types
self.result.cov_params_default
self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
self.result.cov_params_robust_oim
self.result.cov_params_robust_approx
@pytest.mark.matplotlib
def test_plot_diagnostics(self, close_figures):
# Make sure that no exceptions are thrown during plot_diagnostics
self.result = self.model.filter(self.true_params)
self.result.plot_diagnostics()
def test_predict(self):
result = self.model.filter(self.true_params)
# Test that predict does not throw exceptions and produces output of the
# right shape
predict = result.predict()
assert_equal(predict.shape, (self.model.nobs,))
predict = result.predict(start=10, end=20)
assert_equal(predict.shape, (11,))
predict = result.predict(start=10, end=20, dynamic=10)
assert_equal(predict.shape, (11,))
# Test forecasts
if self.model.k_exog == 0:
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
assert_equal(predict.shape, (11,))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10)
forecast = result.forecast()
assert_equal(forecast.shape, (1,))
forecast = result.forecast(10)
assert_equal(forecast.shape, (10,))
else:
k_exog = self.model.k_exog
exog = np.r_[[0]*k_exog*11].reshape(11, k_exog)
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
assert_equal(predict.shape, (11,))
predict = result.predict(start=self.model.nobs,
end=self.model.nobs+10, dynamic=-10,
exog=exog)
exog = np.r_[[0]*k_exog].reshape(1, k_exog)
forecast = result.forecast(exog=exog)
assert_equal(forecast.shape, (1,))
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
rtol = 1e-6 if PLATFORM_WIN else 1e-13
assert_allclose(res2.llf, res1.llf, rtol=rtol)
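# SARIMAXCoverageTest.setup_class above squares the final parameter because
# Stata's arima output reports the innovation standard deviation (sigma),
# while SARIMAX is parameterized in terms of the innovation variance (sigma^2).
# A minimal sketch of that conversion, not used by the tests:
def _example_stata_sigma_to_variance(stata_params):
    # Copy the Stata parameter vector and square the final (sigma) entry so it
    # matches the variance parameterization expected by SARIMAX.
    params = np.array(stata_params, dtype=float)
    params[-1] = params[-1] ** 2
    return params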
class Test_ar(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
super(Test_ar, cls).setup_class(0, *args, **kwargs)
class Test_ar_as_polynomial(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = ([1, 1, 1], 0, 0)
super(Test_ar_as_polynomial, cls).setup_class(0, *args, **kwargs)
class Test_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3, 0, 0) noconstant vce(oim)
# save_results 2
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = 'c'
super(Test_ar_trend_c, cls).setup_class(1, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[0] = (1 - tps[1:4].sum()) * tps[0]
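# The trend test classes convert Stata's reported mean to the intercept form
# used by SARIMAX. For an AR(3) written in deviations from the mean,
#   y_t - mu = phi_1 (y_{t-1} - mu) + phi_2 (y_{t-2} - mu)
#              + phi_3 (y_{t-3} - mu) + eps_t,
# expanding gives the intercept form
#   y_t = (1 - phi_1 - phi_2 - phi_3) * mu + phi_1 y_{t-1} + ... + eps_t,
# which is the identity applied to `true_params` above and below. A minimal
# helper illustrating the conversion (not used by the tests):
def _example_mean_to_intercept(mean, ar_coeffs):
    # intercept = (1 - sum of AR coefficients) * mean
    return (1 - np.sum(ar_coeffs)) * mean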
class Test_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3, 0, 0) noconstant vce(oim)
# save_results 3
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = 'ct'
super(Test_ar_trend_ct, cls).setup_class(2, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(3, 0, 0) noconstant vce(oim)
# save_results 4
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_ar_trend_polynomial, cls).setup_class(3, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_ar_diff(SARIMAXCoverageTest):
# // AR and I(d): (p, d, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 2, 0) noconstant vce(oim)
# save_results 5
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 0)
super(Test_ar_diff, cls).setup_class(4, *args, **kwargs)
class Test_ar_seasonal_diff(SARIMAXCoverageTest):
# // AR and I(D): (p, 0, 0) x (0, D, 0, s)
# arima wpi, arima(3, 0, 0) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 6
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_ar_seasonal_diff, cls).setup_class(5, *args, **kwargs)
class Test_ar_diffuse(SARIMAXCoverageTest):
# // AR and diffuse initialization
# arima wpi, arima(3, 0, 0) noconstant vce(oim) diffuse
# save_results 7
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ar_diffuse, cls).setup_class(6, *args, **kwargs)
class Test_ar_no_enforce(SARIMAXCoverageTest):
# // AR: (p, 0, 0) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 0) noconstant vce(oim)
# save_results 1
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
kwargs['enforce_stationarity'] = False
kwargs['enforce_invertibility'] = False
kwargs['initial_variance'] = 1e9
kwargs['loglikelihood_burn'] = 0
super(Test_ar_no_enforce, cls).setup_class(6, *args, **kwargs)
# Reset loglikelihood burn, which gets automatically set to the number
# of states if enforce_stationarity = False
cls.model.ssm.loglikelihood_burn = 0
def test_init_keys_replicate(self):
mod1 = self.model
kwargs = self.model._get_init_kwds()
endog = mod1.data.orig_endog
exog = mod1.data.orig_exog
model2 = sarimax.SARIMAX(endog, exog, **kwargs)
# Fixes needed for edge case model
model2.ssm.initialization = mod1.ssm.initialization
res1 = self.model.filter(self.true_params)
res2 = model2.filter(self.true_params)
rtol = 1e-6 if PLATFORM_WIN else 1e-13
assert_allclose(res2.llf, res1.llf, rtol=rtol)
class Test_ar_exogenous(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3, 0, 0) noconstant vce(oim)
# save_results 8
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ar_exogenous, cls).setup_class(7, *args, **kwargs)
class Test_ar_exogenous_in_state(SARIMAXCoverageTest):
# // ARX
# arima wpi x, arima(3, 0, 0) noconstant vce(oim)
# save_results 8
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 0)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['mle_regression'] = False
super(Test_ar_exogenous_in_state, cls).setup_class(7, *args, **kwargs)
cls.true_regression_coefficient = cls.true_params[0]
cls.true_params = cls.true_params[1:]
def test_loglike(self):
# Putting the regression in the state vector gives a slightly different
# loglikelihood, so just check that it is approximately the same
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.llf,
self.true_loglike,
atol=2
)
def test_regression_coefficient(self):
# Test that the regression coefficient (estimated as the last filtered
# state estimate for the regression state) is the same as the Stata
# MLE estimate
self.result = self.model.filter(self.true_params)
assert_allclose(
self.result.filter_results.filtered_state[3][-1],
self.true_regression_coefficient,
self.decimal
)
class Test_ma(SARIMAXCoverageTest):
# // MA: (0, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 0, 3) noconstant vce(oim)
# save_results 9
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
super(Test_ma, cls).setup_class(8, *args, **kwargs)
class Test_ma_as_polynomial(SARIMAXCoverageTest):
# // MA: (0, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 0, 3) noconstant vce(oim)
# save_results 9
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, [1, 1, 1])
super(Test_ma_as_polynomial, cls).setup_class(8, *args, **kwargs)
class Test_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(0, 0, 3) noconstant vce(oim)
# save_results 10
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = 'c'
super(Test_ma_trend_c, cls).setup_class(9, *args, **kwargs)
class Test_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(0, 0, 3) noconstant vce(oim)
# save_results 11
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = 'ct'
super(Test_ma_trend_ct, cls).setup_class(10, *args, **kwargs)
class Test_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(0, 0, 3) noconstant vce(oim)
# save_results 12
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_ma_trend_polynomial, cls).setup_class(11, *args, **kwargs)
class Test_ma_diff(SARIMAXCoverageTest):
# // MA and I(d): (0, d, q) x (0, 0, 0, 0)
# arima wpi, arima(0, 2, 3) noconstant vce(oim)
# save_results 13
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 3)
super(Test_ma_diff, cls).setup_class(12, *args, **kwargs)
class Test_ma_seasonal_diff(SARIMAXCoverageTest):
# // MA and I(D): (p, 0, 0) x (0, D, 0, s)
# arima wpi, arima(0, 0, 3) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 14
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_ma_seasonal_diff, cls).setup_class(13, *args, **kwargs)
class Test_ma_diffuse(SARIMAXCoverageTest):
# // MA and diffuse initialization
# arima wpi, arima(0, 0, 3) noconstant vce(oim) diffuse
# save_results 15
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_ma_diffuse, cls).setup_class(14, *args, **kwargs)
class Test_ma_exogenous(SARIMAXCoverageTest):
# // MAX
# arima wpi x, arima(0, 0, 3) noconstant vce(oim)
# save_results 16
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 3)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_ma_exogenous, cls).setup_class(15, *args, **kwargs)
class Test_arma(SARIMAXCoverageTest):
# // ARMA: (p, 0, q) x (0, 0, 0, 0)
# arima wpi, arima(3, 0, 3) noconstant vce(oim)
# save_results 17
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 3)
super(Test_arma, cls).setup_class(16, *args, **kwargs)
class Test_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, arima(3, 0, 2) noconstant vce(oim)
# save_results 18
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = 'c'
super(Test_arma_trend_c, cls).setup_class(17, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, arima(3, 0, 2) noconstant vce(oim)
# save_results 19
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = 'ct'
super(Test_arma_trend_ct, cls).setup_class(18, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, arima(3, 0, 2) noconstant vce(oim)
# save_results 20
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_arma_trend_polynomial, cls).setup_class(19, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_arma_diff(SARIMAXCoverageTest):
# // ARMA and I(d): (p, d, q) x (0, 0, 0, 0)
# arima wpi, arima(3, 2, 2) noconstant vce(oim)
# save_results 21
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
super(Test_arma_diff, cls).setup_class(20, *args, **kwargs)
class Test_arma_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(D): (p, 0, q) x (0, D, 0, s)
# arima wpi, arima(3, 0, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 22
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_arma_seasonal_diff, cls).setup_class(21, *args, **kwargs)
class Test_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // ARMA and I(d) and I(D): (p, d, q) x (0, D, 0, s)
# arima wpi, arima(3, 2, 2) sarima(0, 2, 0, 4) noconstant vce(oim)
# save_results 23
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (0, 2, 0, 4)
super(Test_arma_diff_seasonal_diff, cls).setup_class(
22, *args, **kwargs)
class Test_arma_diffuse(SARIMAXCoverageTest):
# // ARMA and diffuse initialization
# arima wpi, arima(3, 0, 2) noconstant vce(oim) diffuse
# save_results 24
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_arma_diffuse, cls).setup_class(23, *args, **kwargs)
class Test_arma_exogenous(SARIMAXCoverageTest):
# // ARMAX
# arima wpi x, arima(3, 0, 2) noconstant vce(oim)
# save_results 25
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 0, 2)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_arma_exogenous, cls).setup_class(24, *args, **kwargs)
class Test_seasonal_ar(SARIMAXCoverageTest):
# // SAR: (0, 0, 0) x (P, 0, 0, s)
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 26
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
super(Test_seasonal_ar, cls).setup_class(25, *args, **kwargs)
class Test_seasonal_ar_as_polynomial(SARIMAXCoverageTest):
# // SAR: (0, 0, 0) x (P, 0, 0, s)
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 26
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = ([1, 1, 1], 0, 0, 4)
super(Test_seasonal_ar_as_polynomial, cls).setup_class(
25, *args, **kwargs)
class Test_seasonal_ar_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 27
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = 'c'
super(Test_seasonal_ar_trend_c, cls).setup_class(26, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_seasonal_ar_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 28
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ar_trend_ct, cls).setup_class(27, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_ar_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 29
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['trend'] = [1, 0, 0, 1]
super(Test_seasonal_ar_trend_polynomial, cls).setup_class(
28, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_ar_diff(SARIMAXCoverageTest):
# // SAR and I(d): (0, d, 0) x (P, 0, 0, s)
# arima wpi, arima(0, 2, 0) sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 30
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
super(Test_seasonal_ar_diff, cls).setup_class(29, *args, **kwargs)
class Test_seasonal_ar_seasonal_diff(SARIMAXCoverageTest):
# // SAR and I(D): (0, 0, 0) x (P, D, 0, s)
# arima wpi, sarima(3, 2, 0, 4) noconstant vce(oim)
# save_results 31
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 2, 0, 4)
super(Test_seasonal_ar_seasonal_diff, cls).setup_class(
30, *args, **kwargs)
class Test_seasonal_ar_diffuse(SARIMAXCoverageTest):
# // SAR and diffuse initialization
# arima wpi, sarima(3, 0, 0, 4) noconstant vce(oim) diffuse
# save_results 32
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ar_diffuse, cls).setup_class(31, *args, **kwargs)
class Test_seasonal_ar_exogenous(SARIMAXCoverageTest):
# // SARX
# arima wpi x, sarima(3, 0, 0, 4) noconstant vce(oim)
# save_results 33
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 0, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ar_exogenous, cls).setup_class(32, *args, **kwargs)
class Test_seasonal_ma(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 34
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
super(Test_seasonal_ma, cls).setup_class(33, *args, **kwargs)
class Test_seasonal_ma_as_polynomial(SARIMAXCoverageTest):
# // SMA
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 34
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, [1, 1, 1], 4)
super(Test_seasonal_ma_as_polynomial, cls).setup_class(
33, *args, **kwargs)
class Test_seasonal_ma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 35
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = 'c'
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_c, cls).setup_class(34, *args, **kwargs)
class Test_seasonal_ma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 36
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_ma_trend_ct, cls).setup_class(35, *args, **kwargs)
class Test_seasonal_ma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 37
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['trend'] = [1, 0, 0, 1]
kwargs['decimal'] = 3
super(Test_seasonal_ma_trend_polynomial, cls).setup_class(
36, *args, **kwargs)
class Test_seasonal_ma_diff(SARIMAXCoverageTest):
# // SMA and I(d): (0, d, 0) x (0, 0, Q, s)
# arima wpi, arima(0, 2, 0) sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 38
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
super(Test_seasonal_ma_diff, cls).setup_class(37, *args, **kwargs)
class Test_seasonal_ma_seasonal_diff(SARIMAXCoverageTest):
# // SMA and I(D): (0, 0, 0) x (0, D, Q, s)
# arima wpi, sarima(0, 2, 3, 4) noconstant vce(oim)
# save_results 39
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 2, 3, 4)
super(Test_seasonal_ma_seasonal_diff, cls).setup_class(
38, *args, **kwargs)
class Test_seasonal_ma_diffuse(SARIMAXCoverageTest):
# // SMA and diffuse initialization
# arima wpi, sarima(0, 0, 3, 4) noconstant vce(oim) diffuse
# save_results 40
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_ma_diffuse, cls).setup_class(39, *args, **kwargs)
class Test_seasonal_ma_exogenous(SARIMAXCoverageTest):
# // SMAX
# arima wpi x, sarima(0, 0, 3, 4) noconstant vce(oim)
# save_results 41
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (0, 0, 3, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_ma_exogenous, cls).setup_class(40, *args, **kwargs)
class Test_seasonal_arma(SARIMAXCoverageTest):
# // SARMA: (0, 0, 0) x (P, 0, Q, s)
# arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 42
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
super(Test_seasonal_arma, cls).setup_class(41, *args, **kwargs)
class Test_seasonal_arma_trend_c(SARIMAXCoverageTest):
# // 'c'
# arima wpi c, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 43
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = 'c'
super(Test_seasonal_arma_trend_c, cls).setup_class(42, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:1] = (1 - tps[1:4].sum()) * tps[:1]
class Test_seasonal_arma_trend_ct(SARIMAXCoverageTest):
# // 'ct'
# arima wpi c t, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 44
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = 'ct'
super(Test_seasonal_arma_trend_ct, cls).setup_class(
43, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
class Test_seasonal_arma_trend_polynomial(SARIMAXCoverageTest):
# // polynomial [1, 0, 0, 1]
# arima wpi c t3, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 45
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['trend'] = [1, 0, 0, 1]
kwargs['decimal'] = 3
super(Test_seasonal_arma_trend_polynomial, cls).setup_class(
44, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[:2] = (1 - tps[2:5].sum()) * tps[:2]
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no exceptions are thrown when calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diff(SARIMAXCoverageTest):
# // SARMA and I(d): (0, d, 0) x (P, 0, Q, s)
# arima wpi, arima(0, 2, 0) sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 46
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
super(Test_seasonal_arma_diff, cls).setup_class(45, *args, **kwargs)
class Test_seasonal_arma_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(D): (0, 0, 0) x (P, D, Q, s)
# arima wpi, sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 47
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 2, 2, 4)
super(Test_seasonal_arma_seasonal_diff, cls).setup_class(
46, *args, **kwargs)
class Test_seasonal_arma_diff_seasonal_diff(SARIMAXCoverageTest):
# // SARMA and I(d) and I(D): (0, d, 0) x (P, D, Q, s)
# arima wpi, arima(0, 2, 0) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 48
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 2, 0)
kwargs['seasonal_order'] = (3, 2, 2, 4)
super(Test_seasonal_arma_diff_seasonal_diff, cls).setup_class(
47, *args, **kwargs)
def test_results(self):
self.result = self.model.filter(self.true_params)
# Just make sure that no exceptions are thrown during summary
self.result.summary()
# Make sure no exceptions are thrown when calculating any of the
# covariance matrix types
self.result.cov_params_default
# Known failure due to the complex step inducing non-stationary
# parameters, causing a failure in the solve_discrete_lyapunov call
# self.result.cov_params_approx
self.result.cov_params_oim
self.result.cov_params_opg
class Test_seasonal_arma_diffuse(SARIMAXCoverageTest):
# // SARMA and diffuse initialization
# arima wpi, sarima(3, 0, 2, 4) noconstant vce(oim) diffuse
# save_results 49
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
kwargs['decimal'] = 3
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_seasonal_arma_diffuse, cls).setup_class(48, *args, **kwargs)
class Test_seasonal_arma_exogenous(SARIMAXCoverageTest):
# // SARMAX
# arima wpi x, sarima(3, 0, 2, 4) noconstant vce(oim)
# save_results 50
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (0, 0, 0)
kwargs['seasonal_order'] = (3, 0, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_seasonal_arma_exogenous, cls).setup_class(
49, *args, **kwargs)
class Test_sarimax_exogenous(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 51
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
super(Test_sarimax_exogenous, cls).setup_class(50, *args, **kwargs)
def test_results_params(self):
result = self.model.filter(self.true_params)
assert_allclose(self.true_params[1:4], result.arparams)
assert_allclose(self.true_params[4:6], result.maparams)
assert_allclose(self.true_params[6:9], result.seasonalarparams)
assert_allclose(self.true_params[9:11], result.seasonalmaparams)
class Test_sarimax_exogenous_not_hamilton(SARIMAXCoverageTest):
# // SARIMAX and exogenous
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# save_results 51
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['hamilton_representation'] = False
kwargs['simple_differencing'] = False
super(Test_sarimax_exogenous_not_hamilton, cls).setup_class(
50, *args, **kwargs)
class Test_sarimax_exogenous_diffuse(SARIMAXCoverageTest):
# // SARIMAX and exogenous diffuse
# arima wpi x, arima(3, 2, 2) sarima(3, 2, 2, 4) noconstant vce(oim)
# diffuse
# save_results 52
@classmethod
def setup_class(cls, *args, **kwargs):
kwargs['order'] = (3, 2, 2)
kwargs['seasonal_order'] = (3, 2, 2, 4)
endog = results_sarimax.wpi1_data
kwargs['exog'] = (endog - np.floor(endog))**2
kwargs['decimal'] = 2
kwargs['initialization'] = 'approximate_diffuse'
kwargs['initial_variance'] = 1e9
super(Test_sarimax_exogenous_diffuse, cls).setup_class(
51, *args, **kwargs)
class Test_arma_exog_trend_polynomial_missing(SARIMAXCoverageTest):
# // ARMA and exogenous and trend polynomial and missing
# gen wpi2 = wpi
# replace wpi2 = . in 10/19
# arima wpi2 x c t3, arima(3, 0, 2) noconstant vce(oim)
# save_results 53
@classmethod
def setup_class(cls, *args, **kwargs):
endog = np.r_[results_sarimax.wpi1_data]
# Note we're using the non-missing exog data
kwargs['exog'] = ((endog - np.floor(endog))**2)[1:]
endog[9:19] = np.nan
endog = endog[1:] - endog[:-1]
endog[9] = np.nan
kwargs['order'] = (3, 0, 2)
kwargs['trend'] = [0, 0, 0, 1]
kwargs['decimal'] = 1
super(Test_arma_exog_trend_polynomial_missing, cls).setup_class(
52, endog=endog, *args, **kwargs)
# Modify true params to convert from mean to intercept form
tps = cls.true_params
cls.true_params[0] = (1 - tps[2:5].sum()) * tps[0]
# Miscellaneous coverage tests
def test_simple_time_varying():
# This tests time-varying parameter regression in the case where the
# parameters are actually constant and the regression fit is perfect
endog = np.arange(100)*1.0
exog = 2*endog
mod = sarimax.SARIMAX(
endog,
exog=exog,
order=(0, 0, 0),
time_varying_regression=True,
mle_regression=False)
# Ignore the warning that MLE does not converge
with warnings.catch_warnings():
warnings.simplefilter("ignore")
res = mod.fit(disp=-1)
# Test that the estimated variances of the errors are essentially zero
# 5 digits necessary to accommodate 32-bit numpy/scipy with OpenBLAS 0.2.18
assert_almost_equal(res.params, [0, 0], 5)
# Test that the time-varying coefficients are all 0.5 (except the first
# one)
assert_almost_equal(res.filter_results.filtered_state[0][1:], [0.5]*99, 9)
def test_invalid_time_varying():
assert_raises(
ValueError,
sarimax.SARIMAX,
endog=[1, 2, 3],
mle_regression=True,
time_varying_regression=True)
def test_manual_stationary_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
res1 = mod1.filter([0.5, 0.2, 0.1, 1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
res2 = mod2.filter([0.5, 0.2, 0.1, 1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(
endog, order=(3, 0, 0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5, 0.2, 0.1, 1])
# Create the fourth model with stationary initialization specified in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0), initialization='stationary')
res4 = mod4.filter([0.5, 0.2, 0.1, 1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_manual_approximate_diffuse_initialization():
endog = results_sarimax.wpi1_data
# Create the first model to compare against
mod1 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod1.ssm.initialize_approximate_diffuse(1e9)
res1 = mod1.filter([0.5, 0.2, 0.1, 1])
# Create a second model with "known" initialization
mod2 = sarimax.SARIMAX(endog, order=(3, 0, 0))
mod2.ssm.initialize_known(res1.filter_results.initial_state,
res1.filter_results.initial_state_cov)
res2 = mod2.filter([0.5, 0.2, 0.1, 1])
# Create a third model with "known" initialization, but specified in kwargs
mod3 = sarimax.SARIMAX(
endog, order=(3, 0, 0),
initialization='known',
initial_state=res1.filter_results.initial_state,
initial_state_cov=res1.filter_results.initial_state_cov)
res3 = mod3.filter([0.5, 0.2, 0.1, 1])
# Create the fourth model with approximate diffuse initialization specified
# in kwargs
mod4 = sarimax.SARIMAX(endog, order=(3, 0, 0),
initialization='approximate_diffuse',
initial_variance=1e9)
res4 = mod4.filter([0.5, 0.2, 0.1, 1])
# Just test a couple of things to make sure the results are the same
assert_almost_equal(res1.llf, res2.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res2.filter_results.filtered_state)
assert_almost_equal(res1.llf, res3.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res3.filter_results.filtered_state)
assert_almost_equal(res1.llf, res4.llf)
assert_almost_equal(res1.filter_results.filtered_state,
res4.filter_results.filtered_state)
def test_results():
endog = results_sarimax.wpi1_data
mod = sarimax.SARIMAX(endog, order=(1, 0, 1))
res = mod.filter([0.5, -0.5, 1], cov_type='oim')
assert_almost_equal(res.arroots, 2.)
assert_almost_equal(res.maroots, 2.)
assert_almost_equal(res.arfreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.mafreq, np.arctan2(0, 2) / (2*np.pi))
assert_almost_equal(res.arparams, [0.5])
assert_almost_equal(res.maparams, [-0.5])
def test_misc_exog():
# Tests for missing data
nobs = 20
k_endog = 1
np.random.seed(1208)
endog = np.random.normal(size=(nobs, k_endog))
endog[:4, 0] = np.nan
exog1 = np.random.normal(size=(nobs, 1))
exog2 = np.random.normal(size=(nobs, 2))
index = pd.date_range('1970-01-01', freq='QS', periods=nobs)
endog_pd = pd.DataFrame(endog, index=index)
exog1_pd = pd.Series(exog1.squeeze(), index=index)
exog2_pd = pd.DataFrame(exog2, index=index)
models = [
sarimax.SARIMAX(endog, exog=exog1, order=(1, 1, 0)),
sarimax.SARIMAX(endog, exog=exog2, order=(1, 1, 0)),
sarimax.SARIMAX(endog, exog=exog2, order=(1, 1, 0),
simple_differencing=False),
sarimax.SARIMAX(endog_pd, exog=exog1_pd, order=(1, 1, 0)),
sarimax.SARIMAX(endog_pd, exog=exog2_pd, order=(1, 1, 0)),
sarimax.SARIMAX(endog_pd, exog=exog2_pd, order=(1, 1, 0),
simple_differencing=False),
]
for mod in models:
# Smoke tests
mod.start_params
res = mod.fit(disp=False)
res.summary()
res.predict()
res.predict(dynamic=True)
res.get_prediction()
oos_exog = np.random.normal(size=(1, mod.k_exog))
res.forecast(steps=1, exog=oos_exog)
res.get_forecast(steps=1, exog=oos_exog)
# Smoke tests for invalid exog
oos_exog = np.random.normal(size=(2, mod.k_exog))
assert_raises(ValueError, res.forecast, steps=1, exog=oos_exog)
oos_exog = np.random.normal(size=(1, mod.k_exog + 1))
assert_raises(ValueError, res.forecast, steps=1, exog=oos_exog)
# Test invalid model specifications
assert_raises(ValueError, sarimax.SARIMAX, endog, exog=np.zeros((10, 4)),
order=(1, 1, 0))
@pytest.mark.smoke
def test_datasets():
# Test that some unusual types of datasets work
np.random.seed(232849)
endog = np.random.binomial(1, 0.5, size=100)
exog = np.random.binomial(1, 0.5, size=100)
mod = sarimax.SARIMAX(endog, exog=exog, order=(1, 0, 0))
mod.fit(disp=-1)
def test_predict_custom_index():
np.random.seed(328423)
endog = pd.DataFrame(np.random.normal(size=50))
mod = sarimax.SARIMAX(endog, order=(1, 0, 0))
res = mod.smooth(mod.start_params)
out = res.predict(start=1, end=1, index=['a'])
assert_equal(out.index.equals(pd.Index(['a'])), True)