| prompt (string, 19 to 1.03M chars) | completion (string, 4 to 2.12k chars) | api (string, 8 to 90 chars) |
|---|---|---|
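Each row below pairs a truncated Python snippet (prompt) with the pandas call that completes it (completion) and the fully qualified name of that call (api). A minimal sketch of loading and inspecting such rows, assuming they have been exported locally to a Parquet file (the filename rows.parquet is hypothetical):

```python
import pandas as pd

# Hypothetical local export of this table; adjust the path to wherever the split is stored.
rows = pd.read_parquet("rows.parquet")
# All three columns are strings; print the observed length range of each.
for col in ("prompt", "completion", "api"):
    lengths = rows[col].str.len()
    print(col, int(lengths.min()), int(lengths.max()))
```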
import base64
import io
import pandas as pd
from dash import html
def check_non_default_index(df):
    # True if the dataframe's index is anything other than an unnamed default RangeIndex.
    return not (isinstance(df.index, pd.RangeIndex) and df.index.name is None)
def numeric_cols_in_df(df):
numeric_cols = ~ df.apply(lambda s:
| pd.to_numeric(s, errors='coerce') | pandas.to_numeric |
import copy
import numpy as np
import pandas as pd
import pytest
from jointly import ShakeMissingException, SyncPairTimeshift
from jointly.helpers import (
calculate_magnitude,
normalize,
verify_segments,
get_segment_data,
infer_freq,
get_max_ref_frequency,
get_stretch_factor,
stretch_signals,
get_equidistant_signals,
)
from jointly.types import SynchronizationPair
from tests.parquet_reader import get_parquet_test_data
def test_calculate_magnitude():
df = pd.DataFrame({"x": [1, 2, 3], "y": [-1.5, 2, 0], "z": [-1.5, 25, 1234]})
magnitude = calculate_magnitude(df, ["x", "y", "z"], "testname")
correct = pd.DataFrame(
{"testname": [2.345207879911715, 25.15949125081825, 1234.0036466720833]}
)
assert magnitude.equals(correct), "Should have correct magnitude results"
df["Magnitude"] = magnitude
# noinspection PyUnresolvedReferences
assert df["Magnitude"].equals(
magnitude["testname"]
), "Should be possible to set+rename result to old dataframe"
def test_normalize():
assert np.array_equal(normalize([1, 2, 3]), [-1, 0, 1]), "should be normalized"
assert np.array_equal(normalize([-1, 0, 1]), [-1, 0, 1]), "should be normalized"
with pytest.raises(ValueError):
normalize([])
with pytest.raises(ValueError):
normalize([1])
with pytest.raises(ZeroDivisionError):
normalize([0, 0])
def test_get_equidistant_signals():
test_data = get_parquet_test_data("faros-internal.parquet", 667)
result = get_equidistant_signals(test_data, frequency=1_000)
for col in result.columns:
assert infer_freq(result[col]) == 1000, f"{col} should have 1000 Hz"
result = get_equidistant_signals(test_data, frequency=500)
for col in result.columns:
assert infer_freq(result[col]) == 500, f"{col} should have 500 Hz"
result = get_equidistant_signals(test_data, frequency=1)
for col in result.columns:
assert infer_freq(result[col]) == 1, f"{col} should have 1 Hz"
def test_get_max_ref_frequency():
test_data = get_parquet_test_data("faros-internal.parquet", 667)
assert get_max_ref_frequency(test_data) == 500, "max(all) should be 500 Hz"
assert (
get_max_ref_frequency(test_data[["ACCELERATION_X", "ACCELERATION_Y"]]) == 100
), "max(acc) should be 100 Hz"
assert (
get_max_ref_frequency(test_data["ACCELERATION_Y"].to_frame()) == 100
), "max(acc) should be 100 Hz"
with pytest.raises(ValueError):
get_max_ref_frequency(test_data["ACCELERATION_X"])
with pytest.raises(ValueError):
get_max_ref_frequency(pd.DataFrame())
def test_infer_freq():
test_data = get_parquet_test_data("faros-internal.parquet", 667)
assert infer_freq(test_data["ECG"]) == 500, "ECG should be 500 Hz"
assert infer_freq(test_data["ACCELERATION_X"]) == 100, "Acc. should be 100 Hz"
assert infer_freq(test_data["ACCELERATION_Y"]) == 100, "Acc. should be 100 Hz"
assert infer_freq(test_data["ACCELERATION_Z"]) == 100, "Acc. should be 100 Hz"
def test_stretch_signals():
test_idx = pd.date_range(start="1/1/2018", periods=8)
test_data = [42] * 8
test_df =
| pd.DataFrame(test_data, test_idx) | pandas.DataFrame |
import numpy as np
import numpy.testing as npt
import pandas as pd
import pandas.testing as pdt
import pytest
from plateau.utils.pandas import (
aggregate_to_lists,
concat_dataframes,
drop_sorted_duplicates_keep_last,
is_dataframe_sorted,
mask_sorted_duplicates_keep_last,
merge_dataframes_robust,
sort_dataframe,
)
class TestConcatDataframes:
@pytest.fixture(params=[True, False])
def dummy_default(self, request):
if request.param:
return pd.DataFrame(data={"a": [-2, -3], "b": 1.0}, columns=["a", "b"])
else:
return None
@pytest.fixture(params=[True, False])
def maybe_iter(self, request):
if request.param:
return iter
else:
return list
def test_many(self, dummy_default, maybe_iter):
dfs = [
pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(
data={"a": [2, 3], "b": 2.0}, columns=["a", "b"], index=[10, 11]
),
pd.DataFrame(data={"a": [4, 5], "b": 3.0}, columns=["a", "b"]),
]
expected = pd.DataFrame(
{"a": [0, 1, 2, 3, 4, 5], "b": [1.0, 1.0, 2.0, 2.0, 3.0, 3.0]},
columns=["a", "b"],
)
actual = concat_dataframes(maybe_iter(dfs), dummy_default)
pdt.assert_frame_equal(actual, expected)
def test_single(self, dummy_default, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([df.copy()]), dummy_default)
pdt.assert_frame_equal(actual, df)
def test_default(self, maybe_iter):
df = pd.DataFrame(
data={"a": [0, 1], "b": 1.0}, columns=["a", "b"], index=[10, 11]
)
actual = concat_dataframes(maybe_iter([]), df)
pdt.assert_frame_equal(actual, df)
def test_fail_no_default(self, maybe_iter):
with pytest.raises(ValueError) as exc:
concat_dataframes(maybe_iter([]), None)
assert str(exc.value) == "Cannot concatenate 0 dataframes."
@pytest.mark.parametrize(
"dfs",
[
[pd.DataFrame({"a": [0, 1]})],
[pd.DataFrame({"a": [0, 1]}), pd.DataFrame({"a": [2, 3]})],
],
)
def test_whipe_list(self, dfs):
concat_dataframes(dfs)
assert dfs == []
@pytest.mark.parametrize(
"dfs,expected",
[
(
# dfs
[pd.DataFrame(index=range(3))],
# expected
pd.DataFrame(index=range(3)),
),
(
# dfs
[pd.DataFrame(index=range(3)), pd.DataFrame(index=range(2))],
# expected
pd.DataFrame(index=range(5)),
),
],
)
def test_no_columns(self, dfs, expected):
actual = concat_dataframes(dfs)
pdt.assert_frame_equal(actual, expected)
def test_fail_different_colsets(self, maybe_iter):
dfs = [pd.DataFrame({"a": [1]}), pd.DataFrame({"a": [1], "b": [2]})]
with pytest.raises(
ValueError, match="Not all DataFrames have the same set of columns!"
):
concat_dataframes(maybe_iter(dfs))
@pytest.mark.parametrize(
"df,columns",
[
(
# df
pd.DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1, 3, 2, 1], "b": [2, 2, 2, 1, 1, 1]}),
# columns
["a", "b"],
),
(
# df
pd.DataFrame({"a": [3, 2, 1], "b": [1, 2, 3]}, index=[1000, 2000, 3000]),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [3.0, 2.0, 1.0], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": ["3", "2", "1"], "b": [1, 2, 3]}),
# columns
["a"],
),
(
# df
pd.DataFrame({"a": [True, False], "b": [1, 2]}),
# columns
["a"],
),
(
# df
pd.DataFrame(
{
"a": [
pd.Timestamp("2018-01-03"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-01"),
],
"b": [1, 2, 3],
}
),
# columns
["a"],
),
(
# df
pd.DataFrame(
{"a": pd.Series(["3", "2", "1"]).astype("category"), "b": [1, 2, 3]}
),
# columns
["a"],
),
],
)
def test_sort_dataframe(df, columns):
expected = df.sort_values(columns).reset_index(drop=True)
actual = sort_dataframe(df, columns)
pdt.assert_frame_equal(actual, expected)
@pytest.mark.parametrize(
"df,columns,expected_mask",
[
(
# df
pd.DataFrame({"a": [1, 2, 3]}),
# columns
["a"],
# expected_mask
np.array([False, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3]}),
# columns
["a"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 2, 3]}),
# columns
["a"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 2, 3]}),
# columns
["a", "b"],
# expected_mask
np.array([False, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3], "b": [1, 1, 3]}),
# columns
["a", "b"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1]}),
# columns
["a"],
# expected_mask
np.array([False]),
),
(
# df
pd.DataFrame({"a": []}),
# columns
["a"],
# expected_mask
np.array([], dtype=bool),
),
(
# df
pd.DataFrame(
{
"a": [1, 1, 3],
"b": [1.0, 1.0, 3.0],
"c": ["a", "a", "b"],
"d": [True, True, False],
"e": [
pd.Timestamp("2018"),
pd.Timestamp("2018"),
pd.Timestamp("2019"),
],
"f": pd.Series(["a", "a", "b"]).astype("category"),
}
),
# columns
["a", "b", "c", "d", "e", "f"],
# expected_mask
np.array([True, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 2, 3, 4, 4, 5, 6, 6]}),
# columns
["a"],
# expected_mask
np.array([False, False, False, True, False, False, True, False]),
),
(
# df
pd.DataFrame({"a": [2, 2, 3]}),
# columns
[],
# expected_mask
np.array([False, False, False]),
),
(
# df
pd.DataFrame({"a": [1, 1, 3]}, index=[1000, 2000, 1]),
# columns
["a"],
# expected_mask
np.array([True, False, False]),
),
],
)
def test_sorted_duplicates_keep_last(df, columns, expected_mask):
actual_mask = mask_sorted_duplicates_keep_last(df, columns)
assert actual_mask.dtype == bool
npt.assert_array_equal(actual_mask, expected_mask)
actual_df = drop_sorted_duplicates_keep_last(df, columns)
expected_df = df[~expected_mask]
pdt.assert_frame_equal(actual_df, expected_df)
if columns:
# pandas crashes for empty column lists
pd_mask = df.duplicated(subset=columns, keep="last").values
pd_df = df.drop_duplicates(subset=columns, keep="last")
npt.assert_array_equal(pd_mask, expected_mask)
pdt.assert_frame_equal(pd_df, expected_df)
@pytest.mark.parametrize(
"df_input,by",
[
(
# df_input
pd.DataFrame({"x": [0], "y": [0], "v": ["a"]}),
# by
["x", "y"],
),
(
# df_input
pd.DataFrame({"x": [0, 0], "y": [0, 0], "v": ["a", "b"]}),
# by
["x", "y"],
),
(
# df_input
pd.DataFrame({"x": [0, 0], "y": [0, 0], "v": ["a", "a"]}),
# by
["x", "y"],
),
(
# df_input
pd.DataFrame(
{
"x": [1, 0, 0, 1, 1],
"y": [1, 0, 0, 0, 1],
"v": ["a", "b", "c", "d", "e"],
}
),
# by
["x", "y"],
),
(
# df_input
pd.DataFrame({"x": [], "y": [], "v": []}),
# by
["x", "y"],
),
(
# df_input
pd.DataFrame({"x": [0, 0], "y": [0, 0], "v": ["a", "a"]}),
# by
[],
),
],
)
def test_aggregate_to_lists(df_input, by):
data_col = "v"
# pandas is broken for empty DFs
if df_input.empty:
df_expected = df_input
else:
if by:
df_expected = df_input.groupby(by=by, as_index=False)[data_col].agg(
lambda series: list(series.values)
)
else:
df_expected = pd.DataFrame(
{data_col: pd.Series([list(df_input[data_col].values)])}
)
df_actual = aggregate_to_lists(df_input, by, data_col)
pdt.assert_frame_equal(df_actual, df_expected)
def test_is_dataframe_sorted_no_cols():
df = pd.DataFrame({})
with pytest.raises(ValueError, match="`columns` must contain at least 1 column"):
is_dataframe_sorted(df, [])
@pytest.mark.parametrize(
"df,columns",
[
(
# df
pd.DataFrame({"x": []}),
# columns
["x"],
),
(
# df
pd.DataFrame({"x": [], "y": [], "z": []}),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": [0, 1, 10]}),
# columns
["x"],
),
(
# df
pd.DataFrame({"x": [0, 1, 10], "y": [20, 21, 210]}),
# columns
["x", "y"],
),
(
# df
pd.DataFrame({"x": [0, 1, 1], "y": [10, 0, 1]}),
# columns
["x", "y"],
),
(
# df
pd.DataFrame({"x": [0, 1], "y": [1, 0]}),
# columns
["x"],
),
(
# df
pd.DataFrame(
{
"x": [0, 0, 0, 0, 1, 1, 1, 1],
"y": [0, 0, 1, 1, 0, 0, 1, 1],
"z": [0, 1, 0, 1, 0, 1, 0, 1],
}
),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": [0, 0], "y": [0, 0], "z": [0, 0]}),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": pd.Series(["1", "2"]).astype("category")}),
# columns
["x"],
),
],
)
def test_assert_df_sorted_ok(df, columns):
assert is_dataframe_sorted(df, columns)
@pytest.mark.parametrize(
"df,columns",
[
(
# df
pd.DataFrame({"x": [1, 0]}),
# columns
["x"],
),
(
# df
pd.DataFrame({"x": [0, 1, 1], "y": [0, 1, 0]}),
# columns
["x", "y"],
),
(
# df
pd.DataFrame({"x": [0, 0], "y": [0, 0], "z": [1, 0]}),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": [0, 0], "y": [1, 0], "z": [0, 0]}),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": [1, 0], "y": [0, 0], "z": [0, 0]}),
# columns
["x", "y", "z"],
),
(
# df
pd.DataFrame({"x": pd.Series(["2", "1"]).astype("category")}),
# columns
["x"],
),
],
)
def test_assert_df_sorted_no(df, columns):
assert not is_dataframe_sorted(df, columns)
@pytest.mark.parametrize(
"df1,df2,how,expected",
[
(
# df1
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"x": [0, 1], "v2": [21, 22]}),
# how
"inner",
# expected
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12], "v2": [21, 22]}),
),
(
# df1
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"x": [0, 1], "v2": [21, 22]}),
# how
"left",
# expected
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12], "v2": [21, 22]}),
),
(
# df1
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"x": [0], "v2": [21]}),
# how
"inner",
# expected
pd.DataFrame({"i": [0], "x": [0], "v1": [11], "v2": [21]}),
),
(
# df1
pd.DataFrame({"i": [0, 1], "x": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"x": [0], "v2": [21]}),
# how
"left",
# expected
pd.DataFrame(
{"i": [0, 1], "x": [0, 1], "v1": [11, 12], "v2": [21, np.nan]}
),
),
(
# df1
pd.DataFrame({"i": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"v2": [21, 22]}),
# how
"inner",
# expected
pd.DataFrame(
{"i": [0, 0, 1, 1], "v1": [11, 11, 12, 12], "v2": [21, 22, 21, 22]}
),
),
(
# df1
pd.DataFrame({"i": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"v2": [21, 22]}),
# how
"left",
# expected
pd.DataFrame(
{"i": [0, 0, 1, 1], "v1": [11, 11, 12, 12], "v2": [21, 22, 21, 22]}
),
),
(
# df1
pd.DataFrame({"i": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"v2": pd.Series([], dtype=int)}),
# how
"inner",
# expected
pd.DataFrame(
{
"i": pd.Series([], dtype=int),
"v1": pd.Series([], dtype=int),
"v2": pd.Series([], dtype=int),
}
),
),
(
# df1
pd.DataFrame({"i": [0, 1], "v1": [11, 12]}),
# df2
pd.DataFrame({"v2": pd.Series([], dtype=int)}),
# how
"left",
# expected
| pd.DataFrame({"i": [0, 1], "v1": [11, 12], "v2": [np.nan, np.nan]}) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from collections import OrderedDict
from datetime import datetime
import numpy as np
import pytest
from pandas.compat import lrange
from pandas import DataFrame, MultiIndex, Series, date_range, notna
import pandas.core.panel as panelm
from pandas.core.panel import Panel
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal,
makeCustomDataframe as mkdf, makeMixedDataFrame)
from pandas.tseries.offsets import MonthEnd
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class PanelTests(object):
panel = None
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
pytest.raises(TypeError, hash, c_empty)
pytest.raises(TypeError, hash, c)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class SafeForSparse(object):
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).iloc[0]
ops = ['add', 'sub', 'mul', 'truediv',
'floordiv', 'div', 'mod', 'pow']
for op in ops:
with pytest.raises(NotImplementedError):
getattr(p, op)(d, axis=0)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class CheckIndexing(object):
def test_delitem_and_pop(self):
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
tm.assert_frame_equal(panelc[0], panel[0])
tm.assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
tm.assert_frame_equal(panelc[1], panel[1])
tm.assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# bad shape
p = Panel(np.random.randn(4, 3, 2))
msg = (r"shape of value must be \(3, 2\), "
r"shape of given object was \(4, 2\)")
with pytest.raises(ValueError, match=msg):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notna(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notna(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_getitem_fancy_slice(self):
pass
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.loc[:, 22, [111, 333]] = b
assert_frame_equal(a.loc[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort_values()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.loc[0, :, 0] = b
assert_series_equal(df.loc[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.loc[:, 0, 0] = b
assert_series_equal(df.loc[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.loc[0, 0, :] = b
assert_series_equal(df.loc[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
        # GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Fails for 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
class TestPanel(PanelTests, CheckIndexing, SafeForSparse):
def test_constructor_cast(self):
# can't cast
data = [[['foo', 'bar', 'baz']]]
pytest.raises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
assert len(empty.items) == 0
assert len(empty.major_axis) == 0
assert len(empty.minor_axis) == 0
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=
| lrange(3) | pandas.compat.lrange |
import pytest
import collections
from pathlib import Path
import pandas as pd
from mbf_genomics import DelayedDataFrame
from mbf_genomics.annotator import Constant, Annotator
import pypipegraph as ppg
from pypipegraph.testing import run_pipegraph, force_load
from pandas.testing import assert_frame_equal
from mbf_genomics.util import find_annos_from_column
class LenAnno(Annotator):
def __init__(self, name):
self.columns = [name]
def calc(self, df):
return pd.DataFrame(
{self.columns[0]: ["%s%i" % (self.columns[0], len(df))] * len(df)}
)
@pytest.mark.usefixtures("no_pipegraph")
@pytest.mark.usefixtures("clear_annotators")
class Test_DelayedDataFrameDirect:
def test_create(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_create_from_df(self):
test_df = pd.DataFrame({"A": [1, 2]})
a = DelayedDataFrame("shu", test_df)
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
def test_write(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write()[1]
assert "/sha" in str(fn.parent.absolute())
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), test_df)
def test_write_excel(self):
test_df = pd.DataFrame({"A": [1, 2]})
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
assert Path("sha").exists()
assert_frame_equal(a.df, test_df)
assert a.non_annotator_columns == "A"
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_excel2(self):
data = {}
for i in range(0, 257):
c = "A%i" % i
d = [1, 1]
data[c] = d
test_df = pd.DataFrame(data)
def load():
return test_df
a = DelayedDataFrame("shu", load, result_dir="sha")
fn = a.write("sha.xls")[1]
assert fn.exists()
assert_frame_equal(pd.read_excel(fn), test_df)
def test_write_mangle(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
def load():
return test_df
a = DelayedDataFrame("shu", load)
assert_frame_equal(a.df, test_df)
assert (a.non_annotator_columns == ["A", "B"]).all()
def mangle(df):
df = df.drop("A", axis=1)
df = df[df.B == "c"]
return df
fn = a.write("test.csv", mangle)[1]
assert fn.exists()
assert_frame_equal(pd.read_csv(fn, sep="\t"), mangle(test_df))
def test_magic(self):
test_df = pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
a = DelayedDataFrame("shu", lambda: test_df)
assert hash(a)
assert a.name in str(a)
assert a.name in repr(a)
def test_annotator(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
a += Constant("column", "value")
a.annotate()
assert "column" in a.df.columns
assert (a.df["column"] == "value").all()
def test_add_non_anno(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(TypeError):
a += 5
def test_annotator_wrong_columns(self):
class WrongConstant(Annotator):
def __init__(self, column_name, value):
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({"shu": self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
with pytest.raises(ValueError):
a += WrongConstant("column", "value")
def test_annotator_minimum_columns(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
assert "Direct" in str(a.load_strategy)
class MissingCalc(Annotator):
column_names = ["shu"]
with pytest.raises(AttributeError):
a += MissingCalc()
class EmptyColumnNames(Annotator):
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNames()
class EmptyColumnNamesButCacheName(Annotator):
cache_name = "shu"
columns = []
def calc(self, df):
return pd.DataFrame({})
with pytest.raises(IndexError):
a += EmptyColumnNamesButCacheName()
class MissingColumnNames(Annotator):
def calc(self, df):
pass
with pytest.raises(AttributeError):
a += MissingColumnNames()
class NonListColumns(Annotator):
columns = "shu"
def calc(self, df):
pass
with pytest.raises(ValueError):
a += NonListColumns()
def test_DynamicColumNames(self):
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
class Dynamic(Annotator):
@property
def columns(self):
return ["a"]
def calc(self, df):
return pd.DataFrame({"a": ["x", "y"]})
a += Dynamic()
a.annotate()
assert_frame_equal(
a.df, pd.DataFrame({"A": [1, 2], "B": ["c", "d"], "a": ["x", "y"]})
)
def test_annos_added_only_once(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
        a += c  # this gets ignored
def test_annos_same_column_different_anno(self):
count = [0]
class CountingConstant(Annotator):
def __init__(self, column_name, value):
count[0] += 1
self.columns = [column_name]
self.value = value
def calc(self, df):
return pd.DataFrame({self.columns[0]: self.value}, index=df.index)
a = DelayedDataFrame(
"shu", lambda: pd.DataFrame({"A": [1, 2], "B": ["c", "d"]})
)
c = CountingConstant("hello", "c")
a += c
a.annotate()
assert "hello" in a.df.columns
assert count[0] == 1
c = CountingConstant("hello2", "c")
a += c
a.annotate()
assert "hello2" in a.df.columns
assert count[0] == 2
d = CountingConstant("hello2", "d")
assert c is not d
with pytest.raises(ValueError):
a += d
def test_annos_same_column_different_anno2(self):
class A(Annotator):
cache_name = "hello"
columns = ["aa"]
def calc(self, df):
return pd.DataFrame({self.columns[0]: "a"}, index=df.index)
class B(Annotator):
cache_name = "hello2"
columns = ["aa"]
def calc(self, df):
return
| pd.DataFrame({self.columns[0]: "a"}, index=df.index) | pandas.DataFrame |
import requests
import pandas as pd
import time
# This program collects all the photo information from the Ajapaik API into a dataframe and writes it to a CSV file.
# Returns roughly what percentage of the collection is done
# Takes "pageNR", the iteration we are currently on
def progress(pageNR):
pr = round(pageNR / 16822 * 100, 4)
return "\r" + "Collecting data: " + str(pr) + " %"
url ="https://opendata.ajapaik.ee/photos/?fbclid=IwAR3Zf_f2bM07WRrJuGijOQNoJmJawYV70hKgdUVAJPkWy5_rtNjKwwVdvpE&page=1"
list_of_pic_dictionaries = []
start_time = time.time()
pageNR = 1
while url is not None:
response = requests.get(url)
#in case something goes wrong
if response.status_code != 200:
print("Something went wrong, couldn't get the url content!")
print("Page number: " + str(pageNR))
print(url)
break
nextURl = response.json()["next"]
pictures = response.json()["results"]
#Iter over all the pictures on that page.
for i in range(len(pictures)):
pic = response.json()["results"][i]
list_of_pic_dictionaries.append(pic)
#if there are rephotos, add those too
for re in pic["rephotos"]:
list_of_pic_dictionaries.append(re)
url = nextURl
print(progress(pageNR), end="")
pageNR += 1
df =
| pd.DataFrame(list_of_pic_dictionaries) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed May 9 02:35:02 2018
@author: elvex
"""
"""Boite à outils de manipulation des base de données de tweets. """
#import json
import pandas as pd
import txt_analysis as TA
from math import log10
from glob import glob
from os.path import abspath
from re import split
from math import pi
from numpy import cos, sin
import datetime
def json2pd(adr):
"""
Convertit un json de tweets en base de donnée panda.
Entrée : l'adresse du json
Sortie : la base de donnée panda
"""
with open(adr, 'r') as f:
r = f.read()
bdd = pd.read_json(r, orient = 'records', lines = True)
bdd = bdd['user'].apply(pd.Series).join(bdd.drop('user', 1),
how = "left", lsuffix="_profile", rsuffix="_tweet")
return bdd
def filterBYlanguage(bdd, lan = 'fr'):
bdd = bdd[(bdd.lang_tweet == lan)]
return bdd
def keepNdropPD_txt(bdd):
bdd = bdd.loc[:, ["id_profile", "text"]]
return bdd
def aggregate_bddFiltered(bdd):
grp = bdd.groupby("id_profile")
bdd = grp.agg(["count", lambda x: "\n".join(x)])
bdd.columns = bdd.columns.droplevel(0)
bdd = bdd.rename(columns={ bdd.columns[0]: "counting", bdd.columns[1]: "text"})
return bdd
def json2bdd_agreg(json):
return aggregate_bddFiltered(keepNdropPD_txt(filterBYlanguage(json2pd(json))))
#bdd = aggregate_bddFiltered(keepNdropPD_txt(filterBYlanguage(json2pd(file))))
def concat_bdd_aggreg(bdd1, bdd2):
bdd21 = bdd1.counting.add(bdd2.counting, fill_value=0)
bdd22 = bdd1.text.add(bdd2.text, fill_value="")
bdd2 = pd.concat([bdd21, bdd22], axis=1)
return bdd2
def concat_dir(dirname):
path = abspath(dirname)
lst = glob(path+"/*.json")
bdd = json2bdd_agreg(lst[0])
for i in range(1, len(lst)):
try:
bdd2 = json2bdd_agreg(lst[i])
bdd = concat_bdd_aggreg(bdd, bdd2)
except ValueError as e:
print("Erreur '{}' sur l'étape {}".format(e, i))
continue
return bdd
def drop_profile(bdd, n = 2):
return bdd.loc[bdd["counting"] >= n, "text"]
def bdd2bow(bdd):
"""
Transforme un Data Frame panda de tweet en base donnée bag of words,
chaque collonne correspondant à un mot spécifique
et chaque ligne à un utilisateur,
avec comme contenu de la cellule le nombre d'occurence du mot dans le tweet.
Entrée : le dataframe panda
Sortie : le dataframe bag of word
"""
T = bdd["text"] if isinstance(bdd, pd.core.frame.DataFrame) else bdd
T = T.map(TA.formate_txt)
T = T.map(TA.bow)
bow = pd.DataFrame.from_dict(T.tolist())
bow = bow.fillna(0)
return bow
def filter_bow(bow, mini = 1):
"""
Permet de filtrer un dataframe bag of words en stipulant un nombre minimum
de tweets dans lequels les mots doivent apparaître.
Entrée :
bow : pandas dataframe bag of words
mini : entier stipulant le minimum
Sortie :
bow_f : le dataframe bag of words filtré
"""
test = (((bow > 0).sum()) >= mini).values
bow_f = bow.iloc[:, test]
return bow_f
def tf_idf(bow, lst = [], fonction = "idfi"):
"""
À partir d'un dataframe bag of words, applique une métrique de tf idf pour
pondérer le score des mots.
Entrée :
bow : dataframe bag of words
lst : liste de mots à garder dans le dataframe, si nul, tous les mots son gardés
fonction : fonction de pondération :
idfn => pas de pondération
idfi => prend en compte le nombre de tweets et la fréquence d'utilisation des mots
idfl => comme idfi mais en se laissant une sécurité sur le log10(0)
idfs => comme idfi mais en se laissant une autre sécurité sur le log10(0)
idff => prend simplement en compte la fréquence d'utilisation des mots
idfp => prend en compte le nombre de tweets et la fréquence d'utilisation des mots
"""
dico = {"idfi" : idfi,
"idfn" : idfn,
"idfl" : idfl,
"idfp" : idfp,
"idff" : idff,
"idfs" : idfs}
D, df = len(bow), (bow > 0).sum()
    f_poids = dico.get(fonction, idfi)
idf = bow * f_poids(D, df)
if len(lst) > 0: idf = intersection(bow, lst)
return idf
def intersection(bdd, lst):
"""Renvoie les colonnes d'une bdd pandas qui correspondent aux mots entrés.
Entrées :
bdd : panda dataframe
lst : liste de mots
Sortie :
nouvelle dataframe pandas
"""
s = set(map(str.lower, lst))
s = s.intersection(set(bdd.columns.values.tolist()))
return bdd.loc[:, list(s)]
def idfi(D, df):
return (D/df).apply(log10)
def idfn(D, df):
return 1
def idfl(D, df):
return (D/df + 1).apply(log10)
def idff(D, df):
return 1/df
def idfp(D, df):
return ((D - df) / df).apply(log10)
def idfs (D, df):
return (((D + 1) / df).apply(log10)) ** 2
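# A small hand-made usage sketch of the idfi weighting defined above (log10(D/df));
# the toy words and counts are made up for illustration, and pd/log10 come from the
# imports at the top of this module.
toy_bow = pd.DataFrame({"bonjour": [2, 0, 1], "paris": [0, 1, 1]})
toy_D, toy_df = len(toy_bow), (toy_bow > 0).sum()
# Both words appear in 2 of the 3 documents, so each column is scaled by log10(3/2) ~= 0.176.
toy_weighted = toy_bow * (toy_D / toy_df).apply(log10)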
def df2np(df):
"""Convertit un dataframe panda en matrice, renvoie cette matrice et le vecteur d'indice.
Entrée :
df, panda dataframe
Sortie :
idx : numpy array des indices de la dataframe
mtx : numpy array des valeurs de la dataframe
"""
mtx = df.values
idx = df.index.values
return (idx, mtx)
def dateBDD(bdd):
dico_month = {1 : 31, 2 : 28, 3 : 31, 4 : 30, 5 : 31, 6 : 30, 7 : 31,
8 : 31, 9 : 30, 10 : 31, 11 : 30, 12 : 30}
bdd = bdd.loc[:, ['id_tweet', 'created_at_tweet']].set_index('id_tweet')
bdd.created_at_tweet = bdd.created_at_tweet.apply(lambda x: list(map(int, split('[: -]', str(x)))))
bdd["hour"] = bdd.created_at_tweet.apply(lambda lst: (lst[-3] + lst[-2] / 60 + lst[-1] / (60**2)) * (pi/12))
bdd["hour_X"] = bdd.hour.apply(cos)
bdd["hour_Y"] = bdd.hour.apply(sin)
bdd["day_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[2] * pi / 6))
bdd["day_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[2] * pi / 6))
bdd["dayweek"] = bdd.created_at_tweet.apply(lambda x: datetime.date(x[0], x[1], x[2]).weekday())
bdd["dayweek_X"] = bdd.dayweek.apply(lambda x: cos(x * 2 * pi / 7))
bdd["dayweek_Y"] = bdd.dayweek.apply(lambda x: sin(x * 2 * pi / 7))
bdd["month_X"] = bdd.created_at_tweet.apply(lambda x: cos(x[1] * pi / dico_month[x[2]]))
bdd["month_Y"] = bdd.created_at_tweet.apply(lambda x: sin(x[1] * pi / dico_month[x[2]]))
bdd["year"] = bdd.created_at_tweet.apply(lambda x: x[0])
bdd.drop(labels = ["created_at_tweet", "hour", "dayweek"], axis = 1, inplace = True)
return bdd
def json2dateBDD(json):
return dateBDD(filterBYlanguage(json2pd(json)))
def date_dir(dirname):
path = abspath(dirname)
lst = glob(path+"/*.json")
bdd = json2dateBDD(lst[0])
for i in range(1, len(lst)):
try:
bdd2 = json2dateBDD(lst[i])
bdd = pd.concat([bdd, bdd2], axis=0)
except ValueError as e:
print("Erreur '{}' sur l'étape {}".format(e, i))
continue
return bdd
def print_means_words(km, col, lim = 10):
means = km.means
D =
| pd.DataFrame(means, columns=col) | pandas.DataFrame |
# -*- coding:UTF-8
import pandas as pd
import numpy as np
from collections import defaultdict
from gensim.corpora import Dictionary
from gensim.models import LdaMulticore
from sklearn.metrics.pairwise import cosine_distances, euclidean_distances
import json
RANDOM_SEED = 42
NUM_TOPICS = 300
def ngram_features(test_data):
def split_string_as_list_by_ngram(input_string,ngram_value):
input_string="".join([string for string in input_string if string.strip()])
length = len(input_string)
result_string=[]
for i in range(length):
if i + ngram_value < length + 1:
result_string.append(input_string[i:i+ngram_value])
return result_string
def compute_blue_ngram(x1_list,x2_list):
"""
compute blue score use ngram information. x1_list as predict sentence,x2_list as target sentence
:param x1_list:
:param x2_list:
:return:
"""
count_dict={}
count_dict_clip={}
        # 1. Count each token on the predicted-sentence side.
for token in x1_list:
if token not in count_dict:
count_dict[token]=1
else:
count_dict[token]=count_dict[token]+1
count=np.sum([value for key,value in count_dict.items()])
        # 2. Count tokens on the target-sentence side that also exist in the predicted sentence.
for token in x2_list:
if token in count_dict:
if token not in count_dict_clip:
count_dict_clip[token]=1
else:
count_dict_clip[token]=count_dict_clip[token]+1
#3. clip value to ceiling value for that token
count_dict_clip={key:(value if value<=count_dict[key] else count_dict[key]) for key,value in count_dict_clip.items()}
count_clip=np.sum([value for key,value in count_dict_clip.items()])
result=float(count_clip)/(float(count)+0.00000001)
return result
def cal_ngram(csv_data, ngram_value):
ngram_lt1 = []
ngram_lt2 = []
for i in range(csv_data.shape[0]):
x1_list = csv_data.iloc[i, 1].split(' ')
x2_list = csv_data.iloc[i, 2].split(' ')
res1 = compute_blue_ngram(split_string_as_list_by_ngram(x1_list, ngram_value),
split_string_as_list_by_ngram(x2_list,ngram_value))
res2 = compute_blue_ngram(split_string_as_list_by_ngram(x2_list, ngram_value),
split_string_as_list_by_ngram(x1_list,ngram_value))
ngram_lt1.append(res1)
ngram_lt2.append(res2)
return ngram_lt1,ngram_lt2
fea_dict = {}
for ngram in range(1, 9):
ngram_lt1,ngram_lt2 = cal_ngram(test_data, ngram)
fea_dict['ngram1'+str(ngram)] = ngram_lt1
fea_dict['ngram2'+str(ngram)] = ngram_lt2
save_data = pd.DataFrame(fea_dict)
return save_data.values
def lda_features(test_data):
train_data = pd.read_csv('data/aux/train_char_indexvec.csv')
documents = list(train_data.iloc[:, 1])
documents.extend(list(train_data.iloc[:, 2]))
documents = [item.split(' ') for item in documents]
dictionary = Dictionary(documents)
corpus = [dictionary.doc2bow(document) for document in documents]
model = LdaMulticore(
corpus,
num_topics=NUM_TOPICS,
id2word=dictionary,
random_state=RANDOM_SEED,
)
def compute_topic_distances(pair):
q1_bow = dictionary.doc2bow(pair[0])
q2_bow = dictionary.doc2bow(pair[1])
q1_topic_vec = np.array(model.get_document_topics(q1_bow, minimum_probability=0))[:, 1].reshape(1, -1)
q2_topic_vec = np.array(model.get_document_topics(q2_bow, minimum_probability=0))[:, 1].reshape(1, -1)
return [
cosine_distances(q1_topic_vec, q2_topic_vec)[0][0],
euclidean_distances(q1_topic_vec, q2_topic_vec)[0][0],
]
cosine_lt = []
euclidean_lt = []
for i in range(test_data.shape[0]):
cosine_val, euclidean_val = compute_topic_distances((test_data.iloc[i, 1].split(' '), test_data.iloc[i, 2].split(' ')))
cosine_lt.append(cosine_val)
euclidean_lt.append(euclidean_val)
lda_feas = pd.DataFrame({'cosine_distances':cosine_lt, 'euclidean_distances':euclidean_lt})
lda_feas = lda_feas.values
return lda_feas
def occur_features(test_data):
df_all_pairs = test_data.copy()
columns = list(df_all_pairs.columns.values)
columns[:3] = ['id', 'question1', 'question2']
df_all_pairs.columns = columns
df_unique_texts = pd.read_csv('data/test/occur_uniq_texts.csv')
with open('data/test/occur_counts.json') as f:
q_counts = json.load(f)
question_ids =
| pd.Series(df_unique_texts.index.values, index=df_unique_texts['question'].values) | pandas.Series |
"""Analysis of sentence similarity model event primitive prediction."""
from argparse import ArgumentParser, Namespace
from dataclasses import dataclass
import json
from pathlib import Path
from typing import Any, Iterable, List, Mapping, Sequence, cast
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sn
from sklearn.metrics import confusion_matrix
from pycurator.flask_backend.event_prediction import init_embeddings, init_ss_model, request_top_n
@dataclass
class SchemaAnalysisObj:
"""Store information about the schema for easy write out to CSV.
Attributes:
text: Text corresponding to the event step.
true_event_primitive: Event primitive assigned to this event step by annotator.
pred_event_primitives: Event primitives predicted by the sentence similarity model, listed
in order of most likely -> least likely.
"""
text: str
true_event_primitive: str
pred_event_primitives: Sequence[str]
def __iter__(self) -> Iterable[str]:
"""Represent the obj as an iterable."""
return iter([self.text, self.true_event_primitive, *self.pred_event_primitives])
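# Quick illustration of the iterable form above; the step text and primitive names are hypothetical.
_example_row = SchemaAnalysisObj(
    "buy tickets", "Transaction.buy", ["Transaction.buy", "Movement.transport"]
)
# Iterating flattens text, gold primitive, and predictions into one CSV-ready row.
assert list(_example_row) == ["buy tickets", "Transaction.buy", "Transaction.buy", "Movement.transport"]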
def main(args: Namespace) -> None:
"""Read in schemas, score for accuracy and MRR, write out results, and plot (optional).
Arguments:
args: Arguments read in from the command line.
"""
schema_path = args.input
schemas = read_in_schemas(schema_path)
ss_model = init_ss_model()
definition_emb, template_emb = init_embeddings(ss_model)
annotated_schemas = []
for schema in schemas:
for step in schema["steps"]:
description = step["name"].replace(",", ";").replace("-", " ")
top_5: List[str] = []
for pred in request_top_n(
description,
n=5,
ss_model=ss_model,
definition_embeddings=definition_emb,
template_embeddings=template_emb,
):
pred_type = cast(str, pred["type"])
top_5.append(pred_type)
s = SchemaAnalysisObj(
description,
".".join(step["@type"].split("/")[-1].split(".")[:2]),
top_5,
)
annotated_schemas.append(s)
path_to_schema_events = Path("all_schema_events.csv")
# Checkpoint in case we want to do anything else with this information
schemas_df = write_out_scores(annotated_schemas, path_to_schema_events)
output_path = args.output
analyze_results(output_path, schemas_df)
if args.plot:
plot(schemas_df)
def plot(results: pd.DataFrame) -> None:
"""Create confusion matrix from results of the analysis.
Arguments:
results: Results from events and predictions.
"""
labels = results.true_primitive.unique().tolist()
cm = confusion_matrix(
results.true_primitive.tolist(),
results.rec_primitive_1.tolist(),
labels=labels,
normalize="true",
)
plt.figure(figsize=(15, 15))
sn.heatmap(cm, cmap="YlGnBu", xticklabels=labels, yticklabels=labels)
plt.savefig("confusion_matrix.pdf", bbox_inches="tight", dpi=500)
def write_out_scores(
annotated_schemas: Sequence[SchemaAnalysisObj], path_to_schema_events: Path
) -> pd.DataFrame:
"""Write out results from predicting event types for each step.
Arguments:
annotated_schemas: List of SchemaAnalysisObj's containing all the important
information from the schemas and predictions over event types.
path_to_schema_events: Path to the schema events file.
Returns:
DataFrame containing matching event text, true event primitive, and top five recommended
primitives.
"""
df = pd.DataFrame.from_records(
annotated_schemas,
columns=[
"event_text",
"true_primitive",
"rec_primitive_1",
"rec_primitive_2",
"rec_primitive_3",
"rec_primitive_4",
"rec_primitive_5",
],
)
df.to_csv(path_to_schema_events)
return df
def mean_reciprocal_rank(rs: Sequence[Sequence[int]]) -> np.float64:
"""Calculate MRR (from https://gist.github.com/bwhite/3726239).
Arguments:
rs: Matches for each event primitive type.
Returns:
Calculated MRR.
"""
rs_gen = (np.asarray(r).nonzero()[0] for r in rs)
return cast(np.float64, np.mean([1.0 / (r[0] + 1) if r.size else 0.0 for r in rs_gen]))
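# Hand-checked example of the helper above: the first relevance list has its first match at
# rank 2 (reciprocal 1/2) and the second at rank 1 (1/1), so the mean reciprocal rank is 0.75.
assert mean_reciprocal_rank([[0, 1, 0], [1, 0, 0]]) == 0.75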
def analyze_results(output_path: Path, results: pd.DataFrame) -> None:
"""Analyze results of the event primitive prediction over schemas.
Specifically, calculate how many matches there are between the true event primitive and
predictive event primitives. Then get the mean reciprocal rank, and the accuracy for
predictions at the first prediction, top three predictions, and top five predictions.
Arguments:
output_path: Path to the output file.
results: Pandas DataFrame containing event text, true primitive, predicted primitives.
"""
temp_df =
| pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 15:24:03 2021
@author: suriy
"""
from numba import jit
# Fill the NaN values with the average
def avgfit(l):
na = pd.isna(l)
arr = []
for i in range(len(l)):
if na[i] == False:
arr.append(l[i])
avg = sum(arr)/len(arr)
fit_arr = []
for i in range(len(l)):
if na[i] == False:
fit_arr.append(l[i])
elif na[i] == True:
fit_arr.append(avg)
return(fit_arr)
#@jit(nopython=True)
# Weighted Mean Absolute Percentage Error
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = list(y_true), list(y_pred)
l = len(y_true)
num = 0
den = 0
for i in range(l):
num = num + (abs(y_pred[i] - y_true[i]))
den = den + y_true[i]
return abs(num/den) * 100
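# Small sanity check with made-up numbers: |110-100| + |190-200| = 20 against a true total
# of 300, so the weighted percentage error is roughly 6.67.
assert round(mean_absolute_percentage_error([100, 200], [110, 190]), 2) == 6.67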
# Importing the Libraries
import joblib
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import explained_variance_score
from sklearn.metrics import confusion_matrix, accuracy_score
from sklearn.utils import class_weight
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
import warnings
warnings.simplefilter(action='ignore')
# Importing the Dataset
dataset = pd.read_csv('antenna.csv')
#X
X = dataset.loc[:, dataset.columns != 'vswr']
X = X.loc[:, X.columns != 'gain']
X = X.loc[:, X.columns != 'bandwidth']
Xi = X.iloc[:, :-3]
Xi = pd.DataFrame(Xi)
#y
bw = avgfit(list(dataset['bandwidth']))
dataset['bandwidth'] = bw
for i in range(len(bw)):
if bw[i] < 100:
bw[i] = 'Class 1'
elif bw[i] >= 100 and bw[i] < 115:
bw[i] = 'Class 2'
elif bw[i] >= 115 and bw[i] < 120:
bw[i] = 'Class 3'
elif bw[i] >= 120 and bw[i] < 121:
bw[i] = 'Class 4'
elif bw[i] >= 121 and bw[i] < 122:
bw[i] = 'Class 5'
elif bw[i] >= 122 :
bw[i] = 'Class 6'
gain =avgfit(list(dataset['gain']))
dataset['gain'] = gain
for i in range(len(gain)):
if gain[i] < 1.3:
gain[i] = 'Class 1'
elif gain[i] >= 1.3 and gain[i] < 1.5:
gain[i] = 'Class 2'
elif gain[i] >= 1.5 and gain[i] < 2.4:
gain[i] = 'Class 3'
elif gain[i] >= 2.4 and gain[i] < 2.7:
gain[i] = 'Class 4'
elif gain[i] >= 2.7 and gain[i] < 2.9:
gain[i] = 'Class 5'
elif gain[i] >= 2.9 and gain[i] < 3.5:
gain[i] = 'Class 6'
vswr =avgfit(list(dataset['vswr']))
dataset['vswr'] = vswr
for i in range(len(vswr)):
if vswr[i] >= 1 and vswr[i] < 1.16:
vswr[i] = 'Class 1'
elif vswr[i] >= 1.16 and vswr[i] < 1.32:
vswr[i] = 'Class 2'
elif vswr[i] >= 1.32 and vswr[i] < 1.5:
vswr[i] = 'Class 3'
elif vswr[i] >= 1.5 and vswr[i] < 2:
vswr[i] = 'Class 4'
elif vswr[i] >= 2 and vswr[i] < 4:
vswr[i] = 'Class 5'
elif vswr[i] >= 4:
vswr[i] = 'Class 6'
y1 =
| pd.DataFrame(bw) | pandas.DataFrame |
# Import modules
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
# read book 1
book1 =
| pd.read_csv('datasets/book1.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
INPUT_DIR = "~/data/query-result/"
OUTPUT_DIR = "~/data/summary-stats/"
RES_LIST = ['cpu', 'mem', 'net_send', 'net_receive', 'disk_read', 'disk_write']
METRIC_LIST = ['_util_per_instance_95p', '_util_per_instance_max', '_util_per_pool', '_util_per_pod']
COST_MAP = {'action-classify': 0.248, 'action-gke': 1.22, 'db': 0.663, 'db-preempt': 0.663, 'druid-preempt': 0.663,
'druid-ssd-preempt': 0.704, 'mixed': 0.248, 'mixed-preempt': 0.248, 'nginx': 0.266, 'ping-gke': 0.69}
PERCENTILES = [.5, .95, .99]
END_TIME = 1514995200917
#END_TIME = 1515028900917
class StatsAggregator(object):
def __init__(self, metric_name):
self.metric_name = metric_name
def get_csv_list(self, res_list, data_dir):
csv_list = {}
for res in res_list:
csv_file = data_dir + res + self.metric_name + ".csv"
csv_list[res] = csv_file
print("Constructed list of csv filess:", csv_list)
return csv_list
def process_csv(self, res, csvfile, outfile):
df = pd.read_csv(csvfile, sep=',')
summary_df = pd.DataFrame()
for nodepool in df['node_pool'].unique():
stats_pool = df.loc[(df['node_pool'] == nodepool) & (df['time'] <= END_TIME)]
summary_df[nodepool] = stats_pool.value.describe(PERCENTILES)
print("Summarizing %d data points for resource %s, node pool %s"
%(len(stats_pool), res, nodepool))
fig_name = res + self.metric_name + "_" + nodepool
stats_pool.loc[:, 'time'] = pd.to_datetime(stats_pool['time'], unit='ms')
stats_pool.plot(x='time', y='value', title=fig_name)
plt.ylabel('Percent (%)')
plt.legend().set_visible(False)
plt.savefig(fig_name+".png")
print("\nWriting summary stats of %s resource for all node pools to %s\n" %(res, outfile))
summary_df.to_csv(outfile)
plt.close('all')
def compute_waste_res(self, res, csv_util, csv_num, outfile):
df_util = pd.read_csv(csv_util, sep=',')
df_num = pd.read_csv(csv_num, sep=',')
waste_list = []
for nodepool in df_util['node_pool'].unique():
util_pool = df_util.loc[(df_util['node_pool'] == nodepool) & (df_util['time'] <= END_TIME)][['time', 'value']]
num_pool = df_num.loc[(df_num['node_pool'] == nodepool) & (df_num['time'] <= END_TIME)][['time', 'value']]
num_avg = num_pool.value.mean()
print("Average provisioned instances for nodepool %s: %.1f" %(nodepool, num_avg))
util_pool['time'] = (util_pool['time'] / 1000).astype('int64')
num_pool['time'] = (num_pool['time'] / 1000).astype('int64')
df_joined = util_pool.set_index('time').join(num_pool.set_index('time'), how='inner',
lsuffix='_util', rsuffix='_num')
waste_num = ( (1 - df_joined.value_util/100) * df_joined.value_num ).mean()
waste_cost = waste_num * COST_MAP[nodepool]
waste_list.append({'node pool': nodepool, 'live instances': num_avg,
'unused instances': waste_num, 'wasted cost': waste_cost})
print("Average hourly cost wasted for %s resource in nodepool %s: %.2f" %(res, nodepool, waste_cost))
waste_df = pd.DataFrame(waste_list)
waste_df.to_csv(outfile)
def compute_waste_mixed(self, res_list, csv_list, csv_num, outfile):
if len(res_list) > 2:
print("Cannot combine more than two resources!")
return
df_util1 = pd.read_csv(csv_list[res_list[0]], sep=',')
df_util2 =
| pd.read_csv(csv_list[res_list[1]], sep=',') | pandas.read_csv |
# Stats Imports
import importlib
import platform
import random
from statistics import stdev
import GPUtil
import pandas as pd
# Data Brokers
import psutil as psutil
import yfinance as yf
# Utils
from datetime import timedelta, datetime
from typing import List
import os
from os import listdir
from os.path import isfile, join
# Custom Utils
import settings
from util.langUtil import strtotimedelta, timedeltatosigstr, normify_name, yahoolimitperiod_leftover, \
get_size_bytes, try_int, craft_instrument_filename, strtoyahootimestr, get_yahoo_intervals
# DataFrame
def retrieve_str(s: str,
interval: str,
period: str,
write: bool = False,
progress: bool = False, name=""):
return retrieve(s, datetime.now() - strtotimedelta(period), datetime.now(), interval, write, progress, name)
def retrieve(
s: str,
start: datetime,
end: datetime,
interval: str,
write: bool = False,
progress: bool = False, name=""):
period = end - start
if not name:
name = craft_instrument_filename(s, interval,
timedeltatosigstr(period)) # F'{s}-{interval}-{timedeltatosigstr(period)}'
print(F'Retrieving {name}, write: {write}')
if interval not in get_yahoo_intervals():
print(F'Note, interval of {interval} not accepted in yfinance api. Closest alternative will be used.')
interval = strtoyahootimestr(interval)
# Loop through smaller time periods if period is too big for given interval (denied by yfinance)
loop_period, n_loop, leftover = yahoolimitperiod_leftover(period, interval)
if strtotimedelta(interval) < timedelta(days=1) and period > timedelta(days=730):
print('Note: Data past 730 days will likely fail to download.')
df_array = []
# Retrieve data slowly... 'Failed download' will be printed by yfinance upon failure
for i in range(n_loop + 1):
if i == n_loop:
_loop_period = leftover
else:
_loop_period = loop_period
if _loop_period > timedelta(minutes=1):
df = yf.download(s,
start=start,
end=start + _loop_period,
interval=interval,
progress=progress)
df_array.append(df)
# Next starting date
start += _loop_period
final = pd.concat(df_array)
success = True
if len(final.index) <= 1:
success = False
if write and success:
write_df(final, name)
return final, success
def retrieve_ds(ds_name: str, write: bool = False, progress: bool = False):
df = load_dataset(ds_name)
if df is None:
print("Failed to open dataset. Cancelling retrieval.")
return False
for index, row in df.iterrows():
name = craft_instrument_filename(row['symbol'], row['interval'], row['period'])
df, suc = retrieve_str(row['symbol'], row['interval'], row['period'], write, progress, name)
if not suc:
remove_from_dataset(ds_name, row['symbol'], row['interval'], row['period'])
return True
# Read/Write from local
def write_df(df, name: str):
folder = F'static/data'
if not name.endswith('.csv'):
name += '.csv'
os.makedirs(folder, exist_ok=True)
df.to_csv(F'{folder}/{name}')
print(F'Creating {folder}/{name}')
def load_df(name: str):
folder = F'static/data'
if not name.endswith('.csv'):
name += '.csv'
path = F'{folder}/{name}'
if not file_exists(path):
print(F'{path} datafile not found')
return pd.DataFrame()
df = pd.read_csv(path, index_col=0)
print(F'Reading {path}')
return df
def load_df_sig(sym: str, inv: str, per: str):
path = F'{settings.DATA_FOLDER}/'
name = craft_instrument_filename(sym, inv, per)
# Get list of files that end with .csv
df_list = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith('.csv')]
for df in df_list:
if df == name:
return df
return None
def load_df_list():
path = F'{settings.DATA_FOLDER}/'
# Get list of files that end with .csv
df_list = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith('.csv')]
return df_list
def load_ds_df_list(ds_name: str):
dsf = load_dataset(ds_name)
df_list = []
# Get list of dsf's dfs
for i, row in dsf.iterrows():
# Find file
df = load_df_sig(row['symbol'], row['interval'], row['period'])
if df:
df_list.append(df)
pass
return df_list
def remove_all_df():
path = F'static/data/'
df_list = load_df_list()
for df in df_list:
os.remove(F'{path}{df}')
def remove_df(df):
path = settings.DATA_FOLDER
df_list = load_df_list()
for _df in df_list:
if df == _df:
os.remove(F'{path}/{df}')
break
def remove_ds_df(ds_name: str):
path = settings.DATA_FOLDER
df_list = load_ds_df_list(ds_name)
for _df in df_list:
os.remove(F'{path}/{_df}')
break
def get_random_df(ds_name: str):
folder = settings.DATASETDEF_FOLDER
if not ds_name.endswith('.csv'):
ds_name += '.csv'
# Get random row from dataset
dsf = pd.read_csv(F'{folder}/{ds_name}', index_col=0)
    r = random.randint(0, len(dsf) - 1)
row = dsf.iloc[r]
d_name = F'{craft_instrument_filename(row["symbol"], row["interval"], row["period"])}'
return d_name
# DataSet
def load_dataset(ds_name: str) -> pd.DataFrame:
folder = settings.DATASETDEF_FOLDER
if not ds_name.endswith('.csv'):
ds_name += '.csv'
full_path = F'{folder}/{ds_name}'
# If file doesn't exist
if not file_exists(full_path):
print(F'Failed to find file at {full_path}!')
return None
dsf = pd.read_csv(full_path, index_col=0)
print(F'Reading {full_path}')
return dsf
def load_dataset_list():
"""Load list of dataset files in datasetdef."""
path = settings.DATASETDEF_FOLDER
# Get list of files that end with .csv
df_list = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith('.csv')]
df_list.sort()
return df_list
def save_dataset(ds_name, dsf):
folder = settings.DATASETDEF_FOLDER
if not ds_name.endswith('.csv'):
ds_name += '.csv'
# save new dsf into ds_name
dsf.to_csv(F'{folder}/{ds_name}')
print(F'Saving into {folder}/{ds_name}')
def write_dataset(ds_name, dsf):
folder = settings.DATASETDEF_FOLDER
os.makedirs(folder, exist_ok=True)
if not ds_name.endswith('.csv'):
ds_name = F'{ds_name}.csv'
dsf.to_csv(F'{folder}/{ds_name}')
print(F'Creating dataset {folder}/{ds_name}')
def write_new_dataset(ds_name, dsf):
"""This function writes the dataset but also notes it as a change"""
write_dataset(ds_name, dsf)
add_as_dataset_change(ds_name)
print(F'Overwriting dataset {ds_name}.csv')
def write_new_empty_dataset(ds_name):
"""Same as above but dataset is empty"""
ds_name = normify_name(ds_name)
data = {
'symbol': [],
'interval': [],
'period': [],
}
dsf = pd.DataFrame(data)
write_dataset(ds_name, dsf)
add_as_dataset_change(ds_name)
print(F'Creating new dataset {ds_name}.csv')
def remove_from_dataset(ds_name: str, symbol: str, interval: str, period: str):
dsf = load_dataset(ds_name)
dsf = dsf.drop(dsf[(dsf.symbol == symbol) & (dsf.interval == interval) & (dsf.period == period)].index)
print(F"Removing {symbol}-{interval}-{period} from {ds_name}")
dsf = dsf.reset_index(drop=True)
write_dataset(ds_name, dsf)
def remove_dataset(ds_name: str):
folder = settings.DATASETDEF_FOLDER
print(F'Removing {ds_name} completely.')
if not ds_name.endswith('.csv'):
ds_name += '.csv'
full_path = F'{folder}/{ds_name}'
if not os.path.exists(full_path):
return False
if 'windows' in full_path.lower():
return False
os.remove(full_path)
return True
def number_of_datafiles(ds_name_list):
total_len = 0
for ds_name in ds_name_list:
dsf = load_dataset(ds_name)
total_len += len(dsf.index)
return total_len
# Dataset Files
def load_dataset_data_list(ds_name: str) -> List[str]:
folder = 'static/data/'
ds_df = load_df(ds_name)
d_list = []
for index, row in ds_df.iterrows():
# Form d_name (SYM_INT_PER)
d_name = F'{row["symbol"]}__{row["interval"]}__{row["period"]}'
d_list.append(d_name)
return d_list
def load_dataset_data(d_list: List[str]) -> List[pd.DataFrame]:
all_data = []
for d_name in d_list:
all_data.append(load_dataset(d_name))
return all_data
# Dataset-Changes
def get_dataset_changes() -> pd.DataFrame:
"""index, name"""
path = settings.DATASET_CHANGES_PATH
dsc = pd.read_csv(path, index_col=0)
return dsc
def update_all_dataset_changes(): # Downloading
dsc = get_dataset_changes()
print(F'Updating all datasets...')
    for index, row in dsc.iterrows():
retrieve_ds(row['name'])
clear_dataset_changes()
def update_specific_dataset_change(ds_name): # Downloading
print(F'Updating dataset {ds_name}')
# Removing specific dataset change flag
dsc = get_dataset_changes()
    for index, row in dsc.iterrows():
        if row['name'] == ds_name:
            dsc = dsc.drop([index])
# Download data
retrieve_ds(ds_name)
remove_dataset_change(ds_name)
def add_as_dataset_change(ds_name: str):
'''Changes to any instrument signature contained within the dataset or addition/subtraction of instruments
count as a dataset change.'''
path = settings.DATASET_CHANGES_PATH
dsc = pd.read_csv(F'{os.getcwd()}/{path}', index_col=0)
if not ds_name.endswith('.csv'):
ds_name = F'{ds_name}.csv'
print("---------------")
if ds_name in list(dsc['name']):
print(F'Overwriting dataset {ds_name} - Abort, already most updated')
else:
_new = pd.DataFrame([[ds_name]], columns=['name'], index=[len(dsc.index)])
dsc = dsc.append(_new)
print(F'Overwriting dataset {ds_name}')
write_dataset_change(dsc)
def write_dataset_change(dsc_df: pd.DataFrame):
path = settings.DATASET_CHANGES_PATH
print(F'Noting changes in datasetchanges.txt')
dsc_df.to_csv(path)
def replace_dataset_change(ds_name: str, new_name: str):
dsc = get_dataset_changes()
_exists = False
for i, row in dsc.iterrows():
if row['name'].lower() == ds_name.lower():
_exists = True
if _exists:
remove_dataset_change(ds_name)
add_as_dataset_change(new_name)
def remove_dataset_change(ds_name: str):
dsc = get_dataset_changes()
    dsc = dsc.drop(dsc[dsc.name == ds_name].index)
set_dataset_changes(dsc)
def clear_dataset_changes():
path = settings.DATASET_CHANGES_PATH
df = pd.DataFrame(columns=['name'])
df.to_csv(path)
def set_dataset_changes(dsc: pd.DataFrame):
path = settings.DATASET_CHANGES_PATH
dsc.to_csv(path)
def rename_dataset(ds_name: str, new_name: str):
# Get df
dsf = load_dataset(ds_name)
# Create new file
write_dataset(new_name, dsf)
# destroy old file
remove_dataset(ds_name)
# If file in dataset_changes, remove and update if removed (otherwise, ignore)
replace_dataset_change(ds_name, new_name)
# List of instruments
def load_speed_suggestions():
return settings.SUGGESTIONS['simulation']['speed']
def load_contract_size_suggestions():
return settings.SUGGESTIONS['contract_size']
def load_flat_commission_suggestions():
return settings.SUGGESTIONS['flat_commission']
def load_capital_suggestions():
return settings.SUGGESTIONS['capital']
def load_lag_suggestions():
return settings.SUGGESTIONS['lag']
def load_leverage_suggestions():
return settings.SUGGESTIONS['leverage']
def load_optim_depth_suggestions():
return settings.SUGGESTIONS['optim_depth']
def load_optim_width_suggestions():
return settings.SUGGESTIONS['optim_width']
def load_setting(name: str):
return settings.SUGGESTIONS[name]
def load_instrument_type_suggestions():
return settings.SUGGESTIONS['instrument_type']
def load_symbol_suggestions() -> pd.Series:
common_symbols = F'static/common/common_symbols.txt'
ss_df = pd.read_csv(common_symbols, index_col=0)
return ss_df['symbol']
def write_symbol_suggestions(ss_df: pd.DataFrame):
common_symbols = F'static/common/common_symbols.txt'
ss_df.to_csv(common_symbols)
def add_symbol_suggestion(ss_add):
ss_df = load_symbol_suggestions()
ss_df = ss_df.append(ss_add)
write_symbol_suggestions(ss_df)
def load_interval_suggestions():
common_intervals = F'static/common/common_intervals.txt'
is_df = pd.read_csv(common_intervals, index_col=0)
return is_df['interval']
def write_interval_suggestions(is_df: pd.DataFrame):
common_intervals = F'static/common/common_intervals.txt'
is_df.to_csv(common_intervals)
def add_interval_suggestion():
is_df = load_interval_suggestions()
write_interval_suggestions(is_df)
def load_period_suggestions():
common_periods = F'static/common/common_periods.txt'
ps_df = pd.read_csv(common_periods, index_col=0)
return ps_df['period']
def write_period_suggestions(ps_df: pd.DataFrame):
common_periods = F'static/common/common_periods.txt'
ps_df.to_csv(common_periods)
def add_period_suggestions(ps2: pd.DataFrame):
    ps_df = load_period_suggestions()
ps_df = ps_df.append(ps2)
write_period_suggestions(ps_df)
# Simulation
def load_sim_speed_suggestions():
return settings.SUGGESTIONS['sim_speed']
# Check
def is_valid_dataset():
return True
def is_valid_df():
return True
# Data Transformation Util
def table_to_dataframe(data):
return pd.DataFrame(data)
def dataframe_to_table(df):
table = []
return table
# Trade Advisors/Robots
def load_trade_advisor_list():
path = F'robot'
# Get list of files that end with .py
robot_list = [os.path.splitext(f)[0] for f in listdir(path) if isfile(join(path, f)) and
f.endswith('.py') and '__init__' not in f]
return robot_list
# Algos
def load_algo_list():
path = F'algo'
# Get list of files that end with .py
robot_list = [os.path.splitext(f)[0] for f in listdir(path) if isfile(join(path, f)) and
f.endswith('.py') and '__init__' not in f]
return robot_list
# IVar
def ivar_to_list(idf: pd.DataFrame):
ivar = []
for col in idf:
if not col == 'ivar_name':
ivar.append(idf.loc[:, col])
return ivar
def load_ivar(ta_name: str, ivar_name: str):
idf = load_ivar_df(ta_name)
return idf[idf.index == ivar_name]
def load_ivar_vars(ta_name: str, ivar_name: str):
    keys = list(load_ivar(ta_name, ivar_name).keys())
    return [key for key in keys if key not in ['name', 'fitness', 'type']]
def load_ivar_as_dict(ta_name: str, ivar_name: str):
idf = load_ivar(ta_name, ivar_name)
ivar_dict = {}
for col in idf.columns:
ivar_dict.update({
col: {
'default': idf[col][0],
}
})
ivar_dict.update({
'name': {
'default': idf.index[-1]
}
})
return ivar_dict
def load_ivar_df(ta_name: str, meta=False) -> pd.DataFrame:
"""Load iVar entry point"""
folder = F'{settings.ROBOT_FOLDER}{settings.IVAR_SUB_FOLDER}'
ivar_file = F'{ta_name}_ivar'
path = F'{folder}/{ivar_file}.csv'
if not file_exists(path):
generate_ivar(ta_name)
idf = pd.read_csv(path, index_col='name')
# Strip meta attributes
if not meta:
if 'type' in idf.columns:
idf.drop('type', inplace=True, axis=1)
if 'fitness' in idf.columns:
idf.drop('fitness', inplace=True, axis=1)
return idf
def load_ivar_as_list(ta_name: str, ivar_name: str):
"""Robot takes in input as a List."""
idf = load_ivar(ta_name, ivar_name)
ivars = []
for col in idf.columns:
ivars.append(idf[col][0])
return ivars
def load_ivar_list(ta_name: str):
"""Returns IVar names only"""
idf = load_ivar_df(ta_name)
return list(idf.index)
def load_ivar_file_list():
path = F'robot/ivar'
# Get list of files that end with .csv
ivar_file_list = [f for f in listdir(path) if isfile(join(path, f)) and f.endswith('.csv')]
return ivar_file_list
def generate_ivar(ta_name: str):
folder = F'{settings.ROBOT_FOLDER}{settings.IVAR_SUB_FOLDER}'
ivar_file = F'{ta_name}_ivar'
path = F'{folder}/{ivar_file}.csv'
# args_str = eval(F'{ta_name}.{ta_name}.ARGS_STR')
# args = eval(F'{ta_name}.{ta_name}.ARGS_DEFAULT')
module = importlib.import_module(F'robot.{ta_name}')
globals().update(
{n: getattr(module, n) for n in module.__all__} if hasattr(module, '__all__')
else
{k: v for (k, v) in module.__dict__.items() if not k.startswith('_')
})
args_dict = eval(F'{ta_name}.ARGS_DICT')
data = {
# meta
'name': ['*Default'],
'fitness': [-1],
'type': ['default'],
}
# for i in range(len(args_str)):
# data.update({args_str[i]: args[i]})
for key in args_dict.keys():
data.update({
key: [args_dict[key]['default']]
})
df = pd.DataFrame.from_dict(data)
df.to_csv(path, index=False)
def generate_algo_ivar(algo_name: str):
folder = F'{settings.ALGO_FOLDER}{settings.IVAR_SUB_FOLDER}'
ivar_file = F'{algo_name}_ivar'
path = F'{folder}/{ivar_file}.csv'
# todo
pass
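# Hedged sketch only: one possible way to fill in the todo above, mirroring
# generate_ivar(). It assumes algo modules live in the 'algo' package and expose
# an ARGS_DICT like the robots do - both are assumptions, not facts from this
# codebase, so the helper is kept separate from the stub.
def _generate_algo_ivar_sketch(algo_name: str):
    folder = F'{settings.ALGO_FOLDER}{settings.IVAR_SUB_FOLDER}'
    path = F'{folder}/{algo_name}_ivar.csv'
    module = importlib.import_module(F'algo.{algo_name}')
    # ARGS_DICT may sit on a class named after the module or at module level
    holder = getattr(module, algo_name, module)
    args_dict = getattr(holder, 'ARGS_DICT', {})
    data = {'name': ['*Default'], 'fitness': [-1], 'type': ['default']}
    for key in args_dict.keys():
        data.update({key: [args_dict[key]['default']]})
    pd.DataFrame.from_dict(data).to_csv(path, index=False)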
def ivar_to_arr(idf: pd.DataFrame):
columns = idf.columns
arr = []
for i in range(len(idf.index)): # rows
_arr = []
        for u in range(len(columns)):
            _arr.append(idf[columns[u]].iloc[i])
arr.append(_arr)
return arr
def insert_ivar(ta_name: str, ivar):
ivar_list = []
if 'ivar' not in ivar:
# If not in dict form
ivar_list.append({
'ivar': ivar
})
else:
ivar_list.append(ivar)
insert_ivars(ta_name, ivar_list)
def insert_ivars(ta_name: str, ivar_list):
path = get_ivar_path(ta_name)
# Load stored ivars
idf = load_ivar_df(ta_name)
cols = list(idf.columns)
if 'name' not in cols: # Name is used as index
cols.append('name')
# Sanity check
if len(ivar_list[-1]['ivar'].keys()) != len(cols) - 1:
# len of ivar keys != len of ivar meta - name (fitness and type excluded automatically)
print(F'Insert ivars not compatible. Forcing insert: {ivar_list}')
one_true = False
for col in cols:
for key in ivar_list[-1]['ivar'].keys():
if col == key:
one_true = True
break
if one_true:
break
if not one_true:
print(F'Not a single ivar column is similar. Cancelling process.')
return
# Add ivars
for ivar_dict in ivar_list:
data = {}
# Flatten ivar dict structure
ivar = ivar_dict['ivar']
for key in ivar:
if key not in cols:
continue # Ignore if not inside.
if key == 'name': # Add name as index
ivar_dict['name'] = ivar['name']
continue
data.update({
key: [ivar[key]['default']],
})
# Create indexer
name = ["none"]
if 'name' in ivar_dict:
name = [ivar_dict['name']]
# Fill in meta attributes
data.update({
'type': ['unknown'],
'fitness': [0],
})
if 'fitness' in ivar_dict:
data.update({
'fitness': [ivar_dict['fitness']],
})
if 'type' in ivar_dict:
data.update({
'type': [ivar_dict['type']],
})
n_idf = pd.DataFrame(data, index=name)
idf = idf.append(n_idf)
        # idf = pd.concat([idf, n_idf])  # pandas >= 2.0 equivalent (DataFrame.append was removed)
idf.index.name = 'name'
idf.to_csv(path)
def insert_ivar_df(ta_name: str, ivar_df):
path = get_ivar_path(ta_name)
# Load stored ivars
idf = load_ivar_df(ta_name)
    idf = idf.append(ivar_df)
idf.index.name = 'name'
idf.to_csv(path)
def delete_ivar(ta_name: str, ivar_name: str):
path = get_ivar_path(ta_name)
# Load stored ivars
idf = load_ivar_df(ta_name)
    if len(idf[idf.index == ivar_name]):
        print(F'Deleted {ivar_name} from {ta_name} IVars')
    else:
        print(F'Failed to delete {ivar_name} from {ta_name} IVars')
        return
    # Drop row
    idf = idf.drop(labels=ivar_name, axis=0)
idf.index.name = 'name'
idf.to_csv(path)
def get_ivar_path(ta_name):
folder = F'robot/ivar'
ivar_file = F'{ta_name}_ivar'
path = F'{folder}/{ivar_file}.csv'
return path
def get_test_steps(ds_name: str):
"""Get number of dataset(s)"""
dsf = load_dataset(ds_name)
return len(dsf)
def result_dict_to_dataset(result_dict):
data = {}
for key in result_dict:
data.update({
key: [result_dict[key]]
})
return
|
pd.DataFrame(data, index=False)
|
pandas.DataFrame
|
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#-------------read csv---------------------
df_2010_2011 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2010_2011.csv")
df_2012_2013 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2012_2013.csv")
df_2014_2015 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2014_2015.csv")
df_2016_2017 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2016_2017.csv")
df_2018_2019 = pd.read_csv("/mnt/nadavrap-students/STS/data/data_Shapira_20200911_2018_2019.csv")
df_2010_2011['prcab'] = df_2010_2011['prcab'].fillna(0)
df_2012_2013['prcab'] = df_2012_2013['prcab'].fillna(0)
df_2014_2015['prcab'] = df_2014_2015['prcab'].fillna(0)
df_2016_2017['prcab'] = df_2016_2017['prcab'].fillna(0)
df_2018_2019['prcab'] = df_2018_2019['prcab'].fillna(0)
print(df_2018_2019['prcab'])
mask = df_2010_2011['surgyear'] != 2010
df_2011 = df_2010_2011[mask]
df_2010 = df_2010_2011[~mask]
mask2 = df_2012_2013['surgyear'] != 2012
df_2013 = df_2012_2013[mask2]
df_2012 = df_2012_2013[~mask2]
mask3 = df_2014_2015['surgyear'] != 2014
df_2015 = df_2014_2015[mask3]
df_2014 = df_2014_2015[~mask3]
mask4 = df_2016_2017['surgyear'] != 2016
df_2017 = df_2016_2017[mask4]
df_2016 = df_2016_2017[~mask4]
mask5 = df_2018_2019['surgyear'] != 2018
df_2019 = df_2018_2019[mask5]
df_2018 = df_2018_2019[~mask5]
avg_siteid = pd.DataFrame()
avg_surgid = pd.DataFrame()
# #tmpHilla=df_2018_2019.columns
# tmpHilla=pd.DataFrame(df_2018_2019.columns.values.tolist())
# tmpHilla.to_csv("/tmp/pycharm_project_355/columns.csv")
# my_list = df_2010_2011.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2012_2013.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2014_2015.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2016_2017.columns.values.tolist()
# print (my_list)
# print()
# my_list = df_2018_2019.columns.values.tolist()
# print (my_list)
# print()
#-------------------merge all csv--------------------------
# dfMerge1 = pd.merge(df_2010_2011, df_2012_2013, on='surgorder')
# dfMerge2 = pd.merge(dfMerge1, df_2014_2015, on='surgorder')
# dfMerge = pd.merge(dfMerge2, df_2016_2017, on='surgorder')
#dfMerge = pd.merge(df_2010_2011, df_2012_2013, on='SiteID')
#count distinct
#table.groupby('YEARMONTH').CLIENTCODE.nunique()
def groupby_siteid():
df_2010 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('siteid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('siteid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('siteid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('siteid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('siteid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='siteid')
df2 =pd.merge(df1, df_2012, on='siteid')
df3 =pd.merge(df2, df_2013, on='siteid')
df4 =pd.merge(df3, df_2014, on='siteid')
df5 =pd.merge(df4, df_2015, on='siteid')
df6 =pd.merge(df5, df_2016, on='siteid')
df7 =pd.merge(df6, df_2017, on='siteid')
df8 =pd.merge(df7, df_2018, on='siteid')
df_sum_all_Years =pd.merge(df8, df_2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("total op sum all years siteid.csv")
print("details on site id dist:")
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("total op less 10 years siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_siteid['siteid'] = df_sum_all_Years['siteid']
avg_siteid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_surgid():
df_2010 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('surgid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('surgid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('surgid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('surgid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('surgid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='surgid')
df2 =pd.merge(df1, df_2012, on='surgid')
df3 =pd.merge(df2, df_2013, on='surgid')
df4 =pd.merge(df3, df_2014, on='surgid')
df5 =pd.merge(df4, df_2015, on='surgid')
df6 =pd.merge(df5, df_2016, on='surgid')
df7 =pd.merge(df6, df_2017, on='surgid')
df8 =pd.merge(df7, df_2018, on='surgid')
df_sum_all_Years =pd.merge(df8, df_2019, on='surgid')
cols = df_sum_all_Years.columns.difference(['surgid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['surgid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years surgid.csv")
print()
print("details of surgid dist:")
print("num of all surgid: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years surgid.csv")
print("num of doctors with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
avg_surgid['surgid'] = df_sum_all_Years['surgid']
avg_surgid['total_year_avg'] = df_sum_all_Years['Year_avg']
def groupby_hospid():
df_2010 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2010 ).sum()).reset_index(name='2010')
df_2011 = df_2010_2011.groupby('hospid')['surgyear'].apply(lambda x: (x== 2011 ).sum()).reset_index(name='2011')
df_2012 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2012 ).sum()).reset_index(name='2012')
df_2013 = df_2012_2013.groupby('hospid')['surgyear'].apply(lambda x: (x== 2013 ).sum()).reset_index(name='2013')
df_2014 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2014 ).sum()).reset_index(name='2014')
df_2015 = df_2014_2015.groupby('hospid')['surgyear'].apply(lambda x: (x== 2015 ).sum()).reset_index(name='2015')
df_2016 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2016 ).sum()).reset_index(name='2016')
df_2017 = df_2016_2017.groupby('hospid')['surgyear'].apply(lambda x: (x== 2017 ).sum()).reset_index(name='2017')
df_2018 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2018 ).sum()).reset_index(name='2018')
df_2019 = df_2018_2019.groupby('hospid')['surgyear'].apply(lambda x: (x== 2019 ).sum()).reset_index(name='2019')
df1 =pd.merge(df_2010, df_2011, on='hospid')
df2 =pd.merge(df1, df_2012, on='hospid')
df3 =pd.merge(df2, df_2013, on='hospid')
df4 =pd.merge(df3, df_2014, on='hospid')
df5 =pd.merge(df4, df_2015, on='hospid')
df6 =pd.merge(df5, df_2016, on='hospid')
df7 =pd.merge(df6, df_2017, on='hospid')
df8 =pd.merge(df7, df_2018, on='hospid')
df_sum_all_Years =pd.merge(df8, df_2019, on='hospid')
cols = df_sum_all_Years.columns.difference(['hospid'])
df_sum_all_Years['Distinct_years'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['hospid','Distinct_years'])
df_sum_all_Years['Year_sum'] =df_sum_all_Years.loc[:,cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg'] = df_sum_all_Years['Year_sum']/df_sum_all_Years['Distinct_years']
df_sum_all_Years.to_csv("sum all years hospid.csv")
print(df_sum_all_Years)
print ("num of all sites: ", len(df_sum_all_Years))
less_8 =df_sum_all_Years[df_sum_all_Years['Distinct_years'] !=10]
less_8.to_csv("less 10 years hospid.csv")
print("num of hospital with less years: ", len(less_8))
x = np.array(less_8['Distinct_years'])
print(np.unique(x))
return df_sum_all_Years
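# Hedged refactoring sketch (not part of the original analysis): the three
# groupby_* functions above differ only in the grouping column, so the per-year
# counts could be produced once by a helper like this. It reuses the
# module-level dataframes and column names exactly as they appear above.
def _yearly_counts_by(group_col):
    frames = []
    for df_pair in (df_2010_2011, df_2012_2013, df_2014_2015, df_2016_2017, df_2018_2019):
        for year in sorted(df_pair['surgyear'].dropna().unique()):
            counts = df_pair.groupby(group_col)['surgyear'].apply(
                lambda x: (x == year).sum()).reset_index(name=str(int(year)))
            frames.append(counts.set_index(group_col))
    merged = pd.concat(frames, axis=1, join='inner').reset_index()
    cols = merged.columns.difference([group_col])
    merged['Distinct_years'] = merged[cols].gt(0).sum(axis=1)
    merged['Year_sum'] = merged.loc[:, cols].sum(axis=1)
    merged['Year_avg'] = merged['Year_sum'] / merged['Distinct_years']
    return merged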
def draw_hist(data,num_of_bins,title,x_title,y_title,color):
plt.hist(data, bins=num_of_bins, color=color,ec="black")
plt.title(title)
plt.xlabel(x_title)
plt.ylabel(y_title)
plt.show()
def group_by_count(group_by_value,name):
df_2010_2011_gb = df_2010_2011.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2012_2013_gb = df_2012_2013.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2014_2015_gb = df_2014_2015.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2016_2017_gb = df_2016_2017.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_2018_2019_gb = df_2018_2019.groupby(group_by_value)[group_by_value].count().reset_index(name=name)
df_merge_1=pd.merge(df_2010_2011_gb,df_2012_2013_gb, on=group_by_value)
df_merge_2=pd.merge(df_merge_1,df_2014_2015_gb, on=group_by_value)
df_merge_3=pd.merge(df_merge_2,df_2016_2017_gb, on=group_by_value)
df_merge_4=pd.merge(df_merge_3,df_2018_2019_gb, on=group_by_value)
cols = df_merge_4.columns.difference([group_by_value])
df_merge_4[name] = df_merge_4.loc[:,cols].sum(axis=1)
df_new=pd.DataFrame()
df_new[group_by_value] = df_merge_4[group_by_value]
df_new[name] = df_merge_4[name]
return df_new
def groupby_siteid_prcab():
df2010 = df_2010.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2010_reop')
df2011 = df_2011.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2011_reop')
df2012 = df_2012.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2012_reop')
df2013 = df_2013.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2013_reop')
df2014 = df_2014.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2014_reop')
df2015 = df_2015.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2015_reop')
df2016 = df_2016.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2016_reop')
df2017 = df_2017.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2017_reop')
df2018 = df_2018.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2018_reop')
df2019 = df_2019.groupby('siteid')['prcab'].apply(lambda x: (x == 1).sum()).reset_index(name='2019_reop')
df1 = pd.merge(df2010, df2011, on='siteid')
df2 = pd.merge(df1, df2012, on='siteid')
df3 = pd.merge(df2, df2013, on='siteid')
df4 = pd.merge(df3, df2014, on='siteid')
df5 = pd.merge(df4, df2015, on='siteid')
df6 = pd.merge(df5, df2016, on='siteid')
df7 = pd.merge(df6, df2017, on='siteid')
df8 = pd.merge(df7, df2018, on='siteid')
df_sum_all_Years = pd.merge(df8, df2019, on='siteid')
cols = df_sum_all_Years.columns.difference(['siteid'])
df_sum_all_Years['Distinct_years_reop'] = df_sum_all_Years[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years.columns.difference(['siteid', 'Distinct_years_reop'])
df_sum_all_Years['Year_sum_reop'] = df_sum_all_Years.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years['Year_avg_reop'] = df_sum_all_Years['Year_sum_reop'] / df_sum_all_Years['Distinct_years_reop']
df_sum_all_Years.to_csv("sum all years siteid reop.csv")
less_8 = df_sum_all_Years[df_sum_all_Years['Distinct_years_reop'] != 10]
less_8.to_csv("less 10 years reop siteid.csv")
print("num of sites with less years: ", len(less_8))
x = np.array(less_8['Distinct_years_reop'])
print(np.unique(x))
df_10 = df_2010.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2010_Firstop')
df_11 = df_2011.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2011_Firstop')
df_12 = df_2012.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2012_Firstop')
df_13 = df_2013.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2013_Firstop')
df_14 = df_2014.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2014_Firstop')
df_15 = df_2015.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2015_Firstop')
df_16 = df_2016.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2016_Firstop')
df_17 = df_2017.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2017_Firstop')
df_18 = df_2018.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2018_Firstop')
df_19 = df_2019.groupby('siteid')['prcab'].apply(lambda x:((x==2) | (x==0)).sum()).reset_index(name='2019_Firstop')
d1 = pd.merge(df_10, df_11, on='siteid')
d2 = pd.merge(d1, df_12, on='siteid')
d3 = pd.merge(d2, df_13, on='siteid')
d4 = pd.merge(d3, df_14, on='siteid')
d5 = pd.merge(d4, df_15, on='siteid')
d6 = pd.merge(d5, df_16, on='siteid')
d7 = pd.merge(d6, df_17, on='siteid')
d8 = pd.merge(d7, df_18, on='siteid')
df_sum_all_Years_total = pd.merge(d8, df_19, on='siteid')
cols = df_sum_all_Years_total.columns.difference(['siteid'])
df_sum_all_Years_total['Distinct_years'] = df_sum_all_Years_total[cols].gt(0).sum(axis=1)
cols_sum = df_sum_all_Years_total.columns.difference(['siteid', 'Distinct_years'])
df_sum_all_Years_total['Year_sum'] = df_sum_all_Years_total.loc[:, cols_sum].sum(axis=1)
df_sum_all_Years_total['Year_avg'] = df_sum_all_Years_total['Year_sum'] / df_sum_all_Years_total['Distinct_years']
df_sum_all_Years_total.to_csv("First op sum all years siteid.csv")
# df_sum_all_Years.to_csv("sum all years siteid.csv")
# print(df_sum_all_Years)
# print("num of all sites: ", len(df_sum_all_Years))
#
less = df_sum_all_Years_total[df_sum_all_Years_total['Distinct_years'] != 10]
less.to_csv("First op less 10 years siteid.csv")
print("First op num of sites with less years: ", len(less))
x = np.array(less['Distinct_years'])
print(np.unique(x))
temp_first =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
## Load the CSVs
train = pd.read_csv("titanic/train.csv")
test = pd.read_csv("titanic/test.csv")
### Quick inspection (cell display)
# train ## whole table
# train.head(10) ## first 10 rows
# train.describe() ## summary statistics
# train.info() ## concise summary
# train['Age'].isnull().values.sum() ## number of nulls
### Histogram
# plt.hist(train['Age'].dropna(), bins=20)
### Impute missing values (NULL -> AVG)
mean = np.mean(train['Age'])
train['Age'] = train['Age'].fillna(mean)
### Encode as numbers (male -> 1, female -> 2)
train['Sex'] = train['Sex'].str.replace('female', '2')
train['Sex'] = train['Sex'].str.replace('male', '1')
### Build the dataset (explanatory variables -> X, target variable -> Y)
X = pd.DataFrame({'Pclass':train['Pclass'], 'Sex':train['Sex'], 'Age':train['Age']})
y =
|
pd.DataFrame({'Survived':train['Survived']})
|
pandas.DataFrame
|
from numpy import NaN
import pandas as pd
import csv
from datetime import datetime
medals_file_name = 'Medals.xlsx'
file_output = 'Medalhas.csv'
paises_file_name = 'Paises.csv'
header = ['id','rank', 'pais_id', 'ouro', 'prata', 'bronze', 'total', 'rank_total', 'data']
class MedalBuilder:
def build(self, input_file_path, output_file_path):
paises_df = pd.read_csv(output_file_path+paises_file_name)
data =
|
pd.read_excel(input_file_path+medals_file_name)
|
pandas.read_excel
|
import json
from sklearn.feature_extraction.text import TfidfVectorizer
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
class Model:
def __init__(self, jsonData):
self.user_text = jsonData['desc']
self.user_language_input = jsonData['pl']
self.user_input = self.user_text + " " + self.user_language_input
def combined_features(self, row):
return row['Repository Name']+" "+row['Language']+" "+row['Tags']+row['Description']
def createData(self):
data = pd.read_csv("https://query.data.world/s/zxtxz5ro2skvafkuaycnh74g5ppp2g")
data = data.drop(["Last Update Date", "Number of Stars"], axis=1)
data["Tags"] = data["Tags"].replace(',', ' ', regex=True)
data["Description"] = data["Description"].replace(',', ' ', regex=True)
data = data.fillna(" ")
data["combined_features"] = data.apply(self.combined_features, axis=1)
self.data = data
self.user_series = pd.Series([self.user_input])
self.data_with_User = data["combined_features"].append(
self.user_series, ignore_index=True)
def createModel(self):
cv = TfidfVectorizer()
count_matrix = cv.fit_transform(self.data_with_User.values.astype('U'))
self.cosine_sim = cosine_similarity(count_matrix)
self.data["index"] = pd.DataFrame([i for i in range(980)])
self.user_repo_index = 980
def flatten(self, t):
return [item for sublist in t for item in sublist]
def get_title_from_index(self, index):
if len(self.data[self.data["index"] == index]["index"]) != 0:
return self.data[self.data["index"] == index]["index"].values.tolist()
def find_similar_repos(self, user_repo_index):
similar = list(enumerate(self.cosine_sim[user_repo_index]))
sorted_similar = sorted(similar, key=lambda x: x[1], reverse=True)
test_list = []
for i in range(len(sorted_similar)):
test_list.append(self.get_title_from_index(sorted_similar[i][0]))
res = []
for val in test_list:
if val is not None:
res.append(val)
res = self.flatten(res)
return res[:15]
def recommend(self):
self.find_similar_repos(self.user_repo_index)
df = pd.DataFrame(columns=["Username", "Repository Name",
"Description", "Language", "Tags", "Url"])
for i in self.find_similar_repos(self.user_repo_index):
df =
|
pd.DataFrame(df)
|
pandas.DataFrame
|
# coding: utf-8
import pandas as pd
def acl_bib2csv(file_bib):
with open(file_bib, 'r', encoding="utf-8") as f:
bib_list = f.read().strip().split("@")
papers = []
for bib in bib_list:
if bib.startswith("inproceedings"):
kvs = bib.split("\n")
paper = dict()
for kv in kvs:
res = kv.split('=')
if len(res) == 2:
k, v = res
paper[k.strip()] = v.strip(" \",")
papers.append(paper)
df =
|
pd.DataFrame(papers)
|
pandas.DataFrame
|
import datetime
import pprint
from typing import Optional, List, Tuple, Set, Dict
import numpy as np
import pandas as pd
from overrides import overrides
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
from python import DOCUMENT_ID, PUBLISH_DATE, SUBTOPIC, TOPIC_ID, MENTION_TYPE, TIME_OF_THE_DAY, TIME_DATE
from python.handwritten_baseline import TIMEX_NORMALIZED_PARSED, TIME, TIMEX_NORMALIZED
from python.handwritten_baseline.pipeline.data.base import Dataset
from python.handwritten_baseline.pipeline.model.feature_extr import TIME_EXTR, fix_all_nan_columns
from python.handwritten_baseline.pipeline.model.feature_extr.base_mixin import FeatureExtractorMixin
from python.handwritten_baseline.pipeline.model.feature_extr.time_and_space import \
look_up_document_level_event_component, look_up_event_component_by_sentence, \
look_up_event_component_by_srl, look_up_event_component_from_closest_preceding_sentence
class TimeFeatureExtractorPipelineCreator:
"""
The temporal distance feature consists of two pipeline stages: extracting features in the transform() method of
`TemporalDistanceFeature` followed by imputation of missing values.
"""
@classmethod
def from_params(cls, config: Dict):
extractor = TemporalDistanceFeature.from_params(config)
fix_nan_columns = FunctionTransformer(fix_all_nan_columns)
# Our feature extraction returns NaNs in case one of two mentions in a pair has no temporal information, so we
# need to fill those NaNs. 0 and -1 would be misleading for the classifier, therefore use the median feature
# value.
imputer = SimpleImputer(missing_values=np.nan, strategy="median")
return make_pipeline(extractor, fix_nan_columns, imputer)
class TemporalDistanceFeature(FeatureExtractorMixin):
"""
Computes temporal distance (hour, day, ...) between temporal expressions of a mention pair. Two variants for finding
temporal expressions exist: (1) document-level, where we pick the first temporal expression in a document and (2)
mention-level, where we use SRL to find the temporal expression attached to the mention action, or fall back to the
first temporal expression in the same sentence or fall back to the closest temporal expression from a previous sentence.
"""
def __init__(self,
use_cache: bool,
features_to_select: Optional[List[str]]):
super(TemporalDistanceFeature, self).__init__(TIME_EXTR, use_cache, features_to_select)
@staticmethod
def compute_temporal_distance_features(a_date: Optional[datetime.datetime], b_date: Optional[datetime.datetime]):
"""
Compute temporal distance between two datetimes per day, month, etc.
:param a_date:
:param b_date:
:return: list of distances
"""
if a_date is None or b_date is None or pd.isna(a_date) or
|
pd.isna(b_date)
|
pandas.isna
|
import pandas as pd
import pickle
from tqdm import tqdm
import json
import reverse_geocoder as rg
import networkx as nx
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# from OllivierRicci import ricciCurvature
from RIPEprobes import haversine
from GraphRicciCurvature.OllivierRicci import OllivierRicci
from os import listdir,walk
from os.path import join, isfile
continent = pd.read_csv('/Users/geode/PycharmProjects/RIPE/Internet_of_Space_and_Time/datasets/country_continent.csv')
continent = continent.fillna('None')
c = 299792458 # in m.s**-1
list_of_aws = list(range(6460,6474))
list_of_aws.append(6394)
dico_aws = {'6474':'Ashburn','6473':'San Francisco','6472':'Columbus','6471':'London','6470':'Singapore','6469':'Stockholm',
'6468':'Seoul','6467':'Tokyo','6466':'Mumbai','6465':'Dublin','6464':'Paris','6463':'Frankfurt am Main','6462':'Montreal',
'6461':'Sao Paulo','6460':'Sydney','6394':'Portland'}
def symmetrize(data):
mat = data.values
newmat = np.ndarray
indexes = data.index
columns = data.columns
X, Y = mat.shape
symDict = {}
for key1 in columns:
symDict[key1] = {}
for key2 in columns:
symDict[key1][key2] = np.nan
for i in range(X):
for j in range(Y):
if np.isnan(mat[i, j]):
if not np.isnan(symDict[columns[j]][indexes[i]]):
symDict[indexes[i]][columns[j]] = symDict[columns[j]][indexes[i]]
else:
if np.isnan(symDict[columns[j]][indexes[i]]):
symDict[indexes[i]][columns[j]] = mat[i, j]
symDict[columns[j]][indexes[i]] = mat[i, j]
else:
symDict[indexes[i]][columns[j]] = min(mat[i, j], symDict[columns[j]][indexes[i]])
symDict[columns[j]][indexes[i]] = symDict[indexes[i]][columns[j]]
symData = pd.DataFrame(symDict)
return symData
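# Hedged alternative (not used below): for a purely numeric latency matrix with
# identical row/column labels, the same "take the smaller of the two directions,
# fall back to whichever side is present" rule can be written with numpy.
# np.fmin returns NaN only when both inputs are NaN, which matches the
# dictionary logic above.
def symmetrize_numpy(data):
    mat = data.values.astype(float)
    sym = np.fmin(mat, mat.T)
    return pd.DataFrame(sym, index=data.index, columns=data.columns)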
def reverseGeocode(coordinates):
result = rg.search(coordinates)
cont = continent[continent.iso2==result[0]['cc']]['continent code'].values[0]
if len(cont)<2:
print('cont')
cont = 'NA'
return (result[0]['admin1'],result[0]['cc'],cont)
def reverse_countries(cc_code,AWS=False):
probes = {d['id']: d for d in json.load(open('../probes_dataset/20210906.json'))['objects']}
df = pd.DataFrame(probes).transpose()
print(df.columns)
print(df['status'][df.index==6025])
print(df)
# df = df[df['status']==1]
if not(AWS):
df = df[df['country_code']==cc_code][['latitude', 'longitude', 'id']]
else:
df = df[df['id'].isin(list_of_aws)][['latitude','longitude','id']]
df_data = pd.read_csv('/Users/geode/Documents/Datasets/Trivia/ISO-3166-Countries-with-Regional-Codes-master/all/all.csv')
print(df.head())
cities = {}
countries = {}
continents = {}
sub_contin = {}
for (coord) in tqdm(df.values):
rev = reverseGeocode(tuple(coord[0:2]))
cities[str(coord[2])] = rev[0]
countries[str(coord[2])] = rev[1]
continents[str(coord[2])] = rev[2]
sub_contin[str(coord[2])] = df_data['sub-region'][df_data['alpha-2'] == countries[str(coord[2])]].values[0]
return (cities, countries, continents, sub_contin)
def graph_inference(df,list_of_ids,outcome,type='all'):
G = nx.Graph()
df = symmetrize(df)
G.add_nodes_from(list(df.index))
print(len(set(df.index)&set(df.columns)))
# nx.set_node_attributes(G, list_of_ids[0], 'city')
# nx.set_node_attributes(G,list_of_ids[1],'country')
# nx.set_node_attributes(G,list_of_ids[2],'continents')
nx.set_node_attributes(G,dico_aws,'city')
print(df.shape,df.head())
ran = range(20,500,10)
for m in ran:
print(m)
for t in G.nodes():
for s in G.nodes():
if t!=s :
if df[s][t] < m:
G.add_edge(s,t)
elif df[t][s] < m:
G.add_edge(s,t)
print(nx.info(G))
print([len(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)])
ori = OllivierRicci(G)
ori.compute_ricci_curvature()
G = ori.G
nx.write_graphml(G,outcome+str(m)+'.graphml')
def anchoring_space_AWS():
mypath ='../graph/AWS/'
fileList = [f for f in listdir(mypath) if isfile(join(mypath, f))]
probes = {d['id']: d for d in json.load(open('../probes_dataset/20210906.json'))['objects']}
df = pd.DataFrame(probes).transpose()
if isfile(mypath+'/cityDict.p'):
cityDict = pickle.load( open(mypath+'/cityDict.p' , "rb" ) )
else:
cityDict={}
print(cityDict)
for file in fileList:
print(file)
if file == '.DS_Store':
continue
if file.split('.')[1]=='graphml':
print(file)
G=nx.read_graphml(mypath+'/'+file)
G = nx.relabel_nodes(G, nx.get_node_attributes(G,'id_node'))
ricci = nx.get_edge_attributes(G,'curvature')
actual_ricci = {}
if '6201' in G.nodes():
G.remove_node('6201')
if '6231' in G.nodes():
G.remove_node('6231')
for node in G.nodes(data=True):
print(node)
if node[0] == '6422':
node[1]['city'] = 'Florida'
elif int(node[0]) in df['id']:
latitude = df[df['id']==int(node[0])]['latitude'].values[0]
longitude = df[df['id']==int(node[0])]['longitude'].values[0]
G.nodes[node[0]]['lat']=latitude
G.nodes[node[0]]['long']=longitude
nx.write_graphml(G,mypath+'/'+file)
def anchoring_space(cc_code):
mypath ='../Internet_of_Space_and_Time/data_country/2019-07-01/graph/'+cc_code+'/'
fileList = [f for f in listdir(mypath) if isfile(join(mypath, f))]
probes = {d['id']: d for d in json.load(open('../probes_dataset/20210906.json'))['objects']}
df = pd.DataFrame(probes).transpose()
if isfile(mypath+'/cityDict.p'):
cityDict = pickle.load( open(mypath+'/cityDict.p' , "rb" ) )
else:
cityDict={}
print(cityDict)
for file in fileList:
print(file)
if file == '.DS_Store':
continue
if file.split('.')[1]=='graphml':
print(file)
G=nx.read_graphml(mypath+'/'+file)
G = nx.relabel_nodes(G, nx.get_node_attributes(G,'id_node'))
ricci = nx.get_edge_attributes(G,'curvature')
actual_ricci = {}
if '6201' in G.nodes():
G.remove_node('6201')
if '6231' in G.nodes():
G.remove_node('6231')
for node in G.nodes(data=True):
print(node)
if node[0] == '6422':
node[1]['city'] = 'Florida'
elif int(node[0]) in df['id']:
latitude = df[df['id']==int(node[0])]['latitude'].values[0]
longitude = df[df['id']==int(node[0])]['longitude'].values[0]
G.nodes[node[0]]['lat']=latitude
G.nodes[node[0]]['long']=longitude
nx.write_graphml(G,mypath+'/'+file)
def boxplot_AWS(MONTH,YEAR):
plt.rc('font', family='serif')
u_1 = []
u_2 = []
size = {}
# values = range(2,50,2)
values = range(10, 300, 10)
for (i, m) in enumerate(values):
ricci_curv = []
try:
G= nx.read_graphml('../graph/AWS/graph'+YEAR+'-'+MONTH+'-'+str(m)+'.graphml')
except:
u_2.append(i)
u_1.extend([0,0])
continue
val_max = 0
# ricci_curving = nx.get_edge_attributes(G,'curvature')
ricci_curving = nx.get_edge_attributes(G, 'ricciCurvature')
ricci_curv = ricci_curving.values()
ricci_curving = sorted(ricci_curving.items(), key=lambda x: x[1])[0:10]
city = nx.get_node_attributes(G, 'cities')
print(m, city)
new_val = {}
for t in ricci_curving:
try:
v = city[t[0][0]]
n = city[t[0][1]]
new_val[(v, t[0][0], n, t[0][1])] = t[1]
except:
continue
size[m] = len(sorted(nx.connected_components(G), key=len, reverse=True))
u_1.extend(ricci_curv)
u_2.extend([m] * len(ricci_curv))
print(u_2)
# print(cloud)
df = pd.DataFrame(u_1, columns=['Ricci Curvature'])
df['Threshold'] = pd.Series(u_2)
f = plt.figure(figsize=(12, 10))
ax = f.add_subplot()
size = pd.DataFrame(pd.Series(size), columns=['# of connected components'])
from sklearn import preprocessing
print(size)
size = size.apply(lambda x: ((3 * x / float(max(size['# of connected components']))) - 2))
# print(size)
size = size[size.index.isin(list(set(df['Threshold'].values)))]
# # fig = plt.figure(figsize=(12,10))
# size.plot(c='g',marker='v',ax=ax)
# ax.yaxis.tick_right()
plt.ylabel('Ricci Curvature', fontsize=25)
bplot = sns.boxplot(y='Ricci Curvature', x='Threshold',
data=df,
width=0.3, color='grey', whis=[3, 97], ax=ax)
#
# plt.xlabel('Thresholding', fontsize=20)
plt.xticks(fontsize=12)
plt.yticks(fontsize=20)
plt.xlabel('Threshold', fontsize=25)
# plt.ylim([-2,1.1])
# plt.show()
plt.savefig('../Internet_of_Space_and_Time/cloud_internet/aws_history/boxplot' +MONTH+'-'+YEAR+'.pdf')
def boxplot(cc_code):
u_1 = []
u_2 = []
size = {}
values = range(1,30,1)
for (i, m) in enumerate(values):
G = nx.read_graphml('../Internet_of_Space_and_Time/data_country/2019-07-01/graph/'+cc_code+'/graph'+cc_code+str(m)+'.graphml')
ricci_curving = nx.get_edge_attributes(G, 'ricciCurvature')
ricci_curv = ricci_curving.values()
ricci_curving = sorted(ricci_curving.items(), key=lambda x: x[1])[0:10]
city = nx.get_node_attributes(G, 'city')
print(m, city)
new_val = {}
for t in ricci_curving:
try:
v = city[t[0][0]]
n = city[t[0][1]]
new_val[(v, t[0][0], n, t[0][1])] = t[1]
except:
continue
size[m] = len(sorted(nx.connected_components(G), key=len, reverse=True))
u_1.extend(ricci_curv)
u_2.extend([m] * len(ricci_curv))
print(u_2)
df = pd.DataFrame(u_1, columns=['Ricci Curvature'])
df['Threshold'] = pd.Series(u_2)
f = plt.figure(figsize=(12, 10))
ax = f.add_subplot()
size = pd.DataFrame(
|
pd.Series(size)
|
pandas.Series
|
from hls4ml.model.hls_model import HLSModel
from hls4ml.model.hls_layers import IntegerPrecisionType, FixedPrecisionType
import matplotlib.pyplot as plt
import numpy as np
import pandas
import seaborn as sb
from hls4ml.model.hls_model import HLSModel
try:
from tensorflow import keras
import qkeras
__tf_profiling_enabled__ = True
except ImportError:
__tf_profiling_enabled__ = False
try:
import torch
__torch_profiling_enabled__ = True
except ImportError:
__torch_profiling_enabled__ = False
def array_to_summary(x, fmt='boxplot'):
if fmt == 'boxplot':
y = {'med' : np.median(x),
'q1' : np.percentile(x, 25),
'q3' : np.percentile(x, 75),
'whislo' : min(x),
'whishi' : max(x)
}
elif fmt == 'histogram':
# Power of 2 bins covering data range
high = np.ceil(np.log2(max(x))) + 1
low = np.floor(np.log2(min(x))) - 1
bits = np.arange(low, high, 1)
bins = 2 ** bits
h, b = np.histogram(x, bins=bins)
h = h * 1. / float(sum(h)) # normalize
y = {'h' : h,
'b' : np.log2(b)}
return y
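# Illustrative only (not part of the profiling API): what the two summary
# formats of array_to_summary() look like for a toy array of weights.
def _array_to_summary_example():
    x = np.array([1., 2., 4., 8., 16.])
    box = array_to_summary(x, fmt='boxplot')     # dict with med/q1/q3/whislo/whishi, ready for ax.bxp
    hist = array_to_summary(x, fmt='histogram')  # dict with normalised heights 'h' and log2 bin edges 'b'
    return box, hist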
def boxplot(data, fmt='longform'):
if fmt == 'longform':
f = plt.figure() #figsize=(3, 3))
hue = 'layer' if 'layer' in data.keys() else None
vp = sb.boxplot(x='x', y='weight', hue=hue, data=data[data['x'] > 0], showfliers=False)
vp.set_yticklabels(vp.get_yticklabels(), rotation=45, ha='right')
if hue is not None:
vp.get_legend().remove()
vp.set_xscale('log', base=2)
return f
elif fmt == 'summary':
from matplotlib.patches import Rectangle
medianprops = dict(linestyle='-', color='k')
f, ax = plt.subplots(1, 1)
data.reverse()
colors = sb.color_palette("Blues", len(data))
bp = ax.bxp(data, showfliers=False, vert=False, medianprops=medianprops)
# add colored boxes
for line, color in zip(bp['boxes'], colors):
x = line.get_xdata()
xl, xh = min(x), max(x)
y = line.get_ydata()
yl, yh = min(y), max(y)
rect = Rectangle((xl, yl), (xh-xl), (yh-yl), fill=True, color=color)
ax.add_patch(rect)
ax.set_yticklabels([d['weight'] for d in data])
ax.set_xscale('log', base=2)
plt.xlabel('x')
return f
else:
return None
def histogram(data, fmt='longform'):
f = plt.figure()
from matplotlib.ticker import MaxNLocator
n = len(data) if fmt == 'summary' else len(data['weight'].unique())
colors = sb.color_palette("husl", n)
if fmt == 'longform':
for i, weight in enumerate(data['weight'].unique()):
y = array_to_summary(data[data['weight'] == weight]['x'], fmt='histogram')
plt.bar(y['b'][:-1], y['h'], width=1, fill=False, label=weight, edgecolor=colors[i])
elif fmt == 'summary':
for i, weight in enumerate(data):
plt.bar(weight['b'][:-1], weight['h'], width=1, fill=False, label=weight['weight'], edgecolor=colors[i])
plt.gca().xaxis.set_major_locator(MaxNLocator(integer=True))
plt.xlabel('log2(x)')
plt.ylabel('frequency')
plt.legend()
return f
plots = {'boxplot' : boxplot,
'histogram' : histogram}
def types_boxplot(data, fmt='longform'):
from matplotlib.patches import PathPatch
from matplotlib.patches import Rectangle
ax = plt.gca()
f = plt.gcf()
# Scale the data
data['low'] = 2.**data['low']
data['high'] = 2.**data['high']
# Plot the custom precisions
ticks = np.array([tick.get_text() for tick in plt.yticks()[1]])
# Get the coordinates of the boxes to place the markers
if fmt == 'longform':
# seaborn adjusts the box positions slightly in groups
boxes = [c.get_extents().inverse_transformed(ax.transData) for c in ax.get_children() if isinstance(c, PathPatch)]
ys = [(box.y0 + box.y1) / 2 for box in boxes]
ys = [(y, y) for y in ys]
elif fmt == 'summary':
ys = [(y, y) for y in plt.yticks()[0]]
for irow, row in data[data['layer'] != 'model'].iterrows():
if row['layer'] in ticks:
iy = np.argwhere(ticks == row['layer'])[0][0] # Determine which layer in the plot
rectangle = Rectangle((row['low'], ys[iy][0]-0.4), row['high']-row['low'], 0.8, fill=True, color='grey', alpha=0.2)
ax.add_patch(rectangle)
def types_histogram(data, fmt='longform'):
ax = plt.gca()
layers = np.array(ax.get_legend_handles_labels()[1])
colors = sb.color_palette("husl", len(layers))
ylim = ax.get_ylim()
for irow, row in data[data['layer'] != 'model'].iterrows():
if row['layer'] in layers:
col = colors[np.argwhere(layers == row['layer'])[0][0]]
plt.plot((row['low'], row['low']), ylim, '--', color=col)
plt.plot((row['high'], row['high']), ylim, '--', color=col)
types_plots = {'boxplot' : types_boxplot,
'histogram' : types_histogram}
def ap_fixed_WIF(dtype):
from hls4ml.templates.vivado_template import VivadoBackend
dtype = VivadoBackend.convert_precision_string(None, dtype)
W, I, F = dtype.width, dtype.integer, dtype.fractional
return W, I, F
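# For example, assuming the usual ap_fixed<W,I> convention where the integer
# part includes the sign bit, ap_fixed_WIF('ap_fixed<16,6>') would be expected
# to return W=16, I=6, F=10 (F = W - I); the exact parsing is delegated to
# VivadoBackend above.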
def types_hlsmodel(model):
suffix = ['w', 'b']
data = {'layer' : [], 'low' : [], 'high' : []}
# Plot the default precision
default_precision = model.config.model_precision['default']
# assumes ap_fixed
W, I, F = ap_fixed_WIF(default_precision)
data['layer'].append('model')
data['low'].append(-F)
data['high'].append(I-1)
for layer in model.get_layers():
for iw, weight in enumerate(layer.get_weights()):
wname = '{}/{}'.format(layer.name, suffix[iw])
T = weight.type
if T.name != 'model':
W, I, F = ap_fixed_WIF(T.precision)
data['layer'].append(wname)
data['low'].append(-F)
data['high'].append(I-1)
data = pandas.DataFrame(data)
return data
def activation_types_hlsmodel(model):
data = {'layer' : [], 'low' : [], 'high' : []}
# Get the default precision
default_precision = model.config.model_precision['default']
W, I, F = ap_fixed_WIF(default_precision)
data['layer'].append('model')
data['low'].append(-F)
data['high'].append(I-1)
for layer in model.get_layers():
T = layer.get_output_variable().type.precision
W, I, F = ap_fixed_WIF(T)
data['layer'].append(layer.name)
data['low'].append(-F)
data['high'].append(I-1)
data = pandas.DataFrame(data)
return data
def weights_hlsmodel(model, fmt='longform', plot='boxplot'):
suffix = ['w', 'b']
if fmt == 'longform':
data = {'x' : [], 'layer' : [], 'weight' : []}
elif fmt == 'summary':
data = []
for layer in model.get_layers():
name = layer.name
for iw, weight in enumerate(layer.get_weights()):
l = '{}/{}'.format(name, suffix[iw])
w = weight.data.flatten()
w = abs(w[w != 0])
n = len(w)
if n == 0:
break
if fmt == 'longform':
data['x'].extend(w.tolist())
data['layer'].extend([name for i in range(len(w))])
data['weight'].extend([l for i in range(len(w))])
elif fmt == 'summary':
data.append(array_to_summary(w, fmt=plot))
data[-1]['layer'] = name
data[-1]['weight'] = l
if fmt == 'longform':
data = pandas.DataFrame(data)
return data
def weights_keras(model, fmt='longform', plot='boxplot'):
suffix = ['w', 'b']
if fmt == 'longform':
data = {'x' : [], 'layer' : [], 'weight' : []}
elif fmt == 'summary':
data = []
for layer in model.layers:
name = layer.name
weights = layer.get_weights()
for i, w in enumerate(weights):
l = '{}/{}'.format(name, suffix[i])
w = w.flatten()
w = abs(w[w != 0])
n = len(w)
if n == 0:
break
if fmt == 'longform':
data['x'].extend(w.tolist())
data['layer'].extend([name for j in range(n)])
data['weight'].extend([l for j in range(n)])
elif fmt == 'summary':
data.append(array_to_summary(w, fmt=plot))
data[-1]['layer'] = name
data[-1]['weight'] = l
if fmt == 'longform':
data = pandas.DataFrame(data)
return data
def activations_keras(model, X, fmt='longform', plot='boxplot'):
# test layer by layer on data
if fmt == 'longform':
# return long form pandas dataframe for
# seaborn boxplot
data = {'x' : [], 'weight' : []}
elif fmt == 'summary':
# return summary statistics for matplotlib.axes.Axes.bxp
# or histogram bin edges and heights
data = []
for layer in model.layers:
print(" {}".format(layer.name))
if not isinstance(layer, keras.layers.InputLayer):
y = _get_output(layer, X, model.input).flatten()
y = abs(y[y != 0])
if fmt == 'longform':
data['x'].extend(y.tolist())
data['weight'].extend([layer.name for i in range(len(y))])
elif fmt == 'summary':
data.append(array_to_summary(y, fmt=plot))
data[-1]['weight'] = layer.name
if fmt == 'longform':
data = pandas.DataFrame(data)
return data
def weights_torch(model, fmt='longform', plot='boxplot'):
suffix = ['w', 'b']
if fmt == 'longform':
data = {'x': [], 'layer': [], 'weight': []}
elif fmt == 'summary':
data = []
for layer in model.children():
if isinstance(layer, torch.nn.Linear):
name = layer.__class__.__name__
weights = list(layer.parameters())
for i, w in enumerate(weights):
l = '{}/{}'.format(name, suffix[i])
w = weights[i].detach().numpy()
w = w.flatten()
w = abs(w[w != 0])
n = len(w)
if n == 0:
break
if fmt == 'longform':
data['x'].extend(w.tolist())
data['layer'].extend([name for _ in range(n)])
data['weight'].extend([l for _ in range(n)])
elif fmt == 'summary':
data.append(array_to_summary(w, fmt=plot))
data[-1]['layer'] = name
data[-1]['weight'] = l
if fmt == 'longform':
data = pandas.DataFrame(data)
return data
def activations_torch(model, X, fmt='longform', plot='boxplot'):
X = torch.Tensor(X)
if fmt == 'longform':
data = {'x': [], 'weight': []}
elif fmt == 'summary':
data = []
partial_model = torch.nn.Sequential
layers = []
for layer in model.children():
lname = layer.__class__.__name__
layers.append(layer)
pm = partial_model(*layers)
print(" {}".format(lname))
y = pm(X).flatten().detach().numpy()
y = abs(y[y != 0])
if fmt == 'longform':
data['x'].extend(y.tolist())
data['weight'].extend([lname for _ in range(len(y))])
elif fmt == 'summary':
data.append(array_to_summary(y, fmt=plot))
data[-1]['weight'] = lname
if fmt == 'longform':
data =
|
pandas.DataFrame(data)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 12:28:24 2018
@author: admin
"""
import pandas as pd
import numpy as np
import pymysql
import statsmodels.api as sm
from datetime import datetime
config = {
'host': 'magiquant.mysql.rds.aliyuncs.com',
'port': 3306,
'user':'haoamc',
'passwd':'<PASSWORD>',
'db': 'quant'
}
def get_month_tradedate(begin,end):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_month_end = 1 \
AND calendar_date >= '%s' AND calendar_date <= '%s';"%(begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
return data
finally:
if conn:
conn.close()
def get_weekly_tradedate(begin,end):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_week_end = 1 \
AND calendar_date >= '%s' AND calendar_date <= '%s';"%(begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
return data
finally:
if conn:
conn.close()
def get_tradedate(begin,end):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day = 1 \
AND calendar_date >= '%s' AND calendar_date <= '%s';"%(begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
return data
finally:
if conn:
conn.close()
def get_barra_factor_from_sql(date):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT * FROM barra_style_factors_stand WHERE trade_date = '%s';"%(date)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['date','secID','Beta','Momentum','Size','EY','RV','Growth',\
'BP','Leverage','Liquidity']
del data['date']
data = data.set_index('secID')
return data
finally:
if conn:
conn.close()
def get_ret(date):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT stock_id,PctChg FROM stock_market_data WHERE trade_date = '%s';"%(date)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['secID','ret']
data = data.set_index('secID')
return data
finally:
if conn:
conn.close()
def get_cap(date):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT stock_id,TotalValue FROM stock_alpha_factors WHERE trade_date = '%s';"%(date)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['secID','cap']
data = data.set_index('secID')
return data
finally:
if conn:
conn.close()
def get_index_composition(date,index_name):
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT stock_id,weight FROM index_constitution WHERE \
trade_date = '%s' AND index_id = '%s';"%(date,index_name)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['secID','weight']
data = data.set_index('secID')
return data
finally:
if conn:
conn.close()
def get_factor_ret(date,date_lag):
"""
compute the style factor ret
"""
ret = get_ret(date_lag)
cap = get_cap(date)
style_factor = get_barra_factor_from_sql(date)
data_all =
|
pd.concat([ret,cap,style_factor],axis = 1,join = 'inner')
|
pandas.concat
|
import pytz
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import os
import settings
import time
import random
"""
Convert dates to default format and timezone
"""
def convert_datetime_with_timezone(date, time_zone = settings.DEFAULT_TIME_ZONE, format_date=settings.DEFAULT_FORMAT):
date = datetime.strptime(date, format_date)
timezone = pytz.timezone(time_zone).localize(date)
return timezone
"""
Parse a date string using the default format (no timezone attached)
"""
def convert_datetime(date, format_date=settings.DEFAULT_FORMAT):
date = datetime.strptime(date, format_date)
return date
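# Hedged usage note: both helpers parse a date string with settings.DEFAULT_FORMAT;
# convert_datetime_with_timezone() additionally localises the result to
# settings.DEFAULT_TIME_ZONE, whereas convert_datetime() returns a naive datetime.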
def get_dataframe(symbol, start, end, type='T', frame=1, sep=';', format_date=settings.DEFAULT_FORMAT, backperiods=20, serie=None):
assert symbol in settings.SYMBOL_LIST, 'The symbol name is not registered in settings file.'
assert isinstance(symbol, str)
assert isinstance(start, str)
assert isinstance(end, str)
assert type in settings.FREQUENCY_LIST, "The frequence selected is unknown."
assert isinstance(frame, int)
assert isinstance(sep, str)
    assert backperiods > 0, "The backperiods parameter must be greater than zero"
path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "{}{}".format(frame,settings.FREQUENCY_LIST[type]), 'last')
if type == 'T' or type == 'Min' or type == 'H':
path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "1Min", 'last')
elif type == 'M' or type == 'W' or type == 'D':
path = r"{}/{}/{}/{}.csv".format(settings.DATA_DIRECTORY, symbol, "1D", 'last')
elif type == 'tick':
pass
#data = pd.read_csv(path, sep=sep, usecols=['open', 'high', 'low', 'close'], parse_dates=['dateTime'])
data =
|
pd.read_csv(path, sep=sep, usecols=['dateTime', 'open', 'high', 'low', 'close', 'volume'])
|
pandas.read_csv
|
from pathlib import Path
import sys
project_dir = Path(__file__).resolve().parents[1]
from sklearn.preprocessing import MinMaxScaler
from temporal_granularity.src.metrics.metrics import Metrics
from temporal_granularity.src.models.manipulations.approximations import ApproximateData
import logging
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
logger = logging.getLogger(__name__)
if __name__ == "__main__":
log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_fmt)
logger.info("Starting")
# onshore_data = pd.read_csv(
# '/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/data/processed/resources/onshore_processed.csv')
# load_data = pd.read_csv(
# "/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/data/processed/demand/load_processed_normalised.csv")
# pv_data = pd.read_csv(
# "/Users/b1017579/Documents/PhD/Projects/14-temporal-granularity/temporal_granularity/data/processed/resources/pv_processed.csv")
onshore_data = pd.read_csv(
'{}/temporal_granularity/data/processed/resources/onshore_processed.csv'.format(project_dir))
load_data = pd.read_csv(
"{}/temporal_granularity/data/processed/demand/load_processed_normalised.csv".format(project_dir))
# offshore_data = pd.read_csv(
# '{}/temporal_granularity/data/processed/resources/offshore_processed.csv'.format(project_dir))
pv_data = pd.read_csv(
'{}/temporal_granularity/data/processed/resources/pv_processed.csv'.format(project_dir))
data = [pv_data, onshore_data, load_data]
results = []
for i in range(100):
logger.info("Running iteration {}".format(i))
for method in ['centroids', 'medoids']:
logger.info("Approximating using {} method".format(method))
# for num_days in [4]:
for num_days in [4, 8, 12, 24, 48, 61]:
logger.info("Calculating using {} days".format(num_days))
original_ldcs = []
approximated_ldcs = []
original_rdcs = []
approximated_rdcs = []
original_ldcs.clear()
approximated_ldcs.clear()
original_rdcs.clear()
approximated_rdcs.clear()
for dat in data:
approximator = ApproximateData(dat, num_days)
original_ldc = approximator.get_load_duration_curve(
year="2014")
original_ldcs.append(original_ldc)
medoids_approximation = approximator.get_approximated_ldc(
method)
approximated_ldcs.append(medoids_approximation)
original_rdc = approximator.get_ramp_duration_curve(
year="2014")
original_rdcs.append(original_rdc)
approximated_rdc = approximator.get_approximated_rdc(
method)
approximated_rdcs.append(approximated_rdc)
rdc_metrics_calculator = Metrics(
original_rdcs[0], approximated_rdcs[0], original_rdcs[1],
approximated_rdcs[1], original_rdcs[2], approximated_rdcs[2], "rdc")
mean_rdc_errors = rdc_metrics_calculator.get_mean_error_metrics()
mean_rdc_errors['num_days'] = num_days
mean_rdc_errors['method'] = method
results.append(mean_rdc_errors)
ldc_metrics_calculator = Metrics(
original_ldcs[0], approximated_ldcs[0], original_ldcs[1], approximated_ldcs[1], original_ldcs[2], approximated_ldcs[2], "dc")
mean_ldc_errors = ldc_metrics_calculator.get_mean_error_metrics()
mean_ldc_errors['num_days'] = num_days
mean_ldc_errors['method'] = method
results.append(mean_ldc_errors)
results_dataframe =
|
pd.concat(results)
|
pandas.concat
|
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
with pytest.raises(Exception):
store.select("df2")
def test_mode(self, setup_path):
df = tm.makeTimeDataFrame()
def check(mode):
with ensure_clean_path(setup_path) as path:
# constructor
if mode in ["r", "r+"]:
with pytest.raises(IOError):
HDFStore(path, mode=mode)
else:
store = HDFStore(path, mode=mode)
assert store._handle.mode == mode
store.close()
with ensure_clean_path(setup_path) as path:
# context
if mode in ["r", "r+"]:
with pytest.raises(IOError):
with HDFStore(path, mode=mode) as store: # noqa
pass
else:
with HDFStore(path, mode=mode) as store:
assert store._handle.mode == mode
with ensure_clean_path(setup_path) as path:
# conv write
if mode in ["r", "r+"]:
with pytest.raises(IOError):
df.to_hdf(path, "df", mode=mode)
df.to_hdf(path, "df", mode="w")
else:
df.to_hdf(path, "df", mode=mode)
# conv read
if mode in ["w"]:
with pytest.raises(ValueError):
read_hdf(path, "df", mode=mode)
else:
result = read_hdf(path, "df", mode=mode)
tm.assert_frame_equal(result, df)
def check_default_mode():
# read_hdf uses default mode
with ensure_clean_path(setup_path) as path:
df.to_hdf(path, "df", mode="w")
result = read_hdf(path, "df")
tm.assert_frame_equal(result, df)
check("r")
check("r+")
check("a")
check("w")
check_default_mode()
def test_reopen_handle(self, setup_path):
with ensure_clean_path(setup_path) as path:
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# invalid mode change
with pytest.raises(PossibleDataLossError):
store.open("w")
store.close()
assert not store.is_open
# truncation ok here
store.open("w")
assert store.is_open
assert len(store) == 0
store.close()
assert not store.is_open
store = HDFStore(path, mode="a")
store["a"] = tm.makeTimeSeries()
# reopen as read
store.open("r")
assert store.is_open
assert len(store) == 1
assert store._mode == "r"
store.close()
assert not store.is_open
# reopen as append
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
# reopen as append (again)
store.open("a")
assert store.is_open
assert len(store) == 1
assert store._mode == "a"
store.close()
assert not store.is_open
def test_open_args(self, setup_path):
with ensure_clean_path(setup_path) as path:
df =
|
tm.makeDataFrame()
|
pandas.util.testing.makeDataFrame
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime as dt
import time
import os
import imageio
from tqdm import tqdm
import sys
import statsmodels.api as sm
from scipy.stats import shapiro
from environment import StaticEnvironment
def date_to_index(date_string, start_date):
return (dt.datetime.strptime(date_string, '%Y-%m-%d') - start_date).days
def index_to_date(index, start_date):
return (start_date + dt.timedelta(index)).strftime('%Y-%m-%d')
def portfolio(w :np.array, r: np.array, mean_model, cov_model, satisfaction_model, annualize = False):
'''
:param w: n x 1 portfolio weights
:param r: t x n portfolio returns
:param mean_model: function for modelling the expected value of the portfolio
:param cov_model: function for modelling the covariance matrix
:param satisfaction_model: satisfaction (utility) function of expected return and volatility
:param annualize: if True, annualize the mean and covariance estimates with a 252-day factor
:return: np.array of [expected portfolio return, portfolio volatility, satisfaction]
'''
mu_hat = mean_model(r)
sig_hat = cov_model(r)
if annualize:
mu_hat *= 252
sig_hat *= 252
r_p = np.sum(mu_hat * w)
sig_p = np.sqrt(np.dot(w.T, np.dot(sig_hat, w)))
#satisfaction measure
satis = satisfaction_model(r_p, sig_p)
return np.array([r_p, sig_p, satis])
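# Hedged usage sketch (not part of the original source): calls `portfolio` with simple
# sample-moment estimators and a mean-variance satisfaction function. The helper name
# `_demo_portfolio_metrics` and the risk-aversion value of 4 are illustrative assumptions.
def _demo_portfolio_metrics():
rng = np.random.default_rng(0)
r = rng.normal(0.0005, 0.01, size=(250, 3))  # 250 days of simulated returns for 3 assets
w = np.array([0.5, 0.3, 0.2])  # portfolio weights summing to 1
mean_model = lambda x: x.mean(axis=0)  # sample mean per asset
cov_model = lambda x: np.cov(x, rowvar=False)  # sample covariance matrix
satisfaction_model = lambda mu, sig: mu - 0.5 * 4.0 * sig ** 2  # mean-variance utility
return portfolio(w, r, mean_model, cov_model, satisfaction_model, annualize=True)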
def test_static_agent(env, agent, optimization_problem,
fitting_period, rebalancing_period,
factors=None, **kwargs):
returns = []
actions = []
counter = 0
tic = time.perf_counter()
# factor indexing
if optimization_problem == 'sr_factor':
factor_env = StaticEnvironment(factors.loc[env.prices.index[0]:], **kwargs)
for trade in range(fitting_period, len(env.prices), rebalancing_period):
# print(trade, counter*rebalancing_period)
s_t = env.get_state(trade, counter * rebalancing_period)
# print(s_t.shape)
if optimization_problem == 'sr_factor':
# use the factor environment built above (factor_env was otherwise unused)
s_t_factor = factor_env.get_state(trade, counter * rebalancing_period)
a_t = agent.act(s_t, optimization_problem, factors=s_t_factor)
else:
a_t = agent.act(s_t, optimization_problem, **kwargs)
actions.append(a_t)
s_t_trade = s_t.iloc[-rebalancing_period:, :]
#transaction costs
if len(actions) > 1:
a_delta = actions[len(actions) - 1] - actions[len(actions) - 2]
r_t = np.dot(s_t_trade, a_t) - np.dot(s_t_trade * env.transaction_cost, a_delta)
else:
r_t = np.dot(s_t_trade, a_t)
returns.append(r_t)
counter += 1
returns = np.vstack(returns).flatten()
toc = time.perf_counter()
print(f"Tested {optimization_problem} in {toc - tic:0.4f} seconds")
return returns
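# Hedged usage sketch (assumptions, not from the original source): `agent` is any object exposing
# `.act(state, optimization_problem, **kwargs)`, `prices_df` is a DataFrame of asset prices, and
# 'max_sharpe' plus the transaction_cost keyword are placeholder names.
# env = StaticEnvironment(prices_df, transaction_cost=0.0005)
# returns = test_static_agent(env, agent, 'max_sharpe', fitting_period=252, rebalancing_period=21)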
def load_data_long(db_path):
db_long_prices = pd.read_csv(db_path + '00_db_long__PX_LAST.csv', index_col=0, parse_dates=True)
db_long_prices = db_long_prices.loc['2015':]
db_long_RL = db_long_prices.loc[:, ~db_long_prices.iloc[0].isna()].fillna(method='ffill')
return db_long_RL
def plot_training_result(rl_history, benchmark_history, n_, actions_to_plot, column_names):
rl_result = np.array(rl_history).cumsum()
benchmark_result = np.array(benchmark_history).cumsum()
fig = plt.figure(figsize=(12,6))
top = plt.subplot2grid((4, 4), (0, 0), rowspan=2, colspan=4)
bottom = plt.subplot2grid((4, 4), (2, 0), rowspan=2, colspan=4)
#returns
top.plot(rl_result, color='black', ls = '-')
top.plot(benchmark_result, color = 'grey', ls = '--')
#weights
for a in actions_to_plot:
plt.bar(np.arange(n_), a, color = 'goldenrod', alpha = 0.25)
plt.xticks(np.arange(n_), column_names, rotation = 'vertical')
plt_show()
def plot_histograms(ew, subset):
sns.set_palette('bright')
fig, ax = plt.subplots(figsize=(20, 15))
for i, column in enumerate(subset.columns, 1):
plt.subplot(3, 3, i)
to_plot = pd.concat([ew, subset[column]], axis=1)
sns.histplot(to_plot, kde=True, multiple='stack', alpha=0.5)
plt.xlim(-.13,.13)
def create_weights_gif(weights, model_name, saving_path, **plot_kwargs):
'''
@param weights: array of weights
@param model_name: name of the model, string
@param saving_path: path to save, string
@param plot_kwargs: list of kwargs to unpack for plot
@return: None
'''
tic = time.perf_counter()
n_frames = 5
x = weights.columns.to_list()
# obtain lists of weights for each day
y_lists = []
for _, row in weights.iterrows():
rw = row.to_list()
y_lists.append(rw)
# iterate over each row
filenames = []
y_cache = []
with tqdm(total=round(len(y_lists) / 20, 0), file=sys.stdout) as pbar:
for index in np.arange(0, len(y_lists) - 1, step=20):
y = y_lists[index]
y1 = y_lists[index + 1]
# distance to next pos
y_path = np.array(y1) - np.array(y)
# obtain image for each frame
for i in np.arange(0, n_frames + 1):
y_temp = (y + (y_path / n_frames) * i)
y_cache.append(y_temp)
# plot
fig, ax = plt.subplots(figsize=(10, 8))
plt.barh(x, y_temp, color='goldenrod', **plot_kwargs)
# cache and plot dissipating weights
if len(y_cache) > 0:
for idx, cache in enumerate(y_cache):
plt.barh(x, cache, color='goldenrod', alpha=0.4 - 0.05 * idx)
plt.xlim(0, 0.07)
# if cache is full first in last out
if len(y_cache) == 8:
y_cache.pop(0)
# build a filename
filename = os.path.join(saving_path, f'gif/frame_{index}_{i}.png')
filenames.append(filename)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{model_name} test trading day: #{index}')
ax.set_xlabel('weight')
# last frame needs to stay longer
if (i == n_frames):
for i in range(2):
filenames.append(filename)
# save images
plt.savefig(filename, dpi=96)
plt.close()
pbar.update(1)
print('Charts saved \n')
print('Creating gif\n')
# create the gif
with imageio.get_writer(os.path.join(saving_path, f'{model_name}_weights.gif'), mode='I') as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
toc = time.perf_counter()
print(f'Gif produced in {(toc - tic) / 60 :0.4f} minutes')
# print('Removing Images\n')
# # Remove files
# for filename in set(filenames):
# os.remove(filename)
print('DONE')
def normality_test(ew, subset):
fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(20, 12))
ax = axes.flatten()
subset = pd.concat([ew, subset], axis=1)
subset = subset.iloc[1:, ]
for i, column in enumerate(subset.columns):
_, p_value = shapiro(subset[column])
sm.qqplot(subset[column], line='q', ax=ax[i])
ax[i].set_title(column + ': Shapiro-Wilk p-value:' + str(round(p_value, 4)))
def plt_show():
'''Text-blocking version of plt.show()
Use this instead of plt.show()'''
plt.draw()
plt.pause(0.001)
input("Press enter to continue...")
plt.close()
# convert List of weights to DataFrame
# NOTE: this block is a fragment from a weight-backtesting helper; `weightsRebal`, `r` and
# `estimation_window` are assumed to be defined by the enclosing function.
weights = pd.DataFrame(weightsRebal, index=r.iloc[estimation_window:].index, columns=r.columns)
returns = (weights * r).sum(axis="columns", min_count=1)  # min_count=1 yields NA if all inputs are NA
return returns
def read_ohlcv(db_name, db_path):
c = pd.read_csv(db_path + db_name + '__PX_LAST.csv', parse_dates=True, index_col=0)
c = c.loc[:, ~c.iloc[0].isna()]
#drop names that were not traded
c = c.loc[:, ~c.iloc[-1].isna()]
ticks = c.columns
o = pd.read_csv(db_path + db_name + '__PX_OPEN.csv', parse_dates=True, index_col=0).reindex(ticks, axis = 1)
h =
|
pd.read_csv(db_path + db_name + '__PX_HIGH.csv', parse_dates=True, index_col=0)
|
pandas.read_csv
|
import copy
from datetime import datetime
import warnings
import numpy as np
from numpy.random import randn
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Index, Series, isna, notna
import pandas._testing as tm
from pandas.core.window.common import _flex_binary_moment
from pandas.tests.window.common import (
Base,
check_pairwise_moment,
moments_consistency_cov_data,
moments_consistency_is_constant,
moments_consistency_mock_mean,
moments_consistency_series_data,
moments_consistency_std_data,
moments_consistency_var_data,
moments_consistency_var_debiasing_factors,
)
import pandas.tseries.offsets as offsets
@pytest.mark.filterwarnings("ignore:can't resolve package:ImportWarning")
class TestMoments(Base):
def setup_method(self, method):
self._create_data()
def test_centered_axis_validation(self):
# ok
Series(np.ones(10)).rolling(window=3, center=True, axis=0).mean()
# bad axis
with pytest.raises(ValueError):
Series(np.ones(10)).rolling(window=3, center=True, axis=1).mean()
# ok ok
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=0).mean()
DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=1).mean()
# bad axis
with pytest.raises(ValueError):
(DataFrame(np.ones((10, 10))).rolling(window=3, center=True, axis=2).mean())
def test_rolling_sum(self, raw):
self._check_moment_func(
np.nansum, name="sum", zero_min_periods_equal=False, raw=raw
)
def test_rolling_count(self, raw):
counter = lambda x: np.isfinite(x).astype(float).sum()
self._check_moment_func(
counter, name="count", has_min_periods=False, fill_value=0, raw=raw
)
def test_rolling_mean(self, raw):
self._check_moment_func(np.mean, name="mean", raw=raw)
@td.skip_if_no_scipy
def test_cmov_mean(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window(self):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
result = Series(vals).rolling(5, win_type="boxcar", center=True).mean()
expected = Series(
[
np.nan,
np.nan,
9.962,
11.27,
11.564,
12.516,
12.818,
12.952,
np.nan,
np.nan,
]
)
tm.assert_series_equal(expected, result)
@td.skip_if_no_scipy
def test_cmov_window_corner(self):
# GH 8238
# all nan
vals = pd.Series([np.nan] * 10)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert np.isnan(result).all()
# empty
vals = pd.Series([], dtype=object)
result = vals.rolling(5, center=True, win_type="boxcar").mean()
assert len(result) == 0
# shorter than window
vals = pd.Series(np.random.randn(5))
result = vals.rolling(10, win_type="boxcar").mean()
assert np.isnan(result).all()
assert len(result) == 5
@td.skip_if_no_scipy
@pytest.mark.parametrize(
"f,xp",
[
(
"mean",
[
[np.nan, np.nan],
[np.nan, np.nan],
[9.252, 9.392],
[8.644, 9.906],
[8.87, 10.208],
[6.81, 8.588],
[7.792, 8.644],
[9.05, 7.824],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"std",
[
[np.nan, np.nan],
[np.nan, np.nan],
[3.789706, 4.068313],
[3.429232, 3.237411],
[3.589269, 3.220810],
[3.405195, 2.380655],
[3.281839, 2.369869],
[3.676846, 1.801799],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"var",
[
[np.nan, np.nan],
[np.nan, np.nan],
[14.36187, 16.55117],
[11.75963, 10.48083],
[12.88285, 10.37362],
[11.59535, 5.66752],
[10.77047, 5.61628],
[13.51920, 3.24648],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
(
"sum",
[
[np.nan, np.nan],
[np.nan, np.nan],
[46.26, 46.96],
[43.22, 49.53],
[44.35, 51.04],
[34.05, 42.94],
[38.96, 43.22],
[45.25, 39.12],
[np.nan, np.nan],
[np.nan, np.nan],
],
),
],
)
def test_cmov_window_frame(self, f, xp):
# Gh 8238
df = DataFrame(
np.array(
[
[12.18, 3.64],
[10.18, 9.16],
[13.24, 14.61],
[4.51, 8.11],
[6.15, 11.44],
[9.14, 6.21],
[11.31, 10.67],
[2.94, 6.51],
[9.42, 8.39],
[12.44, 7.34],
]
)
)
xp = DataFrame(np.array(xp))
roll = df.rolling(5, win_type="boxcar", center=True)
rs = getattr(roll, f)()
tm.assert_frame_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_na_min_periods(self):
# min_periods
vals = Series(np.random.randn(10))
vals[4] = np.nan
vals[8] = np.nan
xp = vals.rolling(5, min_periods=4, center=True).mean()
rs = vals.rolling(5, win_type="boxcar", min_periods=4, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"hamming": [
np.nan,
np.nan,
8.71384,
9.56348,
12.38009,
14.03687,
13.8567,
11.81473,
np.nan,
np.nan,
],
"triang": [
np.nan,
np.nan,
9.28667,
10.34667,
12.00556,
13.33889,
13.38,
12.33667,
np.nan,
np.nan,
],
"barthann": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
"bohman": [
np.nan,
np.nan,
7.61599,
9.1764,
12.83559,
14.17267,
14.65923,
11.10401,
np.nan,
np.nan,
],
"blackmanharris": [
np.nan,
np.nan,
6.97691,
9.16438,
13.05052,
14.02156,
15.10512,
10.74574,
np.nan,
np.nan,
],
"nuttall": [
np.nan,
np.nan,
7.04618,
9.16786,
13.02671,
14.03559,
15.05657,
10.78514,
np.nan,
np.nan,
],
"blackman": [
np.nan,
np.nan,
7.73345,
9.17869,
12.79607,
14.20036,
14.57726,
11.16988,
np.nan,
np.nan,
],
"bartlett": [
np.nan,
np.nan,
8.4425,
9.1925,
12.5575,
14.3675,
14.0825,
11.5675,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_linear_range(self, win_types):
# GH 8238
vals = np.array(range(10), dtype=float)  # np.float was removed in newer NumPy; use the builtin float
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = Series(vals).rolling(5, win_type=win_types, center=True).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_regular_missing_data(self, win_types):
# GH 8238
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]
)
xps = {
"bartlett": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"blackman": [
np.nan,
np.nan,
9.04582,
11.41536,
7.73345,
9.17869,
12.79607,
14.20036,
15.8706,
13.655,
],
"barthann": [
np.nan,
np.nan,
9.70333,
10.5225,
8.4425,
9.1925,
12.5575,
14.3675,
15.61667,
13.655,
],
"bohman": [
np.nan,
np.nan,
8.9444,
11.56327,
7.61599,
9.1764,
12.83559,
14.17267,
15.90976,
13.655,
],
"hamming": [
np.nan,
np.nan,
9.59321,
10.29694,
8.71384,
9.56348,
12.38009,
14.20565,
15.24694,
13.69758,
],
"nuttall": [
np.nan,
np.nan,
8.47693,
12.2821,
7.04618,
9.16786,
13.02671,
14.03673,
16.08759,
13.65553,
],
"triang": [
np.nan,
np.nan,
9.33167,
9.76125,
9.28667,
10.34667,
12.00556,
13.82125,
14.49429,
13.765,
],
"blackmanharris": [
np.nan,
np.nan,
8.42526,
12.36824,
6.97691,
9.16438,
13.05052,
14.02175,
16.1098,
13.65509,
],
}
xp = Series(xps[win_types])
rs = Series(vals).rolling(5, win_type=win_types, min_periods=3).mean()
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"exponential": {"tau": 10},
}
vals = np.array(
[6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48]
)
xps = {
"gaussian": [
np.nan,
np.nan,
8.97297,
9.76077,
12.24763,
13.89053,
13.65671,
12.01002,
np.nan,
np.nan,
],
"general_gaussian": [
np.nan,
np.nan,
9.85011,
10.71589,
11.73161,
13.08516,
12.95111,
12.74577,
np.nan,
np.nan,
],
"kaiser": [
np.nan,
np.nan,
9.86851,
11.02969,
11.65161,
12.75129,
12.90702,
12.83757,
np.nan,
np.nan,
],
"exponential": [
np.nan,
np.nan,
9.83364,
11.10472,
11.64551,
12.66138,
12.92379,
12.83770,
np.nan,
np.nan,
],
}
xp = Series(xps[win_types_special])
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
@td.skip_if_no_scipy
def test_cmov_window_special_linear_range(self, win_types_special):
# GH 8238
kwds = {
"kaiser": {"beta": 1.0},
"gaussian": {"std": 1.0},
"general_gaussian": {"power": 2.0, "width": 2.0},
"slepian": {"width": 0.5},
"exponential": {"tau": 10},
}
vals = np.array(range(10), dtype=float)  # np.float was removed in newer NumPy; use the builtin float
xp = vals.copy()
xp[:2] = np.nan
xp[-2:] = np.nan
xp = Series(xp)
rs = (
Series(vals)
.rolling(5, win_type=win_types_special, center=True)
.mean(**kwds[win_types_special])
)
tm.assert_series_equal(xp, rs)
def test_rolling_median(self, raw):
self._check_moment_func(np.median, name="median", raw=raw)
def test_rolling_min(self, raw):
self._check_moment_func(np.min, name="min", raw=raw)
a = pd.Series([1, 2, 3, 4, 5])
result = a.rolling(window=100, min_periods=1).min()
expected = pd.Series(np.ones(len(a)))
tm.assert_series_equal(result, expected)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).min()
def test_rolling_max(self, raw):
self._check_moment_func(np.max, name="max", raw=raw)
a = pd.Series([1, 2, 3, 4, 5], dtype=np.float64)
b = a.rolling(window=100, min_periods=1).max()
tm.assert_almost_equal(a, b)
with pytest.raises(ValueError):
pd.Series([1, 2, 3]).rolling(window=3, min_periods=5).max()
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_rolling_quantile(self, q, raw):
def scoreatpercentile(a, per):
values = np.sort(a, axis=0)
idx = int(per / 1.0 * (values.shape[0] - 1))
if idx == values.shape[0] - 1:
retval = values[-1]
else:
qlow = float(idx) / float(values.shape[0] - 1)
qhig = float(idx + 1) / float(values.shape[0] - 1)
vlow = values[idx]
vhig = values[idx + 1]
retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)
return retval
def quantile_func(x):
return scoreatpercentile(x, q)
self._check_moment_func(quantile_func, name="quantile", quantile=q, raw=raw)
def test_rolling_quantile_np_percentile(self):
# #9413: Tests that rolling window's quantile default behavior
# is analogous to Numpy's percentile
row = 10
col = 5
idx = pd.date_range("20100101", periods=row, freq="B")
df = DataFrame(np.random.rand(row * col).reshape((row, -1)), index=idx)
df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)
np_percentile = np.percentile(df, [25, 50, 75], axis=0)
tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))
@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])
@pytest.mark.parametrize(
"interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
"data",
[
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],
[8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],
[0.0, np.nan, 0.2, np.nan, 0.4],
[np.nan, np.nan, np.nan, np.nan],
[np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],
[0.5],
[np.nan, 0.7, 0.6],
],
)
def test_rolling_quantile_interpolation_options(
self, quantile, interpolation, data
):
# Tests that rolling window's quantile behavior is analogous to
# Series' quantile for each interpolation option
s = Series(data)
q1 = s.quantile(quantile, interpolation)
q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]
if np.isnan(q1):
assert np.isnan(q2)
else:
assert q1 == q2
def test_invalid_quantile_value(self):
data = np.arange(5)
s = Series(data)
msg = "Interpolation 'invalid' is not supported"
with pytest.raises(ValueError, match=msg):
s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")
def test_rolling_quantile_param(self):
ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])
with pytest.raises(ValueError):
ser.rolling(3).quantile(-0.1)
with pytest.raises(ValueError):
ser.rolling(3).quantile(10.0)
with pytest.raises(TypeError):
ser.rolling(3).quantile("foo")
def test_rolling_apply(self, raw):
# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
def f(x):
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*(empty slice|0 for slice).*",
category=RuntimeWarning,
)
return x[np.isfinite(x)].mean()
self._check_moment_func(np.mean, name="apply", func=f, raw=raw)
def test_rolling_std(self, raw):
self._check_moment_func(lambda x: np.std(x, ddof=1), name="std", raw=raw)
self._check_moment_func(
lambda x: np.std(x, ddof=0), name="std", ddof=0, raw=raw
)
def test_rolling_std_1obs(self):
vals = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
result = vals.rolling(1, min_periods=1).std()
expected = pd.Series([np.nan] * 5)
tm.assert_series_equal(result, expected)
result = vals.rolling(1, min_periods=1).std(ddof=0)
expected = pd.Series([0.0] * 5)
tm.assert_series_equal(result, expected)
result = pd.Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()
assert np.isnan(result[2])
def test_rolling_std_neg_sqrt(self):
# unit test from Bottleneck
# Test move_nanstd for neg sqrt.
a = pd.Series(
[
0.0011448196318903589,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
0.00028718669878572767,
]
)
b = a.rolling(window=3).std()
assert np.isfinite(b[2:]).all()
b = a.ewm(span=3).std()
assert np.isfinite(b[2:]).all()
def test_rolling_var(self, raw):
self._check_moment_func(lambda x: np.var(x, ddof=1), name="var", raw=raw)
self._check_moment_func(
lambda x: np.var(x, ddof=0), name="var", ddof=0, raw=raw
)
@td.skip_if_no_scipy
def test_rolling_skew(self, raw):
from scipy.stats import skew
self._check_moment_func(lambda x: skew(x, bias=False), name="skew", raw=raw)
@td.skip_if_no_scipy
def test_rolling_kurt(self, raw):
from scipy.stats import kurtosis
self._check_moment_func(lambda x: kurtosis(x, bias=False), name="kurt", raw=raw)
def _check_moment_func(
self,
static_comp,
name,
raw,
has_min_periods=True,
has_center=True,
has_time_rule=True,
fill_value=None,
zero_min_periods_equal=True,
**kwargs,
):
# inject raw
if name == "apply":
kwargs = copy.copy(kwargs)
kwargs["raw"] = raw
def get_result(obj, window, min_periods=None, center=False):
r = obj.rolling(window=window, min_periods=min_periods, center=center)
return getattr(r, name)(**kwargs)
series_result = get_result(self.series, window=50)
assert isinstance(series_result, Series)
tm.assert_almost_equal(series_result.iloc[-1], static_comp(self.series[-50:]))
frame_result = get_result(self.frame, window=50)
assert isinstance(frame_result, DataFrame)
tm.assert_series_equal(
frame_result.iloc[-1, :],
self.frame.iloc[-50:, :].apply(static_comp, axis=0, raw=raw),
check_names=False,
)
# check time_rule works
if has_time_rule:
win = 25
minp = 10
series = self.series[::2].resample("B").mean()
frame = self.frame[::2].resample("B").mean()
if has_min_periods:
series_result = get_result(series, window=win, min_periods=minp)
frame_result = get_result(frame, window=win, min_periods=minp)
else:
series_result = get_result(series, window=win, min_periods=0)
frame_result = get_result(frame, window=win, min_periods=0)
last_date = series_result.index[-1]
prev_date = last_date - 24 * offsets.BDay()
trunc_series = self.series[::2].truncate(prev_date, last_date)
trunc_frame = self.frame[::2].truncate(prev_date, last_date)
tm.assert_almost_equal(series_result[-1], static_comp(trunc_series))
tm.assert_series_equal(
frame_result.xs(last_date),
trunc_frame.apply(static_comp, raw=raw),
check_names=False,
)
# excluding NaNs correctly
obj = Series(randn(50))
obj[:10] = np.NaN
obj[-10:] = np.NaN
if has_min_periods:
result = get_result(obj, 50, min_periods=30)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# min_periods is working correctly
result = get_result(obj, 20, min_periods=15)
assert isna(result.iloc[23])
assert not isna(result.iloc[24])
assert not isna(result.iloc[-6])
assert isna(result.iloc[-5])
obj2 = Series(randn(20))
result = get_result(obj2, 10, min_periods=5)
assert isna(result.iloc[3])
assert notna(result.iloc[4])
if zero_min_periods_equal:
# min_periods=0 may be equivalent to min_periods=1
result0 = get_result(obj, 20, min_periods=0)
result1 = get_result(obj, 20, min_periods=1)
tm.assert_almost_equal(result0, result1)
else:
result = get_result(obj, 50)
tm.assert_almost_equal(result.iloc[-1], static_comp(obj[10:-10]))
# window larger than series length (#7297)
if has_min_periods:
for minp in (0, len(self.series) - 1, len(self.series)):
result = get_result(self.series, len(self.series) + 1, min_periods=minp)
expected = get_result(self.series, len(self.series), min_periods=minp)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
else:
result = get_result(self.series, len(self.series) + 1, min_periods=0)
expected = get_result(self.series, len(self.series), min_periods=0)
nan_mask = isna(result)
tm.assert_series_equal(nan_mask, isna(expected))
nan_mask = ~nan_mask
tm.assert_almost_equal(result[nan_mask], expected[nan_mask])
# check center=True
if has_center:
if has_min_periods:
result = get_result(obj, 20, min_periods=15, center=True)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=15
)[9:].reset_index(drop=True)
else:
result = get_result(obj, 20, min_periods=0, center=True)
print(result)
expected = get_result(
pd.concat([obj, Series([np.NaN] * 9)]), 20, min_periods=0
)[9:].reset_index(drop=True)
tm.assert_series_equal(result, expected)
# shifter index
s = [f"x{x:d}" for x in range(12)]
if has_min_periods:
minp = 10
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=minp,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=minp, center=True
)
frame_rs = get_result(
self.frame, window=25, min_periods=minp, center=True
)
else:
series_xp = (
get_result(
self.series.reindex(list(self.series.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.series.index)
)
frame_xp = (
get_result(
self.frame.reindex(list(self.frame.index) + s),
window=25,
min_periods=0,
)
.shift(-12)
.reindex(self.frame.index)
)
series_rs = get_result(
self.series, window=25, min_periods=0, center=True
)
frame_rs = get_result(self.frame, window=25, min_periods=0, center=True)
if fill_value is not None:
series_xp = series_xp.fillna(fill_value)
frame_xp = frame_xp.fillna(fill_value)
tm.assert_series_equal(series_xp, series_rs)
tm.assert_frame_equal(frame_xp, frame_rs)
def _rolling_consistency_cases():
for window in [1, 2, 3, 10, 20]:
for min_periods in {0, 1, 2, 3, 4, window}:
if min_periods and (min_periods > window):
continue
for center in [False, True]:
yield window, min_periods, center
class TestRollingMomentsConsistency(Base):
def setup_method(self, method):
self._create_data()
# binary moments
def test_rolling_cov(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).cov(B)
tm.assert_almost_equal(result[-1], np.cov(A[-50:], B[-50:])[0, 1])
def test_rolling_corr(self):
A = self.series
B = A + randn(len(A))
result = A.rolling(window=50, min_periods=25).corr(B)
tm.assert_almost_equal(result[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])
# test for correct bias correction
a = tm.makeTimeSeries()
b = tm.makeTimeSeries()
a[:5] = np.nan
b[:10] = np.nan
result = a.rolling(window=len(a), min_periods=1).corr(b)
tm.assert_almost_equal(result[-1], a.corr(b))
@pytest.mark.parametrize("func", ["cov", "corr"])
def test_rolling_pairwise_cov_corr(self, func):
check_pairwise_moment(self.frame, "rolling", func, window=10, min_periods=5)
@pytest.mark.parametrize("method", ["corr", "cov"])
def test_flex_binary_frame(self, method):
series = self.frame[1]
res = getattr(series.rolling(window=10), method)(self.frame)
res2 = getattr(self.frame.rolling(window=10), method)(series)
exp = self.frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))
tm.assert_frame_equal(res, exp)
tm.assert_frame_equal(res2, exp)
frame2 = self.frame.copy()
frame2.values[:] = np.random.randn(*frame2.shape)
res3 = getattr(self.frame.rolling(window=10), method)(frame2)
exp = DataFrame(
{
k: getattr(self.frame[k].rolling(window=10), method)(frame2[k])
for k in self.frame
}
)
tm.assert_frame_equal(res3, exp)
@pytest.mark.slow
@pytest.mark.parametrize(
"window,min_periods,center", list(_rolling_consistency_cases())
)
def test_rolling_apply_consistency(
consistency_data, base_functions, no_nan_functions, window, min_periods, center
):
x, is_constant, no_nans = consistency_data
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", message=".*(empty slice|0 for slice).*", category=RuntimeWarning,
)
# test consistency between rolling_xyz() and either (a)
# rolling_apply of Series.xyz(), or (b) rolling_apply of
# np.nanxyz()
functions = base_functions
# GH 8269
if no_nans:
functions = no_nan_functions + base_functions
for (f, require_min_periods, name) in functions:
rolling_f = getattr(
x.rolling(window=window, center=center, min_periods=min_periods), name,
)
if (
require_min_periods
and (min_periods is not None)
and (min_periods < require_min_periods)
):
continue
if name == "count":
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
else:
if name in ["cov", "corr"]:
rolling_f_result = rolling_f(pairwise=False)
else:
rolling_f_result = rolling_f()
rolling_apply_f_result = x.rolling(
window=window, min_periods=min_periods, center=center
).apply(func=f, raw=True)
# GH 9422
if name in ["sum", "prod"]:
tm.assert_equal(rolling_f_result, rolling_apply_f_result)
@pytest.mark.parametrize("window", range(7))
def test_rolling_corr_with_zero_variance(window):
# GH 18430
s = pd.Series(np.zeros(20))
other = pd.Series(np.arange(20))
assert s.rolling(window=window).corr(other=other).isna().all()
def test_flex_binary_moment():
# GH3155
# don't blow the stack
msg = "arguments to moment function must be of type np.ndarray/Series/DataFrame"
with pytest.raises(TypeError, match=msg):
_flex_binary_moment(5, 6, None)
def test_corr_sanity():
# GH 3155
df = DataFrame(
np.array(
[
[0.87024726, 0.18505595],
[0.64355431, 0.3091617],
[0.92372966, 0.50552513],
[0.00203756, 0.04520709],
[0.84780328, 0.33394331],
[0.78369152, 0.63919667],
]
)
)
res = df[0].rolling(5, center=True).corr(df[1])
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
# and some fuzzing
for _ in range(10):
df = DataFrame(np.random.rand(30, 2))
res = df[0].rolling(5, center=True).corr(df[1])
try:
assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)
except AssertionError:
print(res)
def test_rolling_cov_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2)
expected = Series([None, None, 2.0])
tm.assert_series_equal(result, expected)
s2a = Series([1, None, 3], index=[0, 1, 2])
result = s1.rolling(window=3, min_periods=2).cov(s2a)
tm.assert_series_equal(result, expected)
def test_rolling_corr_diff_length():
# GH 7512
s1 = Series([1, 2, 3], index=[0, 1, 2])
s2 = Series([1, 3], index=[0, 2])
result = s1.rolling(window=3, min_periods=2).corr(s2)
expected =
|
Series([None, None, 1.0])
|
pandas.Series
|
from sklearn.preprocessing import MultiLabelBinarizer
import pandas as pd
import numpy as np
import copy
features = [
'StoreID', 'Date', 'IsOpen', 'IsHoliday', 'HasPromotions', 'NumberOfSales',
'StoreType_Hyper Market','StoreType_Shopping Center', 'StoreType_Standard Market',
'StoreType_Super Market',
'NearestCompetitor',
'WeekDay_0', 'WeekDay_1', 'WeekDay_2','WeekDay_3',
'WeekDay_4', 'WeekDay_5', 'WeekDay_6', 'Region_0',
'Region_1', 'Region_2', 'Region_3', 'Region_4', 'Region_5',
'Region_6','Region_7', 'Region_8', 'Region_9', 'Region_10',
'Month_1', 'Month_2','Month_3', 'Month_4', 'Month_5', 'Month_6',
'Month_7', 'Month_8','Month_9', 'Month_10', 'Month_11', 'Month_12',
'AssortmentType_General','AssortmentType_With Fish Department', 'AssortmentType_With Non-Food Department'
]
class TrainPreprocessingReg():
def __init__(self, data, to_date):
self.data = copy.deepcopy(data)
fields = ["StoreType", "WeekDay", "Region", "Month", "AssortmentType"]
self.data["Date"] = pd.to_datetime(self.data['Date'], format='%d/%m/%Y')
self.add_date_columns(date_column_name="Date", month=True, day_of_week=True)
self.one_hot_encode(fields=fields)
self.data = self.data[features]
self.data = self.data[self.data["Date"] < to_date]
self.data = self.data[self.data["IsOpen"] == 1]
self.X = np.array(self.data.drop(["StoreID", "IsOpen", "Date", "NumberOfSales"], inplace=False, axis = 1))
self.y = np.array(self.data["NumberOfSales"])
def get_X_y(self):
return self.X, self.y
def one_hot_encode(self, fields):
self.data = pd.get_dummies(self.data, columns=fields)
def split_attribute_list(self, column, attributes, fillna):
mlb = MultiLabelBinarizer(classes=attributes)
if fillna is not None:
self.data[column] = self.data[column].fillna(fillna, inplace=False)
self.data[column] = self.data[column].apply(lambda x: x.split('-'))
new_columns_values = mlb.fit_transform(self.data[column].values.tolist())
self.data[attributes] = pd.DataFrame(new_columns_values, index=self.data.index)
def add_date_columns(self, date_column_name, year=False, month=False, day_n=False, day_of_week=False):
if year:
self.data["Year"] = self.data[date_column_name].dt.year
if month:
self.data["Month"] = self.data[date_column_name].dt.month
if day_n:
self.data["Day"] = self.data[date_column_name].dt.day
if day_of_week :
self.data["WeekDay"] = self.data[date_column_name].dt.dayofweek
class TestPreprocessingReg():
def __init__(self, data, from_date):
self.data = copy.deepcopy(data)
fields = ["StoreType", "WeekDay", "Region", "Month", "AssortmentType"]
self.data["Date"] = pd.to_datetime(self.data['Date'], format='%d/%m/%Y')
self.add_date_columns(date_column_name="Date", month=True, day_of_week=True)
self.one_hot_encode(fields=fields)
self.data = self.data[features]
self.data = self.data[self.data["Date"] >= from_date]
self.dates = sorted(list(self.data["Date"].value_counts().index))
self.X = self.data.drop(["IsOpen", "NumberOfSales"], inplace=False, axis = 1)
self.y = self.data[["StoreID", "Date", "NumberOfSales"]]
def one_hot_encode(self, fields):
self.data = pd.get_dummies(self.data, columns=fields)
def add_date_columns(self, date_column_name, year=False, month=False, day_n=False, day_of_week=False):
if year:
self.data["Year"] = self.data[date_column_name].dt.year
if month:
self.data["Month"] = self.data[date_column_name].dt.month
if day_n:
self.data["Day"] = self.data[date_column_name].dt.day
if day_of_week :
self.data["WeekDay"] = self.data[date_column_name].dt.dayofweek
def split_attribute_list(self, column, attributes, fillna):
mlb = MultiLabelBinarizer(classes=attributes)
if fillna is not None:
self.data[column] = self.data[column].fillna(fillna, inplace=False)
self.data[column] = self.data[column].apply(lambda x: x.split('-'))
new_columns_values = mlb.fit_transform(self.data[column].values.tolist())
self.data[attributes] =
|
pd.DataFrame(new_columns_values, index=self.data.index)
|
pandas.DataFrame
|
#!/usr/bin/python3
# Data adapter for the THYME dataset
import ast
import os
import pandas
from lxml import etree
from .data_adapter import DataAdapter
from data_tools import data_util
class DataAdapterThyme(DataAdapter):
def load_data(self, filename, drop_unlabeled=True):
print('DataAdapterThyme.load_data', filename)
df =
|
pandas.DataFrame(columns=self.column_names)
|
pandas.DataFrame
|
import os
import random
import math
import numpy as np
import pandas as pd
import itertools
from functools import lru_cache
##########################
## Compliance functions ##
##########################
def delayed_ramp_fun(Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current date
tau_days : Timedelta
delay before the measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
t_start : timestamp
date on which the new measures are introduced
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start-tau_days)/pd.Timedelta('1D')
def ramp_fun(Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current date
t_start : timestamp
date on which the new measures are introduced
l : int
number of days until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * (t-t_start)/pd.Timedelta('1D')
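# Worked example (hedged sketch, not from the original source): linear ramp between two contact
# matrices over l = 5 days starting at t_start; two days in, 40% of the change is applied.
def _demo_ramp():
Nc_old = np.array([[10.0, 2.0], [2.0, 5.0]])
Nc_new = np.array([[4.0, 1.0], [1.0, 2.0]])
t_start = pd.Timestamp('2020-03-15')
t = t_start + pd.Timedelta(days=2)
return ramp_fun(Nc_old, Nc_new, t, t_start, l=5)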
###############################
## Mobility update functions ##
###############################
def load_all_mobility_data(agg, dtype='fractional', beyond_borders=False):
"""
Function that fetches all available mobility data and adds it to a DataFrame with dates as indices and numpy matrices as values. Make sure to regularly update the mobility data with the notebook notebooks/preprocessing/Quick-update_mobility-matrices.ipynb to get the data for the most recent days. Also returns the average mobility over all available data, which might NOT always be desirable as a back-up mobility.
Input
-----
agg : str
Denotes the spatial aggregation at hand. Either 'prov', 'arr' or 'mun'
dtype : str
Choose the type of mobility data to return. Either 'fractional' (default), staytime (all available hours for region g spent in h), or visits (all unique visits from region g to h)
beyond_borders : boolean
If true, also include mobility abroad and mobility from foreigners
Returns
-------
all_mobility_data : pd.DataFrame
DataFrame with datetime objects as indices ('DATE') and np.arrays ('place') as value column
average_mobility_data : np.array
average mobility matrix over all available dates
"""
### Validate input ###
if agg not in ['mun', 'arr', 'prov']:
raise ValueError(
"spatial stratification '{0}' is not legitimate. Possible spatial "
"stratifications are 'mun', 'arr', or 'prov'".format(agg)
)
if dtype not in ['fractional', 'staytime', 'visits']:
raise ValueError(
"data type '{0}' is not legitimate. Possible mobility matrix "
"data types are 'fractional', 'staytime', or 'visits'".format(dtype)
)
### Load all available data ###
# Define absolute location of this file
abs_dir = os.path.dirname(__file__)
# Define data location for this particular aggregation level
data_location = f'../../../data/interim/mobility/{agg}/{dtype}'
# Iterate over all available interim mobility data
all_available_dates=[]
all_available_places=[]
directory=os.path.join(abs_dir, f'{data_location}')
for csv in os.listdir(directory):
# take YYYYMMDD information from processed CSVs. NOTE: this supposes a particular data name format!
datum = csv[-12:-4]
# Create list of datetime objects
all_available_dates.append(pd.to_datetime(datum, format="%Y%m%d"))
# Load the CSV as a np.array
if beyond_borders:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').values
else:
place = pd.read_csv(f'{directory}/{csv}', index_col='mllp_postalcode').drop(index='Foreigner', columns='ABROAD').values
if dtype=='fractional':
# make sure the rows sum up to 1 nicely again after dropping a row and a column
place = place / place.sum(axis=1)
# Create list of places
all_available_places.append(place)
# Create new empty dataframe with available dates. Load mobility later
df = pd.DataFrame({'DATE' : all_available_dates, 'place' : all_available_places}).set_index('DATE')
all_mobility_data = df.copy()
# Take average of all available mobility data
average_mobility_data = df['place'].values.mean()
return all_mobility_data, average_mobility_data
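# Hedged usage sketch (not from the original source): requires the interim Proximus mobility CSVs
# to be present under data/interim/mobility/prov/fractional relative to this module.
# all_mobility, avg_mobility = load_all_mobility_data('prov', dtype='fractional', beyond_borders=False)
# mobility_update = make_mobility_update_function(all_mobility, avg_mobility)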
class make_mobility_update_function():
"""
Output the time-dependent mobility function with the data loaded in cache
Input
-----
proximus_mobility_data : DataFrame
Pandas DataFrame with dates as indices and matrices as values. Output of mobility.get_proximus_mobility_data.
proximus_mobility_data_avg : np.array
Average mobility matrix over all matrices
"""
def __init__(self, proximus_mobility_data, proximus_mobility_data_avg):
self.proximus_mobility_data = proximus_mobility_data
self.proximus_mobility_data_avg = proximus_mobility_data_avg
@lru_cache()
# Define mobility_update_func
def __call__(self, t, default_mobility=None):
"""
time-dependent function which has a mobility matrix of type dtype for every date.
Note: only works with datetime input (no integer time steps). This
Input
-----
t : timestamp
current date as datetime object
states : str
formal necessity
param : str
formal necessity
default_mobility : np.array or None
If None (default), returns average mobility over all available dates. Else, return user-defined mobility
Returns
-------
place : np.array
square matrix with mobility of type dtype (fractional, staytime or visits), dimension depending on agg
"""
t = pd.Timestamp(t.date())
try: # if there is data available for this date (if the key exists)
place = self.proximus_mobility_data['place'][t]
except:
if default_mobility: # If there is no data available and a user-defined input is given
place = self.default_mobility
else: # No data and no user input: fall back on average mobility
place = self.proximus_mobility_data_avg
return place
def mobility_wrapper_func(self, t, states, param, default_mobility=None):
t = pd.Timestamp(t.date())
if t <= pd.Timestamp('2020-03-17'):
place = self.__call__(t, default_mobility=default_mobility)
return np.eye(place.shape[0])
else:
return self.__call__(t, default_mobility=default_mobility)
###################
## VOC functions ##
###################
class make_VOC_function():
"""
Class that returns a time-dependent parameter function for COVID-19 SEIRD model parameter alpha (variant fraction).
Current implementation includes the alpha - delta strains.
If the class is initialized without arguments, a logistic model fitted to prevalence data of the alpha-gamma variant is used. The class can also be initialized with the alpha-gamma prevalence data provided by Prof. <NAME>.
A logistic model fitted to prevalence data of the delta variant is always used.
Input
-----
*df_abc: pd.dataFrame (optional)
Alpha, Beta, Gamma prevalence dataset by <NAME>, obtained using:
`from covid19model.data import VOC`
`df_abc = VOC.get_abc_data()`
`VOC_function = make_VOC_function(df_abc)`
Output
------
__class__ : function
Default variant function
"""
def __init__(self, *df_abc):
self.df_abc = df_abc
self.data_given = False
if self.df_abc != ():
self.df_abc = df_abc[0] # First entry in list of optional arguments (dataframe)
self.data_given = True
@lru_cache()
def VOC_abc_data(self,t):
return self.df_abc.iloc[self.df_abc.index.get_loc(t, method='nearest')]['baselinesurv_f_501Y.V1_501Y.V2_501Y.V3']
@lru_cache()
def VOC_abc_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-02-14')
k = 0.07
# Function to return the combined fraction of the alpha/beta/gamma (abc) variants
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
@lru_cache()
def VOC_delta_logistic(self,t):
# Parameters obtained by fitting logistic model to weekly prevalence data
t_sig = pd.Timestamp('2021-06-25')
k = 0.11
# Function to return the fraction of the delta-variant
return 1/(1+np.exp(-k*(t-t_sig)/pd.Timedelta(days=1)))
# Default VOC function includes British and Indian variants
def __call__(self, t, states, param):
# Convert time to timestamp
t = pd.Timestamp(t.date())
# Introduction Indian variant
t1 = pd.Timestamp('2021-05-01')
# Construct alpha
if t <= t1:
if self.data_given:
return np.array([1-self.VOC_abc_data(t), self.VOC_abc_data(t), 0])
else:
return np.array([1-self.VOC_abc_logistic(t), self.VOC_abc_logistic(t), 0])
else:
return np.array([0, 1-self.VOC_delta_logistic(t), self.VOC_delta_logistic(t)])
###########################
## Vaccination functions ##
###########################
from covid19model.data.model_parameters import construct_initN
class make_vaccination_function():
"""
Class that returns a two-fold time-dependent parameter function for the vaccination strategy by default. First, first dose data by sciensano are used. In the future, a hypothetical scheme is used. If spatial data is given, the output consists of vaccination data per NIS code.
Input
-----
df : pd.dataFrame
*either* Sciensano public dataset, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_sciensano_COVID19_data(update=False)`
*or* public spatial vaccination data, obtained using:
`from covid19model.data import sciensano`
`df = sciensano.get_public_spatial_vaccination_data(update=False,agg='arr')`
spatial : Boolean
True if df is spatially explicit. None by default.
Output
------
__class__ : function
Default vaccination function
"""
def __init__(self, df, age_classes=pd.IntervalIndex.from_tuples([(0,12),(12,18),(18,25),(25,35),(35,45),(45,55),(55,65),(65,75),(75,85),(85,120)], closed='left')):
age_stratification_size = len(age_classes)
# Assign inputs to object
self.df = df
self.age_agg = age_stratification_size
# Check if spatial data is provided
self.spatial = None
if 'NIS' in self.df.index.names:
self.spatial = True
self.space_agg = len(self.df.index.get_level_values('NIS').unique().values)
# infer aggregation (prov, arr or mun)
if self.space_agg == 11:
self.agg = 'prov'
elif self.space_agg == 43:
self.agg = 'arr'
elif self.space_agg == 581:
self.agg = 'mun'
else:
raise Exception(f"Space is {G}-fold stratified. This is not recognized as being stratification at Belgian province, arrondissement, or municipality level.")
# Check if dose data is provided
self.doses = None
if 'dose' in self.df.index.names:
self.doses = True
self.dose_agg = len(self.df.index.get_level_values('dose').unique().values)
# Define start- and enddate
self.df_start = pd.Timestamp(self.df.index.get_level_values('date').min())
self.df_end = pd.Timestamp(self.df.index.get_level_values('date').max())
# Perform age conversion
# Define dataframe with desired format
iterables=[]
for index_name in self.df.index.names:
if index_name != 'age':
iterables += [self.df.index.get_level_values(index_name).unique()]
else:
iterables += [age_classes]
index = pd.MultiIndex.from_product(iterables, names=self.df.index.names)
self.new_df = pd.Series(index=index)
# Four possibilities exist: can this be sped up?
if self.spatial:
if self.doses:
# Shorten?
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, NIS, slice(None), dose)]
self.new_df.loc[(date, NIS, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
for date in self.df.index.get_level_values('date').unique():
for NIS in self.df.index.get_level_values('NIS').unique():
data = self.df.loc[(date,NIS)]
self.new_df.loc[(date, NIS)] = self.convert_age_stratified_vaccination_data(data, age_classes, self.agg, NIS).values
else:
if self.doses:
for date in self.df.index.get_level_values('date').unique():
for dose in self.df.index.get_level_values('dose').unique():
data = self.df.loc[(date, slice(None), dose)]
self.new_df.loc[(date, slice(None), dose)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
else:
for date in self.df.index.get_level_values('date').unique():
data = self.df.loc[(date)]
self.new_df.loc[(date)] = self.convert_age_stratified_vaccination_data(data, age_classes).values
self.df = self.new_df
def convert_age_stratified_vaccination_data(self, data, age_classes, agg=None, NIS=None):
"""
A function to convert the sciensano vaccination data to the desired model age groups
Parameters
----------
data: pd.Series
A series of age-stratified vaccination incidences. Index must be of type pd.IntervalIndex.
age_classes : pd.IntervalIndex
Desired age groups of the vaccination dataframe.
agg: str
Spatial aggregation: prov, arr or mun
NIS : str
NIS code of the considered spatial element
Returns
-------
out: pd.Series
Converted data.
"""
# Pre-allocate new series
out = pd.Series(index = age_classes, dtype=float)
# Extract demographics
if agg:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).loc[NIS,:].values
demographics = construct_initN(None, agg).loc[NIS,:].values
else:
data_n_individuals = construct_initN(data.index.get_level_values('age'), agg).values
demographics = construct_initN(None, agg).values
# Loop over desired intervals
for idx,interval in enumerate(age_classes):
result = []
for age in range(interval.left, interval.right):
try:
result.append(demographics[age]/data_n_individuals[data.index.get_level_values('age').contains(age)]*data.iloc[np.where(data.index.get_level_values('age').contains(age))[0][0]])
except:
result.append(0)
out.iloc[idx] = sum(result)
return out
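# --- Illustrative sketch (editor's addition) ---
# The conversion above splits each reported age bin over the single years of age it
# contains, proportionally to the population of those years, and then re-aggregates
# into the desired model bins. A self-contained toy version of that idea, with
# hypothetical numbers and without the construct_initN dependency:
#
#     >>> import pandas as pd
#     >>> pop = {0: 10, 1: 10, 2: 20, 3: 20}     # hypothetical population per year of age
#     >>> reported = 100.0                       # doses reported for the single bin [0, 4)
#     >>> per_year = {a: reported * pop[a] / sum(pop.values()) for a in pop}
#     >>> target = pd.IntervalIndex.from_tuples([(0, 2), (2, 4)], closed='left')
#     >>> [round(sum(per_year[a] for a in pop if a in iv), 2) for iv in target]
#     [33.33, 66.67]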
@lru_cache()
def get_data(self,t):
if self.spatial:
if self.doses:
try:
# Only includes doses A, B and C (so not boosters!) for now
data = np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
data[:,:,:-1] = np.array(self.df.loc[t,:,:,:].values).reshape( (self.space_agg, self.age_agg, self.dose_agg) )
return data
except:
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.space_agg, self.age_agg) )
except:
return np.zeros([self.space_agg, self.age_agg])
else:
if self.doses:
try:
return np.array(self.df.loc[t,:,:].values).reshape( (self.age_agg, self.dose_agg) )
except:
return np.zeros([self.age_agg, self.dose_agg])
else:
try:
return np.array(self.df.loc[t,:].values)
except:
return np.zeros(self.age_agg)
def unidose_2021_vaccination_campaign(self, states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal):
# Compute the number of vaccine eligible individuals
VE = states['S'] + states['R']
# Initialize N_vacc
N_vacc = np.zeros(self.age_agg)
# Start vaccination loop
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses = 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx]] = daily_doses
daily_doses = 0
else:
N_vacc[vacc_order[idx]] = VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - initN[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
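# --- Illustrative sketch (editor's addition) ---
# The loop above hands out `daily_doses` down the priority order `vacc_order`,
# capping every age group at its eligible population minus the expected refusers,
# and stops at `stop_idx`. A simplified, stand-alone toy version (refusal applied
# to the eligible pool rather than to initN, purely for illustration):
#
#     >>> import numpy as np
#     >>> def allocate(daily_doses, eligible, refusal, order, stop_idx):
#     ...     N_vacc = np.zeros(len(eligible))
#     ...     for i, g in enumerate(order):
#     ...         if i == stop_idx or daily_doses <= 0:
#     ...             break
#     ...         N_vacc[g] = min(eligible[g] * (1 - refusal[g]), daily_doses)
#     ...         daily_doses -= N_vacc[g]
#     ...     return N_vacc
#     >>> allocate(150, eligible=[100, 100, 100], refusal=[0.3, 0.3, 0.3],
#     ...          order=[2, 1, 0], stop_idx=3)
#     array([10., 70., 70.])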
def booster_campaign(self, states, daily_doses, vacc_order, stop_idx, refusal):
# Compute the number of booster eligible individuals
VE = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] \
+ states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Initialize N_vacc
N_vacc = np.zeros([self.age_agg,self.dose_agg])
# Booster vaccination strategy without refusal
idx = 0
while daily_doses > 0:
if idx == stop_idx:
daily_doses= 0 #End vaccination campaign at age 20
elif VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]] > daily_doses:
N_vacc[vacc_order[idx],3] = daily_doses
daily_doses= 0
else:
N_vacc[vacc_order[idx],3] = VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]]
daily_doses = daily_doses - (VE[vacc_order[idx]] - self.fully_vaccinated_0[vacc_order[idx]]*refusal[vacc_order[idx]])
idx = idx + 1
return N_vacc
# Default vaccination strategy = Sciensano data + hypothetical scheme after end of data collection for unidose model only (for now)
def __call__(self, t, states, param, initN, daily_doses=60000, delay_immunity = 21, vacc_order = [8,7,6,5,4,3,2,1,0], stop_idx=9, refusal = [0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3,0.3]):
"""
time-dependent function for the Belgian vaccination strategy
First, all available first-dose data from Sciensano are used. Then, the user can specify a custom vaccination strategy of "daily_doses" doses per day,
administered in the order specified by the vector "vacc_order" with a refusal propensity of "refusal" in every age group.
This vaccination strategy does not distinguish between vaccination doses; individuals are transferred to the vaccination circuit after some time delay following the first dose.
For use with the model `COVID19_SEIRD` and `COVID19_SEIRD_spatial_vacc` in `~src/models/models.py`
Parameters
----------
t : int
Simulation time
states: dict
Dictionary containing values of model states
param : dict
Model parameter dictionary
initN : list or np.array
Demographics according to the epidemiological model age bins
daily_doses : int
Number of doses administered per day. Default is 60000 doses/day.
delay_immunity : int
Time delay between first dose vaccination and start of immunity. Default is 21 days.
vacc_order : array
Vector containing vaccination prioritization preference. Default is old to young. Must be equal in length to the number of age bins in the model.
stop_idx : int
Index of the age group at which the vaccination campaign is halted. An index of 9 corresponds to vaccinating all age groups; an index of 8 means the age group vacc_order[8] is not vaccinated.
refusal: array
Vector containing the fraction of individuals refusing a vaccine per age group. Default is 30% in every age group. Must be equal in length to the number of age bins in the model.
Return
------
N_vacc : np.array
Number of individuals to be vaccinated at simulation time "t" per age, or per [patch,age]
"""
# Convert time to suitable format
t = pd.Timestamp(t.date())
# Convert delay to a timedelta
delay = pd.Timedelta(str(int(delay_immunity))+'D')
# Compute vaccinated individuals after spring-summer 2021 vaccination campaign
check_time = pd.Timestamp('2021-10-01')
# Only for the non-spatial multi-dose vaccination model
if not self.spatial:
if self.doses:
if t == check_time:
self.fully_vaccinated_0 = states['S'][:,2] + states['E'][:,2] + states['I'][:,2] + states['A'][:,2] + states['R'][:,2] + \
states['S'][:,3] + states['E'][:,3] + states['I'][:,3] + states['A'][:,3] + states['R'][:,3]
# Use data
if t <= self.df_end + delay:
return self.get_data(t-delay)
# Projection into the future
else:
if self.spatial:
if self.doses:
# No projection implemented
return np.zeros([self.space_agg, self.age_agg, self.dose_agg+1])
else:
# No projection implemented
return np.zeros([self.space_agg,self.age_agg])
else:
if self.doses:
return self.booster_campaign(states, daily_doses, vacc_order, stop_idx, refusal)
else:
return self.unidose_2021_vaccination_campaign(states, initN, daily_doses, delay_immunity, vacc_order, stop_idx, refusal)
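# Editor's note: the __call__ above is piecewise in time. Up to self.df_end + delay it
# simply replays the (delay-shifted) reported doses via get_data(); beyond the data, the
# spatial variants return zeros (no projection implemented), the national multi-dose
# variant switches to booster_campaign(), and the national single-dose variant switches
# to unidose_2021_vaccination_campaign().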
###################################
## Google social policy function ##
###################################
class make_contact_matrix_function():
"""
Class that returns contact matrix based on 4 prevention parameters by default, but has other policies defined as well.
Input
-----
Nc_all : dictionary
contact matrices for home, schools, work, transport, leisure and others
df_google : dataframe
google mobility data
Output
------
__class__ : default function
Default output function, based on contact_matrix_4prev
"""
def __init__(self, df_google, Nc_all):
self.df_google = df_google.astype(float)
self.Nc_all = Nc_all
# Compute start and end times of the dataframe
self.df_google_start = df_google.index.get_level_values('date')[0]
self.df_google_end = df_google.index.get_level_values('date')[-1]
# Check if provincial data is provided
self.provincial = None
if 'NIS' in self.df_google.index.names:
self.provincial = True
self.space_agg = len(self.df_google.index.get_level_values('NIS').unique().values)
@lru_cache() # once the function is run for a set of parameters, it doesn't need to compile again
def __call__(self, t, prev_home=1, prev_schools=1, prev_work=1, prev_rest = 1,
school=None, work=None, transport=None, leisure=None, others=None, home=None):
"""
t : timestamp
current date
prev_... : float [0,1]
prevention parameter to estimate
school, work, transport, leisure, others : float [0,1]
level of opening of these sectors
if None, it is calculated from google mobility data
only school cannot be None!
"""
if school is None:
raise ValueError(
"Please indicate to which extend schools are open")
places_var = [work, transport, leisure, others]
places_names = ['work', 'transport', 'leisure', 'others']
GCMR_names = ['work', 'transport', 'retail_recreation', 'grocery']
if self.provincial:
if t < pd.Timestamp('2020-03-17'):
return np.ones(self.space_agg)[:,np.newaxis,np.newaxis]*self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[(t, slice(None)),:]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google.loc[(self.df_google_end - pd.Timedelta(days=14)): self.df_google_end, slice(None)].mean(level='NIS')/100
# Sort NIS codes from low to high
row.sort_index(level='NIS', ascending=True,inplace=True)
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]].values
else:
try:
test=len(place)
except:
place = place*np.ones(self.space_agg)
values_dict.update({places_names[idx]: place})
# Schools:
try:
test=len(school)
except:
school = school*np.ones(self.space_agg)
# Construct contact matrix
CM = (prev_home*np.ones(self.space_agg)[:, np.newaxis,np.newaxis]*self.Nc_all['home'] +
(prev_schools*school)[:, np.newaxis,np.newaxis]*self.Nc_all['schools'] +
(prev_work*values_dict['work'])[:,np.newaxis,np.newaxis]*self.Nc_all['work'] +
(prev_rest*values_dict['transport'])[:,np.newaxis,np.newaxis]*self.Nc_all['transport'] +
(prev_rest*values_dict['leisure'])[:,np.newaxis,np.newaxis]*self.Nc_all['leisure'] +
(prev_rest*values_dict['others'])[:,np.newaxis,np.newaxis]*self.Nc_all['others'])
else:
if t < pd.Timestamp('2020-03-17'):
return self.Nc_all['total']
elif pd.Timestamp('2020-03-17') <= t <= self.df_google_end:
# Extract row at timestep t
row = -self.df_google.loc[t]/100
else:
# Extract last 14 days and take the mean
row = -self.df_google[-14:-1].mean()/100
# Extract values
values_dict={}
for idx,place in enumerate(places_var):
if place is None:
place = 1 - row[GCMR_names[idx]]
values_dict.update({places_names[idx]: place})
# Construct contact matrix
CM = (prev_home*self.Nc_all['home'] +
prev_schools*school*self.Nc_all['schools'] +
prev_work*values_dict['work']*self.Nc_all['work'] +
prev_rest*values_dict['transport']*self.Nc_all['transport'] +
prev_rest*values_dict['leisure']*self.Nc_all['leisure'] +
prev_rest*values_dict['others']*self.Nc_all['others'])
return CM
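# Editor's note: both branches above build the same weighted sum of location-specific
# matrices, CM = prev_home*Nc_home + prev_schools*school*Nc_schools + prev_work*work*Nc_work
# + prev_rest*(transport*Nc_transport + leisure*Nc_leisure + others*Nc_others), with the
# work/transport/leisure/others levels taken from the arguments or, when these are None,
# derived from the Google mobility reductions; the provincial branch only adds a leading
# NIS dimension to every term.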
def all_contact(self):
return self.Nc_all['total']
def all_contact_no_schools(self):
return self.Nc_all['total'] - self.Nc_all['schools']
def ramp_fun(self, Nc_old, Nc_new, t, t_start, l):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
l : int
number of days over which the transition from Nc_old to Nc_new is completed
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start)/pd.Timedelta('1D') )
def delayed_ramp_fun(self, Nc_old, Nc_new, t, tau_days, l, t_start):
"""
t : timestamp
current simulation time
t_start : timestamp
start of policy change
tau : int
number of days before measures start having an effect
l : int
number of additional days after the time delay until full compliance is reached
"""
return Nc_old + (Nc_new-Nc_old)/l * float( (t-t_start-tau_days)/pd.Timedelta('1D') )
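# --- Illustrative sketch (editor's addition) ---
# ramp_fun / delayed_ramp_fun are plain linear interpolations: starting from Nc_old at
# t_start (shifted by tau_days in the delayed variant), the matrix reaches Nc_new after
# l days. With scalars standing in for contact matrices:
#
#     >>> import pandas as pd
#     >>> def ramp(old, new, t, t_start, l):
#     ...     return old + (new - old) / l * float((t - t_start) / pd.Timedelta('1D'))
#     >>> t_start = pd.Timestamp('2020-03-15')
#     >>> [ramp(1.0, 0.5, t_start + pd.Timedelta(days=d), t_start, l=5) for d in (0, 5)]
#     [1.0, 0.5]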
####################
## National model ##
####################
def policies_all(self, t, states, param, l1, l2, prev_schools, prev_work, prev_rest_lockdown, prev_rest_relaxation, prev_home):
'''
Function that returns the time-dependent social contact matrix Nc for all COVID waves.
Input
-----
t : Timestamp
simulation time
states : dict
Dictionary containing values of model states
param : dict
model parameter dictionary
l1 : float
Compliance parameter for social policies during first lockdown 2020 COVID-19 wave
l2 : float
Compliance parameter for social policies during second lockdown 2020 COVID-19 wave
prev_{location} : float
Effectivity of contacts at {location}
Returns
-------
CM : np.array (9x9)
Effective contact matrix (output of __call__ function)
'''
t = pd.Timestamp(t.date())
# Convert compliance l to dates
l1_days = pd.Timedelta(l1, unit='D')
l2_days = pd.Timedelta(l2, unit='D')
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-08-03') # Summer lockdown in Antwerp
t5 = pd.Timestamp('2020-08-24') # End of summer lockdown in Antwerp
t6 = pd.Timestamp('2020-09-01') # end of summer holidays
t7 = pd.Timestamp('2020-09-21') # Opening universities
# Define key dates of second wave
t8 = pd.Timestamp('2020-10-19') # lockdown (1)
t9 = pd.Timestamp('2020-11-02') # lockdown (2)
t10 = pd.Timestamp('2020-11-16')
'''
sim table parser
'''
import re
import time
import numpy as np
import pandas as pd
from .plot import SimPlot
months = ['JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN',
'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC']
month_sort_dict = {'JAN': '01_JAN',
'FEB': '02_FEB',
'MAR': '03_MAR',
'APR': '04_APR',
'MAY': '05_MAY',
'JUN': '06_JUN',
'JUL': '07_JUL',
'AUG': '08_AUG',
'SEP': '09_SEP',
'OCT': '10_OCT',
'NOV': '11_NOV',
'DEC': '12_DEC'}
def try_numeric(df):
def lambda_numeric(x):
try:
return pd.to_numeric(x, errors='raise')
except:
return x
df = df.apply(lambda x: lambda_numeric(x))
return df
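# Illustrative usage (editor's addition): columns that are fully numeric are converted,
# mixed or text columns are left untouched.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'a': ['1', '2'], 'b': ['x', '3']})
#     >>> try_numeric(df).dtypes.tolist()
#     [dtype('int64'), dtype('O')]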
def str_to_new_col(df, col, text, newcol):
'''finds text in col of df and copies the matching values into newcol (non-matches become NaN); text may be a substring or a list of exact values'''
if type(text) == str:
df[newcol] = df[col].apply(
lambda x: str(x) if text in str(x) else np.nan)
elif type(text) == list:
df[newcol] = df[col].apply(
lambda x: str(x) if str(x) in text else np.nan)
return df
def filter_numerics(df, col, inverse=False):
def try_numeric_filt(string):
try:
return float(string)
except:
return (string)
df[col] = df[col].apply(lambda x: try_numeric_filt(x))
if not inverse:
df = df[pd.to_numeric(df[col], errors='coerce').notnull()]
elif inverse:
df = df[~pd.to_numeric(df[col], errors='coerce').notnull()]
return df
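# Illustrative usage (editor's addition): keep only the rows whose value in `col`
# parses as a number, or, with inverse=True, only the rows that do not. Note the
# function also rewrites `col` in place, hence the .copy() below.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'v': ['1.5', 'JAN', '2']})
#     >>> filter_numerics(df.copy(), 'v')['v'].tolist()
#     [1.5, 2.0]
#     >>> filter_numerics(df.copy(), 'v', inverse=True)['v'].tolist()
#     ['JAN']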
def shiftcol(df, col, steps):
'''shifts column by number of steps'''
df[col] = df[col].shift(steps)
return df
def inlist(df, col, filtlist, inverse=False):
'''filters df on membership of col in filtlist: by default rows whose value is in filtlist are dropped; with inverse=True only those rows are kept'''
if inverse:
df = df[df[col].isin(filtlist)]
elif not inverse:
df = df[~df[col].isin(filtlist)]
return df
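# Illustrative usage (editor's addition): with the default inverse=False the rows whose
# value appears in filtlist are dropped; inverse=True keeps only those rows.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'m': ['JAN', 'TOTAL', 'FEB']})
#     >>> inlist(df, 'm', months)['m'].tolist()
#     ['TOTAL']
#     >>> inlist(df, 'm', months, inverse=True)['m'].tolist()
#     ['JAN', 'FEB']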
def add_normalization(df, numerator, denominator, replacecolstr, newcolstr, replacefrom='numerator',
factor=1, ): # todo: handle inf and nans
'''takes df and list or string of numerators/denominators (one has to be a single value),
adds new columns to dataframe based on replacecolstr, newcolstr'''
if replacefrom == 'numerator':
newcolstr = df[numerator].name.replace(replacecolstr, newcolstr)
elif replacefrom == 'denominator':
newcolstr = df[denominator].name.replace(replacecolstr, newcolstr)
df[newcolstr] = (df[numerator].astype(float) /
df[denominator].astype(float)) * factor
# df[newcolstr] = df[newcolstr].apply(lambda x: x.replace(np.inf,0))
return df
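# Illustrative usage (editor's addition, hypothetical column names): normalise an
# energy column by an area column and store the result under a derived column name.
#
#     >>> import pandas as pd
#     >>> df = pd.DataFrame({'lights_kwh': [100.0, 200.0], 'area_ft2': [50.0, 50.0]})
#     >>> df = add_normalization(df, 'lights_kwh', 'area_ft2', 'kwh', 'kwh_per_ft2')
#     >>> df['lights_kwh_per_ft2'].tolist()
#     [2.0, 4.0]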
class RptHandler:
'''container for customized report dataframes, can pass on various metadata
into it (zone/volume, etc) and transformation methods'''
def __init__(self, path):
self.path = path
self.plot = SimPlot(self) # exposes 'plot.py' for Sim files
def _sim_to_text_dict(self):
'''parses sim file, returns dictionary of tables and sub tables'''
with open(self.path) as f:
fstr = f.read()
fstr = fstr + '\f'
rpt_text = re.findall(r'(REPORT.*?\f)', fstr, re.DOTALL)
# handle tables
rptdict = {}
for r in rpt_text:
report = re.findall("REPORT-.*WEATHER", r)[0]
report = report.replace(
"REPORT- ", "").replace("WEATHER", "").strip()
# handle special parse cases
if "DESIGN DAY" in report:
top = re.split(r'\s{2,}', report)[0] + ' (DESIGN DAY)'
try:
bottom = re.split(r'\s{2,}', report)[1]
if bottom == "":
bottom = 'None'
except:
bottom = 'None'
elif "LS-J Daylight Illuminance Frequency" in report:
top = "LS-J Daylight Illuminance Frequency"
bottom = report.replace((top + " "), "")
elif "LS-M Daylight Illuminance Ref Pnt 1" in report:
top = "LS-M Daylight Illuminance Ref Pnt 1"
bottom = report.replace((top + " "), "")
elif "SS-P Heating Performance Summary of" in report:
top = "SS-P Heating Performance Summary of"
bottom = report.replace((top + " "), "")
elif "SS-P Cooling Performance Summary of" in report:
top = "SS-P Cooling Performance Summary of"
bottom = report.replace((top + " "), "")
elif "SS-Q Heat Pump Cooling Summary for" in report:
top = "SS-Q Heat Pump Cooling Summary for"
bottom = report.replace((top + " "), "")
elif "SS-Q Heat Pump Heating Summary for" in report:
top = "SS-Q Heat Pump Heating Summary for"
bottom = report.replace((top + " "), "")
else:
top = re.split(r'\s{2,}', report)[0]
try:
bottom = re.split(r'\s{2,}', report)[1]
if bottom == "":
bottom = 'None'
except:
bottom = 'None'
# rptdata = r.split("\n")[3:] # changed on 2020-01-30 for lv-d
rptdata = r.split("\n")[2:]
# populate dictionaries
if top not in rptdict:
rptdict[top] = {bottom: rptdata}
else:
if bottom in rptdict[top]:
rptdict[top][bottom] = rptdict[top][bottom] + rptdata
else:
rptdict[top].update({bottom: rptdata})
return rptdict
self.txtdict = _sim_to_text_dict(self)
def _make_dirty_rpt_list(self, report):
'''
takes either full report or simplified, without hyphen:
LV-C Details of Space is accessible via LV-C, LVC, lvc, or
LV-C Details of Space
returns dirty list
'''
rptlist = [key for key in self.txtdict.keys()]
for rpt in rptlist:
find = report.replace("-", "").upper()
rpt_fmt = rpt.replace("-", "").upper()
pattern = "^" + find + ".*"
match = re.search(pattern, rpt_fmt)
if match:
return self.txtdict[rpt]
def _make_dirty_rpt_df(self, rptname, colpat, colnames=None, fullname=None):
'''
takes rpt list and
returns df with column pattern and column length.
rpt_ref used in case name of report generated
is different from sim name. ex: 'unmet' is
taken from 'beps'report;
'rpt' = unmet, rpt_ref = 'beps'
'''
txtlist = self._make_dirty_rpt_list(rptname)
poslist = [i for i, letter in enumerate(colpat) if letter == '%']
dflist = []
for key, value in txtlist.items(): #
collist = []
this_row = []
for row in value:
this_row = []
for num in range(len(poslist)):
try:
this_row_col = row[poslist[num] :poslist[num + 1]].strip()
this_row.append(this_row_col)
except:
pass
this_row = [x if len(x) > 0 else np.nan for x in this_row]
collist.append(this_row)
if colnames is not None:
df = pd.DataFrame(collist, columns=colnames)
"""
Compute the aggregate effects for each individual neuron.
Save the effects as $model_neuron_effects.csv.
Usage:
python compute_and_save_neuron_agg_effect.py $result_file_path $model_name
"""
import os
import sys
import pandas as pd
def analyze_effect_results(results_df, effect, word, alt, savefig=None):
# calculate odds.
if alt == "man":
odds_base = (
results_df["candidate1_base_prob"] / results_df["candidate2_base_prob"]
)
odds_intervention = (
results_df["candidate1_prob"] / results_df["candidate2_prob"]
)
else:
odds_base = (
results_df["candidate2_base_prob"] / results_df["candidate1_base_prob"]
)
odds_intervention = (
results_df["candidate2_prob"] / results_df["candidate1_prob"]
)
odds_ratio = odds_intervention / odds_base
results_df["odds_ratio"] = odds_ratio
if word == "all":
# average over words
results_df = results_df.groupby(["layer", "neuron"], as_index=False).mean()
else:
# choose one word
results_df = results_df[results_df["word"] == word]
results_df = results_df.pivot("neuron", "layer", "odds_ratio")
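# Worked example (editor's addition): the odds ratio contrasts the candidate-pair odds
# after the intervention with the baseline odds. With hypothetical probabilities and
# alt == "man":
#
#     odds_base         = candidate1_base_prob / candidate2_base_prob = 0.60 / 0.30 = 2.0
#     odds_intervention = candidate1_prob      / candidate2_prob      = 0.40 / 0.40 = 1.0
#     odds_ratio        = odds_intervention / odds_base               = 1.0 / 2.0   = 0.5
#
# A value below 1 means the intervention shifted the odds away from candidate 1; a value
# above 1 means it shifted them towards candidate 1.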
def get_all_effects(fname, direction="woman"):
"""
Give fname of an indirect-effect results file; the matching direct-effect file name is derived from it
"""
# Step 1: Load results for current folder and gender
print(fname)
indirect_result_df = pd.read_csv(fname)
analyze_effect_results(
results_df=indirect_result_df, effect="indirect", word="all", alt=direction
)
fname = fname.replace("_indirect_", "_direct_")
direct_result_df = pd.read_csv(fname)
import builtins
from io import StringIO
from itertools import product
from string import ascii_lowercase
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import (
DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna)
import pandas.core.nanops as nanops
from pandas.util import testing as tm
@pytest.mark.parametrize("agg_func", ['any', 'all'])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize("vals", [
['foo', 'bar', 'baz'], ['foo', '', ''], ['', '', ''],
[1, 2, 3], [1, 0, 0], [0, 0, 0],
[1., 2., 3.], [1., 0., 0.], [0., 0., 0.],
[True, True, True], [True, False, False], [False, False, False],
[np.nan, np.nan, np.nan]
])
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({'key': ['a'] * 3 + ['b'] * 3, 'val': vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == 'any':
exp = False
exp_df = DataFrame([exp] * 2, columns=['val'], index=Index(
['a', 'b'], name='key'))
result = getattr(df.groupby('key'), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({'nn': [11, 11, 22, 22],
'ii': [1, 2, 3, 4],
'ss': 4 * ['mama']})
result = aa.groupby('nn').max()
assert 'ss' in result
result = aa.groupby('nn').max(numeric_only=False)
assert 'ss' in result
result = aa.groupby('nn').min()
assert 'ss' in result
result = aa.groupby('nn').min(numeric_only=False)
assert 'ss' in result
def test_intercept_builtin_sum():
s = Series([1., 2., np.nan, 3.])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# @pytest.mark.parametrize("f", [max, min, sum])
# def test_builtins_apply(f):
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize('keys', [
"jim", # Single key
["jim", "joe"] # Multi-key
])
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)),
columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = ("invalid frame shape: {} "
"(expected ({}, 3))".format(result.shape, ngroups))
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)))
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(),
getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{'group': [1, 1, 2],
'int': [1, 2, 3],
'float': [4., 5., 6.],
'string': list('abc'),
'category_string': pd.Series(list('abc')).astype('category'),
'category_int': [7, 8, 9],
'datetime': pd.date_range('20130101', periods=3),
'datetimetz': pd.date_range('20130101',
periods=3,
tz='US/Eastern'),
'timedelta': pd.timedelta_range('1 s', periods=3, freq='s')},
columns=['group', 'int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
expected_columns_numeric = Index(['int', 'float', 'category_int'])
# mean / median
expected = pd.DataFrame(
{'category_int': [7.5, 9],
'float': [4.5, 6.],
'timedelta': [pd.Timedelta('1.5s'),
pd.Timedelta('3s')],
'int': [1.5, 3],
'datetime': [pd.Timestamp('2013-01-01 12:00:00'),
pd.Timestamp('2013-01-03 00:00:00')],
'datetimetz': [
pd.Timestamp('2013-01-01 12:00:00', tz='US/Eastern'),
pd.Timestamp('2013-01-03 00:00:00', tz='US/Eastern')]},
index=Index([1, 2], name='group'),
columns=['int', 'float', 'category_int',
'datetime', 'datetimetz', 'timedelta'])
for attr in ['mean', 'median']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(['int', 'float', 'string',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['min', 'max']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_string', 'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['first', 'last']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'string',
'category_int', 'timedelta'])
for attr in ['sum']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(['int', 'float', 'category_int'])
for attr in ['prod', 'cumprod']:
f = getattr(df.groupby('group'), attr)
result = f()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(['int', 'float',
'category_int',
'datetime', 'datetimetz',
'timedelta'])
for attr in ['cummin', 'cummax']:
f = getattr(df.groupby('group'), attr)
result = f()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = f(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def Moder_merger(params : dict):
def Solo_M1mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 72.081324
mz_Cl = 34.968853 + mz - 72.081324
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
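# --- Illustrative sketch (editor's addition) ---
# Every Solo_* helper in this module follows the same pattern: compute the m/z values
# expected under a given adduct/multimer hypothesis, then count how many of them occur
# in the spectrum within +/- prec_mass_error (prec_mass_error comes from `params` in the
# full module; the value below is hypothetical).
#
#     >>> import pandas as pd
#     >>> def count_expected_peaks(peaks_mz, expected_mz, prec_mass_error=0.01):
#     ...     peaks = pd.Series(peaks_mz)
#     ...     return int(sum(peaks.between(m - prec_mass_error, m + prec_mass_error).sum() > 0
#     ...                    for m in expected_mz))
#     >>> count_expected_peaks([100.003, 250.100], expected_mz=[100.0, 180.0, 250.095])
#     2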
def Solo_M1mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 44.997654
mz_Cl = 34.968853 + mz - 44.997654
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl])
def Solo_M1m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
mz_Cl = 34.968853 + mz - 66.979600
mz_m2HpNa = 20.97412 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M1m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 66.979600
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M1m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + mz - 36.948058
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2mHpC4H11N(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 72.081324)/2
mz_Cl = 34.968853 + (mz - 72.081324)/2
mz_m2HpNa = 20.97412 + (mz - 72.081324)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2mHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 44.997654)/2
mz_Cl = 34.968853 + (mz - 44.997654)/2
mz_m2HpNa = 20.97412 + (mz - 44.997654)/2
mz_mHpHCOOH = 44.997654 + (mz - 44.997654)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_mHpHCOOH = peaks.between(mz_mHpHCOOH - prec_mass_error, mz_mHpHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_mHpHCOOH])
def Solo_M2mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/2
mz_Cl = 34.968853 + (mz + 1.007825)/2
mz_m2HpNa = 20.97412 + (mz + 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/2
mz_Cl = 34.968853 + (mz - 34.968853)/2
mz_m2HpNa = 20.97412 + (mz - 34.968853)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa])
def Solo_M2m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/2
mz_Cl = 34.968853 + (mz - 66.979600)/2
mz_m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/2
mz_Cl = 34.968853 + (mz - 20.97412)/2
mz_m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH])
def Solo_M2m2HpK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 36.948058)/2
mz_Cl = 34.968853 + (mz - 36.948058)/2
mz_m2HpNa = 20.97412 + (mz - 36.948058)/2
mz_m2HpNapHCOOH = 66.9796 + (mz - 36.948058)/2
mz_m2HpK = 36.948058 + (mz - 36.948058)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK])
def Solo_M3mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/3
mz_Cl = 34.968853 + (mz + 1.007825)/3
mz_m2HpNa = 20.97412 + (mz + 1.007825)/3
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/3
mz_m2HpK = 36.948058 + (mz + 1.007825)/3
mz_M2mH = -1.007825 + (mz + 1.007825)*(2/3)
mz_M2pCl = 34.968853 + (mz + 1.007825)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(2/3)
mz_M2m2HpK = 36.948058 + (mz + 1.007825)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/3
mz_Cl = 34.968853 + (mz - 34.968853)/3
mz_m2HpNa = 20.97412 + (mz - 34.968853)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/3
mz_m2HpK = 36.948058 + (mz - 34.968853)/3
mz_M2mH = -1.007825 + (mz - 34.968853)*(2/3)
mz_M2pCl = 34.968853 + (mz - 34.968853)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 34.968853)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/3
mz_Cl = 34.968853 + (mz - 66.979600)/3
mz_m2HpNa = 20.97412 + (mz - 66.979600)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/3
mz_m2HpK = 36.948058 + (mz - 66.979600)/3
mz_M2mH = -1.007825 + (mz - 66.979600)*(2/3)
mz_M2pCl = 34.968853 + (mz - 66.979600)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 66.979600)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M3m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/3
mz_Cl = 34.968853 + (mz - 20.97412)/3
mz_m2HpNa = 20.97412 + (mz - 20.97412)/3
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/3
mz_m2HpK = 36.948058 + (mz - 20.97412)/3
mz_M2mH = -1.007825 + (mz - 20.97412)*(2/3)
mz_M2pCl = 34.968853 + (mz - 20.97412)*(2/3)
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)*(2/3)
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(2/3)
mz_M2m2HpK = 36.948058 + (mz - 20.97412)*(2/3)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK])
def Solo_M4mH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz + 1.007825)/4
mz_Cl = 34.968853 + (mz + 1.007825)/4
mz_m2HpNa = 20.97412 + (mz + 1.007825)/4
mz_m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/4
mz_m2HpK = 36.948058 + (mz + 1.007825)/4
mz_M2mH = -1.007825 + (mz + 1.007825)/2
mz_M2pCl = 34.968853 + (mz + 1.007825)/2
mz_M2m2HpNa = 20.97412 + (mz + 1.007825)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz + 1.007825)/2
mz_M2m2HpK = 36.948058 + (mz + 1.007825)/2
mz_M3mH = -1.007825 + (mz + 1.007825)*(3/4)
mz_M3pCl = 34.968853 + (mz + 1.007825)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz + 1.007825)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz + 1.007825)*(3/4)
mz_M3m2HpK = 36.948058 + (mz + 1.007825)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4pCl(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 34.968853)/4
mz_Cl = 34.968853 + (mz - 34.968853)/4
mz_m2HpNa = 20.97412 + (mz - 34.968853)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/4
mz_m2HpK = 36.948058 + (mz - 34.968853)/4
mz_M2mH = -1.007825 + (mz - 34.968853)/2
mz_M2pCl = 34.968853 + (mz - 34.968853)/2
mz_M2m2HpNa = 20.97412 + (mz - 34.968853)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 34.968853)/2
mz_M2m2HpK = 36.948058 + (mz - 34.968853)/2
mz_M3mH = -1.007825 + (mz - 34.968853)*(3/4)
mz_M3pCl = 34.968853 + (mz - 34.968853)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 34.968853)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 34.968853)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 34.968853)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNapHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 66.979600)/4
mz_Cl = 34.968853 + (mz - 66.979600)/4
mz_m2HpNa = 20.97412 + (mz - 66.979600)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/4
mz_m2HpK = 36.948058 + (mz - 66.979600)/4
mz_M2mH = -1.007825 + (mz - 66.979600)/2
mz_M2pCl = 34.968853 + (mz - 66.979600)/2
mz_M2m2HpNa = 20.97412 + (mz - 66.979600)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 66.979600)/2
mz_M2m2HpK = 36.948058 + (mz - 66.979600)/2
mz_M3mH = -1.007825 + (mz - 66.979600)*(3/4)
mz_M3pCl = 34.968853 + (mz - 66.979600)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 66.979600)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 66.979600)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 66.979600)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M4m2HpNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = -1.007825 + (mz - 20.97412)/4
mz_Cl = 34.968853 + (mz - 20.97412)/4
mz_m2HpNa = 20.97412 + (mz - 20.97412)/4
mz_m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/4
mz_m2HpK = 36.948058 + (mz - 20.97412)/4
mz_M2mH = -1.007825 + (mz - 20.97412)/2
mz_M2pCl = 34.968853 + (mz - 20.97412)/2
mz_M2m2HpNa = 20.97412 + (mz - 20.97412)/2
mz_M2m2HpNapHCOOH = 66.9796 + (mz - 20.97412)/2
mz_M2m2HpK = 36.948058 + (mz - 20.97412)/2
mz_M3mH = -1.007825 + (mz - 20.97412)*(3/4)
mz_M3pCl = 34.968853 + (mz - 20.97412)*(3/4)
mz_M3m2HpNa = 20.97412 + (mz - 20.97412)*(3/4)
mz_M3m2HpNapHCOOH = 66.9796 + (mz - 20.97412)*(3/4)
mz_M3m2HpK = 36.948058 + (mz - 20.97412)*(3/4)
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Cl = peaks.between(mz_Cl - prec_mass_error, mz_Cl + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNa = peaks.between(mz_m2HpNa - prec_mass_error, mz_m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpNapHCOOH = peaks.between(mz_m2HpNapHCOOH - prec_mass_error, mz_m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_m2HpK = peaks.between(mz_m2HpK - prec_mass_error, mz_m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M2mH = peaks.between(mz_M2mH - prec_mass_error, mz_M2mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2pCl = peaks.between(mz_M2pCl - prec_mass_error, mz_M2pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNa = peaks.between(mz_M2m2HpNa - prec_mass_error, mz_M2m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpNapHCOOH = peaks.between(mz_M2m2HpNapHCOOH - prec_mass_error, mz_M2m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M2m2HpK = peaks.between(mz_M2m2HpK - prec_mass_error, mz_M2m2HpK + prec_mass_error, inclusive = "both").sum() > 0
valid_M3mH = peaks.between(mz_M3mH - prec_mass_error, mz_M3mH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3pCl = peaks.between(mz_M3pCl - prec_mass_error, mz_M3pCl + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNa = peaks.between(mz_M3m2HpNa - prec_mass_error, mz_M3m2HpNa + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpNapHCOOH = peaks.between(mz_M3m2HpNapHCOOH - prec_mass_error, mz_M3m2HpNapHCOOH + prec_mass_error, inclusive = "both").sum() > 0
valid_M3m2HpK = peaks.between(mz_M3m2HpK - prec_mass_error, mz_M3m2HpK + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Cl, valid_m2HpNa, valid_m2HpNapHCOOH, valid_m2HpK,
valid_M2mH, valid_M2pCl, valid_M2m2HpNa, valid_M2m2HpNapHCOOH, valid_M2m2HpK,
valid_M3mH, valid_M3pCl, valid_M3m2HpNa, valid_M3m2HpNapHCOOH, valid_M3m2HpK])
def Solo_M2pH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 1.007825)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H])
def Solo_M2pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 42.034374)/2
mz_Na = 22.98977 + (mz - 42.034374)/2
mz_K = 38.963708 + (mz - 42.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 42.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 42.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 42.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 42.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 42.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 42.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 33.034040)/2
mz_Na = 22.98977 + (mz - 33.034040)/2
mz_K = 38.963708 + (mz - 33.034040)/2
mz_HpCH3CN = 42.034374 + (mz - 33.034040)/2
mz_HpCH3OH = 33.034040 + (mz - 33.034040)/2
mz_NapCH3CN = 64.016319 + (mz - 33.034040)/2
mz_NapCH3OH = 55.015985 + (mz - 33.034040)/2
mz_KpCH3CN = 79.990257 + (mz - 33.034040)/2
mz_KpCH3OH = 70.989923 + (mz - 33.034040)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 47.013304)/2
mz_Na = 22.98977 + (mz - 47.013304)/2
mz_K = 38.963708 + (mz - 47.013304)/2
mz_HpCH3CN = 42.034374 + (mz - 47.013304)/2
mz_HpCH3OH = 33.034040 + (mz - 47.013304)/2
mz_NapCH3CN = 64.016319 + (mz - 47.013304)/2
mz_NapCH3OH = 55.015985 + (mz - 47.013304)/2
mz_KpCH3CN = 79.990257 + (mz - 47.013304)/2
mz_KpCH3OH = 70.989923 + (mz - 47.013304)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNH4(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 18.034374)/2
mz_NH4 = 18.034374 + (mz - 18.034374)/2
mz_Na = 22.98977 + (mz - 18.034374)/2
mz_K = 38.963708 + (mz - 18.034374)/2
mz_HpCH3CN = 42.034374 + (mz - 18.034374)/2
mz_HpCH3OH = 33.034040 + (mz - 18.034374)/2
mz_NapCH3CN = 64.016319 + (mz - 18.034374)/2
mz_NapCH3OH = 55.015985 + (mz - 18.034374)/2
mz_KpCH3CN = 79.990257 + (mz - 18.034374)/2
mz_KpCH3OH = 70.989923 + (mz - 18.034374)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_NH4 = peaks.between(mz_NH4 - prec_mass_error, mz_NH4 + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3CN = peaks.between(mz_KpCH3CN - prec_mass_error, mz_KpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_KpCH3OH = peaks.between(mz_KpCH3OH - prec_mass_error, mz_KpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_NH4, valid_Na, valid_K, valid_HpCH3CN, valid_HpCH3OH, valid_NapCH3CN, valid_NapCH3OH, valid_KpCH3CN, valid_KpCH3OH])
def Solo_M2pNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 22.98977)/2
mz_Na = 22.98977 + (mz - 22.98977)/2
mz_NapCH3CN = 64.016319 + (mz - 22.98977)/2
mz_NapCH3OH = 55.015985 + (mz - 22.98977)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3OH(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 55.015985)/2
mz_Na = 22.98977 + (mz - 55.015985)/2
mz_NapCH3CN = 64.016319 + (mz - 55.015985)/2
mz_NapCH3OH = 55.015985 + (mz - 55.015985)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pNapCH3CN(ion_idx, mgf_file) :
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 64.016319)/2
mz_Na = 22.98977 + (mz - 64.016319)/2
mz_NapCH3CN = 64.016319 + (mz - 64.016319)/2
mz_NapCH3OH = 55.015985 + (mz - 64.016319)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3CN = peaks.between(mz_NapCH3CN - prec_mass_error, mz_NapCH3CN + prec_mass_error, inclusive = "both").sum() > 0
valid_NapCH3OH = peaks.between(mz_NapCH3OH - prec_mass_error, mz_NapCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_NapCH3CN, valid_NapCH3OH])
def Solo_M2pK(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + (mz - 38.963708)/2
mz_Na = 22.98977 + (mz - 38.963708)/2
mz_K = 38.963708 + (mz - 38.963708)/2
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_K = peaks.between(mz_K - prec_mass_error, mz_K + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_K])
def Solo_M1pHpCH3CN(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 42.034374
mz_Na = 22.98977 + mz - 42.034374
mz_HpCH3OH = 33.034040 + mz - 42.034374
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_HpCH3OH])
def Solo_M1pHpCH3OH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 33.034040
mz_Na = 22.98977 + mz - 33.034040
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na])
def Solo_M1pHpHCOOH(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks = pd.Series(mgf_file[ion_idx].peaks.mz)
mz_H = 1.007825 + mz - 47.013304
mz_Na = 22.98977 + mz - 47.013304
mz_HpCH3OH = 33.034040 + mz - 47.013304
mz_HpCH3CN = 42.034374 + mz - 47.013304
valid_H = peaks.between(mz_H - prec_mass_error, mz_H + prec_mass_error, inclusive = "both").sum() > 0
valid_Na = peaks.between(mz_Na - prec_mass_error, mz_Na + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3OH = peaks.between(mz_HpCH3OH - prec_mass_error, mz_HpCH3OH + prec_mass_error, inclusive = "both").sum() > 0
valid_HpCH3CN = peaks.between(mz_HpCH3CN - prec_mass_error, mz_HpCH3CN + prec_mass_error, inclusive = "both").sum() > 0
return sum([valid_H, valid_Na, valid_HpCH3OH, valid_HpCH3CN])
def Solo_M1pNa(ion_idx, mgf_file):
mz = mgf_file[ion_idx].get('pepmass')[0]
peaks =
|
pd.Series(mgf_file[ion_idx].peaks.mz)
|
pandas.Series
|
# -*- coding:utf-8 -*-
"""
Stock technical indicator interface
Created on 2018/07/26
@author: Wangzili
@group : **
@contact: <EMAIL>
In every indicator the parameter df is stock data obtained via get_k_data
"""
import pandas as pd
import numpy as np
import itertools
def ma(df, n=10):
"""
Moving Average
MA(N) = (close of day 1 + close of day 2 + ... + close of day N) / N
"""
pv = pd.DataFrame()
pv['date'] = df['date']
pv['v'] = df.close.rolling(n).mean()
return pv
def _ma(series, n):
"""
Moving average
"""
return series.rolling(n).mean()
def md(df, n=10):
"""
Rolling standard deviation
STD=S(CLOSE,N)=[∑(CLOSE-MA(CLOSE,N))^2/N]^0.5
"""
_md = pd.DataFrame()
_md['date'] = df.date
_md["md"] = df.close.rolling(n).std(ddof=0)
return _md
def _md(series, n):
"""
Standard deviation MD
"""
return series.rolling(n).std(ddof=0)  # ddof=1 is sometimes used instead
def ema(df, n=12):
"""
Exponential Moving Average
today's EMA(N) = 2/(N+1) × today's close + (N-1)/(N+1) × yesterday's EMA(N)
EMA(X,N) = [2×X + (N-1)×EMA(ref(X),N)] / (N+1)
"""
_ema = pd.DataFrame()
_ema['date'] = df['date']
_ema['ema'] = df.close.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
return _ema
def _ema(series, n):
"""
Exponential moving average
"""
return series.ewm(ignore_na=False, span=n, min_periods=0, adjust=False).mean()
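# `sma` used by kdj/rsi/vrsi below is assumed to be the TDX-style smoothed
# moving average defined elsewhere in this module:
#   SMA(X, N, M) = (M × X + (N - M) × previous SMA) / N, usually with M = 1.
# A minimal sketch under that assumption (left as a comment so it cannot clash
# with the real definition):
# def sma(series, n, m=1):
#     return series.ewm(alpha=m / n, adjust=False).mean()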
def macd(df, n=12, m=26, k=9):
"""
Moving Average Convergence Divergence (MACD)
today's EMA(N) = 2/(N+1) × today's close + (N-1)/(N+1) × yesterday's EMA(N)
DIFF = EMA(N1) - EMA(N2)
DEA(DIF,M) = 2/(M+1) × DIF + [1 - 2/(M+1)] × DEA(REF(DIF,1),M)
MACD(BAR) = 2 × (DIF - DEA)
return:
    macd: MACD bar / OSC histogram, DIFF - DEA
    diff: difference line (DIF)
    dea: signal line
"""
_macd = pd.DataFrame()
_macd['date'] = df['date']
_macd['diff'] = _ema(df.close, n) - _ema(df.close, m)
_macd['dea'] = _ema(_macd['diff'], k)
_macd['macd'] = _macd['diff'] - _macd['dea']
return _macd
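# Hypothetical usage sketch (assumes df comes from tushare's get_k_data and
# has 'date' and 'close' columns):
#   import tushare as ts
#   df = ts.get_k_data('600000', start='2018-01-01')
#   print(macd(df).tail())   # columns: date, diff, dea, macd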
def kdj(df, n=9):
"""
Stochastic oscillator KDJ
N-day RSV = (close on day N - lowest low within N days) / (highest high within N days - lowest low within N days) × 100%
today's K = 2/3 × yesterday's K + 1/3 × today's RSV = SMA(RSV, M1)
today's D = 2/3 × yesterday's D + 1/3 × today's K = SMA(K, M2)
today's J = 3 × today's K - 2 × today's D
"""
_kdj = pd.DataFrame()
_kdj['date'] = df['date']
rsv = (df.close - df.low.rolling(n).min()) / (df.high.rolling(n).max() - df.low.rolling(n).min()) * 100
_kdj['k'] = sma(rsv, 3)
_kdj['d'] = sma(_kdj.k, 3)
_kdj['j'] = 3 * _kdj.k - 2 * _kdj.d
return _kdj
def rsi(df, n=6):
"""
Relative Strength Index (RSI)
LC = REF(CLOSE, 1)
RSI = SMA(MAX(CLOSE-LC, 0), N, 1) / SMA(ABS(CLOSE-LC), N, 1) × 100
SMA(C,N,M) = M/N × today's close + (N-M)/N × yesterday's SMA(N)
"""
# pd.set_option('display.max_rows', 1000)
_rsi = pd.DataFrame()
_rsi['date'] = df['date']
px = df.close - df.close.shift(1)
px[px < 0] = 0
_rsi['rsi'] = sma(px, n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
# def tmax(x):
# if x < 0:
# x = 0
# return x
# _rsi['rsi'] = sma((df['close'] - df['close'].shift(1)).apply(tmax), n) / sma((df['close'] - df['close'].shift(1)).abs(), n) * 100
return _rsi
def vrsi(df, n=6):
"""
Volume Relative Strength Index
VRSI = SMA(MAX(VOL - REF(VOL,1), 0), N, 1) / SMA(ABS(VOL - REF(VOL,1)), N, 1) × 100%
"""
_vrsi = pd.DataFrame()
_vrsi['date'] = df['date']
px = df['volume'] - df['volume'].shift(1)
px[px < 0] = 0
_vrsi['vrsi'] = sma(px, n) / sma((df['volume'] - df['volume'].shift(1)).abs(), n) * 100
return _vrsi
def boll(df, n=26, k=2):
"""
Bollinger Bands BOLL boll(26,2)  MID = MA(N)
standard deviation MD = sqrt[∑(CLOSE - MA(CLOSE,N))^2 / N]
UPPER=MID+k×MD
LOWER=MID-k×MD
"""
_boll = pd.DataFrame()
_boll['date'] = df.date
_boll['mid'] = _ma(df.close, n)
_mdd = _md(df.close, n)
_boll['up'] = _boll.mid + k * _mdd
_boll['low'] = _boll.mid - k * _mdd
return _boll
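# Sanity-check sketch (assumption: df from get_k_data): by construction the
# band width equals 2*k rolling standard deviations of the close.
#   b = boll(df, n=26, k=2)
#   gap = ((b.up - b.low) - 4 * df.close.rolling(26).std(ddof=0)).abs().max()
#   assert gap < 1e-8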
def bbiboll(df, n=10, k=3):
"""
BBI bull/bear Bollinger bands bbiboll(10,3)
BBI={MA(3)+ MA(6)+ MA(12)+ MA(24)}/4
standard deviation MD = sqrt[∑(BBI - MA(BBI,N))^2 / N]
UPR= BBI+k×MD
DWN= BBI-k×MD
"""
# pd.set_option('display.max_rows', 1000)
_bbiboll = pd.DataFrame()
_bbiboll['date'] = df.date
_bbiboll['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
_bbiboll['md'] = _md(_bbiboll.bbi, n)
_bbiboll['upr'] = _bbiboll.bbi + k * _bbiboll.md
_bbiboll['dwn'] = _bbiboll.bbi - k * _bbiboll.md
return _bbiboll
def wr(df, n=14):
"""
Williams %R (W&R)
WR = [HIGHEST(high, N) - close] / [HIGHEST(high, N) - LOWEST(low, N)] × 100%
"""
_wr = pd.DataFrame()
_wr['date'] = df['date']
highest = df.high.rolling(n).max()
_wr['wr'] = (highest - df.close) / (highest - df.low.rolling(n).min()) * 100
return _wr
def bias(df, n=12):
"""
Bias ratio
bias = [(today's close - 12-day average price) / 12-day average price] × 100%
"""
_bias = pd.DataFrame()
_bias['date'] = df.date
_mav = df.close.rolling(n).mean()
_bias['bias'] = (np.true_divide((df.close - _mav), _mav)) * 100
# _bias["bias"] = np.vectorize(lambda x: round(Decimal(x), 4))(BIAS)
return _bias
def asi(df, n=5):
"""
Accumulation Swing Index (ASI)  # the formula given by Tonghuashun is incomplete, so it is not reproduced here
"""
_asi = pd.DataFrame()
_asi['date'] = df.date
_m = pd.DataFrame()
_m['a'] = (df.high - df.close.shift()).abs()
_m['b'] = (df.low - df.close.shift()).abs()
_m['c'] = (df.high - df.low.shift()).abs()
_m['d'] = (df.close.shift() - df.open.shift()).abs()
_m['r'] = _m.apply(lambda x: x.a + 0.5 * x.b + 0.25 * x.d if max(x.a, x.b, x.c) == x.a else (
x.b + 0.5 * x.a + 0.25 * x.d if max(x.a, x.b, x.c) == x.b else x.c + 0.25 * x.d
), axis=1)
_m['x'] = df.close - df.close.shift() + 0.5 * (df.close - df.open) + df.close.shift() - df.open.shift()
_m['k'] = np.maximum(_m.a, _m.b)
_asi['si'] = 16 * (_m.x / _m.r) * _m.k
_asi["asi"] = _ma(_asi.si, n)
return _asi
def vr_rate(df, n=26):
"""
Volume Ratio (VR, vr_rate)
VR=(AVS+1/2CVS)/(BVS+1/2CVS)×100
where:
AVS: sum of volume on up days within the N-day window
BVS: sum of volume on down days within the N-day window
CVS: sum of volume on unchanged days within the N-day window
"""
_vr = pd.DataFrame()
_vr['date'] = df['date']
_m = pd.DataFrame()
_m['volume'] = df.volume
_m['cs'] = df.close - df.close.shift(1)
_m['avs'] = _m.apply(lambda x: x.volume if x.cs > 0 else 0, axis=1)
_m['bvs'] = _m.apply(lambda x: x.volume if x.cs < 0 else 0, axis=1)
_m['cvs'] = _m.apply(lambda x: x.volume if x.cs == 0 else 0, axis=1)
_vr["vr"] = (_m.avs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()
) / (_m.bvs.rolling(n).sum() + 1 / 2 * _m.cvs.rolling(n).sum()) * 100
return _vr
def vr(df, n=5):
"""
Ratio of the average per-minute volume since the open to the average per-minute volume of the past 5 trading days
volume ratio := V / REF(MA(V,5), 1);
gain := (C - REF(C,1)) / REF(C,1) * 100;
1) a volume ratio above 1.8 with the current gain between 0 and 2% is used for intraday stock screening
screen: volume ratio > 1.8 AND gain > 0 AND gain < 2;
"""
_vr = pd.DataFrame()
_vr['date'] = df.date
_vr['vr'] = df.volume / _ma(df.volume, n).shift(1)
_vr['rr'] = (df.close - df.close.shift(1)) / df.close.shift(1) * 100
return _vr
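# Example intraday screen from the docstring (sketch):
#   sel = vr(df)
#   picks = sel[(sel.vr > 1.8) & (sel.rr > 0) & (sel.rr < 2)]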
def arbr(df, n=26):
"""
AR/BR popularity and willingness indicator arbr(26)
N-day AR = sum of (H-O) over N days divided by sum of (O-L) over N days
where H is the daily high, L the daily low, O the daily open; N is the lookback parameter, originally set to 26 days
N-day BR = sum of (H-CY) over N days divided by sum of (CY-L) over N days
where H is the daily high, L the daily low, CY the previous trading day's close; N is the lookback parameter, originally set to 26 days.
"""
_arbr = pd.DataFrame()
_arbr['date'] = df.date
_arbr['ar'] = (df.high - df.open).rolling(n).sum() / (df.open - df.low).rolling(n).sum() * 100
_arbr['br'] = (df.high - df.close.shift(1)).rolling(n).sum() / (df.close.shift() - df.low).rolling(n).sum() * 100
return _arbr
def dpo(df, n=20, m=6):
"""
Detrended Price Oscillator dpo(20,6)
DPO=CLOSE-MA(CLOSE, N/2+1)
MADPO=MA(DPO,M)
"""
_dpo = pd.DataFrame()
_dpo['date'] = df['date']
_dpo['dpo'] = df.close - _ma(df.close, int(n / 2 + 1))
_dpo['dopma'] = _ma(_dpo.dpo, m)
return _dpo
def trix(df, n=12, m=20):
"""
Triple exponentially smoothed moving average TRIX(12)
TR = EMA(EMA(EMA(CLOSE,N),N),N), i.e. three rounds of smoothing
TRIX = (TR - yesterday's TR) / yesterday's TR × 100
TRMA=MA(TRIX,M)
"""
_trix = pd.DataFrame()
_trix['date'] = df.date
tr = _ema(_ema(_ema(df.close, n), n), n)
_trix['trix'] = (tr - tr.shift()) / tr.shift() * 100
_trix['trma'] = _ma(_trix.trix, m)
return _trix
def bbi(df):
"""
Bull and Bear Index BBI(3,6,12,24)
BBI = (3-day MA + 6-day MA + 12-day MA + 24-day MA) / 4
"""
_bbi = pd.DataFrame()
_bbi['date'] = df['date']
_bbi['bbi'] = (_ma(df.close, 3) + _ma(df.close, 6) + _ma(df.close, 12) + _ma(df.close, 24)) / 4
return _bbi
def mtm(df, n=6, m=5):
"""
Momentum indicator MTM(6,5)
MTM(N) = C - REF(C,N), where C is today's close and REF(C,N) is the close N days ago; N counts trading days only (holidays excluded).
MTMMA(MTM,N1)= MA(MTM,N1)
N is the lag in days, N1 the averaging window
"""
_mtm = pd.DataFrame()
_mtm['date'] = df.date
_mtm['mtm'] = df.close - df.close.shift(n)
_mtm['mtmma'] = _ma(_mtm.mtm, m)
return _mtm
def obv(df):
"""
On Balance Volume (OBV)
bull/bear net ratio = [(close - low) - (high - close)] ÷ (high - low) × V  # Tonghuashun apparently uses the formula below
main formula: today's OBV = yesterday's OBV + today's volume
1. the base OBV is 0, i.e. OBV = 0 on the stock's first listed day
2. if today's close > yesterday's close, today's OBV = yesterday's OBV + today's volume
3. if today's close < yesterday's close, today's OBV = yesterday's OBV - today's volume
4. if today's close = yesterday's close, today's OBV = yesterday's OBV
"""
_obv = pd.DataFrame()
_obv["date"] = df['date']
# tmp = np.true_divide(((df.close - df.low) - (df.high - df.close)), (df.high - df.low))
# _obv['obvv'] = tmp * df.volume
# _obv["obv"] = _obv.obvv.expanding(1).sum() / 100
_m =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2020 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of CARS
# (see https://github.com/CNES/cars).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module is responsible for the transition between triangulation and
rasterization steps
"""
# Standard imports
import logging
from collections import namedtuple
from typing import List, Tuple, Union
# Third party imports
import numpy as np
import pandas
import xarray as xr
from scipy.spatial import cKDTree # pylint: disable=no-name-in-module
# CARS imports
from cars.core import constants as cst
from cars.core import projection
def create_combined_cloud( # noqa: C901
cloud_list: List[xr.Dataset],
dsm_epsg: int,
color_list: List[xr.Dataset] = None,
resolution: float = None,
xstart: float = None,
ystart: float = None,
xsize: int = None,
ysize: int = None,
on_ground_margin: int = 0,
epipolar_border_margin: int = 0,
radius: float = 1,
with_coords: bool = False,
) -> Tuple[pandas.DataFrame, int]:
"""
Combine a list of clouds (and their colors) into a pandas dataframe
structured with the following labels:
* if no colors in input and no mask data present in cloud_list datasets:
>>> labels=[cst.POINTS_CLOUD_VALID_DATA, cst.X, cst.Y, cst.Z]
The combined cloud has x, y, z columns along with 'valid data' one.
The valid data is a mask set to True if the data
are not on the epipolar image margin (epipolar_border_margin),
otherwise it is set to False.
* if no colors in input and mask data present in cloud_list datasets:
>>> labels=[cst.POINTS_CLOUD_VALID_DATA,
>>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK]
The mask values are added to the dataframe.
* if colors are set in input and mask data are present
in the cloud_list datasets:
>>> labels=[cst.POINTS_CLOUD_VALID_DATA,
>>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK,
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"0",
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"1",
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"2"]
Color channels information are added to the dataframe.
* if colors in input, mask data present in the cloud_list datasets and
the with_coords option is activated:
>>> labels=[cst.POINTS_CLOUD_VALID_DATA,
>>> cst.X, cst.Y, cst.Z, cst.POINTS_CLOUD_MSK,
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"0",
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"1",
>>> cst.POINTS_CLOUD_CLR_KEY_ROOT+"2"
>>> cst.POINTS_CLOUD_COORD_EPI_GEOM_I,
>>> cst.POINTS_CLOUD_COORD_EPI_GEOM_J,
>>> cst.POINTS_CLOUD_IDX_IM_EPI]
The pixel position of the xyz point in the original epipolar
image (coord_epi_geom_i, coord_epi_geom_j) are added
to the dataframe along with the index of its original cloud
in the cloud_list input.
:raise Exception: if a color_list is set
but does not have the same length as the cloud list
:param cloud_list: list of cloud points to rasterize
:param dsm_epsg: epsg code for the CRS of the final output raster
:param color_list: Additional list of images
with bands to rasterize (same size as cloud_list), or None
:param resolution: Resolution of rasterized cells, in cloud CRS units
(if None, the whole clouds are combined)
:param xstart: xstart of the rasterization grid
(if None, the whole clouds are combined)
:param ystart: ystart of the rasterization grid
(if None, the whole clouds are combined)
:param xsize: xsize of the rasterization grid
(if None, the whole clouds are combined)
:param ysize: ysize of the rasterization grid
(if None, the whole clouds are combined)
:param on_ground_margin: Margin added to the rasterization grid
(default value: 0)
:param epipolar_border_margin: Margin used
to invalidate cells too close to epipolar border. (default value: 0)
:param radius: Radius for hole filling
(if None, the whole clouds are combined).
:param with_coords: Option enabling the adding to the combined cloud
of information of each point to retrieve their positions
in the original epipolar images
:return: Tuple formed with the combined clouds and color
in a single pandas dataframe and the epsg code
"""
worker_logger = logging.getLogger("distributed.worker")
# check input data consistency
if color_list is not None and len(cloud_list) != len(color_list):
raise Exception("There shall be as many cloud elements as color ones")
epsg = None
for cloud_list_item in cloud_list:
if epsg is None:
epsg = int(cloud_list_item.attrs[cst.EPSG])
elif int(cloud_list_item.attrs[cst.EPSG]) != epsg:
worker_logger.error(
"All points clouds do not have the same epsg code"
)
# compute margin/roi and final number of data to add to the combined cloud
roi = (
resolution is not None
and xstart is not None
and ystart is not None
and xsize is not None
and ysize is not None
)
if roi:
total_margin = (on_ground_margin + radius + 1) * resolution
xend = xstart + (xsize + 1) * resolution
yend = ystart - (ysize + 1) * resolution
nb_data = [cst.POINTS_CLOUD_VALID_DATA, cst.X, cst.Y, cst.Z]
# check if the input mask values are present in the dataset
nb_data_msk = 0
for cloud_list_item in cloud_list:
ds_values_list = [key for key, _ in cloud_list_item.items()]
if cst.POINTS_CLOUD_MSK in ds_values_list:
nb_data.append(cst.POINTS_CLOUD_MSK)
nb_data_msk = 1
break
if color_list is not None:
clr_im = color_list[0].im.values
nb_band_clr = clr_im.shape[0]
list_clr = [
"{}{}".format(cst.POINTS_CLOUD_CLR_KEY_ROOT, i)
for i in range(nb_band_clr)
]
nb_data.extend(list_clr)
else:
nb_band_clr = 0
if with_coords:
nb_data.extend(
[
cst.POINTS_CLOUD_COORD_EPI_GEOM_I,
cst.POINTS_CLOUD_COORD_EPI_GEOM_J,
cst.POINTS_CLOUD_IDX_IM_EPI,
]
)
# iterate through input clouds
cloud = np.zeros((0, len(nb_data)), dtype=np.float64)
nb_points = 0
for cloud_list_idx, cloud_list_item in enumerate(cloud_list):
full_x = cloud_list_item[cst.X].values
full_y = cloud_list_item[cst.Y].values
full_z = cloud_list_item[cst.Z].values
# get mask of points inside the roi (plus margins)
if roi:
# if the points clouds are not in the same referential as the roi,
# it is converted using the dsm_epsg
if epsg != dsm_epsg:
(
full_x,
full_y,
) = projection.get_converted_xy_np_arrays_from_dataset(
cloud_list_item, dsm_epsg
)
msk_xstart = np.where(full_x > xstart - total_margin, True, False)
msk_xend = np.where(full_x < xend + total_margin, True, False)
msk_yend = np.where(full_y > yend - total_margin, True, False)
msk_ystart = np.where(full_y < ystart + total_margin, True, False)
terrain_tile_data_msk = np.logical_and(
msk_xstart,
np.logical_and(msk_xend, np.logical_and(msk_ystart, msk_yend)),
)
terrain_tile_data_msk_pos = terrain_tile_data_msk.astype(
np.int8
).nonzero()
# if the points clouds are not in the same referential as the roi,
# retrieve the initial values
if epsg != dsm_epsg:
full_x = cloud_list_item[cst.X].values
full_y = cloud_list_item[cst.Y].values
# if no point is found, continue
if terrain_tile_data_msk_pos[0].shape[0] == 0:
continue
# get useful data bounding box
bbox = [
np.min(terrain_tile_data_msk_pos[0]),
np.min(terrain_tile_data_msk_pos[1]),
np.max(terrain_tile_data_msk_pos[0]),
np.max(terrain_tile_data_msk_pos[1]),
]
else:
bbox = [0, 0, full_y.shape[0] - 1, full_y.shape[1] - 1]
# add (x, y, z) information to the current cloud
c_x = full_x[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]
c_y = full_y[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]
c_z = full_z[bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1]
c_cloud = np.zeros(
(len(nb_data), (bbox[2] - bbox[0] + 1) * (bbox[3] - bbox[1] + 1))
)
c_cloud[1, :] = np.ravel(c_x)
c_cloud[2, :] = np.ravel(c_y)
c_cloud[3, :] = np.ravel(c_z)
ds_values_list = [key for key, _ in cloud_list_item.items()]
if cst.POINTS_CLOUD_MSK in ds_values_list:
c_msk = cloud_list_item[cst.POINTS_CLOUD_MSK].values[
bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1
]
c_cloud[4, :] = np.ravel(c_msk)
# add data valid mask
# (points that are not in the border of the epipolar image)
if epipolar_border_margin == 0:
epipolar_margin_mask = np.full(
(
cloud_list_item[cst.X].values.shape[0],
cloud_list_item[cst.X].values.shape[1],
),
True,
)
else:
epipolar_margin_mask = np.full(
(
cloud_list_item[cst.X].values.shape[0],
cloud_list_item[cst.X].values.shape[1],
),
False,
)
epipolar_margin_mask[
epipolar_border_margin:-epipolar_border_margin,
epipolar_border_margin:-epipolar_border_margin,
] = True
c_epipolar_margin_mask = epipolar_margin_mask[
bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1
]
c_cloud[0, :] = np.ravel(c_epipolar_margin_mask)
# add the color information to the current cloud
if color_list is not None:
c_color = color_list[cloud_list_idx].im.values[
:, bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1
]
for band in range(nb_band_clr):
c_cloud[4 + nb_data_msk + band, :] = np.ravel(
c_color[band, :, :]
)
# add the original image coordinates information to the current cloud
if with_coords:
coords_line = np.linspace(bbox[0], bbox[2], bbox[2] - bbox[0] + 1)
coords_col = np.linspace(bbox[1], bbox[3], bbox[3] - bbox[1] + 1)
coords_col, coords_line = np.meshgrid(coords_col, coords_line)
c_cloud[4 + nb_data_msk + nb_band_clr, :] = np.ravel(coords_line)
c_cloud[4 + nb_data_msk + nb_band_clr + 1, :] = np.ravel(coords_col)
c_cloud[4 + nb_data_msk + nb_band_clr + 2, :] = cloud_list_idx
# remove masked data (pandora + out of the terrain tile points)
c_terrain_tile_data_msk = (
cloud_list_item[cst.POINTS_CLOUD_CORR_MSK].values[
bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1
]
== 255
)
if roi:
c_terrain_tile_data_msk = np.logical_and(
c_terrain_tile_data_msk,
terrain_tile_data_msk[
bbox[0] : bbox[2] + 1, bbox[1] : bbox[3] + 1
],
)
c_terrain_tile_data_msk = np.ravel(c_terrain_tile_data_msk)
c_terrain_tile_data_msk_pos = np.nonzero(~c_terrain_tile_data_msk)
nb_points += c_cloud.shape[1]
c_cloud = np.delete(
c_cloud.transpose(), c_terrain_tile_data_msk_pos[0], 0
)
# add current cloud to the combined one
cloud = np.concatenate([cloud, c_cloud], axis=0)
worker_logger.debug("Received {} points to rasterize".format(nb_points))
worker_logger.debug(
"Keeping {}/{} points "
"inside rasterization grid".format(cloud.shape[0], nb_points)
)
pd_cloud =
|
pandas.DataFrame(cloud, columns=nb_data)
|
pandas.DataFrame
|
"""
MIT License
Copyright (c) 2019 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
"""
import pandas as pd
import numpy as np
import sys, os, site, zipfile, math, time, json, io
import googlemaps, urllib, shapely, shutil, requests
import xml.etree.ElementTree as ET
from glob import glob
from urllib.error import HTTPError
from urllib.request import URLError
from http.client import IncompleteRead
from zipfile import BadZipFile
from tqdm import tqdm, trange
from warnings import warn
###########################
### IMPORT PROJECT PATH ###
import pvvm.settings
revmpath = pvvm.settings.revmpath
datapath = pvvm.settings.datapath
apikeys = pvvm.settings.apikeys
nsrdbparams = pvvm.settings.nsrdbparams
#####################
### Imports from pvvm
import pvvm.toolbox
import pvvm.io
#######################
### DICTS AND LISTS ###
#######################
isos = ['CAISO', 'ERCOT', 'MISO', 'PJM', 'NYISO', 'ISONE']
resolutionlmps = {
('CAISO', 'da'): 60, ('CAISO', 'rt'): 5,
('ERCOT', 'da'): 60, ('ERCOT', 'rt'): 5,
('MISO', 'da'): 60, ('MISO', 'rt'): 60,
('PJM', 'da'): 60, ('PJM', 'rt'): 60,
('NYISO', 'da'): 60, ('NYISO', 'rt'): 5,
('ISONE', 'da'): 60, ('ISONE', 'rt'): 60,
}
################
### DOWNLOAD ###
################
###############
### General use
def constructpayload(**kwargs):
out = []
for kwarg in kwargs:
out.append('{}={}'.format(kwarg, kwargs[kwarg]))
stringout = '&'.join(out)
return stringout
def constructquery(urlstart, **kwargs):
out = '{}{}'.format(urlstart, constructpayload(**kwargs))
return out
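# e.g. (illustration; kwargs keep their insertion order in Python 3.6+):
#   constructquery('http://oasis.caiso.com/oasisapi/GroupZip?',
#                  groupid='DAM_LMP_GRP', version=1, resultformat=6)
#   -> 'http://oasis.caiso.com/oasisapi/GroupZip?groupid=DAM_LMP_GRP&version=1&resultformat=6'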
def stampify(date, interval=pd.Timedelta('1H')):
datetime = pd.Timestamp(date)
if interval == pd.Timedelta('1H'):
dateout = '{}{:02}{:02}T{:02}'.format(
datetime.year, datetime.month,
datetime.day, datetime.hour)
elif interval == pd.Timedelta('1D'):
dateout = '{}{:02}{:02}'.format(
datetime.year, datetime.month,
datetime.day)
return dateout
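# e.g. stampify('2017-06-01 14:30')                        -> '20170601T14'
#      stampify('2017-06-01', interval=pd.Timedelta('1D')) -> '20170601'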
def download_file_series(urlstart, urlend, fileseries, filepath,
overwrite=False, sleeptime=60, numattempts=200, seriesname=True):
"""
Example
-------
You want to download a list of files at urls = [
'http://www.test.com/foo001.csv', 'http://www.test.com/foo002.csv'].
Then:
urlstart = 'http://www.test.com/foo'
urlend = '.csv'
fileseries = ['001', '002']
If you want the files to be named 'foo001.csv', use seriesname=False
If you want the files to be named '001.csv', use seriesname=True
"""
filepath = pvvm.toolbox.pathify(filepath, make=True)
### Make lists of urls, files to download, and filenames
urls = [(urlstart + file + urlend) for file in fileseries]
todownload = [os.path.basename(url) for url in urls]
if seriesname == True:
filenames = [os.path.basename(file) + urlend for file in fileseries]
else:
filenames = todownload
### Get the list of downloaded files
downloaded = [os.path.basename(file) for file in glob(filepath + '*')]
### Remake the list if overwrite == False
if overwrite == False:
filestodownload = []
urlstodownload = []
fileseriesnames = []
for i in range(len(filenames)):
if filenames[i] not in downloaded:
filestodownload.append(todownload[i])
urlstodownload.append(urls[i])
fileseriesnames.append(filenames[i])
elif overwrite == True:
filestodownload = todownload
urlstodownload = urls
fileseriesnames = filenames
### Download the files
for i in trange(len(urlstodownload)):
### Attempt the download
attempts = 0
while attempts < numattempts:
try:
urllib.request.urlretrieve(
urlstodownload[i], filepath + fileseriesnames[i])
break
except (HTTPError, IncompleteRead, EOFError) as err:
print(urlstodownload[i])
print(filestodownload[i])
print('Rebuffed on attempt # {} at {} by "{}". '
'Will retry in {} seconds.'.format(
attempts, pvvm.toolbox.nowtime(), err, sleeptime))
attempts += 1
time.sleep(sleeptime)
###########################
### Geographic manipulation
def rowlatlon2x(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
x = math.cos(latrad) * math.cos(lonrad)
return x
def rowlatlon2y(row):
latrad = row['latitude'] * math.pi / 180
lonrad = row['longitude'] * math.pi / 180
y = math.cos(latrad) * math.sin(lonrad)
return y
def rowlatlon2z(row):
latrad = row['latitude'] * math.pi / 180
z = math.sin(latrad)
return z
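# The three helpers above map (latitude, longitude) in degrees onto the unit
# sphere, so x**2 + y**2 + z**2 == 1 for every row; presumably used so that
# coordinates can be averaged without longitude wrap-around problems, e.g.:
#   row = {'latitude': 40.0, 'longitude': -105.0}
#   rowlatlon2x(row)**2 + rowlatlon2y(row)**2 + rowlatlon2z(row)**2  # ~= 1.0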
############
### ISO LMPs
"""
Note: These scripts worked as of early 2018, but MISO, PJM, and NYISO have since
changed their websites, and CAISO has removed data prior to 20150303. Scripts
are included here for documentary purposes and as a resource for future
data collection, but are unlikely to work given ISO website changes.
"""
def download_caiso_lmp_allnodes(market, start, filepathout,
product='LMP', numattempts=200, waittime=10):
urlstart = 'http://oasis.caiso.com/oasisapi/GroupZip?'
columnsout = [
'INTERVALSTARTTIME_GMT', 'NODE', 'MW',
'OPR_DT', 'OPR_HR', 'OPR_INTERVAL']
if market in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif market in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
starttimestamp = pd.Timestamp(start)
endtimestamp = starttimestamp + interval
startdatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
enddatetime = '{}{:02}{:02}T{:02}:00-0000'.format(
endtimestamp.year, endtimestamp.month,
endtimestamp.day, endtimestamp.hour)
if interval == pd.Timedelta('1D'):
fileout = '{}{:02}{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day)
elif interval == pd.Timedelta('1H'):
fileout = '{}{:02}{:02}T{:02}.gz'.format(
starttimestamp.year, starttimestamp.month,
starttimestamp.day, starttimestamp.hour)
url = constructquery(
urlstart,
groupid='{}_LMP_GRP'.format(market),
startdatetime=startdatetime,
enddatetime=enddatetime,
version=1,
resultformat=6)
attempts = 0
while attempts < numattempts:
try:
# if product.lower() in ['mcc', 'mce', 'mcl']:
# if (market.upper() in ['DAM', 'RUC']) and (starttimestamp.year >= 2016):
# if market.upper() in ['DAM', 'RUC']:
if ((product.lower() in ['mcc', 'mce', 'mcl'])
or ((market == 'DAM') and product.lower() == 'lmp')):
zip_file = zipfile.ZipFile(io.BytesIO(
urllib.request.urlopen(url).read()))
for csv_file in zip_file.infolist():
if csv_file.filename.endswith(
'{}_v1.csv'.format(product.upper())):
df = pd.read_csv(zip_file.open(csv_file.filename))
else:
df = pd.read_csv(url, compression='zip')
dfout = df[df['LMP_TYPE'] == product.upper()][columnsout]
dfout.to_csv(
'{}{}'.format(filepathout, fileout),
columns=columnsout,
index=False,
compression='gzip')
return dfout
except (
URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, KeyError, HTTPError, UnboundLocalError) as error:
print(
'Error for {} on attempt {}/{}: {}'.format(
start, attempts, numattempts, error),
# end='\r',
)
attempts += 1
time.sleep(waittime)
if attempts >= numattempts:
raise URLError('{}{}'.format(filepathout, fileout))
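# Hypothetical usage sketch (arguments are illustrative; the OASIS API may have
# changed since this module was written):
#   download_caiso_lmp_allnodes(market='DAM', start='2017-06-01',
#                               filepathout='CAISO/in/lmp/da/', product='LMP')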
def download_lmps(year, iso, market, overwrite=False, sleeptime=60,
product='LMP', submarket=None, numattempts=200, subset=None,
waittime=10, filepath=None):
"""
Inputs
------
subset: None or slice()
Notes
-----
* ERCOT LMPs more than 30 days old must be requested from ERCOT.
Requests can be filed at http://www.ercot.com/about/contact/inforequest.
Files should be placed in the folder
revmpath + 'ERCOT/in/lmp/{}/{}/'.format(market, year)
where year is the year of the timestamp within the files.
Note that the date in the filename for day-ahead LMPs is the date before
the timestamps within the file: for example, file
('cdr.00012328.0000000000000000.20151231.125905514.DAMHRLMPNP4183_csv')
contains timestamps for 20160101, and should be placed in the 2016 folder.
"""
### Normalize inputs
iso = iso.upper()
market = market.lower()
year = int(year)
assert market in ['da', 'rt']
assert iso in ['CAISO', 'MISO', 'PJM', 'NYISO', 'ISONE']
### Set file structure
if filepath is None:
filepath = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if not os.path.exists(filepath): os.makedirs(filepath)
### Adjust inputs for different isos
urlstart = {
'ISONE': {
'da': 'https://www.iso-ne.com/static-transform/csv/histRpts/da-lmp/WW_DALMP_ISO_',
'rt': 'https://www.iso-ne.com/static-transform/csv/histRpts/rt-lmp/lmp_rt_final_'},
'MISO': {
# 'da': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
# 'rt': 'https://old.misoenergy.org/Library/Repository/Market%20Reports/',
'da': 'https://docs.misoenergy.org/marketreports/',
'rt': 'https://docs.misoenergy.org/marketreports/',
},
'PJM': {
'da': 'http://www.pjm.com/pub/account/lmpda/',
'rt': 'http://www.pjm.com/pub/account/lmp/'},
'NYISO': {
'da': 'http://mis.nyiso.com/public/csv/damlbmp/',
'rt': 'http://mis.nyiso.com/public/csv/realtime/'},
}
urlend = {
'ISONE': {'da': '.csv', 'rt': '.csv'},
'MISO': {'da': '_da_lmp.csv', 'rt': '_rt_lmp_final.csv'},
'PJM': {'da': '-da.zip', 'rt': '.zip'},
'NYISO': {'da': 'damlbmp_gen_csv.zip', 'rt': 'realtime_gen_csv.zip'},
}
files = {
'ISONE': pvvm.toolbox.makedays(year),
'MISO': pvvm.toolbox.makedays(year),
'PJM': pvvm.toolbox.makedays(year),
'NYISO': ['{}{:02}01'.format(year, month) for month in range(1,13)]
}
### Download files
if iso == 'ISONE':
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'MISO':
urls = [(urlstart[iso][market] + file + '_da_expost_lmp.csv')
if (int(file) >= 20150301) and (market == 'da')
else (urlstart[iso][market] + file + urlend[iso][market])
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'PJM':
da_updated = {
'20151201': '-da_updated.zip',
'20150930': '-da_updated.zip',
'20140617': '-da_updated.zip',
'20150616': '-da_updated.zip',
'20150615': '-da_updated.zip',
'20150614': '-da_updated.zip',
'20140613': '-da_updated.zip',
'20150603': '-da_updated.zip',
'20150602': '-da_updated.zip',
'20150601': '-da_updated.zip',
'20150409': '-da_updated.zip',
'20140327': '-da_updated.zip',
'20111012': '-da_update.zip',
'20111011': '-da_update.zip',
}
rt_updated = {
'20170116': '_updated.zip',
'20170115': '_updated.zip',
'20170114': '_updated.zip',
'20170113': '_updated.zip',
'20160923': '_updated.zip',
'20160417': '_updated.zip',
'20160416': '_updated.zip',
'20160415': '_updated.zip',
'20151110': '_updated.zip',
'20150929': '_updated.zip',
'20150901': '_updated.zip',
'20150831': '_updated.zip',
'20150601': '_updated.zip',
'20150504': '_updated.zip',
'20150427': '_updated.zip',
'20150407': '_updated.zip',
'20150310': '_updated.zip',
'20150309': '_updated.zip',
'20150201': '_updated.zip',
'20150131': '_updated.zip',
'20150130': '_updated.zip',
'20141112': '_updated.zip',
'20141023': '_updated.zip',
'20141013': '_updated.zip',
'20140805': '_updated.zip',
'20140710': '_updated.zip',
'20140507': '_updated.zip',
'20140128': '_updated.zip',
'20131125': '_updated.zip',
'20131120': '_updated.zip',
'20130424': '_updated.zip',
'20130307': '_updated.zip',
'20121109': '_updated.zip',
'20121023': '_updated.zip',
'20121004': '_updated.zip',
'20121003': '_updated2.zip',
'20121001': '_updated.zip',
'20110914': '_updated.zip',
'20110829': '_updated.zip',
'20110617': '_updated.zip',
'20110306': '_updated.zip',
'20110305': '_updated.zip',
'20110304': '_updated.zip',
'20101005': '_updated.zip',
'20100526': '_updated.zip',
'20100201': '_updated.zip',
'20100129': '_updated.zip',
'20100125': '_updated.zip',
'20080904': '_updated.zip',
'20080413': '_updated.zip',
'20080305': '_updated.zip',
'20080215': '_updated.zip',
'20080214': '_updated.zip',
'20071002': '_updated.zip',
'20070822': '_updated.zip',
}
if market == 'da':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/day-ahead/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171109 for day-ahead
# urls = [(urlstart[iso][market] + file + '-da.csv')
# if int(file) >= 20171109
# else (urlstart[iso][market] + file + '-da.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + da_updated[file])
if file in da_updated.keys()
else (urlstart[iso][market] + file + '-da.zip')
for file in files[iso]]
elif market == 'rt':
# print("Download 'updated' files from http://www.pjm.com/markets-and-operations/"
# "energy/real-time/lmpda.aspx and replace the files of the corresponding date"
# "downloaded here")
# ### Files switch from .zip to .csv on 20171212 for real-time
# urls = [(urlstart[iso][market] + file + '.csv')
# if int(file) >= 20171212
# else (urlstart[iso][market] + file + '.zip')
# for file in files[iso]]
# ^ Out of date; files have been reposted as zips (20180621)
urls = [(urlstart[iso][market] + file + rt_updated[file])
if file in rt_updated.keys()
else (urlstart[iso][market] + file + '.zip')
for file in files[iso]]
download_file_series(
urlstart='', urlend='', fileseries=urls, filepath=filepath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
elif iso == 'NYISO':
### NYISO files are zipped by month; put them in a separate folder
zippath = '{}/in/lmp/{}-zip/'.format(iso, market)
if not os.path.exists(zippath): os.makedirs(zippath)
download_file_series(
urlstart=urlstart[iso][market], urlend=urlend[iso][market],
fileseries=files[iso], filepath=zippath,
overwrite=overwrite, sleeptime=sleeptime, numattempts=numattempts)
### Unzip files
zips = [(zippath + file + urlend[iso][market]) for file in files[iso]]
for i in trange(len(zips)):
zip_ref = zipfile.ZipFile(zips[i], 'r')
zip_ref.extractall(filepath)
zip_ref.close()
elif iso == 'CAISO':
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
if submarket in ['RTM', 'HASP', 'RTPD']:
interval = pd.Timedelta('1H')
elif submarket in ['DAM', 'RUC']:
interval = pd.Timedelta('1D')
### Set output filepath
filepath = '{}/in/{}/{}/'.format(iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepath = '{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if not os.path.exists(filepath): os.makedirs(filepath)
queries = pd.date_range(
start=pd.Timestamp('{}-01-01T00:00'.format(year)),
end=(pd.Timestamp('{}-01-01T00:00'.format(year+1)) - interval),
freq=interval)
### Initialize error container and subset if necessary
errors = []
if subset == None: subset = slice(None)
# already_downloaded = glob('{}{}*'.format(filepath, year))
for query in tqdm(queries[subset]):
# if '{}{}.gz'.format(filepath, stampify(query)) not in already_downloaded:
if interval == pd.Timedelta('1D'):
fileout = stampify(query)[:-3]
elif interval == pd.Timedelta('1H'):
fileout = stampify(query)
if not os.path.exists('{}{}.gz'.format(filepath, fileout)):
# if overwrite == False:
# if os.path.exists('{}{}.gz'.format(filepath, stampify(query))):
# break
try:
download_caiso_lmp_allnodes(
market=submarket, start=str(query), filepathout=filepath,
product=product, numattempts=numattempts, waittime=waittime)
except (URLError, IncompleteRead, pd.errors.ParserError,
BadZipFile, HTTPError) as error:
errors.append(error)
print(error)
if len(errors) > 0:
pd.Series(errors).to_csv(
'{}__Errors__{}.csv'.format(filepath, time.strftime('%Y%m%dT%H%M%S')),
index=False)
################
### NODALIZE ###
def nodalize(year, market, iso,
filepathin=None, filepathout=None, nodesfile=None,
product='LMP', submarket=None, fillmissinghour=True):
"""
"""
### Set defaults if necessary
if iso.upper() == 'CAISO':
if filepathin == None:
filepathin = revmpath+'{}/in/{}/{}'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathin = revmpath+'{}/in/{}/{}/{}/'.format(
iso, product.lower(), market, submarket)
if filepathout == None:
filepathout = revmpath+'{}/io/{}-nodal/{}/'.format(
iso, product.lower(), market)
if (market == 'rt') and (submarket == 'RTM'):
filepathout = revmpath+'{}/io/{}-nodal/{}-month/'.format(
iso, product.lower(), market)
if (((market == 'da') and (submarket != 'DAM'))
or ((market == 'rt') and (submarket != 'RTM'))):
filepathout = revmpath+'{}/io/{}-nodal/{}/{}/'.format(
iso, product.lower(), market, submarket)
if (submarket == None) and (market == 'rt'): submarket = 'RTM'
elif (submarket == None) and (market == 'da'): submarket = 'DAM'
elif iso.upper() == 'ERCOT':
if (filepathin == None) and (market == 'da'):
filepathin = revmpath+'{}/in/lmp/{}/{}/'.format(iso, market, year)
elif (filepathout == None) and (market == 'rt'):
filepathout = revmpath+'{}/io/lmp-nodal/{}-month/'.format(iso, market)
elif filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
else:
if filepathin == None:
filepathin = revmpath+'{}/in/lmp/{}/'.format(iso, market)
if filepathout == None:
filepathout = revmpath+'{}/io/lmp-nodal/{}/'.format(iso, market)
### Make output folders if necessary
if not os.path.exists(filepathout):
os.makedirs(filepathout, exist_ok=True)
if not os.path.exists(revmpath+'{}/io/missingnodes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/missingnodes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/datatimes/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/datatimes/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper())):
os.makedirs(revmpath+'{}/io/fulltimenodes/year/'.format(iso.upper()), exist_ok=True)
if not os.path.exists(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market)):
os.makedirs(revmpath+'{}/io/fulltimenodes/day/{}/'.format(iso.upper(), market),
exist_ok=True)
print(filepathout)
### Shared components
nodesfiles = {
'CAISO': revmpath+'CAISO/io/caiso-node-latlon.csv',
'ERCOT': revmpath+'ERCOT/io/ercot-node-latlon.csv',
#'MISO': revmpath+'MISO/in/miso-node-map.csv',
'MISO': revmpath+'MISO/io/miso-node-latlon.csv',
# 'PJM': revmpath+'PJM/io/pjm-pnode-latlon-uniquepoints.csv',
'PJM': revmpath+'PJM/io/pjm-node-latlon.csv',
'NYISO': revmpath+'NYISO/io/nyiso-node-latlon.csv',
'ISONE': revmpath+'ISONE/io/isone-node-latlon.csv'
}
if nodesfile is None:
nodesfile = nodesfiles[iso]
resolution = {
'CAISO': {'da': 60, 'rt': 5}, 'ERCOT': {'da': 60, 'rt': 5},
'MISO': {'da': 60, 'rt': 60}, 'PJM': {'da': 60, 'rt': 60},
'NYISO': {'da': 60, 'rt': 5}, 'ISONE': {'da': 60, 'rt': 60},
}
### Get file list and iso/market info
# files = glob('{}{}*'.format(filepathin, year))
files = sorted(glob('{}{}*'.format(filepathin, year)))
print('head(files):')
for file in files[:3]:
print(file)
print('tail(files):')
for file in files[-3:]:
print(file)
timezone = pvvm.toolbox.tz_iso[iso]
res = resolution[iso][market]
### Make the inputs easier to work with
iso = iso.upper()
hours = pvvm.toolbox.yearhours(year)
dates = pvvm.toolbox.makedays(year)
### TODO: figure out how to generalize this
# if len(files) != len(dates):
# print('len(files) = {}'.format(len(files)))
# print('len(dates) = {}'.format(len(dates)))
# raise Exception("files and dates don't match")
if iso == 'ISONE':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True,
names=['Node'], skiprows=1)
### Load daily files
colnames = ['intime', 'node', 'lmp']
dfdict = {}
for i in trange(len(files)):
dfday = pd.read_csv(
files[i], skiprows=6, usecols=[2,4,6], names=colnames,
dtype={'intime':str, 'node':'category', 'lmp':float})
dfday.drop(dfday.index[-1], inplace=True)
dfday.loc[:,'intime'] = dates[i] + 'H' + dfday.loc[:,'intime']
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
### Make new index
oldtime = list(dfall.intime.unique())
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
dfall.loc[:,'intime'] = dfall.loc[:,'intime'].apply(
lambda x: indexconvert[x])
dfall.loc[:,'intime'] = pd.to_datetime(dfall['intime'])
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'MISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['Node'])
### Pick columns from input file
usecols = [0, 2,
3, 4, 5, 6, 7, 8, 9, 10,11,12,13,14,
15,16,17,18,19,20,21,22,23,24,25,26]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['Node', 'Value']
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=5, header=None,
usecols=usecols,
dtype={0: 'category'}, names=colnames)
dfday = dfin.loc[dfin['Value'] == 'LMP'].T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0,1]])
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j]])
numhours = hours - len(df[df[nodesin[j]].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'PJM':
### Set skiprows (different headers for 'da' and 'rt' markets)
skiprows = {'da': 8, 'rt': 18}
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True)
### Pick columns from input file
usecols = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_springforward = [1,
7, 10, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76]
usecols_dst_fallback = [1,
7, 10, 13, 16, 19, 22, 25, 28, 31, 34, 37, 40,
43, 46, 49, 52, 55, 58, 61, 64, 67, 70, 73, 76, 79]
### Load daily files
dfdict = {}
for i in trange(len(files)):
colnames = ['PnodeID']
if dates[i] not in [pvvm.toolbox.dst_springforward[year], pvvm.toolbox.dst_fallback[year]]:
for j in range(24):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_springforward[year]:
for j in range(23):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_springforward,
dtype={1: 'category'}, names=colnames)
elif dates[i] == pvvm.toolbox.dst_fallback[year]:
for j in range(25):
colnames.append(dates[i] + 'H{:02d}'.format(j))
dfin = pd.read_csv(
files[i], skiprows=skiprows[market], header=None,
usecols=usecols_dst_fallback,
dtype={1: 'category'}, names=colnames)
dfday = dfin.T.copy()
dfday.columns = dfday.iloc[0,:]
dfday = dfday.drop(dfday.index[[0]])
del dfday[np.nan]
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict)
dfall.index = dfall.index.droplevel(0)
dfall.index = pd.date_range(dates[0], periods=hours, freq='H')
dfall.index = dfall.index.tz_localize(timezone)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
for j in trange(len(nodesin)):
try:
df = pd.DataFrame(dfall.loc[:,nodesin[j].astype(str)])
numhours = hours - len(df[df[nodesin[j].astype(str)].isnull()])
datalength.append([nodesin[j], numhours])
df.to_csv(filepathout + '{}-{}.gz'.format(nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif iso == 'NYISO':
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True, names=['node'], skiprows=1)
if market == 'da':
dates = pvvm.toolbox.makedays(year)
if len(files) != len(dates):
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
raise Exception("files and dates don't match")
### Make daylight savings mangler
def dstfallback(dataframe):
fallback = pvvm.toolbox.dst_fallback[year]
backfall = '{}/{}/{}'.format(fallback[4:6], fallback[6:], fallback[:4])
fallbackhalf = int(len(dataframe[dataframe['intime'] == backfall + ' 01:00'])/2)
if str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[0,1]) != \
str(dataframe[dataframe['intime'] == backfall + ' 01:00'].iloc[fallbackhalf,1]):
raise Exception("DST fallback ptid's don't match.")
mask = dataframe['intime'] == backfall + ' 01:00'
mask.iloc[fallbackhalf:2*fallbackhalf] = False
dataframe.loc[mask, 'intime'] = backfall + ' 01:00 DST'
print("DST fallback conversion worked!")
return dataframe
### Make datetime converter
def makeindexconvert(files, dates):
"""
"""
dicttimes = {}
for i in trange(len(files)):
df = pd.read_csv(files[i],
usecols = [0,2,3], skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'ptid': 'category', 'lmp': float})
if dates[i] == pvvm.toolbox.dst_fallback[year]:
# print(df.head())
df = dstfallback(df)
dicttimes[dates[i]] = df
dftimes = pd.concat(dicttimes, copy=False)
oldtime = list(dftimes.intime.unique())
print('len(oldtime) = {}'.format(len(oldtime)))
newtime = list(pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year)))
print('len(newtime) = {}'.format(len(newtime)))
for i in range(len(newtime)):
newtime[i] = str(newtime[i])
indexconvert = dict(zip(oldtime, newtime))
return indexconvert
indexconvert = makeindexconvert(files, dates)
### Load daily files
dfdict = {}
for i in trange(len(files)):
dfday = pd.read_csv(files[i],
usecols = [0,2,3], skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'ptid': 'category', 'lmp': float})
if dates[i] == pvvm.toolbox.dst_fallback[year]:
dfday = dstfallback(dfday)
dfday.loc[:,'intime'] = dfday.loc[:,'intime'].apply(lambda x: indexconvert[x])
dfday.loc[:,'intime'] = pd.to_datetime(dfday['intime'])
dfdict[dates[i]] = dfday
### Concat into one dataframe with localized datetime index
### copy=False is experimental
dfall = pd.concat(dfdict, copy=False)
### Change node type to 'category'. SUPER important. >10x speedup.
dfall['node'] = dfall['node'].astype('category')
### Make new index
fullindex = pd.date_range(dates[0], freq='H', periods=pvvm.toolbox.yearhours(year))
fullindex = fullindex.tz_localize(timezone)
fullindex = pd.DataFrame(index=fullindex)
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
# for j in trange(20):
node = str(nodesin[j])
try:
df = dfall[dfall['node'] == nodesin[j]][['intime','lmp']].copy()
df.index = df['intime'].values
del df['intime']
df.index = df.index.tz_localize(timezone)
df = df.merge(fullindex, how='right', left_index=True, right_index=True)
## Record datapoints
numhours = hours - len(df[df['lmp'].isnull()])
datalength.append([nodesin[j], numhours])
## Determine full-data days
dfcount = df.groupby([df.index.month, df.index.day]).count()
for date in dates:
month = int(date[4:6])
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 24:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
df.to_csv('{}{}-{}.gz'.format(filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(nodesin[j])
continue
elif market == 'rt':
datesprev = pvvm.toolbox.makedays(year - 1)
datesthis = pvvm.toolbox.makedays(year)
dates = [datesprev[-1]] + datesthis
filesprev = sorted(glob('{}{}*'.format(filepathin, (year - 1))))
filesthis = sorted(glob('{}{}*'.format(filepathin, year)))
files = [filesprev[-1]] + filesthis
if len(files) != len(dates):
print('len(files) = {}'.format(len(files)))
print('len(dates) = {}'.format(len(dates)))
for date in dates:
if date not in [file[88:96] for file in files]:
print(date)
raise Exception("files and dates don't match")
### Make nice index
niceindex_hourstart = pd.date_range(
start='{}-01-01 00:00'.format(year),
periods = hours * 12,
freq = '5T',
tz=pvvm.toolbox.tz_iso[iso])
niceindex = pd.DataFrame(index=niceindex_hourstart)
### Load daily files
dfdict = {}
for i in trange(len(files)):
df = pd.read_csv(
files[i],
usecols=[0,2,3],
skiprows=1,
names=['intime', 'node', 'lmp'],
dtype={'intime': 'category',
'node': 'category',
'lmp': float},
parse_dates=['intime'],
infer_datetime_format=True)
dfdict[dates[i]] = df
### Concat into one dataframe with localized datetime index
dfall = pd.concat(dfdict, copy=False)
### Change node type to 'category'. SUPER important. >10x speedup.
dfall['node'] = dfall['node'].astype('category')
### Check number of nodes. Good for error checking.
numnodes = len(dfall['node'].unique())
print("len(dfall['node']): {}".format(numnodes))
### Reset index
dfall.index = dfall['intime'].values
dfall.index = dfall.index.tz_localize(pvvm.toolbox.tz_iso[iso])
### Fix DST
dststart = dfall.index.get_loc(pvvm.toolbox.dst_springforward[year] + ' 01:55')
print('len(dststart) = {}'.format(len(dststart)))
print('num nodes = {}'.format(numnodes))
if len(dststart) > numnodes:
raise Exception('len(dststart) > numnodes')
dststart = dststart[-1] + 1
if year == 2012:
dstend = dfall.index.get_loc(pvvm.toolbox.dst_fallback[year] + ' 01:59:34')
else:
dstend = dfall.index.get_loc(pvvm.toolbox.dst_fallback[year] + ' 01:55')
print('len(dstend) = {}'.format(len(dstend)))
if year == 2012:
if len(dstend) > numnodes:
raise Exception('len(dststart) > numnodes')
dstend = dstend[-1]
else:
if len(dstend) % 2 != 0:
raise Exception('len(dstend) % 2 != 0')
if len(dstend) / 2 > numnodes:
raise Exception('len(dstend) / 2 > numnodes')
if ((dstend[int(len(dstend)/2) + 0] - dstend[int(len(dstend)/2) - 1] - 1) / 11
!= (len(dstend) / 2)):
print((dstend[int(len(dstend)/2) + 0] - dstend[int(len(dstend)/2) - 1] - 1) / 11)
print(len(dstend) / 2)
raise Exception('node added or lost during DST fallback')
dstend = dstend[int(len(dstend)/2) - 1]
dfall.iloc[dststart:(dstend + 1),0] = (
dfall.iloc[dststart:(dstend + 1),0]
+ pd.Timedelta(-1, unit='h'))
### Reset index
dfall.index = dfall['intime'].values
dfall.index = dfall.index.tz_localize(pvvm.toolbox.tz_iso[iso])
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
niceindex,
how='outer',
left_index=True, right_index=True)
## Fill gaps, using off-5T values
df = df['lmp'].interpolate(method='time', limit=11)
## Remove off-5T values
dfout = pd.DataFrame(df).merge(
niceindex,
how='right',
left_index=True, right_index=True)
## Fill missing hour if desired
if fillmissinghour:
dfout = dfout.interpolate('linear', limit=12)
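## Note: with 5-minute data, limit=11 above bridges gaps of just under an hour,
## while limit=12 here appears intended to span exactly one missing hour when
## fillmissinghour is set.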
## Record datapoints
numpoints = dfout.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = dfout.groupby([dfout.index.month, dfout.index.day]).count()
for date in dates[1:]:
month = int(date[4:6])
day = int(date[6:])
count = dfcount.loc[month].loc[day][0]
if count == 288:
nodes = fulldaynodes.get(date, [])
nodes.append(node)
fulldaynodes[date] = nodes
## Write nodalized file
dfout.to_csv(
'{}{}-{}.gz'.format(
filepathout, nodesin[j], year),
compression='gzip', header=False)
except KeyError:
missingnodes.append(node)
continue
elif iso == 'CAISO':
if market == 'da':
### Input housekeeping
filesin = sorted(glob('{}{}*'.format(filepathin, year)))
datesin = pvvm.toolbox.makedays(year)
if len(filesin) != len(datesin):
print('filepathin = {}'.format(filepathin))
print('len(filesin) = {}'.format(len(filesin)))
print('len(datesin) = {}'.format(len(datesin)))
raise Exception("files and dates don't match")
### Load file containing nodes with geographic information
nodesin = pd.read_csv(nodesfile, usecols=[0], squeeze=True,
names=['Node'], skiprows=1)
### Make nice hourly index
hourlyindex = pd.date_range(
start='{}-01-01 00:00'.format(year),
end='{}-12-31 23:00'.format(year),
freq = '1H',
tz=pvvm.toolbox.tz_iso[iso])
hourlyindex = pd.DataFrame(index=hourlyindex)
### Make nice daily index
dailyindex = pd.date_range(
start='{}-01-01'.format(year),
end='{}-12-31'.format(year),
freq='1D')
### Load daily files
dfdict = {}
for i in trange(len(filesin)):
if ((product == 'lmp') and (market == 'da')):
df = pd.read_csv(
filesin[i],
usecols=[1,2,3],
skiprows=1,
names=['node', 'intime', product],
dtype={'intime':'category', 'node':'category', product:float},
# index_col='intime',
parse_dates=['intime'],
infer_datetime_format=True
)
# df.intime = df.intime.map(
# lambda x: pd.to_datetime('{}{}{} {}:00'.format(x[:4], x[5:7], x[9:11], x[12:14])))
else:
df = pd.read_csv(
filesin[i],
usecols=[0,1,2],
skiprows=1,
names=['intime', 'node', product],
dtype={'intime':'category', 'node':'category', product:float},
parse_dates=['intime'],
infer_datetime_format=True
)
dfdict[datesin[i]] = df
### Concat into one dataframe
dfall = pd.concat(dfdict, copy=False)
# dfall.reset_index(level=0, drop=True, inplace=True)
### Categorize nodes (accelerates lookup)
dfall['node'] = dfall['node'].astype('category')
### Check number of nodes. Good for error checking.
numnodes = len(dfall['node'].unique())
print("numnodes = {}".format(numnodes))
### Reset index and set to local timezone
dfall.index = dfall['intime'].values
dfall.index = (
dfall.index
.tz_localize('UTC')
.tz_convert(pvvm.toolbox.tz_iso[iso]))
### Determine missing nodes and data coverage, and save as one-node files
missingnodes = []
datalength = []
fulldaynodes = {}
for j in trange(len(nodesin)):
node = str(nodesin[j])
try:
dfin = dfall[dfall['node'] == node].copy()
## Add missing timestamps
df = dfin.merge(
hourlyindex,
how='right',
left_index=True, right_index=True)
df = pd.DataFrame(df[product])
## Record datapoints
numpoints = df.notnull().sum().values[0]
datalength.append([nodesin[j], numpoints])
## Determine full-data days
dfcount = df.groupby([df.index.month, df.index.day]).count()
for date in dailyindex:
month = date.month
day = date.day
count = dfcount.loc[month].loc[day][0]
if count == 24:
nodes = fulldaynodes.get(date.strftime('%Y%m%d'), [])
nodes.append(node)
fulldaynodes[date.strftime('%Y%m%d')] = nodes
## Write nodalized file
## ONLY if it contains data
if df.notnull().sum()[0] > 0:
df.to_csv(
'{}{}-{}.gz'.format(
filepathout, node, year),
compression='gzip', header=False)
else:
missingnodes.append(node)
except KeyError:
missingnodes.append(node)
elif market == 'rt':
### Make convenience variables
months = list(range(1,13))
### Load file containing nodes with geographic information
nodesin = list(pd.read_csv(
nodesfile,
usecols=[0],
squeeze=True
))
### Loop over months
for month in months:
datetimesin = pd.date_range(
start='{}{:02}01T{:02}:00'.format(year, month, abs(pvvm.toolbox.timezone_iso[iso])),
periods = pvvm.toolbox.monthhours(year, month),
freq = 'H')
files = ['{}{}.gz'.format(filepathin, d.strftime('%Y%m%dT%H')) for d in datetimesin]
### Make nice MONTHLY index
niceindex = pd.date_range(
start='{}-{:02}-01 00:00'.format(year, month),
periods = (pvvm.toolbox.monthhours(year, month) * 60 / res),
freq = '5T',
tz=pvvm.toolbox.tz_iso[iso])
niceindex = pd.DataFrame(index=niceindex)  # api: pandas.DataFrame
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 20:44:07 2021
@author: romainloirs
@content:
"""
def get_first_zi_to_visit(pt_depart,cluster_depart,matrice_adj,list_zone_id,road_id,route_data,package_data,stop_data):
from ML_first_stop import get_ML_dataset_prediction
import pickle
import pandas as pd
from sklearn.preprocessing import StandardScaler
# build the dataset
dataset, dic_zi= get_ML_dataset_prediction(pt_depart,road_id,route_data,package_data,stop_data)
list_zi=list(dataset["zi_name"])
to_del = ['Road_id', 'zi_name',"distane_barycentre","distane_barycentre_ind","Km_depot_to_first_zi_ind","Km_depot_to_first_zi"]
dataset = dataset.drop(to_del, axis = 1)
# apply the ML model
model_path = "data/model_build_outputs/"
scaler = pickle.load(open(model_path + "scaler_first_stop.sav", 'rb'))
model = pickle.load(open(model_path + "model_first_stop.sav", 'rb'))
dataset = scaler.transform(dataset)
prediction = model.predict_proba(dataset)
# Find the first zone ID (zi) using the predictions.
prediction_1 = list(prediction[:,1])
max_value = max(prediction_1)
index_max = prediction_1.index(max_value)
first_zi= dic_zi[list_zi[index_max]]
if first_zi in list_zone_id:
return first_zi
else:
print("la zi n'est pas dans la liste")
return first_zi
# Build a sub-problem compatible with the OR (operations research) program, in
# which the clusters are treated as stops.
# Parameters: the stop adjacency matrix, the list of clusters, the departure
# cluster, and all the time windows associated with the stops.
# Returns: the cluster adjacency matrix, the index of the departure cluster,
# and the time windows of the new problem.
def get_matrice_cluster_as_sommet(matrice_adj,list_cluster,cluster_depart,all_time_windows):
import numpy as np
from time_windows import get_time_windows_stop
from time_windows import max_list_time_windows
from time_windows import min_list_time_windows
n_cluster=len(list_cluster)
matrice_cluster=np.zeros((n_cluster,n_cluster))
n_cluster_depart=0
time_windows=[]
# for each cluster
for i in range(n_cluster):
# find the index of the departure cluster
if list_cluster[i]==cluster_depart:
n_cluster_depart=i
# determine the new time window associated with the cluster
time_windows_stop_cluster=get_time_windows_stop(list_cluster[i],all_time_windows)
min_time_windows=min_list_time_windows(time_windows_stop_cluster)
max_time_windows=max_list_time_windows(time_windows_stop_cluster)
if min_time_windows==max_time_windows:
time_windows.append(None)
else:
time_windows.append((min_time_windows,max_time_windows))
# fill in this row of the adjacency matrix
for j in range(n_cluster):
if i!=j:
matrice_extraite=matrice_adj.loc[list_cluster[i],list_cluster[j]].to_numpy()
distance=matrice_extraite.mean()
matrice_cluster[i][j]= distance
return matrice_cluster,n_cluster_depart,time_windows
# If one point is much farther away than the others, pull it closer while
# preserving the relative proportions for the OR problem.
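# Illustrative example (hypothetical numbers): if the most distant cluster's
# nearest neighbour sits 40 units away while every other pair is within 10,
# subtracting that 40 from the outlier's row and column pulls it into range
# without reordering its distances to the other clusters.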
def modif_matrice_cluster(matrice_cluster):
import numpy as np
vmax=np.nanmax(matrice_cluster)
ligne_max1=np.where(matrice_cluster == vmax)[0][0]
ligne_max2=np.where(matrice_cluster == vmax)[1][0]
vmin_1_1=sorted(matrice_cluster[ligne_max1,:].tolist())[1]
vmin_1_2=sorted(matrice_cluster[:,ligne_max1].tolist())[1]
vmin_2_1=sorted(matrice_cluster[ligne_max2,:].tolist())[1]
vmin_2_2=sorted(matrice_cluster[:,ligne_max2].tolist())[1]
ligne_max=0
my_min=0
if vmin_1_1>vmin_2_1:
ligne_max=ligne_max1
if vmin_1_1>vmin_1_2:
my_min=vmin_1_2
else:
my_min=vmin_1_1
else:
ligne_max=ligne_max2
if vmin_2_1>vmin_2_2:
my_min=vmin_2_2
else:
my_min=vmin_2_1
matrice_cluster[ligne_max,:]=matrice_cluster[ligne_max,:]-my_min
matrice_cluster[:,ligne_max]=matrice_cluster[:,ligne_max]-my_min
matrice_cluster[ligne_max,ligne_max]=0
# find a route serving all the clusters
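# The strategy is hierarchical: the cluster-level problem is first turned into a
# stop-level OR instance (clusters treated as stops). If more than `seuil`
# clusters remain, the zone_ids are grouped again, a coarse tour over the groups
# is solved, and a sub-tour is solved inside each group between fixed entry and
# exit points; otherwise the instance is handed directly to RO_main_cycle.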
def ordonne_cluster(matrice_adj,list_cluster, cluster_depart,all_time_windows,seuil=15):
import pandas as pd
import numpy as np
from math import ceil
from clustering import same_size_clusters
from RO_main import RO_main_cycle
from RO_main import sequence_zone_id
from RO_main import sequence_zone_id_2
from RO_main import sequence_zone_id_3
from RO_main import sequence_zone_id_cycle
from clustering import find_pt_arrive_depart
from clustering import cluster_bis
# transform the problem into one that the OR solver can handle
matrice_zone_id,n_cluster_depart,time_windows= get_matrice_cluster_as_sommet(matrice_adj,list_cluster,cluster_depart,all_time_windows)
modif_matrice_cluster(matrice_zone_id)
n_cluster=matrice_zone_id.shape[0]
# if there are too many clusters for the OR problem
if n_cluster>seuil:
# create clusters of zone_ids
pd_zone_id=pd.DataFrame(matrice_zone_id)
cluster_zone_id=cluster_bis(pd_zone_id,seuil)
# find a route linking the zone_id clusters
matrice_cluster,n_cluster_depart_2,time_windows_2 =get_matrice_cluster_as_sommet(pd_zone_id,cluster_zone_id,n_cluster_depart,time_windows)
seq_cluster=sequence_zone_id_cycle(cluster_zone_id,matrice_cluster,n_cluster_depart,time_windows_2)
# inside each zone_id cluster, find a route linking its zone_ids
seq_zone_id=[]
pt_depart_current=n_cluster_depart
for zi in seq_cluster:
pt_arrive_current,pt_depart_next= find_pt_arrive_depart(pt_depart_current,zi,seq_cluster, pd.DataFrame(matrice_zone_id))
if len(zi)==1:
seq_zone_id+=zi
else:
list_extraction=np.array([ True if i in zi else False for i in range(n_cluster)],dtype=bool)
aux=matrice_zone_id[list_extraction,:]
matrice_zi=aux[:,list_extraction]
time_windows_zi=[None]*len(zi)
seq,opt,time= sequence_zone_id_3(zi,pd.DataFrame(matrice_zi),pt_depart_current,pt_arrive_current,time_windows_zi)
seq_zone_id+= seq
pt_depart_current=pt_depart_next
return [list_cluster[i] for i in seq_zone_id]
# if the number of clusters is reasonable
else:
res=RO_main_cycle(matrice_zone_id,n_cluster_depart,time_windows)
res2=res[0]
return [list_cluster[i] for i in res2 ]
# Build a sub-problem compatible with the OR (operations research) program, in
# which the clusters are treated as stops.
# Parameters: the stop adjacency matrix, the list of clusters, the departure
# cluster, and all the time windows associated with the stops.
# Returns: the cluster adjacency matrix, the index of the departure cluster,
# and the time windows of the new problem.
def get_matrice_cluster_as_sommet_2(matrice_adj,list_cluster,cluster_depart,cluster_arrive,all_time_windows):
import numpy as np
from time_windows import get_time_windows_stop
from time_windows import max_list_time_windows
from time_windows import min_list_time_windows
n_cluster=len(list_cluster)
matrice_cluster=np.zeros((n_cluster,n_cluster))
n_cluster_depart=0
n_cluster_arrive=0
time_windows=[]
# for each cluster
for i in range(n_cluster):
# find the index of the departure cluster
if list_cluster[i]==cluster_depart:
n_cluster_depart=i
if list_cluster[i]==cluster_arrive:
n_cluster_arrive=i
# determine the new time window associated with the cluster
time_windows_stop_cluster=get_time_windows_stop(list_cluster[i],all_time_windows)
min_time_windows=min_list_time_windows(time_windows_stop_cluster)
max_time_windows=max_list_time_windows(time_windows_stop_cluster)
if min_time_windows==max_time_windows:
time_windows.append(None)
else:
time_windows.append((min_time_windows,max_time_windows))
# fill in this row of the adjacency matrix
for j in range(n_cluster):
if i!=j:
matrice_extraite=matrice_adj.loc[list_cluster[i],list_cluster[j]].to_numpy()
distance=matrice_extraite.mean()
matrice_cluster[i][j]= distance
return matrice_cluster,n_cluster_depart,n_cluster_arrive,time_windows
# find a route serving all the clusters
def ordonne_cluster_2(matrice_adj,list_cluster, cluster_depart,first_zi_to_visit,all_time_windows,seuil=15):
import pandas as pd
import numpy as np
from math import ceil
from clustering import same_size_clusters
from RO_main import RO_main_cycle
from RO_main import sequence_zone_id
from RO_main import sequence_zone_id_2
from RO_main import sequence_zone_id_3
from RO_main import sequence_zone_id_cycle
from clustering import find_pt_arrive_depart
from clustering import cluster_bis
from clustering import cluster_ter
# transform the problem into one that the OR solver can handle
matrice_zone_id,n_cluster_depart,n_cluster_arrive,time_windows= get_matrice_cluster_as_sommet_2(matrice_adj,list_cluster,first_zi_to_visit,cluster_depart,all_time_windows)
modif_matrice_cluster(matrice_zone_id)
n_cluster=matrice_zone_id.shape[0]
# if there are too many clusters for the OR problem
if n_cluster>seuil:
# create clusters of zone_ids
pd_zone_id=pd.DataFrame(matrice_zone_id)
cluster_zone_id=cluster_ter(pd_zone_id,n_cluster_depart,n_cluster_arrive,seuil=15)
#cluster_zone_id=cluster_bis(pd_zone_id,seuil)
# find a route linking the zone_id clusters
#matrice_cluster,n_cluster_depart_2,time_windows_2 =get_matrice_cluster_as_sommet(pd_zone_id,cluster_zone_id,n_cluster_depart,time_windows)
matrice_zone_id_2,n_cluster_depart_2,n_cluster_arrive_2,time_windows_2= get_matrice_cluster_as_sommet_2(pd_zone_id,cluster_zone_id,n_cluster_depart,n_cluster_arrive,time_windows)
n_cluster_depart_2=0
n_cluster_arrive_2=0
for i in range(len(cluster_zone_id)):
if n_cluster_depart in cluster_zone_id[i]:
n_cluster_depart_2=i
if n_cluster_arrive in cluster_zone_id[i]:
n_cluster_arrive_2=i
seq_cluster=sequence_zone_id_3(cluster_zone_id,pd.DataFrame(matrice_zone_id_2),cluster_zone_id[n_cluster_depart_2],cluster_zone_id[n_cluster_arrive_2],time_windows_2)[0]
#print(seq_cluster)
# inside each zone_id cluster, find a route linking its zone_ids
seq_zone_id=[]
pt_depart_current=cluster_zone_id[n_cluster_depart_2]
for zi in seq_cluster:
pt_arrive_current,pt_depart_next= find_pt_arrive_depart(pt_depart_current,zi,seq_cluster, pd.DataFrame(matrice_zone_id))
if len(zi)==1:
seq_zone_id+=zi
else:
list_extraction=np.array([ True if i in zi else False for i in range(n_cluster)],dtype=bool)
aux=matrice_zone_id[list_extraction,:]
matrice_zi=aux[:,list_extraction]
time_windows_zi=[None]*len(zi)
seq,opt,time= sequence_zone_id_3(zi,pd.DataFrame(matrice_zi),pt_depart_current,pt_arrive_current,time_windows_zi)
seq_zone_id+= seq
pt_depart_current=pt_depart_next
cluster_depart = seq_zone_id.pop()
seq_zone_id.insert(0,cluster_depart)
return [list_cluster[i] for i in seq_zone_id]
# if the number of clusters is reasonable
else:
time_windows_zi=[None]*10000000
res=sequence_zone_id_3(list(range(n_cluster)),
pd.DataFrame(matrice_zone_id)  # api: pandas.DataFrame
"""
Conducts cross-validated NMF to determine the number of factors and the
regularization constant (alpha) to use, following the bi-cross-validation
procedure outlined in Owen and Perry.
This particular version considers an input list of seeds rather than generating
them.
"""
import argparse
import feather
import itertools
import joblib
import logging
import numpy as np
import pandas as pd
import tqdm
from collections import namedtuple
from sklearn.decomposition import NMF
from sklearn.model_selection import KFold
def get_arguments():
"""Obtains command-line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
'--input',
type=argparse.FileType('rU'),
required=True,
metavar='INPUT',
help='read input data from CSV file %(metavar)s')
parser.add_argument(
'--seedlist',
type=argparse.FileType('rU'),
required=True,
metavar='INPUT',
help='read input seeds from text file %(metavar)s')
parser.add_argument(
'--output',
required=True,
metavar='OUTPUT',
help='write Q2 values to Feather file %(metavar)s')
parser.add_argument(
'--init',
choices=('random', 'nndsvd', 'nndsvda', 'nndsvdar'),
default='nndsvd',
metavar='INIT',
help='use method %(metavar)s to initialize values (default: '
'%(default)s)')
parser.add_argument(
'--l1-ratio',
type=float,
default=0.,
metavar='L1-RATIO',
help='use %(metavar)s as the regularization mixing parameter (use 0 '
'to specify an L2 penalty, 1 to specify an L1 penalty, or a value in '
'(0, 1) to specify a combination; default: %(default)s)')
parser.add_argument(
'--k',
type=int,
nargs='+',
metavar='K',
help='calculate Q2 for given ranks %(metavar)ss')
parser.add_argument(
'--alpha',
type=float,
nargs='+',
metavar='ALPHA',
help='calculate Q2 for given regularization constants %(metavar)s')
parser.add_argument(
'--alpha-base', type=float, metavar='ALPHA-BASE', default=2.)
parser.add_argument(
'--alpha-exp-start', type=int, metavar='ALPHA-EXP-START', default=-10)
parser.add_argument(
'--alpha-exp-end', type=int, metavar='ALPHA-EXP-END', default=10)
parser.add_argument(
'--folds',
type=int,
default=3,
metavar='FOLDS',
help='run bi-cross-validation with %(metavar)s folds (default: '
'%(default)s)')
parser.add_argument(
'--cores',
type=int,
metavar='CORES',
default=-1,
help='use %(metavar)s cores for the analysis')
parser.add_argument(
'--log',
metavar='LOG',
help='write logging information to %(metavar)s')
return parser.parse_args()
def configure_logging(log=None):
"""
Configures logging.
:param str log
"""
if log:
logging.basicConfig(
level=logging.DEBUG,
filename=log,
filemode='w',
format='%(asctime)s %(levelname)-8s %(message)s')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
console.setFormatter(logging.Formatter('%(message)s'))
logging.getLogger().addHandler(console)
else:
logging.basicConfig(level=logging.INFO, format='%(message)s')
def load_data(handle):
"""
Loads data from the given handle.
:param io.file handle
:rtype: pd.DataFrame
"""
logging.info('Loading data')
result = pd.read_csv(handle, index_col=0)
logging.info('Loaded a table with shape {}'.format(result.shape))
return result
def load_seedlist(handle):
"""
Loads seeds from the given handle.
:param io.file handle
:rtype: List[int]
"""
logging.info('Loading seeds')
stripped_data = (l.strip() for l in handle)
filtered_data = (l for l in stripped_data if l)
seeds = [int(x) for x in filtered_data]
logging.info('Loaded {} seeds'.format(len(seeds)))
return seeds
def get_k(data, k, folds):
"""
Determines the values of k to use.
:param pd.DataFrame data
:param List[int] k
:param int folds
:rtype: List[int]
"""
result = k if k is not None else np.arange(
np.floor(min(data.shape) * (1 - 1 / folds)) - 1, dtype=int) + 1
logging.info('Setting k = {}'.format(', '.join(str(i) for i in result)))
return list(result)
def get_alpha_range(base, start, end):
"""
Obtains a range of regularization constants for the given base and start
and end exponents inclusive.
:param float base
:param int start
:param int end
:rtype: List[float]
"""
result = list(base**np.arange(start, end + 1))
logging.info('Setting alpha values to test to {}'.format(result))
return result
class InvalidFoldError(Exception):
"""
An exception that is raised when the number of folds is not supported by
the data.
"""
def __init__(self):
super().__init__('number of folds is not supported by the data')
def validate_k(data, k):
"""
Validates that the number of folds is valid.
:param pd.DataFrame data
:param int k
:raises InvalidFoldError: if the number of folds is not supported by the
data
"""
if k > min(data.shape):
raise InvalidFoldError()
Parameters = namedtuple('Parameters', [
'seed', 'k', 'alpha', 'fold', 'observation_train_ix',
'observation_test_ix', 'measurement_train_ix', 'measurement_test_ix'
])
def _get_parameter_tuples(seed, k, alpha, folds, df):
"""
Obtains a parameter tuple for the given seed, number of factors,
regularization constant, and number of folds.
In order, the parameters yielded are as follows as a named tuple:
- Seed (i.e., iteration)
- Number of factors
- Alpha
- Fold number
- Observation training set indices
- Observation test set indices
- Measurement training set indices
- Measurement test set indices
:param int seed: the seed to use
:param int k: the number of factors to test
:param float alpha: the value of alpha to test
:param int folds: the number of folds to split the data into
:param pd.DataFrame df: the data to split
:rtype: generator
"""
np.random.seed(seed)
fold_generator = KFold(n_splits=folds, shuffle=True)
sample_folds = fold_generator.split(df)
feature_folds = fold_generator.split(df.T)
for f, ix in enumerate(zip(sample_folds, feature_folds)):
observation_ix, measurement_ix = ix
yield Parameters(seed, k, alpha, f + 1, observation_ix[0],
observation_ix[1], measurement_ix[0],
measurement_ix[1])
def _get_parameter_generator(seeds, k, alpha, folds, df):
"""
Produces a generator that yields parameters for a single iteration.
In order, the parameters yielded are as follows as a named tuple:
- Seed (i.e., iteration)
- Number of factors
- Alpha
- Fold number
- Observation training set indices
- Observation test set indices
- Measurement training set indices
- Measurement test set indices
:param List[int] seeds
:param List[int] k: the numbers of factors to test
:param List[float] alpha: the values of alpha to test
:param int folds: the number of folds to split the data into
:param pd.DataFrame df: the data to split
:rtype: generator
"""
return itertools.chain(*(_get_parameter_tuples(s, k_, a, folds, df)
for s in seeds for k_ in k for a in alpha))
def _make_q2_result(seed, k, alpha, fold, q2):
"""
Produces a result with the given values.
:param int seed
:param int k
:param float alpha
:param int fold
:param float q2
"""
return pd.DataFrame({
'seed': [seed],
'k': [k],
'alpha': [alpha],
'fold': [fold],
'q2': [q2]
})
def _cross_validate(df, init, l1_ratio, parameters):
"""
Conducts cross-validation for the given data and parameters, returning a
result containing a value of Q2.
:param pd.DataFrame df
:param str init
:param float l1_ratio
:param Parameters parameters
:rtype: pd.DataFrame
"""
# Split the data.
test_data = df.iloc[parameters.observation_test_ix,
parameters.measurement_test_ix]
# If the test data is all zeros, this will fail.
denominator = (test_data**2).sum().sum()
if denominator == 0.:
return _make_q2_result(parameters.seed, parameters.k, parameters.alpha,
parameters.fold, np.nan)
train_data = df.iloc[parameters.observation_train_ix,
parameters.measurement_train_ix]
bottomleft_data = df.iloc[parameters.observation_test_ix,
parameters.measurement_train_ix]
topright_data = df.iloc[parameters.observation_train_ix,
parameters.measurement_test_ix]
# Set the seed.
np.random.seed(parameters.seed)
# Run NMF.
nmf = NMF(n_components=parameters.k,
alpha=parameters.alpha,
tol=1e-6,
max_iter=200,
init=init,
l1_ratio=l1_ratio)
coefficients = nmf.fit_transform(train_data)
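# Here `coefficients` is the W factor fitted on the training block and
# nmf.components_ is H; the next lines estimate the held-out block from the
# off-diagonal blocks through the pseudo-inverses of H and W, following the
# bi-cross-validation scheme described in the module docstring.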
test_reconstructions = bottomleft_data.values.dot(
np.linalg.pinv(nmf.components_)).dot(np.linalg.pinv(coefficients)).dot(
topright_data.values)
q2 = 1 - ((test_data - test_reconstructions)**2).sum().sum() / (
test_data**2).sum().sum()
return _make_q2_result(parameters.seed, parameters.k, parameters.alpha,
parameters.fold, q2)
def cross_validate(data, folds, k, alpha, init, l1_ratio, cores, seeds):
"""
Conducts cross-validation.
:param pd.DataFrame data
:param int folds
:param List[int] k
:param List[float] alpha
:param str init
:param float l1_ratio
:param int cores
:param List[int] seeds
:rtype: pd.DataFrame
"""
logging.info('Conducting cross-validation')
# Obtain an iterator to better distribute jobs across all cores and better
# measure progress.
parameter_generator = _get_parameter_generator(seeds, k, alpha, folds,
data)
# Calculate the number of jobs to perform.
n_jobs = len(seeds) * len(k) * len(alpha) * folds
progress = tqdm.tqdm(parameter_generator, total=n_jobs, mininterval=1.)
multiprocess = cores != 1 or (cores == -1 and joblib.cpu_count() == 1)
logging.info('Multiprocessing: {}'.format(multiprocess))
result = joblib.Parallel(n_jobs=cores)(
joblib.delayed(_cross_validate)(data, init, l1_ratio, p)
for p in progress) if multiprocess else (_cross_validate(data, init,
l1_ratio, p)
for p in progress)
logging.info('Concatenating results')
return pd.concat(result)  # api: pandas.concat
import shapely.geometry as sg
import numpy as np
import pandas as pd
from geopandas.array import from_shapely
from hypothesis import given, example, strategies as st
from spatialpandas import GeoSeries
from spatialpandas.geometry import PointArray, Point, MultiPoint, Line, MultiLine, \
Polygon, MultiPolygon
from tests.geometry.strategies import st_point_array, st_multipoint_array, hyp_settings, \
st_line_array, st_polygon_array, st_multipolygon_array
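# The tests below all follow the same pattern: hypothesis generates geopandas
# geometry arrays, the expected mask comes from geopandas/shapely `intersects`,
# and the spatialpandas results are checked against it through the scalar,
# array, array-with-inds and GeoSeries code paths.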
@given(st_point_array(), st_point_array(min_size=1, max_size=1))
@hyp_settings
def test_points_intersects_point(gp_points, gp_point):
# Get scalar Point
sg_point = gp_point[0]
if len(gp_points) > 0:
# Add gp_point to gp_points so we know something will intersect
gp_points = pd.concat([pd.Series(gp_points), pd.Series(gp_point)]).array
# Compute expected intersection
expected = gp_points.intersects(sg_point)
# Create spatialpandas PointArray
point = Point.from_shapely(sg_point)
points = PointArray.from_geopandas(gp_points)
points_series = GeoSeries(points, index=np.arange(10, 10 + len(points)))
# Test Point.intersects
result = np.array([
point_el.intersects(point) for point_el in points
])
np.testing.assert_equal(result, expected)
# Test PointArray.intersect
result = points.intersects(point)
np.testing.assert_equal(result, expected)
# Test PointArray.intersects with inds
inds = np.flipud(np.arange(0, len(points)))
result = points.intersects(point, inds)
np.testing.assert_equal(result, np.flipud(expected))
# Test GeoSeries.intersects
pd.testing.assert_series_equal(
points_series.intersects(point),
pd.Series(expected, index=points_series.index)
)
@given(
st_point_array(min_size=6),
st.integers(min_value=0, max_value=5),
st_point_array(min_size=1, max_size=1)
)
@hyp_settings
def test_points_intersects_point_offset(gp_points, offset, gp_point):
# Get scalar Point
sg_point = gp_point[0]
if len(gp_points) > 0:
# Add gp_point to gp_points so we know something will intersect
gp_points = pd.concat([pd.Series(gp_points), pd.Series(gp_point)]).array
# Compute expected intersection
expected = gp_points.intersects(sg_point)[offset:]
# Create spatialpandas PointArray
point = Point.from_shapely(sg_point)
points = PointArray.from_geopandas(gp_points)[offset:]
# Test PointArray.intersect
result = points.intersects(point)
np.testing.assert_equal(result, expected)
# Test PointArray.intersects with inds
inds = np.flipud(np.arange(0, len(points)))
result = points.intersects(point, inds)
np.testing.assert_equal(result, np.flipud(expected))
@given(st_point_array(), st_multipoint_array(min_size=1, max_size=1))
@hyp_settings
def test_points_intersects_multipoint(gp_points, gp_multipoint):
# Get scalar Point
sg_multipoint = gp_multipoint[0]
if len(gp_points) > 0:
# Add gp_point to gp_multipoints so we know something will intersect
gp_points = from_shapely(list(gp_points) + [gp_multipoint[0][-1]])
# Compute expected intersection
expected = gp_points.intersects(sg_multipoint)
# Create spatialpandas PointArray
multipoint = MultiPoint.from_shapely(sg_multipoint)
points = PointArray.from_geopandas(gp_points)
points_series = GeoSeries(points, index=np.arange(10, 10 + len(points)))
# Test Point.intersects
result = np.array([
point_el.intersects(multipoint) for point_el in points
])
np.testing.assert_equal(result, expected)
# Test PointArray.intersect
result = points.intersects(multipoint)
np.testing.assert_equal(result, expected)
# Test PointArray.intersects with inds
inds = np.flipud(np.arange(0, len(points)))
result = points.intersects(multipoint, inds)
np.testing.assert_equal(result, np.flipud(expected))
# Test GeoSeries.intersects
pd.testing.assert_series_equal(
points_series.intersects(multipoint),
pd.Series(expected, index=points_series.index)
)
@given(st_point_array(), st_line_array(min_size=1, max_size=1))
@example(
from_shapely([
sg.Point([0.25, 0.25]), # on line
sg.Point([1, 1]), # on vertex
sg.Point([1.01, 1.01]) # on ray, just past vertex
]),
from_shapely([sg.LineString([(0, 0), (1, 1), (2, 0)])]),
)
@hyp_settings
def test_points_intersects_line(gp_points, gp_line):
# Get scalar Line
sg_line = gp_line[0]
# Compute expected intersection
expected = gp_points.intersects(sg_line)
# Create spatialpandas objects
line = Line.from_shapely(sg_line)
points = PointArray.from_geopandas(gp_points)
points_series = GeoSeries(points, index=np.arange(10, 10 + len(points)))
# Test Point.intersects
result = np.array([
point_el.intersects(line) for point_el in points
])
np.testing.assert_equal(result, expected)
# Test PointArray.intersect
result = points.intersects(line)
np.testing.assert_equal(result, expected)
# Test PointArray.intersects with inds
inds = np.flipud(np.arange(0, len(points)))
result = points.intersects(line, inds)
np.testing.assert_equal(result, np.flipud(expected))
# Test GeoSeries.intersects
pd.testing.assert_series_equal(
points_series.intersects(line),
pd.Series(expected, index=points_series.index)
)
@given(st_point_array(), st_line_array(min_size=1, max_size=1))
@example(
from_shapely([
sg.Point([0.25, 0.25]), # on line
sg.Point([1, 1]), # on vertex
sg.Point([1.01, 1.01]) # on ray, just past vertex
]),
from_shapely([sg.MultiLineString([
[(1, 0.5), (2, 0)],
[(0, 0), (1, 1)],
])])
)
@hyp_settings
def test_points_intersects_multiline(gp_points, gp_multiline):
# Get scalar MultiLine
sg_multiline = gp_multiline[0]
# Compute expected intersection
expected = gp_points.intersects(sg_multiline)
# Create spatialpandas objects
multiline = MultiLine.from_shapely(sg_multiline)
points = PointArray.from_geopandas(gp_points)
points_series = GeoSeries(points, index=np.arange(10, 10 + len(points)))
# Test Point.intersects
result = np.array([
point_el.intersects(multiline) for point_el in points
])
np.testing.assert_equal(result, expected)
# Test PointArray.intersect
result = points.intersects(multiline)
np.testing.assert_equal(result, expected)
# Test PointArray.intersects with inds
inds = np.flipud(np.arange(0, len(points)))
result = points.intersects(multiline, inds)
np.testing.assert_equal(result, np.flipud(expected))
# Test GeoSeries.intersects
pd.testing.assert_series_equal(
points_series.intersects(multiline),
pd.Series(expected, index=points_series.index)  # api: pandas.Series
)
import glob
import math
import os
import sys
import warnings
from decimal import Decimal
import numpy as np
import pandas as pd
import pytest
from packaging.version import parse as parse_version
import dask
import dask.dataframe as dd
import dask.multiprocessing
from dask.blockwise import Blockwise, optimize_blockwise
from dask.dataframe._compat import PANDAS_GT_110, PANDAS_GT_121, PANDAS_GT_130
from dask.dataframe.io.parquet.utils import _parse_pandas_metadata
from dask.dataframe.optimize import optimize_dataframe_getitem
from dask.dataframe.utils import assert_eq
from dask.layers import DataFrameIOLayer
from dask.utils import natural_sort_key
from dask.utils_test import hlg_layer
try:
import fastparquet
except ImportError:
fastparquet = False
fastparquet_version = parse_version("0")
else:
fastparquet_version = parse_version(fastparquet.__version__)
try:
import pyarrow as pa
except ImportError:
pa = False
pa_version = parse_version("0")
else:
pa_version = parse_version(pa.__version__)
try:
import pyarrow.parquet as pq
except ImportError:
pq = False
SKIP_FASTPARQUET = not fastparquet
FASTPARQUET_MARK = pytest.mark.skipif(SKIP_FASTPARQUET, reason="fastparquet not found")
if sys.platform == "win32" and pa and pa_version == parse_version("2.0.0"):
SKIP_PYARROW = True
SKIP_PYARROW_REASON = (
"skipping pyarrow 2.0.0 on windows: "
"https://github.com/dask/dask/issues/6093"
"|https://github.com/dask/dask/issues/6754"
)
else:
SKIP_PYARROW = not pq
SKIP_PYARROW_REASON = "pyarrow not found"
PYARROW_MARK = pytest.mark.skipif(SKIP_PYARROW, reason=SKIP_PYARROW_REASON)
# "Legacy" and "Dataset"-specific MARK definitions
SKIP_PYARROW_LE = SKIP_PYARROW
SKIP_PYARROW_LE_REASON = "pyarrow not found"
SKIP_PYARROW_DS = SKIP_PYARROW
SKIP_PYARROW_DS_REASON = "pyarrow not found"
if not SKIP_PYARROW_LE:
# NOTE: We should use PYARROW_LE_MARK to skip
# pyarrow-legacy tests once pyarrow officially
# removes ParquetDataset support in the future.
PYARROW_LE_MARK = pytest.mark.filterwarnings(
"ignore::DeprecationWarning",
"ignore::FutureWarning",
)
else:
PYARROW_LE_MARK = pytest.mark.skipif(SKIP_PYARROW_LE, reason=SKIP_PYARROW_LE_REASON)
PYARROW_DS_MARK = pytest.mark.skipif(SKIP_PYARROW_DS, reason=SKIP_PYARROW_DS_REASON)
ANY_ENGINE_MARK = pytest.mark.skipif(
SKIP_FASTPARQUET and SKIP_PYARROW,
reason="No parquet engine (fastparquet or pyarrow) found",
)
nrows = 40
npartitions = 15
df = pd.DataFrame(
{
"x": [i * 7 % 5 for i in range(nrows)], # Not sorted
"y": [i * 2.5 for i in range(nrows)], # Sorted
},
index=pd.Index([10 * i for i in range(nrows)], name="myindex"),
)
ddf = dd.from_pandas(df, npartitions=npartitions)
@pytest.fixture(
params=[
pytest.param("fastparquet", marks=FASTPARQUET_MARK),
pytest.param("pyarrow-legacy", marks=PYARROW_LE_MARK),
pytest.param("pyarrow-dataset", marks=PYARROW_DS_MARK),
]
)
def engine(request):
return request.param
def write_read_engines(**kwargs):
"""Product of both engines for write/read:
To add custom marks, pass a keyword of the form `mark_writer_reader=reason`
for a specific writer/reader pair, or `mark_engine=reason` to apply to all
parameters with that engine."""
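# For example, write_read_engines(**{"xfail_pyarrow-legacy_fastparquet": "msg"})
# xfails only the (pyarrow-legacy writer, fastparquet reader) combination, as
# done for write_read_engines_xfail below.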
backends = {"pyarrow-dataset", "pyarrow-legacy", "fastparquet"}
# Skip if uninstalled
skip_marks = {
"fastparquet": FASTPARQUET_MARK,
"pyarrow-legacy": PYARROW_LE_MARK,
"pyarrow-dataset": PYARROW_DS_MARK,
}
marks = {(w, r): [skip_marks[w], skip_marks[r]] for w in backends for r in backends}
# Custom marks
for kw, val in kwargs.items():
kind, rest = kw.split("_", 1)
key = tuple(rest.split("_"))
if kind not in ("xfail", "skip") or len(key) > 2 or set(key) - backends:
raise ValueError("unknown keyword %r" % kw)
val = getattr(pytest.mark, kind)(reason=val)
if len(key) == 2:
marks[key].append(val)
else:
for k in marks:
if key in k:
marks[k].append(val)
return pytest.mark.parametrize(
("write_engine", "read_engine"),
[pytest.param(*k, marks=tuple(v)) for (k, v) in sorted(marks.items())],
)
pyarrow_fastparquet_msg = "pyarrow schema and pandas metadata may disagree"
write_read_engines_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
}
)
if (
fastparquet
and fastparquet_version < parse_version("0.5")
and PANDAS_GT_110
and not PANDAS_GT_121
):
# a regression in pandas 1.1.x / 1.2.0 caused a failure in writing partitioned
# categorical columns when using fastparquet 0.4.x, but this was (accidentally)
# fixed in fastparquet 0.5.0
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines(
**{
"xfail_pyarrow-dataset_fastparquet": pyarrow_fastparquet_msg,
"xfail_pyarrow-legacy_fastparquet": pyarrow_fastparquet_msg,
"xfail_fastparquet_fastparquet": fp_pandas_msg,
"xfail_fastparquet_pyarrow-dataset": fp_pandas_msg,
"xfail_fastparquet_pyarrow-legacy": fp_pandas_msg,
}
)
else:
fp_pandas_msg = "pandas with fastparquet engine does not preserve index"
fp_pandas_xfail = write_read_engines()
@PYARROW_MARK
def test_pyarrow_getengine():
from dask.dataframe.io.parquet.arrow import ArrowDatasetEngine
from dask.dataframe.io.parquet.core import get_engine
# Check that the default engine for "pyarrow"/"arrow"
# is the `pyarrow.dataset`-based engine
assert get_engine("pyarrow") == ArrowDatasetEngine
assert get_engine("arrow") == ArrowDatasetEngine
if SKIP_PYARROW_LE:
with pytest.warns(FutureWarning):
get_engine("pyarrow-legacy")
@write_read_engines()
def test_local(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df = dd.from_pandas(data, chunksize=500)
df.to_parquet(tmp, write_index=False, engine=write_engine)
files = os.listdir(tmp)
assert "_common_metadata" in files
assert "_metadata" in files
assert "part.0.parquet" in files
df2 = dd.read_parquet(tmp, index=False, engine=read_engine)
assert len(df2.divisions) > 1
out = df2.compute(scheduler="sync").reset_index()
for column in df.columns:
assert (data[column] == out[column]).all()
@pytest.mark.parametrize("index", [False, True])
@write_read_engines_xfail
def test_empty(tmpdir, write_engine, read_engine, index):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})[:0]
if index:
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, write_index=index, engine=write_engine)
read_df = dd.read_parquet(fn, engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_simple(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
if write_engine != "fastparquet":
df = pd.DataFrame({"a": [b"a", b"b", b"b"], "b": [4, 5, 6]})
else:
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
read_df = dd.read_parquet(fn, index=["a"], engine=read_engine)
assert_eq(ddf, read_df)
@write_read_engines()
def test_delayed_no_metadata(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": ["a", "b", "b"], "b": [4, 5, 6]})
df.set_index("a", inplace=True, drop=True)
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(
fn, engine=write_engine, compute=False, write_metadata_file=False
).compute()
files = os.listdir(fn)
assert "_metadata" not in files
# Fastparquet doesn't currently handle a directory without "_metadata"
read_df = dd.read_parquet(
os.path.join(fn, "*.parquet"),
index=["a"],
engine=read_engine,
gather_statistics=True,
)
assert_eq(ddf, read_df)
@write_read_engines()
def test_read_glob(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, engine=write_engine)
if os.path.exists(os.path.join(tmp_path, "_metadata")):
os.unlink(os.path.join(tmp_path, "_metadata"))
files = os.listdir(tmp_path)
assert "_metadata" not in files
ddf2 = dd.read_parquet(
os.path.join(tmp_path, "*.parquet"),
engine=read_engine,
index="myindex", # Must specify index without _metadata
gather_statistics=True,
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_gather_statistics_false(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
ddf.to_parquet(tmp_path, write_index=False, engine=write_engine)
ddf2 = dd.read_parquet(
tmp_path,
engine=read_engine,
index=False,
gather_statistics=False,
)
assert_eq(ddf, ddf2, check_index=False, check_divisions=False)
@write_read_engines()
def test_read_list(tmpdir, write_engine, read_engine):
if write_engine == read_engine == "fastparquet" and os.name == "nt":
# fastparquet or dask is not normalizing filepaths correctly on
# windows.
pytest.skip("filepath bug.")
tmpdir = str(tmpdir)
ddf.to_parquet(tmpdir, engine=write_engine)
files = sorted(
(
os.path.join(tmpdir, f)
for f in os.listdir(tmpdir)
if not f.endswith("_metadata")
),
key=natural_sort_key,
)
ddf2 = dd.read_parquet(
files, engine=read_engine, index="myindex", gather_statistics=True
)
assert_eq(ddf, ddf2)
@write_read_engines()
def test_columns_auto_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# XFAIL, auto index selection no longer supported (for simplicity)
# ### Empty columns ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=[], engine=read_engine), ddf[[]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, gather_statistics=False),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, auto select index ###
# With divisions if supported
assert_eq(dd.read_parquet(fn, columns=["x"], engine=read_engine), ddf[["x"]])
# No divisions
assert_eq(
dd.read_parquet(fn, columns=["x"], engine=read_engine, gather_statistics=False),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
@write_read_engines()
def test_columns_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
# With Index
# ----------
# ### Empty columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, columns=[], engine=read_engine, index="myindex"), ddf[[]]
)
# No divisions
assert_eq(
dd.read_parquet(
fn, columns=[], engine=read_engine, index="myindex", gather_statistics=False
),
ddf[[]].clear_divisions(),
check_divisions=True,
)
# ### Single column, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x"], engine=read_engine),
ddf[["x"]],
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x"],
engine=read_engine,
gather_statistics=False,
),
ddf[["x"]].clear_divisions(),
check_divisions=True,
)
# ### Two columns, specify index ###
# With divisions if supported
assert_eq(
dd.read_parquet(fn, index="myindex", columns=["x", "y"], engine=read_engine),
ddf,
)
# No divisions
assert_eq(
dd.read_parquet(
fn,
index="myindex",
columns=["x", "y"],
engine=read_engine,
gather_statistics=False,
),
ddf.clear_divisions(),
check_divisions=True,
)
def test_nonsense_column(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
with pytest.raises((ValueError, KeyError)):
dd.read_parquet(fn, columns=["nonesense"], engine=engine)
with pytest.raises((Exception, KeyError)):
dd.read_parquet(fn, columns=["nonesense"] + list(ddf.columns), engine=engine)
@write_read_engines()
def test_columns_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = ddf.reset_index()
# No Index
# --------
# All columns, none as index
assert_eq(
dd.read_parquet(fn, index=False, engine=read_engine, gather_statistics=True),
ddf2,
check_index=False,
check_divisions=True,
)
# Two columns, none as index
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["x", "y"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["x", "y"]],
check_index=False,
check_divisions=True,
)
# One column and one index, all as columns
assert_eq(
dd.read_parquet(
fn,
index=False,
columns=["myindex", "x"],
engine=read_engine,
gather_statistics=True,
),
ddf2[["myindex", "x"]],
check_index=False,
check_divisions=True,
)
@write_read_engines()
def test_gather_statistics_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=write_engine, write_index=False)
df = dd.read_parquet(fn, engine=read_engine, index=False)
assert df.index.name is None
assert not df.known_divisions
def test_columns_index_with_multi_index(tmpdir, engine):
fn = os.path.join(str(tmpdir), "test.parquet")
index = pd.MultiIndex.from_arrays(
[np.arange(10), np.arange(10) + 1], names=["x0", "x1"]
)
df = pd.DataFrame(np.random.randn(10, 2), columns=["a", "b"], index=index)
df2 = df.reset_index(drop=False)
if engine == "fastparquet":
fastparquet.write(fn, df.reset_index(), write_index=False)
else:
pq.write_table(pa.Table.from_pandas(df.reset_index(), preserve_index=False), fn)
ddf = dd.read_parquet(fn, engine=engine, index=index.names)
assert_eq(ddf, df)
d = dd.read_parquet(fn, columns="a", engine=engine, index=index.names)
assert_eq(d, df["a"])
d = dd.read_parquet(fn, index=["a", "b"], columns=["x0", "x1"], engine=engine)
assert_eq(d, df2.set_index(["a", "b"])[["x0", "x1"]])
# Just index
d = dd.read_parquet(fn, index=False, engine=engine)
assert_eq(d, df2)
d = dd.read_parquet(fn, columns=["b"], index=["a"], engine=engine)
assert_eq(d, df2.set_index("a")[["b"]])
d = dd.read_parquet(fn, columns=["a", "b"], index=["x0"], engine=engine)
assert_eq(d, df2.set_index("x0")[["a", "b"]])
# Just columns
d = dd.read_parquet(fn, columns=["x0", "a"], index=["x1"], engine=engine)
assert_eq(d, df2.set_index("x1")[["x0", "a"]])
# Both index and columns
d = dd.read_parquet(fn, index=False, columns=["x0", "b"], engine=engine)
assert_eq(d, df2[["x0", "b"]])
for index in ["x1", "b"]:
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
assert_eq(d, df2.set_index(index)[["x0", "a"]])
# Columns and index intersect
for index in ["a", "x0"]:
with pytest.raises(ValueError):
d = dd.read_parquet(fn, index=index, columns=["x0", "a"], engine=engine)
# Series output
for ind, col, sol_df in [
("x1", "x0", df2.set_index("x1")),
(False, "b", df2),
(False, "x0", df2[["x0"]]),
("a", "x0", df2.set_index("a")[["x0"]]),
("a", "b", df2.set_index("a")),
]:
d = dd.read_parquet(fn, index=ind, columns=col, engine=engine)
assert_eq(d, sol_df[col])
@write_read_engines()
def test_no_index(tmpdir, write_engine, read_engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine)
assert_eq(df, ddf2, check_index=False)
def test_read_series(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, columns=["x"], index="myindex", engine=engine)
assert_eq(ddf[["x"]], ddf2)
ddf2 = dd.read_parquet(fn, columns="x", index="myindex", engine=engine)
assert_eq(ddf.x, ddf2)
def test_names(tmpdir, engine):
fn = str(tmpdir)
ddf.to_parquet(fn, engine=engine)
def read(fn, **kwargs):
return dd.read_parquet(fn, engine=engine, **kwargs)
assert set(read(fn).dask) == set(read(fn).dask)
assert set(read(fn).dask) != set(read(fn, columns=["x"]).dask)
assert set(read(fn, columns=("x",)).dask) == set(read(fn, columns=["x"]).dask)
@write_read_engines()
def test_roundtrip_from_pandas(tmpdir, write_engine, read_engine):
fn = str(tmpdir.join("test.parquet"))
dfp = df.copy()
dfp.index.name = "index"
dfp.to_parquet(
fn, engine="pyarrow" if write_engine.startswith("pyarrow") else "fastparquet"
)
ddf = dd.read_parquet(fn, index="index", engine=read_engine)
assert_eq(dfp, ddf)
@write_read_engines()
def test_categorical(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame({"x": ["a", "b", "c"] * 100}, dtype="category")
ddf = dd.from_pandas(df, npartitions=3)
dd.to_parquet(ddf, tmp, engine=write_engine)
ddf2 = dd.read_parquet(tmp, categories="x", engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2 = dd.read_parquet(tmp, categories=["x"], engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
# autocat
if read_engine == "fastparquet":
ddf2 = dd.read_parquet(tmp, engine=read_engine)
assert ddf2.compute().x.cat.categories.tolist() == ["a", "b", "c"]
ddf2.loc[:1000].compute()
assert assert_eq(df, ddf2)
# dereference cats
ddf2 = dd.read_parquet(tmp, categories=[], engine=read_engine)
ddf2.loc[:1000].compute()
assert (df.x == ddf2.x.compute()).all()
def test_append(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
ddf2.to_parquet(tmp, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, engine=engine)
assert_eq(df, ddf3)
def test_append_create(tmpdir, engine):
"""Test that appended parquet equal to the original one."""
tmp_path = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
df.index.name = "index"
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp_path, append=True, engine=engine)
ddf2.to_parquet(tmp_path, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp_path, engine=engine)
assert_eq(df, ddf3)
def test_append_with_partition(tmpdir, engine):
tmp = str(tmpdir)
df0 = pd.DataFrame(
{
"lat": np.arange(0, 10, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(100, 110, dtype="int64"),
}
)
df0.index.name = "index"
df1 = pd.DataFrame(
{
"lat": np.arange(10, 20, dtype="int64"),
"lon": np.arange(10, 20, dtype="int64"),
"value": np.arange(120, 130, dtype="int64"),
}
)
df1.index.name = "index"
# Check that nullable dtypes work
# (see: https://github.com/dask/dask/issues/8373)
df0["lat"] = df0["lat"].astype("Int64")
df1["lat"].iloc[0] = np.nan
df1["lat"] = df1["lat"].astype("Int64")
dd_df0 = dd.from_pandas(df0, npartitions=1)
dd_df1 = dd.from_pandas(df1, npartitions=1)
dd.to_parquet(dd_df0, tmp, partition_on=["lon"], engine=engine)
dd.to_parquet(
dd_df1,
tmp,
partition_on=["lon"],
append=True,
ignore_divisions=True,
engine=engine,
)
out = dd.read_parquet(
tmp, engine=engine, index="index", gather_statistics=True
).compute()
# convert categorical to plain int just to pass assert
out["lon"] = out.lon.astype("int64")
# sort required since partitioning breaks index order
assert_eq(
out.sort_values("value"), pd.concat([df0, df1])[out.columns], check_index=False
)
def test_partition_on_cats(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
@PYARROW_MARK
@pytest.mark.parametrize("meta", [False, True])
@pytest.mark.parametrize("stats", [False, True])
def test_partition_on_cats_pyarrow(tmpdir, stats, meta):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b"], engine="pyarrow", write_metadata_file=meta)
df = dd.read_parquet(tmp, engine="pyarrow", gather_statistics=stats)
assert set(df.b.cat.categories) == {"x", "y", "z"}
def test_partition_on_cats_2(tmpdir, engine):
tmp = str(tmpdir)
d = pd.DataFrame(
{
"a": np.random.rand(50),
"b": np.random.choice(["x", "y", "z"], size=50),
"c": np.random.choice(["x", "y", "z"], size=50),
}
)
d = dd.from_pandas(d, 2)
d.to_parquet(tmp, partition_on=["b", "c"], engine=engine)
df = dd.read_parquet(tmp, engine=engine)
assert set(df.b.cat.categories) == {"x", "y", "z"}
assert set(df.c.cat.categories) == {"x", "y", "z"}
df = dd.read_parquet(tmp, columns=["a", "c"], engine=engine)
assert set(df.c.cat.categories) == {"x", "y", "z"}
assert "b" not in df.columns
assert_eq(df, df.compute())
df = dd.read_parquet(tmp, index="c", engine=engine)
assert set(df.index.categories) == {"x", "y", "z"}
assert "c" not in df.columns
# series
df = dd.read_parquet(tmp, columns="b", engine=engine)
assert set(df.cat.categories) == {"x", "y", "z"}
def test_append_wo_index(tmpdir, engine):
"""Test append with write_index=False."""
tmp = str(tmpdir.join("tmp1.parquet"))
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half:], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
assert "Appended columns" in str(excinfo.value)
tmp = str(tmpdir.join("tmp2.parquet"))
ddf1.to_parquet(tmp, write_index=False, engine=engine)
ddf2.to_parquet(tmp, write_index=False, append=True, engine=engine)
ddf3 = dd.read_parquet(tmp, index="f", engine=engine)
assert_eq(df.set_index("f"), ddf3)
def test_append_overlapping_divisions(tmpdir, engine):
"""Test raising of error when divisions overlapping."""
tmp = str(tmpdir)
df = pd.DataFrame(
{
"i32": np.arange(1000, dtype=np.int32),
"i64": np.arange(1000, dtype=np.int64),
"f": np.arange(1000, dtype=np.float64),
"bhello": np.random.choice(["hello", "yo", "people"], size=1000).astype(
"O"
),
}
)
half = len(df) // 2
ddf1 = dd.from_pandas(df.iloc[:half], chunksize=100)
ddf2 = dd.from_pandas(df.iloc[half - 10 :], chunksize=100)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended divisions" in str(excinfo.value)
ddf2.to_parquet(tmp, engine=engine, append=True, ignore_divisions=True)
def test_append_different_columns(tmpdir, engine):
"""Test raising of error when non equal columns."""
tmp = str(tmpdir)
df1 = pd.DataFrame({"i32": np.arange(100, dtype=np.int32)})
df2 = pd.DataFrame({"i64": np.arange(100, dtype=np.int64)})
df3 = pd.DataFrame({"i32": np.arange(100, dtype=np.int64)})
ddf1 = dd.from_pandas(df1, chunksize=2)
ddf2 = dd.from_pandas(df2, chunksize=2)
ddf3 = dd.from_pandas(df3, chunksize=2)
ddf1.to_parquet(tmp, engine=engine)
with pytest.raises(ValueError) as excinfo:
ddf2.to_parquet(tmp, engine=engine, append=True)
assert "Appended columns" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ddf3.to_parquet(tmp, engine=engine, append=True)
assert "Appended dtypes" in str(excinfo.value)
def test_append_dict_column(tmpdir, engine):
# See: https://github.com/dask/dask/issues/7492
if engine == "fastparquet":
pytest.xfail("Fastparquet engine is missing dict-column support")
elif pa_version < parse_version("1.0.1"):
pytest.skip("PyArrow 1.0.1+ required for dict-column support.")
tmp = str(tmpdir)
dts = pd.date_range("2020-01-01", "2021-01-01")
df = pd.DataFrame(
{"value": [{"x": x} for x in range(len(dts))]},
index=dts,
)
ddf1 = dd.from_pandas(df, npartitions=1)
# Write ddf1 to tmp, and then append it again
ddf1.to_parquet(tmp, append=True, engine=engine)
ddf1.to_parquet(tmp, append=True, engine=engine, ignore_divisions=True)
# Read back all data (ddf1 + ddf1)
ddf2 = dd.read_parquet(tmp, engine=engine)
# Check computed result
expect = pd.concat([df, df])
result = ddf2.compute()
assert_eq(expect, result)
@write_read_engines_xfail
def test_ordering(tmpdir, write_engine, read_engine):
tmp = str(tmpdir)
df = pd.DataFrame(
{"a": [1, 2, 3], "b": [10, 20, 30], "c": [100, 200, 300]},
index=pd.Index([-1, -2, -3], name="myindex"),
columns=["c", "a", "b"],
)
ddf = dd.from_pandas(df, npartitions=2)
dd.to_parquet(ddf, tmp, engine=write_engine)
if read_engine == "fastparquet":
pf = fastparquet.ParquetFile(tmp)
assert pf.columns == ["myindex", "c", "a", "b"]
ddf2 = dd.read_parquet(tmp, index="myindex", engine=read_engine)
assert_eq(ddf, ddf2, check_divisions=False)
def test_read_parquet_custom_columns(tmpdir, engine):
tmp = str(tmpdir)
data = pd.DataFrame(
{"i32": np.arange(1000, dtype=np.int32), "f": np.arange(1000, dtype=np.float64)}
)
df = dd.from_pandas(data, chunksize=50)
df.to_parquet(tmp, engine=engine)
df2 = dd.read_parquet(tmp, columns=["i32", "f"], engine=engine)
assert_eq(df[["i32", "f"]], df2, check_index=False)
fns = glob.glob(os.path.join(tmp, "*.parquet"))
df2 = dd.read_parquet(fns, columns=["i32"], engine=engine).compute()
df2.sort_values("i32", inplace=True)
assert_eq(df[["i32"]], df2, check_index=False, check_divisions=False)
df3 = dd.read_parquet(tmp, columns=["f", "i32"], engine=engine)
assert_eq(df[["f", "i32"]], df3, check_index=False)
@pytest.mark.parametrize(
"df,write_kwargs,read_kwargs",
[
(pd.DataFrame({"x": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": ["c", "a", "b"]}), {}, {}),
(pd.DataFrame({"x": ["cc", "a", "bbb"]}), {}, {}),
(pd.DataFrame({"x": [b"a", b"b", b"c"]}), {"object_encoding": "bytes"}, {}),
(
pd.DataFrame({"x": pd.Categorical(["a", "b", "a"])}),
{},
{"categories": ["x"]},
),
(pd.DataFrame({"x": pd.Categorical([1, 2, 1])}), {}, {"categories": ["x"]}),
(pd.DataFrame({"x": list(map(pd.Timestamp, [3000, 2000, 1000]))}), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("M8[ns]"), {}, {}),
pytest.param(
pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ns]"),
{},
{},
),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[us]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("M8[ms]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, UTC]"), {}, {}),
(pd.DataFrame({"x": [3000, 2000, 1000]}).astype("datetime64[ns, CET]"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("uint16"), {}, {}),
(pd.DataFrame({"x": [3, 2, 1]}).astype("float32"), {}, {}),
(pd.DataFrame({"x": [3, 1, 2]}, index=[3, 2, 1]), {}, {}),
(pd.DataFrame({"x": [3, 1, 5]}, index=pd.Index([1, 2, 3], name="foo")), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [1, 2, 3], "y": [3, 2, 1]}, columns=["y", "x"]), {}, {}),
(pd.DataFrame({"0": [3, 2, 1]}), {}, {}),
(pd.DataFrame({"x": [3, 2, None]}), {}, {}),
(pd.DataFrame({"-": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({".": [3.0, 2.0, None]}), {}, {}),
(pd.DataFrame({" ": [3.0, 2.0, None]}), {}, {}),
],
)
def test_roundtrip(tmpdir, df, write_kwargs, read_kwargs, engine):
if "x" in df and df.x.dtype == "M8[ns]" and "arrow" in engine:
pytest.xfail(reason="Parquet pyarrow v1 doesn't support nanosecond precision")
if (
"x" in df
and df.x.dtype == "M8[ns]"
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail(reason="fastparquet doesn't support nanosecond precision yet")
if (
PANDAS_GT_130
and read_kwargs.get("categories", None)
and engine == "fastparquet"
and fastparquet_version <= parse_version("0.6.3")
):
pytest.xfail("https://github.com/dask/fastparquet/issues/577")
tmp = str(tmpdir)
if df.index.name is None:
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
oe = write_kwargs.pop("object_encoding", None)
if oe and engine == "fastparquet":
dd.to_parquet(ddf, tmp, engine=engine, object_encoding=oe, **write_kwargs)
else:
dd.to_parquet(ddf, tmp, engine=engine, **write_kwargs)
ddf2 = dd.read_parquet(tmp, index=df.index.name, engine=engine, **read_kwargs)
if str(ddf2.dtypes.get("x")) == "UInt16" and engine == "fastparquet":
        # fastparquet chooses to use masked type to be able to get true repr of
# 16-bit int
assert_eq(ddf.astype("UInt16"), ddf2)
else:
assert_eq(ddf, ddf2)
def test_categories(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"x": [1, 2, 3, 4, 5], "y": list("caaab")})
ddf = dd.from_pandas(df, npartitions=2)
ddf["y"] = ddf.y.astype("category")
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, categories=["y"], engine=engine)
# Shouldn't need to specify categories explicitly
ddf3 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf3, ddf2)
with pytest.raises(NotImplementedError):
ddf2.y.cat.categories
assert set(ddf2.y.compute().cat.categories) == {"a", "b", "c"}
cats_set = ddf2.map_partitions(lambda x: x.y.cat.categories.sort_values()).compute()
assert cats_set.tolist() == ["a", "c", "a", "b"]
if engine == "fastparquet":
assert_eq(ddf.y, ddf2.y, check_names=False)
with pytest.raises(TypeError):
        # attempt to load as category a column that is not encoded as such
ddf2 = dd.read_parquet(fn, categories=["x"], engine=engine).compute()
with pytest.raises((ValueError, FutureWarning)):
        # attempt to load an unknown column as category
ddf2 = dd.read_parquet(fn, categories=["foo"], engine=engine)
def test_categories_unnamed_index(tmpdir, engine):
# Check that we can handle an unnamed categorical index
# https://github.com/dask/dask/issues/6885
tmpdir = str(tmpdir)
df = pd.DataFrame(
data={"A": [1, 2, 3], "B": ["a", "a", "b"]}, index=["x", "y", "y"]
)
ddf = dd.from_pandas(df, npartitions=1)
ddf = ddf.categorize(columns=["B"])
ddf.to_parquet(tmpdir, engine=engine)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf.index, ddf2.index, check_divisions=False)
def test_empty_partition(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame({"a": range(10), "b": range(10)})
ddf = dd.from_pandas(df, npartitions=5)
ddf2 = ddf[ddf.a <= 5]
ddf2.to_parquet(fn, engine=engine)
ddf3 = dd.read_parquet(fn, engine=engine)
assert ddf3.npartitions < 5
sol = ddf2.compute()
assert_eq(sol, ddf3, check_names=False, check_index=False)
def test_timestamp_index(tmpdir, engine):
fn = str(tmpdir)
df = dd._compat.makeTimeDataFrame()
df.index.name = "foo"
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(fn, engine=engine)
ddf2 = dd.read_parquet(fn, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
@PYARROW_MARK
def test_to_parquet_default_writes_nulls(tmpdir):
fn = str(tmpdir.join("test.parquet"))
df = pd.DataFrame({"c1": [1.0, np.nan, 2, np.nan, 3]})
ddf = dd.from_pandas(df, npartitions=1)
ddf.to_parquet(fn)
table = pq.read_table(fn)
assert table[1].null_count == 2
@PYARROW_LE_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_fails_by_default(tmpdir):
df = pd.DataFrame(
{"partition_column": [0, 0, 1, 1], "strings": ["a", "b", None, None]}
)
ddf = dd.from_pandas(df, npartitions=2)
# In order to allow pyarrow to write an inconsistent schema,
# we need to avoid writing the _metadata file (will fail >0.17.1)
# and need to avoid schema inference (i.e. use `schema=None`)
ddf.to_parquet(
str(tmpdir),
engine="pyarrow",
partition_on=["partition_column"],
write_metadata_file=False,
schema=None,
)
# Test that schema is not validated by default
# (shouldn't raise error with legacy dataset)
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
).compute()
# Test that read fails when validate_schema=True
# Note: This fails differently for pyarrow.dataset api
with pytest.raises(ValueError) as e_info:
dd.read_parquet(
str(tmpdir),
engine="pyarrow-legacy",
gather_statistics=False,
dataset={"validate_schema": True},
).compute()
    assert "Schema in partition" in str(e_info.value)
    assert "was different" in str(e_info.value)
@PYARROW_MARK
def test_to_parquet_pyarrow_w_inconsistent_schema_by_partition_succeeds_w_manual_schema(
tmpdir,
):
# Data types to test: strings, arrays, ints, timezone aware timestamps
in_arrays = [[0, 1, 2], [3, 4], np.nan, np.nan]
out_arrays = [[0, 1, 2], [3, 4], None, None]
in_strings = ["a", "b", np.nan, np.nan]
out_strings = ["a", "b", None, None]
tstamp = pd.Timestamp(1513393355, unit="s")
in_tstamps = [tstamp, tstamp, pd.NaT, pd.NaT]
out_tstamps = [
# Timestamps come out in numpy.datetime64 format
tstamp.to_datetime64(),
tstamp.to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
timezone = "US/Eastern"
tz_tstamp = pd.Timestamp(1513393355, unit="s", tz=timezone)
in_tz_tstamps = [tz_tstamp, tz_tstamp, pd.NaT, pd.NaT]
out_tz_tstamps = [
# Timezones do not make it through a write-read cycle.
tz_tstamp.tz_convert(None).to_datetime64(),
tz_tstamp.tz_convert(None).to_datetime64(),
np.datetime64("NaT"),
np.datetime64("NaT"),
]
df = pd.DataFrame(
{
"partition_column": [0, 0, 1, 1],
"arrays": in_arrays,
"strings": in_strings,
"tstamps": in_tstamps,
"tz_tstamps": in_tz_tstamps,
}
)
ddf = dd.from_pandas(df, npartitions=2)
schema = pa.schema(
[
("arrays", pa.list_(pa.int64())),
("strings", pa.string()),
("tstamps", pa.timestamp("ns")),
("tz_tstamps", pa.timestamp("ns", timezone)),
("partition_column", pa.int64()),
]
)
ddf.to_parquet(
str(tmpdir), engine="pyarrow", partition_on="partition_column", schema=schema
)
ddf_after_write = (
dd.read_parquet(str(tmpdir), engine="pyarrow", gather_statistics=False)
.compute()
.reset_index(drop=True)
)
# Check array support
arrays_after_write = ddf_after_write.arrays.values
for i in range(len(df)):
assert np.array_equal(arrays_after_write[i], out_arrays[i]), type(out_arrays[i])
# Check datetime support
tstamps_after_write = ddf_after_write.tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tstamps_after_write[i]):
assert np.isnat(out_tstamps[i])
else:
assert tstamps_after_write[i] == out_tstamps[i]
# Check timezone aware datetime support
tz_tstamps_after_write = ddf_after_write.tz_tstamps.values
for i in range(len(df)):
# Need to test NaT separately
if np.isnat(tz_tstamps_after_write[i]):
assert np.isnat(out_tz_tstamps[i])
else:
assert tz_tstamps_after_write[i] == out_tz_tstamps[i]
# Check string support
assert np.array_equal(ddf_after_write.strings.values, out_strings)
# Check partition column
assert np.array_equal(ddf_after_write.partition_column, df.partition_column)
@PYARROW_MARK
@pytest.mark.parametrize("index", [False, True])
@pytest.mark.parametrize("schema", ["infer", "complex"])
def test_pyarrow_schema_inference(tmpdir, index, engine, schema):
if schema == "complex":
schema = {"index": pa.string(), "amount": pa.int64()}
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"index": ["1", "2", "3", "2", "3", "1", "4"],
"date": pd.to_datetime(
[
"2017-01-01",
"2017-01-01",
"2017-01-01",
"2017-01-02",
"2017-01-02",
"2017-01-06",
"2017-01-09",
]
),
"amount": [100, 200, 300, 400, 500, 600, 700],
},
index=range(7, 14),
)
if index:
df = dd.from_pandas(df, npartitions=2).set_index("index")
else:
df = dd.from_pandas(df, npartitions=2)
df.to_parquet(tmpdir, engine="pyarrow", schema=schema)
df_out = dd.read_parquet(tmpdir, engine=engine)
df_out.compute()
if index and engine == "fastparquet":
# Fastparquet fails to detect int64 from _metadata
df_out["amount"] = df_out["amount"].astype("int64")
# Fastparquet not handling divisions for
# pyarrow-written dataset with string index
assert_eq(df, df_out, check_divisions=False)
else:
assert_eq(df, df_out)
def test_partition_on(tmpdir, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
"d": np.arange(0, 100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
# Note #1: Cross-engine functionality is missing
# Note #2: The index is not preserved in pyarrow when partition_on is used
out = dd.read_parquet(
tmpdir, engine=engine, index=False, gather_statistics=False
).compute()
for val in df.a1.unique():
assert set(df.d[df.a1 == val]) == set(out.d[out.a1 == val])
# Now specify the columns and allow auto-index detection
out = dd.read_parquet(tmpdir, engine=engine, columns=["d", "a2"]).compute()
for val in df.a2.unique():
assert set(df.d[df.a2 == val]) == set(out.d[out.a2 == val])
def test_partition_on_duplicates(tmpdir, engine):
# https://github.com/dask/dask/issues/6445
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"a1": np.random.choice(["A", "B", "C"], size=100),
"a2": np.random.choice(["X", "Y", "Z"], size=100),
"data": np.random.random(size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
for _ in range(2):
d.to_parquet(tmpdir, partition_on=["a1", "a2"], engine=engine)
out = dd.read_parquet(tmpdir, engine=engine).compute()
assert len(df) == len(out)
for root, dirs, files in os.walk(tmpdir):
for file in files:
assert file in (
"part.0.parquet",
"part.1.parquet",
"_common_metadata",
"_metadata",
)
@PYARROW_MARK
@pytest.mark.parametrize("partition_on", ["aa", ["aa"]])
def test_partition_on_string(tmpdir, partition_on):
tmpdir = str(tmpdir)
with dask.config.set(scheduler="single-threaded"):
tmpdir = str(tmpdir)
df = pd.DataFrame(
{
"aa": np.random.choice(["A", "B", "C"], size=100),
"bb": np.random.random(size=100),
"cc": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(
tmpdir, partition_on=partition_on, write_index=False, engine="pyarrow"
)
out = dd.read_parquet(
tmpdir, index=False, gather_statistics=False, engine="pyarrow"
)
out = out.compute()
for val in df.aa.unique():
assert set(df.bb[df.aa == val]) == set(out.bb[out.aa == val])
@write_read_engines()
def test_filters_categorical(tmpdir, write_engine, read_engine):
tmpdir = str(tmpdir)
cats = ["2018-01-01", "2018-01-02", "2018-01-03", "2018-01-04"]
dftest = pd.DataFrame(
{
"dummy": [1, 1, 1, 1],
"DatePart": pd.Categorical(cats, categories=cats, ordered=True),
}
)
ddftest = dd.from_pandas(dftest, npartitions=4).set_index("dummy")
ddftest.to_parquet(tmpdir, partition_on="DatePart", engine=write_engine)
ddftest_read = dd.read_parquet(
tmpdir,
index="dummy",
engine=read_engine,
filters=[(("DatePart", "<=", "2018-01-02"))],
)
assert len(ddftest_read) == 2
@write_read_engines()
def test_filters(tmpdir, write_engine, read_engine):
tmp_path = str(tmpdir)
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
assert ddf.npartitions == 5
ddf.to_parquet(tmp_path, engine=write_engine)
a = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", ">", 4)])
assert a.npartitions == 3
assert (a.x > 3).all().compute()
b = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "==", "c")])
assert b.npartitions == 1
assert (b.y == "c").all().compute()
c = dd.read_parquet(
tmp_path, engine=read_engine, filters=[("y", "==", "c"), ("x", ">", 6)]
)
assert c.npartitions <= 1
assert not len(c)
assert_eq(c, c)
d = dd.read_parquet(
tmp_path,
engine=read_engine,
filters=[
# Select two overlapping ranges
[("x", ">", 1), ("x", "<", 6)],
[("x", ">", 3), ("x", "<", 8)],
],
)
assert d.npartitions == 3
assert ((d.x > 1) & (d.x < 8)).all().compute()
e = dd.read_parquet(tmp_path, engine=read_engine, filters=[("x", "in", (0, 9))])
assert e.npartitions == 2
assert ((e.x < 2) | (e.x > 7)).all().compute()
f = dd.read_parquet(tmp_path, engine=read_engine, filters=[("y", "=", "c")])
assert f.npartitions == 1
assert len(f)
assert (f.y == "c").all().compute()
@write_read_engines()
def test_filters_v0(tmpdir, write_engine, read_engine):
if write_engine == "fastparquet" or read_engine == "fastparquet":
pytest.importorskip("fastparquet", minversion="0.3.1")
# Recent versions of pyarrow support full row-wise filtering
# (fastparquet and older pyarrow versions do not)
pyarrow_row_filtering = read_engine == "pyarrow-dataset"
fn = str(tmpdir)
df = pd.DataFrame({"at": ["ab", "aa", "ba", "da", "bb"]})
ddf = dd.from_pandas(df, npartitions=1)
# Ok with 1 partition and filters
ddf.repartition(npartitions=1, force=True).to_parquet(
fn, write_index=False, engine=write_engine
)
ddf2 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, index=False, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
if pyarrow_row_filtering:
assert_eq(ddf2, ddf[ddf["at"] == "aa"], check_index=False)
assert_eq(ddf3, ddf[ddf["at"] == "aa"], check_index=False)
else:
assert_eq(ddf2, ddf)
assert_eq(ddf3, ddf)
# with >1 partition and no filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(fn, engine=read_engine).compute()
assert_eq(ddf2, ddf)
# with >1 partition and filters using base fastparquet
if read_engine == "fastparquet":
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
df2 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "==", "aa")])
df3 = fastparquet.ParquetFile(fn).to_pandas(filters=[("at", "=", "aa")])
assert len(df2) > 0
assert len(df3) > 0
# with >1 partition and filters
ddf.repartition(npartitions=2, force=True).to_parquet(fn, engine=write_engine)
ddf2 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "==", "aa")]
).compute()
ddf3 = dd.read_parquet(
fn, engine=read_engine, filters=[("at", "=", "aa")]
).compute()
assert len(ddf2) > 0
assert len(ddf3) > 0
assert_eq(ddf2, ddf3)
def test_filtering_pyarrow_dataset(tmpdir, engine):
pytest.importorskip("pyarrow", minversion="1.0.0")
fn = str(tmpdir)
df = pd.DataFrame({"aa": range(100), "bb": ["cat", "dog"] * 50})
ddf = dd.from_pandas(df, npartitions=10)
ddf.to_parquet(fn, write_index=False, engine=engine)
# Filtered read
aa_lim = 40
bb_val = "dog"
filters = [[("aa", "<", aa_lim), ("bb", "==", bb_val)]]
ddf2 = dd.read_parquet(fn, index=False, engine="pyarrow-dataset", filters=filters)
    # Check that partitions are filtered for the "aa" filter
nonempty = 0
for part in ddf[ddf["aa"] < aa_lim].partitions:
nonempty += int(len(part.compute()) > 0)
assert ddf2.npartitions == nonempty
# Check that rows are filtered for "aa" and "bb" filters
df = df[df["aa"] < aa_lim]
df = df[df["bb"] == bb_val]
assert_eq(df, ddf2.compute(), check_index=False)
def test_filters_file_list(tmpdir, engine):
df = pd.DataFrame({"x": range(10), "y": list("aabbccddee")})
ddf = dd.from_pandas(df, npartitions=5)
ddf.to_parquet(str(tmpdir), engine=engine)
fils = str(tmpdir.join("*.parquet"))
ddf_out = dd.read_parquet(
fils, gather_statistics=True, engine=engine, filters=[("x", ">", 3)]
)
assert ddf_out.npartitions == 3
assert_eq(df[df["x"] > 3], ddf_out.compute(), check_index=False)
    # Check that the first partition gets filtered for single-path input
ddf2 = dd.read_parquet(
str(tmpdir.join("part.0.parquet")),
gather_statistics=True,
engine=engine,
filters=[("x", ">", 3)],
)
assert len(ddf2) == 0
def test_pyarrow_filter_divisions(tmpdir):
pytest.importorskip("pyarrow")
# Write simple dataset with an index that will only
# have a sorted index if certain row-groups are filtered out.
# In this case, we filter "a" <= 3 to get a sorted
# index. Otherwise, "a" is NOT monotonically increasing.
df = pd.DataFrame({"a": [0, 1, 10, 12, 2, 3, 8, 9], "b": range(8)}).set_index("a")
df.iloc[:4].to_parquet(
str(tmpdir.join("file.0.parquet")), engine="pyarrow", row_group_size=2
)
df.iloc[4:].to_parquet(
str(tmpdir.join("file.1.parquet")), engine="pyarrow", row_group_size=2
)
# Only works for ArrowDatasetEngine.
# Legacy code will not apply filters on individual row-groups
# when `split_row_groups=False`.
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=False,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
ddf = dd.read_parquet(
str(tmpdir),
engine="pyarrow-dataset",
split_row_groups=True,
gather_statistics=True,
filters=[("a", "<=", 3)],
)
assert ddf.divisions == (0, 2, 3)
def test_divisions_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
size = 100
categoricals = []
for value in ["a", "b", "c", "d"]:
categoricals += [value] * int(size / 4)
df = pd.DataFrame(
{
"a": categoricals,
"b": np.random.random(size=size),
"c": np.random.randint(1, 5, size=size),
}
)
d = dd.from_pandas(df, npartitions=4)
# save it
d.to_parquet(tmpdir, write_index=True, partition_on=["a"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("a", "==", "b")])
# test it
expected_divisions = (25, 49)
assert out.divisions == expected_divisions
def test_divisions_are_known_read_with_filters(tmpdir):
pytest.importorskip("fastparquet", minversion="0.3.1")
tmpdir = str(tmpdir)
# generate dataframe
df = pd.DataFrame(
{
"unique": [0, 0, 1, 1, 2, 2, 3, 3],
"id": ["id1", "id2", "id1", "id2", "id1", "id2", "id1", "id2"],
},
index=[0, 0, 1, 1, 2, 2, 3, 3],
)
d = dd.from_pandas(df, npartitions=2)
# save it
d.to_parquet(tmpdir, partition_on=["id"], engine="fastparquet")
# read it
out = dd.read_parquet(tmpdir, engine="fastparquet", filters=[("id", "==", "id1")])
# test it
assert out.known_divisions
expected_divisions = (0, 2, 3)
assert out.divisions == expected_divisions
@FASTPARQUET_MARK
@pytest.mark.xfail(reason="No longer accept ParquetFile objects")
def test_read_from_fastparquet_parquetfile(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
d = dd.from_pandas(df, npartitions=2)
d.to_parquet(fn, partition_on=["a"], engine="fastparquet")
pq_f = fastparquet.ParquetFile(fn)
# OK with no filters
out = dd.read_parquet(pq_f).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
# OK with filters
out = dd.read_parquet(pq_f, filters=[("a", "==", "B")]).compute()
assert set(df.b[df.a == "B"]) == set(out.b)
# Engine should not be set to 'pyarrow'
with pytest.raises(AssertionError):
out = dd.read_parquet(pq_f, engine="pyarrow")
@pytest.mark.parametrize("scheduler", ["threads", "processes"])
def test_to_parquet_lazy(tmpdir, scheduler, engine):
tmpdir = str(tmpdir)
df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [1.0, 2.0, 3.0, 4.0]})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=2)
value = ddf.to_parquet(tmpdir, compute=False, engine=engine)
assert hasattr(value, "dask")
value.compute(scheduler=scheduler)
assert os.path.exists(tmpdir)
ddf2 = dd.read_parquet(tmpdir, engine=engine)
assert_eq(ddf, ddf2)
@FASTPARQUET_MARK
def test_timestamp96(tmpdir):
fn = str(tmpdir)
df = pd.DataFrame({"a": [pd.to_datetime("now", utc=True)]})
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, write_index=False, times="int96")
pf = fastparquet.ParquetFile(fn)
assert pf._schema[1].type == fastparquet.parquet_thrift.Type.INT96
out = dd.read_parquet(fn, index=False).compute()
assert_eq(out, df)
@FASTPARQUET_MARK
def test_drill_scheme(tmpdir):
fn = str(tmpdir)
N = 5
df1 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
df2 = pd.DataFrame({c: np.random.random(N) for i, c in enumerate(["a", "b", "c"])})
files = []
for d in ["test_data1", "test_data2"]:
dn = os.path.join(fn, d)
if not os.path.exists(dn):
os.mkdir(dn)
files.append(os.path.join(dn, "data1.parq"))
fastparquet.write(files[0], df1)
fastparquet.write(files[1], df2)
df = dd.read_parquet(files)
assert "dir0" in df.columns
out = df.compute()
assert "dir0" in out
assert (np.unique(out.dir0) == ["test_data1", "test_data2"]).all()
def test_parquet_select_cats(tmpdir, engine):
fn = str(tmpdir)
df = pd.DataFrame(
{
"categories": pd.Series(
np.random.choice(["a", "b", "c", "d", "e", "f"], size=100),
dtype="category",
),
"ints": pd.Series(list(range(0, 100)), dtype="int"),
"floats": pd.Series(list(range(0, 100)), dtype="float"),
}
)
ddf = dd.from_pandas(df, 1)
ddf.to_parquet(fn, engine=engine)
rddf = dd.read_parquet(fn, columns=["ints"], engine=engine)
assert list(rddf.columns) == ["ints"]
rddf = dd.read_parquet(fn, engine=engine)
assert list(rddf.columns) == list(df)
def test_columns_name(tmpdir, engine):
if engine == "fastparquet" and fastparquet_version <= parse_version("0.3.1"):
pytest.skip("Fastparquet does not write column_indexes up to 0.3.1")
tmp_path = str(tmpdir)
df = pd.DataFrame({"A": [1, 2]}, index=pd.Index(["a", "b"], name="idx"))
df.columns.name = "cols"
ddf = dd.from_pandas(df, 2)
ddf.to_parquet(tmp_path, engine=engine)
result = dd.read_parquet(tmp_path, engine=engine, index=["idx"])
assert_eq(result, df)
def check_compression(engine, filename, compression):
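    # Verify on-disk compression by inspecting the written metadata: for fastparquet
    # only compressed vs. uncompressed sizes are compared, while for pyarrow the codec
    # name is also checked per column ("default" is expected to resolve to snappy).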
if engine == "fastparquet":
pf = fastparquet.ParquetFile(filename)
md = pf.fmd.row_groups[0].columns[0].meta_data
if compression is None:
assert md.total_compressed_size == md.total_uncompressed_size
else:
assert md.total_compressed_size != md.total_uncompressed_size
else:
metadata = pa.parquet.ParquetDataset(filename).metadata
names = metadata.schema.names
for i in range(metadata.num_row_groups):
row_group = metadata.row_group(i)
for j in range(len(names)):
column = row_group.column(j)
if compression is None:
assert (
column.total_compressed_size == column.total_uncompressed_size
)
else:
compress_expect = compression
if compression == "default":
compress_expect = "snappy"
assert compress_expect.lower() == column.compression.lower()
assert (
column.total_compressed_size != column.total_uncompressed_size
)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine)
out = dd.read_parquet(fn, engine=engine)
assert_eq(out, ddf)
check_compression(engine, fn, compression)
@pytest.mark.parametrize("compression,", ["default", None, "gzip", "snappy"])
def test_writing_parquet_with_partition_on_and_compression(tmpdir, compression, engine):
fn = str(tmpdir)
if compression in ["snappy", "default"]:
pytest.importorskip("snappy")
df = pd.DataFrame({"x": ["a", "b", "c"] * 10, "y": [1, 2, 3] * 10})
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
ddf.to_parquet(fn, compression=compression, engine=engine, partition_on=["x"])
check_compression(engine, fn, compression)
@pytest.fixture(
params=[
# fastparquet 0.1.3
{
"columns": [
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.7.1
{
"columns": [
{
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["idx"],
"pandas_version": "0.21.0",
},
# pyarrow 0.8.0
{
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "idx",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
},
# TODO: fastparquet update
]
)
def pandas_metadata(request):
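    # Parametrized fixture yielding example pandas-metadata dicts as written by the
    # fastparquet / pyarrow versions noted in the params list above.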
return request.param
def test_parse_pandas_metadata(pandas_metadata):
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(
pandas_metadata
)
assert index_names == ["idx"]
assert column_names == ["A"]
assert column_index_names == [None]
# for new pyarrow
if pandas_metadata["index_columns"] == ["__index_level_0__"]:
assert mapping == {"__index_level_0__": "idx", "A": "A"}
else:
assert mapping == {"idx": "idx", "A": "A"}
assert isinstance(mapping, dict)
def test_parse_pandas_metadata_null_index():
# pyarrow 0.7.1 None for index
e_index_names = [None]
e_column_names = ["x"]
e_mapping = {"__index_level_0__": None, "x": "x"}
e_column_index_names = [None]
md = {
"columns": [
{
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"metadata": None,
"name": "__index_level_0__",
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
# pyarrow 0.8.0 None for index
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "x",
"metadata": None,
"name": "x",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": None,
"numpy_type": "int64",
"pandas_type": "int64",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
index_names, column_names, mapping, column_index_names = _parse_pandas_metadata(md)
assert index_names == e_index_names
assert column_names == e_column_names
assert mapping == e_mapping
assert column_index_names == e_column_index_names
@PYARROW_MARK
def test_read_no_metadata(tmpdir, engine):
# use pyarrow.parquet to create a parquet file without
# pandas metadata
tmp = str(tmpdir) + "table.parq"
table = pa.Table.from_arrays(
[pa.array([1, 2, 3]), pa.array([3, 4, 5])], names=["A", "B"]
)
pq.write_table(table, tmp)
result = dd.read_parquet(tmp, engine=engine)
expected = pd.DataFrame({"A": [1, 2, 3], "B": [3, 4, 5]})
assert_eq(result, expected)
def test_parse_pandas_metadata_duplicate_index_columns():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_parse_pandas_metadata_column_with_index_name():
md = {
"column_indexes": [
{
"field_name": None,
"metadata": {"encoding": "UTF-8"},
"name": None,
"numpy_type": "object",
"pandas_type": "unicode",
}
],
"columns": [
{
"field_name": "A",
"metadata": None,
"name": "A",
"numpy_type": "int64",
"pandas_type": "int64",
},
{
"field_name": "__index_level_0__",
"metadata": None,
"name": "A",
"numpy_type": "object",
"pandas_type": "unicode",
},
],
"index_columns": ["__index_level_0__"],
"pandas_version": "0.21.0",
}
(
index_names,
column_names,
storage_name_mapping,
column_index_names,
) = _parse_pandas_metadata(md)
assert index_names == ["A"]
assert column_names == ["A"]
assert storage_name_mapping == {"__index_level_0__": "A", "A": "A"}
assert column_index_names == [None]
def test_writing_parquet_with_kwargs(tmpdir, engine):
fn = str(tmpdir)
path1 = os.path.join(fn, "normal")
path2 = os.path.join(fn, "partitioned")
pytest.importorskip("snappy")
df = pd.DataFrame(
{
"a": np.random.choice(["A", "B", "C"], size=100),
"b": np.random.random(size=100),
"c": np.random.randint(1, 5, size=100),
}
)
df.index.name = "index"
ddf = dd.from_pandas(df, npartitions=3)
engine_kwargs = {
"pyarrow-dataset": {
"compression": "snappy",
"coerce_timestamps": None,
"use_dictionary": True,
},
"fastparquet": {"compression": "snappy", "times": "int64", "fixed_text": None},
}
engine_kwargs["pyarrow-legacy"] = engine_kwargs["pyarrow-dataset"]
ddf.to_parquet(path1, engine=engine, **engine_kwargs[engine])
out = dd.read_parquet(path1, engine=engine)
assert_eq(out, ddf, check_index=(engine != "fastparquet"))
# Avoid race condition in pyarrow 0.8.0 on writing partitioned datasets
with dask.config.set(scheduler="sync"):
ddf.to_parquet(
path2, engine=engine, partition_on=["a"], **engine_kwargs[engine]
)
out = dd.read_parquet(path2, engine=engine).compute()
for val in df.a.unique():
assert set(df.b[df.a == val]) == set(out.b[out.a == val])
def test_writing_parquet_with_unknown_kwargs(tmpdir, engine):
fn = str(tmpdir)
with pytest.raises(TypeError):
ddf.to_parquet(fn, engine=engine, unknown_key="unknown_value")
@ANY_ENGINE_MARK
def test_to_parquet_with_get(tmpdir):
from dask.multiprocessing import get as mp_get
tmpdir = str(tmpdir)
flag = [False]
def my_get(*args, **kwargs):
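        # record that this custom scheduler was invoked, then delegate to dask.multiprocessing.get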
flag[0] = True
return mp_get(*args, **kwargs)
df = pd.DataFrame({"x": ["a", "b", "c", "d"], "y": [1, 2, 3, 4]})
ddf = dd.from_pandas(df, npartitions=2)
ddf.to_parquet(tmpdir, compute_kwargs={"scheduler": my_get})
assert flag[0]
result = dd.read_parquet(os.path.join(tmpdir, "*"))
assert_eq(result, df, check_index=False)
def test_select_partitioned_column(tmpdir, engine):
pytest.importorskip("snappy")
fn = str(tmpdir)
size = 20
d = {
"signal1": np.random.normal(0, 0.3, size=size).cumsum() + 50,
"fake_categorical1": np.random.choice(["A", "B", "C"], size=size),
"fake_categorical2": np.random.choice(["D", "E", "F"], size=size),
}
df = dd.from_pandas(
|
pd.DataFrame(d)
|
pandas.DataFrame
|
import itertools
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
datasets = [
'BPI11/f1/',
'BPI11/f2/',
'BPI11/f3/',
'BPI11/f4/',
'BPI15/f1/',
'BPI15/f2/',
'BPI15/f3/',
'Drift1/f1/',
'Drift2/f1/'
]
split_sizes = [
'0-40_80-100',
'0-60_80-100',
'0-80_80-100',
'40-60_80-100',
'60-80_80-100',
'40-55_80-100',
'0-55_80-100'
]
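# Maps each dataset and split-size key to the id of the corresponding experiment run;
# the repeated 1111 values look like placeholder ids for splits without results yet.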
splits = {
'BPI11/f1/': {
'0-40_80-100': 138,
'0-80_80-100': 139,
'40-80_80-100': 140,
},
'BPI11/f2/': {
'0-40_80-100': 141,
'0-80_80-100': 142,
'40-80_80-100': 143,
},
'BPI11/f3/': {
'0-40_80-100': 144,
'0-80_80-100': 145,
'40-80_80-100': 146,
},
'BPI11/f4/': {
'0-40_80-100': 147,
'0-80_80-100': 148,
'40-80_80-100': 149,
},
'BPI15/f1/': {
'0-40_80-100': 150,
'0-80_80-100': 151,
'40-80_80-100': 152,
},
'BPI15/f2/': {
'0-40_80-100': 153,
'0-80_80-100': 154,
'40-80_80-100': 155,
},
'BPI15/f3/': {
'0-40_80-100': 156,
'0-80_80-100': 157,
'40-80_80-100': 158,
},
'Drift1/f1/': {
'0-40_80-100': 159,
'0-80_80-100': 160,
'40-80_80-100': 161,
'40-60_80-100': 1111,
'0-60_80-100': 1111,
'40-55_80-100': 1111,
'0-55_80-100': 1111
},
'Drift2/f1/': {
'0-40_80-100': 162,
'0-80_80-100': 163,
'40-80_80-100': 164,
'40-60_80-100': 1111,
'0-60_80-100': 1111,
'40-55_80-100': 1111,
'0-55_80-100': 1111
}
}
# splits = {
# 'BPI11/f1/': {
# '0-40_80-100': 101,
# '0-80_80-100': 102,
# '40-80_80-100': 103,
# },
# 'BPI11/f2/': {
# '0-40_80-100': 104,
# '0-80_80-100': 105,
# '40-80_80-100': 106,
# },
# 'BPI11/f3/': {
# '0-40_80-100': 107,
# '0-80_80-100': 108,
# '40-80_80-100': 109,
# },
# 'BPI11/f4/': {
# '0-40_80-100': 110,
# '0-80_80-100': 111,
# '40-80_80-100': 112,
# },
# 'BPI15/f1/': {
# '0-40_80-100': 113,
# '0-80_80-100': 114,
# '40-80_80-100': 115,
# },
# 'BPI15/f2/': {
# '0-40_80-100': 116,
# '0-80_80-100': 117,
# '40-80_80-100': 118,
# },
# 'BPI15/f3/': {
# '0-40_80-100': 119,
# '0-80_80-100': 120,
# '40-80_80-100': 121,
# },
# 'Drift1/f1/': {
# '0-40_80-100': 122,
# '0-80_80-100': 123,
# '40-80_80-100': 124,
#
# '40-60_80-100': 1111,
# '0-60_80-100': 1111,
# '40-55_80-100': 1111,
# '0-55_80-100': 1111
# },
# 'Drift2/f1/': {
# '0-40_80-100': 125,
# '0-80_80-100': 126,
# '40-80_80-100': 127,
#
# '40-60_80-100': 1111,
# '0-60_80-100': 1111,
# '40-55_80-100': 1111,
# '0-55_80-100': 1111
# }
# }
# splits = {
# 'BPI11/f1/': {
# '0-40_80-100': 55,
# '0-80_80-100': 56,
# '40-80_80-100': 73,
# },
# 'BPI11/f2/': {
# '0-40_80-100': 57,
# '0-80_80-100': 58,
# '40-80_80-100': 74,
# },
# 'BPI11/f3/': {
# '0-40_80-100': 59,
# '0-80_80-100': 60,
# '40-80_80-100': 75,
# },
# 'BPI11/f4/': {
# '0-40_80-100': 61,
# '0-80_80-100': 62,
# '40-80_80-100': 76,
# },
# 'BPI15/f1/': {
# '0-40_80-100': 63,
# '0-80_80-100': 64,
# '40-80_80-100': 77,
# },
# 'BPI15/f2/': {
# '0-40_80-100': 65,
# '0-80_80-100': 66,
# '40-80_80-100': 78,
# },
# 'BPI15/f3/': {
# '0-40_80-100': 67,
# '0-80_80-100': 68,
# '40-80_80-100': 79,
# },
# 'Drift1/f1/': {
# '0-40_80-100': 69,
# '0-80_80-100': 70,
# '40-80_80-100': 80,
#
# '40-60_80-100': 1111,
# '0-60_80-100': 1111,
# '40-55_80-100': 1111,
# '0-55_80-100': 1111
# },
# 'Drift2/f1/': {
# '0-40_80-100': 90,#71,
# '0-80_80-100': 91,#72,
# '40-80_80-100': 92,#81,
#
# '40-60_80-100': 1111,
# '0-60_80-100': 1111,
# '40-55_80-100': 1111,
# '0-55_80-100': 1111
# }
# }
def get_row_metrics(table):
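    # Aggregate mean / std / max for each evaluation metric column of the results
    # table; elapsed time is parsed as a timedelta and additionally reports the min.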
curr_row = table['evaluation_f1_score']
f1_score_mean, f1_score_std, f1_score_max = curr_row.mean(), curr_row.std(), curr_row.max()
curr_row = table['evaluation_accuracy']
accuracy_mean, accuracy_std, accuracy_max = curr_row.mean(), curr_row.std(), curr_row.max()
curr_row = table['evaluation_precision']
precision_mean, precision_std, precision_max = curr_row.mean(), curr_row.std(), curr_row.max()
curr_row = table['evaluation_recall']
recall_mean, recall_std, recall_max = curr_row.mean(), curr_row.std(), curr_row.max()
curr_row = table['evaluation_auc']
auc_mean, auc_std, auc_max = curr_row.mean(), curr_row.std(), curr_row.max()
curr_row = pd.to_timedelta(table['evaluation_elapsed_time'])
elapsed_time_mean, elapsed_time_std, elapsed_time_max, elapsed_time_min = curr_row.mean(), curr_row.std(), curr_row.max(), curr_row.min()
return f1_score_mean, f1_score_std, f1_score_max, \
accuracy_mean, accuracy_std, accuracy_max, \
precision_mean, precision_std, precision_max, \
recall_mean, recall_std, recall_max, \
auc_mean, auc_std, auc_max, \
elapsed_time_mean, elapsed_time_std, elapsed_time_max, elapsed_time_min
def quantitative_scores(experiments_df_path='../DUMP_INCREMENTAL_hyper_last_train.csv', where_save='quantitative_scores.csv'):
|
pd.set_option("display.precision", 4)
|
pandas.set_option
|
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import requests
import time
from datetime import datetime
import pandas as pd
from urllib import parse
from config import ENV_VARIABLE
from os.path import getsize
fold_path = "./crawler_data/"
page_Max = 100
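# Each shop crawler below follows the same pattern: open the listing pages with a
# headless Chrome driver, walk the product cards via XPath, collect title / link /
# page_id / image / prices into dfAll, then call save() and upload() (assumed to be
# defined elsewhere in this script).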
def stripID(url, wantStrip):
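    # Return the part of url that follows the first occurrence of wantStrip,
    # e.g. (hypothetical) stripID("https://shop.example/?default=123", "default=") == "123"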
loc = url.find(wantStrip)
length = len(wantStrip)
return url[loc+length:]
def Kklee():
shop_id = 13
name = 'kklee'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.kklee.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[@class='Product-info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Wishbykorea():
shop_id = 14
name = 'wishbykorea'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.wishbykorea.com/collection-727&pgno=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
print(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div/div/label" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = page_link.replace("https://www.wishbykorea.com/collection-view-", "").replace("&ca=727", "")
find_href = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/a/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]/label" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='collection_item'][%i]/div[@class='collection_item_info']/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
if(sale_price == "0"):
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aspeed():
shop_id = 15
name = 'aspeed'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aspeed.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=72"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 73):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 73):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 73):
p += 1
continue
i += 1
if(i == 73):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Openlady():
shop_id = 17
name = 'openlady'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.openlady.tw/item.html?&id=157172&page=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@class='mymy_item_link']" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_name']/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("&id=", "")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_img']/a[@class='mymy_item_link']/img[@src]" % (i,)).get_attribute("src")
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item_block item_block_y'][%i]/div[@class='item_text']/p[@class='item_amount']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Azoom():
shop_id = 20
name = 'azoom'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if(close == 1):
chrome.quit()
break
url = "https://www.aroom1988.com/categories/view-all?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 24):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.strip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip('")')
except:
i += 1
if(i == 24):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-item'][%i]/product-item/a/div[2]/div/div/div" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 24):
p += 1
continue
i += 1
if(i == 24):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Roxy():
shop_id = 21
name = 'roxy'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.roxytaiwan.com.tw/new-collection?p=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 65):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a" % (i,)).text
page_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-thumb-info']/p[@class='product-title']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "default=")
except:
close += 1
break
try:
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]/div[@class='product-img']/a[@class='img-link']/picture[@class='main-picture']/img[@data-src]" % (i,)).get_attribute("data-src")
except:
i += 1
if(i == 65):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='special-price']//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='old-price']//span[@class='price-dollars']" % (i,)).text
ori_price = ori_price.replace('TWD', "")
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-container product-thumb'][%i]//span[@class='price-dollars']" % (i,)).text
sale_price = sale_price.replace('TWD', "")
ori_price = ""
except:
i += 1
if(i == 65):
p += 1
continue
i += 1
if(i == 65):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Shaxi():
shop_id = 22
name = 'shaxi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.shaxi.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cici():
shop_id = 23
name = 'cici'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Google bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll on page change
    dfAll = pd.DataFrame() # holds all collected data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cici2.tw/products?page=" + str(p)
try:
chrome.get(url)
except:
break
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Amesoeur():
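    """Scrape the all-products category on www.amesoeur.co (shop_id 25) into dfAll, then save() and upload()."""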
shop_id = 25
name = 'amesour'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.amesoeur.co/categories/%E5%85%A8%E9%83%A8%E5%95%86%E5%93%81?page=" + \
str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('product-id')
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Singular():
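    """Scrape www.singular-official.com (shop_id 27) using offset-based pagination (50 items per request), then save() and upload()."""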
shop_id = 27
name = 'singular'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.singular-official.com/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>1ca3'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
ori_price = ori_price.split()
ori_price = ori_price[0]
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Folie():
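    """Scrape product listings from www.folief.com (shop_id 28), 24 items per page, then save() and upload()."""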
shop_id = 28
name = 'folie'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.folief.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Corban():
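    """Scrape www.corban.com.tw (shop_id 29) using offset-based pagination (50 items per request), then save() and upload()."""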
shop_id = 29
name = 'corban'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
i = 1
offset = (p-1) * 50
url = "https://www.corban.com.tw/products?limit=50&offset=" + \
str(offset) + "&price=0%2C10000&sort=createdAt-desc&tags=ALL%20ITEMS"
try:
chrome.get(url)
except:
break
while(i < 51):
try:
title = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='rmq-3ab81ca3'][%i]//a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>'][%i]//img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='rm<PASSWORD>3'][%i]/div[3]/div[1]/span/s" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
i += 1
if(i == 51):
p += 1
continue
i += 1
if(i == 51):
p += 1
chrome.find_element_by_tag_name('body').send_keys(Keys.PAGE_DOWN)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Gmorning():
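    """Scrape product listings from www.gmorning.co (shop_id 30), 24 items per page, then save() and upload()."""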
shop_id = 30
name = 'gmorning'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.gmorning.co/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def July():
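    """Scrape product listings from www.july2017.co (shop_id 31) into dfAll, then save() and upload()."""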
shop_id = 31
name = 'july'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.july2017.co/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Per():
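    """Scrape the all-products category on www.perdot.com.tw (shop_id 32), then save() and upload()."""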
shop_id = 32
name = 'per'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.perdot.com.tw/categories/all?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cereal():
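    """Scrape the WooCommerce 'new' pages on www.cerealoutfit.com (shop_id 33), then save() and upload()."""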
shop_id = 33
name = 'cereal'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cerealoutfit.com/new/page/" + str(p) + "/"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
try:
chrome.find_element_by_xpath(
"//button[@class='mfp-close']").click()
except:
pass
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/h3/a" % (i,)).text
if(title == ""):
i += 1
if(i == 25):
p += 1
continue
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@data-loop='%i']" % (i,)).get_attribute('126-id')
pic_link = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[1]/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//ins//bdi" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']//del//bdi" % (i,)).text
ori_price = ori_price.rstrip(' NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@data-loop='%i']/div[2]//span[@class='woocommerce-Price-amount amount']" % (i,)).text
sale_price = sale_price.rstrip(' NT$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Jcjc():
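    """Scrape the in-stock collection on www.jcjc-dailywear.com (shop_id 35), then save() and upload()."""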
shop_id = 35
name = 'jcjc'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.jcjc-dailywear.com/collections/in-stock?limit=24&page=" + \
str(p) + "&sort=featured"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a[1][@href]" % (i,)).get_attribute('href')
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/span/a/img" % (i,)).get_attribute('src')
page_id = pic_link[pic_link.find("i/")+2:pic_link.find(".j")]
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/s/span" % (i,)).text
ori_price = ori_price.strip('NT$ ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-uniform grid-link__container']/div[%i]/div/a/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$ ')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Ccshop():
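    """Scrape product listings from www.ccjshop.com (shop_id 36) into dfAll, then save() and upload()."""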
shop_id = 36
name = 'ccshop'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.ccjshop.com/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Iris():
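    """Scrape product listings from www.irisgarden.com.tw (shop_id 37) into dfAll, then save() and upload()."""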
shop_id = 37
name = 'iris'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.irisgarden.com.tw/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='boxify-item product-item ng-isolate-scope'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nook():
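    """Scrape product listings from www.nooknook.me (shop_id 39) into dfAll, then save() and upload()."""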
shop_id = 39
name = 'nook'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.nooknook.me/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Greenpea():
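    """Scrape product listings from www.greenpea-tw.com (shop_id 40) into dfAll, then save() and upload()."""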
shop_id = 40
name = 'greenpea'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.greenpea-tw.com/products?page=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//li[%i]/a/div[1]/div" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 25):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[3]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/a/div[2]/div/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 25):
                        p += 1
                    continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Queen():
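    """Scrape the product list on www.queenshop.com.tw (shop_id 42), 16 items per page, then save() and upload()."""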
shop_id = 42
name = 'queen'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.queenshop.com.tw/zh-TW/QueenShop/ProductList?item1=01&item2=all&Page=" + \
str(p) + "&View=4"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "SaleID=")
pic_link = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/a/img[1]" % (i,)).get_attribute('data-src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul[@class='items-list list-array-4']/li[%i]/p[2]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Cozyfee():
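    """Scrape category 55 on www.cozyfee.com (shop_id 48), 40 items per page, then save() and upload()."""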
shop_id = 48
name = 'cozyfee'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.cozyfee.com/product.php?page=" + \
str(p) + "&cid=55#prod_list"
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div[1]/a/img[1]" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[3]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Reishop():
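    """Scrape the product list (pdlist2.asp) on www.reishop.com.tw (shop_id 49), 30 items per page, then save() and upload()."""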
shop_id = 49
name = 'reishop'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.reishop.com.tw/pdlist2.asp?item1=all&item2=&item3=&keyword=&ob=A&pagex=&pageno=" + \
str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 31):
try:
title = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("yano=YA")
page_id = page_id.replace("&color=", "")
pic_link = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span/img[1]" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//figcaption[%i]/a/span[2]/span[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 31):
p += 1
continue
i += 1
if(i == 31):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Yourz():
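    """Scrape product category 34 on www.yourz.com.tw (shop_id 50), 12 items per page, then save() and upload()."""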
shop_id = 50
name = 'yourz'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.yourz.com.tw/product/category/34/1/" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 13):
try:
title = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/table/tbody/tr/td/div/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
pic_link = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/font" % (i,)).text
sale_price = sale_price.replace('VIP價:NT$ ', '')
sale_price = sale_price.rstrip('元')
ori_price = chrome.find_element_by_xpath(
"//div[@class='pro_list'][%i]/div[4]/p/br" % (i,)).text
ori_price = ori_price.replace('NT$ ', '')
ori_price = ori_price.rstrip('元')
except:
i += 1
if(i == 13):
p += 1
continue
i += 1
if(i == 13):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Seoulmate():
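    """Scrape catalog m=115/s=249 on www.seoulmate.com.tw (shop_id 54), 32 items per page, then save() and upload()."""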
shop_id = 54
name = 'seoulmate'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.seoulmate.com.tw/catalog.php?m=115&s=249&t=0&sort=&page=" + \
str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 33):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/p[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//ul/li[%i]/p[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=115&s=249&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//ul/li[%i]/a/img[1]" % (i,)).get_attribute('src')
if(pic_link == ""):
i += 1
if(i == 33):
p += 1
continue
except:
i += 1
if(i == 33):
p += 1
continue
try:
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
locate = sale_price.find("NT.")
sale_price = sale_price[locate+3:len(sale_price)]
except:
try:
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/p[3]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 33):
p += 1
continue
i += 1
if(i == 33):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sweesa():
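    """Scrape the item list (m=20) on www.sweesa.com (shop_id 55), then save() and upload()."""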
shop_id = 55
name = 'sweesa'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sweesa.com/Shop/itemList.aspx?&m=20&o=5&sa=1&smfp=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 45):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=20", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('TWD.')
ori_price = ""
except:
i += 1
if(i == 45):
p += 1
continue
i += 1
if(i == 45):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pazzo():
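    """Scrape the 'recent' listing on www.pazzo.com.tw (shop_id 56), 40 items per page, then save() and upload()."""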
shop_id = 56
name = 'pazzo'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pazzo.com.tw/recent?P=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("c=")
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div[@class='item__images']/a/picture/img[@class='img-fluid']" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[2]/p[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Meierq():
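    """Scrape several category listings (bottoms, jewelry, outerwear, accessories) on www.meierq.com (shop_id 57), then save() and upload()."""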
shop_id = 57
name = 'meierq'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
    p = 1  # page counter referenced further down; without this it is undefined here
close = 0
page = 0
prefix_urls = [
"https://www.meierq.com/zh-tw/category/bottomclothing?P=",
"https://www.meierq.com/zh-tw/category/jewelry?P=",
"https://www.meierq.com/zh-tw/category/outerclothing?P=",
"https://www.meierq.com/zh-tw/category/accessories?P=",
]
for prefix in prefix_urls:
page += 1
for i in range(1, page_Max):
url = f"{prefix}{i}"
try:
print(url)
chrome.get(url)
chrome.find_element_by_xpath("//div[@class='items__image']")
except:
print("find_element_by_xpath_break", page)
if(page == 4):
chrome.quit()
print("break")
break
break
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div/p/a" % (i,)).text
except:
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div/p/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "n/")
page_id = page_id[:page_id.find("?c")]
pic_link = chrome.find_element_by_xpath(
"//li[%i]/div/img" % (i,)).get_attribute('src')
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div/p/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Harper():
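    """Scrape the item list (m=13) on www.harper.com.tw (shop_id 58), capped at 20 pages, then save() and upload()."""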
shop_id = 58
name = 'harper'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
while True:
url = "https://www.harper.com.tw/Shop/itemList.aspx?&m=13&smfp=" + \
str(p)
if(p > 20):
chrome.quit()
break
try:
chrome.get(url)
except:
chrome.quit()
break
i = 1
while(i < 80):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).text
except:
p += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]/a" % (i,)).get_attribute('href')
page_id = stripID(page_link, "cno=")
page_id = page_id.replace("&m=13", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 79):
p += 1
continue
i += 1
if(i == 79):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lurehsu():
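    """Scrape the product list on www.lurehsu.com (shop_id 59), then save() and upload()."""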
shop_id = 59
name = 'lurehsu'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lurehsu.com/zh-TW/lure/productList?item1=00&item2=16&page=" + \
str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
i = 1
while(i < 28):
try:
title = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("SaleID=")
page_id = page_id[:page_id.find("&Color")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 28):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NTD.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='grid-item'][%i]/a/div[2]/div/p" % (i,)).text
sale_price = sale_price.strip('NTD.')
ori_price = ""
except:
i += 1
if(i == 28):
p += 1
continue
i += 1
if(i == 28):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pufii():
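    """Scrape the item list (m=6) on www.pufii.com.tw (shop_id 61), then save() and upload()."""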
shop_id = 61
name = 'pufii'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pufii.com.tw/Shop/itemList.aspx?&m=6&smfp=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=P", "")
page_id = page_id.replace("&m=6", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[2]" % (i,)).text
sale_price = sale_price.strip('活動價NT')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
ori_price = ori_price.strip('NT')
except:
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[@class='pricediv']/span[1]" % (i,)).text
sale_price = sale_price.strip('NT')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mouggan():
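    """Scrape the ALL-ITEM category on www.mouggan.com (shop_id 62), then save() and upload()."""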
shop_id = 62
name = 'mouggan'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mouggan.com/zh-tw/category/ALL-ITEM?P=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
try:
chrome.find_element_by_xpath(
"//a[@class='close p-0']/i[@class='icon-popup-close']").click()
except:
pass
i = 1
while(i < 19):
try:
title = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[1]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 19):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[2]/div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 19):
p += 1
continue
i += 1
if(i == 19):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Mercci():
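    """Scrape the HOTTEST tag listing on www.mercci22.com (shop_id 64), then save() and upload()."""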
shop_id = 64
name = 'mercci'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.mercci22.com/zh-tw/tag/HOTTEST?P=" + str(p)
        # if the page is past the last one (not found), break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
# chrome.find_element_by_xpath("//a[@class='close p-0']/i[@class='icon-popup-close']").click()
i = 1
while(i < 41):
try:
title = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='pdname']/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "c=")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a[@class='items__image js-loaded']/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 41):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']" % (i,)).text
sale_price = sale_price.strip('NT.')
k = sale_price.find("NT.")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/div[@class='price']/span" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[%i]/div[@class='items__info']/p[@class='price']/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
i += 1
if(i == 41):
p += 1
continue
i += 1
if(i == 41):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Sivir():
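    """Scrape the new-all collection on www.sivir.com.tw (shop_id 65), then save() and upload()."""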
shop_id = 65
name = 'sivir'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.sivir.com.tw/collections/new-all-%E6%89%80%E6%9C%89?page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[2]/a[@data-id]" % (i,)).get_attribute('data-id')
pic_link = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[1]/a/img" % (i,)).get_attribute('data-src')
pic_link = f"https:{pic_link}"
sale_price = chrome.find_element_by_xpath(
"//div[@class='product col-lg-3 col-sm-4 col-6'][%i]/div[4]/span" % (i,)).text
sale_price = sale_price.replace('NT$', '')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Nana():
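    """Scrape category 1 on www.2nana.tw (shop_id 66), then save() and upload()."""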
shop_id = 66
name = 'nana'
    options = Options()  # enable headless mode
    options.add_argument('--headless')  # work around a Chrome bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame()  # holds the current page's rows; merged into dfAll when moving to the next page
    dfAll = pd.DataFrame()  # accumulates all scraped rows
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.2nana.tw/product.php?page=" + \
str(p) + "&cid=1#prod_list"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 75):
try:
title = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[1]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("action=detail&pid=")
pic_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[1]/a/img" % (i,)).get_attribute('data-original')
sale_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='col-xs-6 col-sm-4 col-md-3'][%i]/div/div[2]/div[2]/del" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 75):
p += 1
continue
i += 1
if(i == 75):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Aachic():
shop_id = 70
name = 'aachic'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.aachic.com/categories/all-%E6%89%80%E6%9C%89%E5%95%86%E5%93%81?page=" + \
str(p) + "&sort_by=&order_by=&limit=24"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='col-xs-12 ProductList-list']/a[%i][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//a[%i]/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[2]" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = chrome.find_element_by_xpath(
"//a[%i]/div[2]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Lovso():
shop_id = 71
name = 'lovso'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.lovso.com.tw/Shop/itemList.aspx?m=8&o=0&sa=0&smfp=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/center/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&m=8", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[4]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Bowwow():
shop_id = 72
name = 'bowwow'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.bowwowkorea.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div" % (i,)).text
sale_price = sale_price.strip('NT$')
sale_price = sale_price.split()
sale_price = sale_price[0]
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Suitangtang():
shop_id = 74
name = 'suitangtang'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
i = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.suitangtang.com/Catalog/WOMAN"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
while(True):
try:
title = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[@class='name']" % (i,)).text
k = title.find("NT$")
title = title[0:k-1]
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a[@href]" % (i,)).get_attribute('href')
page_id = stripID(page_link, "/Product/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/a/img" % (i,)).get_attribute('data-original')
except:
i += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
k = sale_price.find("NT$")
sale_price = sale_price[k+3:len(sale_price)]
ori_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span/span" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='product-list'][%i]/div[2]/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
continue
i += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Chochobee():
shop_id = 78
name = 'chochobee'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.chochobee.com/catalog.php?m=40&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=40&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Asobi():
shop_id = 80
name = 'asobi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.asobi.com.tw/Shop/itemList.aspx?undefined&smfp=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 34):
try:
title = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[1]/a" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("mNo1=", "")
page_id = page_id.replace("&&m=1&o=5&sa=1", "")
pic_link = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]//a/img[@src]" % (i,)).get_attribute("src")
sale_price = chrome.find_element_by_xpath(
"//div[@class='itemListDiv'][%i]/div[3]/div/span" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 34):
p += 1
continue
i += 1
if(i == 34):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Kiyumi():
shop_id = 81
name = 'kiyumi'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.kiyumishop.com/catalog.php?m=73&s=0&t=0&sort=&page=" + \
str(p)
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[2]" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=73&s=0&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//section/ul/li[%i]/a/div/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//section/ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = ""
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Genquo():
shop_id = 82
name = 'genquo'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.genquo.com/zh-tw/category/women?P=" + str(p)
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a" % (i,)).text
except:
flag += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path + '?' + make_id.query
page_id = page_id.lstrip("/zh-tw/market/n/")
page_id = page_id[:page_id.find("?c=")]
pic_link = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/a/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = ""
except:
try:
sale_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[2]" % (i,)).text
sale_price = sale_price.strip('NT.')
ori_price = chrome.find_element_by_xpath(
"//li[@class='item'][%i]/div/p/span[1]" % (i,)).text
ori_price = ori_price.strip('NT.')
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Oolala():
shop_id = 86
name = 'oolala'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
flag = 0
while True:
if (flag == 1):
chrome.quit()
break
url = "https://www.styleoolala.com/products?page=" + \
str(p) + "&sort_by=&order_by=&limit=48"
print("處理頁面:", url)
# 如果頁面超過(找不到),直接印出completed然後break跳出迴圈
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 49):
try:
title = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[2]/div/div[1]" % (i,)).text
except:
flag += 1
print(p, i)
break
try:
page_link = chrome.find_element_by_xpath(
"//div[%i]/product-item/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/products/")
find_href = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div[1]/div[1]" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.lstrip('url("').rstrip(')"')
except:
i += 1
if(i == 49):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[%i]/product-item/a/div/div/div[2]/div[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 49):
p += 1
continue
i += 1
if(i == 49):
p += 1
if(sale_price == ""):
continue
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pattis():
shop_id = 87
name = 'pattis'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.i-pattis.com/catalog.php?m=1&s=21&t=0&sort=&page=" + \
str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 25):
try:
title = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/span[2]" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//section[@class='cataList']/ul/li[%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.replace("m=1&s=21&t=0&id=", "")
pic_link = chrome.find_element_by_xpath(
"//li[%i]/a/img" % (i,)).get_attribute('src')
sale_price = chrome.find_element_by_xpath(
"//ul/li[%i]/span[3]" % (i,)).text
sale_price = sale_price.strip('NT.$')
ori_price = chrome.find_element_by_xpath(
"//ul/li[%i]/del" % (i,)).text
ori_price = ori_price.strip('NT.$')
except:
i += 1
if(i == 25):
p += 1
continue
i += 1
if(i == 25):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Scheminggg():
shop_id = 90
name = 'scheminggg'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.scheminggg.com/productlist?page=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
i = 1
while(i < 37):
try:
title = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/p" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a[1][@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.query
page_id = page_id.lstrip("/products?saleid=")
page_id = page_id.rstrip("&colorid=")
pic_link = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/a/img" % (i,)).get_attribute('src')
if (pic_link == ""):
i += 1
if(i == 37):
p += 1
continue
except:
i += 1
if(i == 37):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[2]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
ori_price = ori_price.strip('NT. ')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@class='columns']/div[%i]/p[1]" % (i,)).text
sale_price = sale_price.strip('NT. ')
ori_price = ""
except:
i += 1
if(i == 37):
p += 1
continue
i += 1
if(i == 37):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Laconic():
shop_id = 94
name = 'laconic'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
i = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://laconic.waca.ec/product/all"
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
time.sleep(1)
while(True):
try:
title = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//h4" % (i,)).text
except:
close += 1
# print(i, "title")
break
try:
page_link = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]/a[@href]" % (i,)).get_attribute('href')
make_id = parse.urlsplit(page_link)
page_id = make_id.path
page_id = page_id.lstrip("/product/detail/")
find_href = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//a/span" % (i,))
bg_url = find_href.value_of_css_property('background-image')
pic_link = bg_url.replace('url("', '')
pic_link = pic_link.replace('")', '')
sale_price = chrome.find_element_by_xpath(
"//li[@class=' item_block js_is_photo_style '][%i]//li/span" % (i,)).text
sale_price = sale_price.strip('$')
ori_price = ""
except:
i += 1
if(i % 10 == 1):
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
continue
i += 1
if(i % 10 == 1):
chrome.find_element_by_tag_name('body').send_keys(Keys.END)
time.sleep(1)
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Pixelcake():
shop_id = 96
name = 'pixelcake'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
    dfAll = pd.DataFrame() # stores all scraped data
close = 0
while True:
if (close == 1):
chrome.quit()
break
url = "https://www.pixelcake.com.tw/zh-tw/category/ALL?P=" + str(p)
        # if the page is out of range (not found), print completed and break out of the loop
try:
chrome.get(url)
except:
break
try:
chrome.find_element_by_xpath(
"//button[@class='aiq-2-w6Qa']").click()
chrome.find_element_by_xpath(
"//i[@class='icon-popup-close']").click()
except:
pass
time.sleep(1)
i = 1
while(i < 17):
try:
title = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/a" % (i,)).text
except:
close += 1
break
try:
page_link = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']/div[1]/div[%i]/div[1]/a[@href]" % (i,)).get_attribute('href')
page_id = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']/div[1]/div[%i]/div[2]//div[@class='like-counter ']" % (i,)).get_attribute('data-custommarketid')
pic_link = chrome.find_element_by_xpath(
"//div[%i]/div[1]/a/picture/img" % (i,)).get_attribute('src')
except:
i += 1
if(i == 17):
p += 1
continue
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = chrome.find_element_by_xpath(
"//div[%i]/div[5]/div[2]/div[1]/span[2]" % (i,)).text
ori_price = ori_price.strip('NT$')
except:
try:
sale_price = chrome.find_element_by_xpath(
"//div[@id='category-item-wrap']//div[%i]/div[2]/div[1]/span[1]" % (i,)).text
sale_price = sale_price.strip('NT$')
ori_price = ""
except:
i += 1
if(i == 17):
p += 1
continue
i += 1
if(i == 17):
p += 1
df = pd.DataFrame(
{
"title": [title],
"page_link": [page_link],
"page_id": [page_id],
"pic_link": [pic_link],
"ori_price": [ori_price],
"sale_price": [sale_price]
})
dfAll = pd.concat([dfAll, df])
dfAll = dfAll.reset_index(drop=True)
save(shop_id, name, dfAll)
upload(shop_id, name)
def Miyuki():
shop_id = 97
name = 'miyuki'
    options = Options() # enable headless mode
    options.add_argument('--headless') # work around a Chrome headless bug
options.add_argument('--disable-gpu')
options.add_argument('--ignore-certificate-errors')
options.add_argument('--no-sandbox')
options.add_argument('--disable-dev-shm-usage')
options.add_argument("--remote-debugging-port=5566")
chrome = webdriver.Chrome(
executable_path='./chromedriver', chrome_options=options)
p = 1
    df = pd.DataFrame() # holds the current page's data; merged into dfAll when the page changes
dfAll =
|
pd.DataFrame()
|
pandas.DataFrame
|
import geopandas as gpd
import pandas as pd
from shapely.geometry import Polygon,Point
import math
import numpy as np
def rect_grids(bounds,accuracy = 500):
'''
Generate the rectangular grids in the bounds
Parameters
-------
bounds : List
Create the bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Grid size (meter)
Returns
-------
grid : GeoDataFrame
Grids’ GeoDataFrame, LONCOL and LATCOL are the index of grids, HBLON and HBLAT are the center of the grids
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360));
deltaLat = accuracy * 360 / (2 * math.pi * 6371004);
data = gpd.GeoDataFrame()
LONCOL_list = []
LATCOL_list = []
geometry_list = []
HBLON_list = []
HBLAT_list = []
lonsnum = int((lon2-lon1)/deltaLon)+1
latsnum = int((lat2-lat1)/deltaLat)+1
for i in range(lonsnum):
for j in range(latsnum):
HBLON = i*deltaLon + lonStart
HBLAT = j*deltaLat + latStart
HBLON_1 = (i+1)*deltaLon + lonStart
HBLAT_1 = (j+1)*deltaLat + latStart
grid_ij = Polygon([
(HBLON-deltaLon/2,HBLAT-deltaLat/2),
(HBLON_1-deltaLon/2,HBLAT-deltaLat/2),
(HBLON_1-deltaLon/2,HBLAT_1-deltaLat/2),
(HBLON-deltaLon/2,HBLAT_1-deltaLat/2)])
LONCOL_list.append(i)
LATCOL_list.append(j)
HBLON_list.append(HBLON)
HBLAT_list.append(HBLAT)
geometry_list.append(grid_ij)
data['LONCOL'] = LONCOL_list
data['LATCOL'] = LATCOL_list
data['HBLON'] = HBLON_list
data['HBLAT'] = HBLAT_list
data['geometry'] = geometry_list
params = (lonStart,latStart,deltaLon,deltaLat)
return data,params
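# Illustrative usage sketch (not part of the original module); the bounds are
# made-up WGS84 coordinates and any [lon1, lat1, lon2, lat2] box works.
def _example_rect_grids():
    bounds = [113.6, 22.4, 114.8, 22.9]
    grid, params = rect_grids(bounds, accuracy=500)
    # grid is a GeoDataFrame of square cells; params = (lonStart, latStart,
    # deltaLon, deltaLat) is reused by GPS_to_grids/grids_centre below.
    return grid, params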
def grid_params(bounds,accuracy = 500):
'''
Generate gridding params
Parameters
-------
bounds : List
Bounds of the study area, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Grid size (meter)
Returns
-------
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Examples
-------
>>> import transbigdata as tbd
>>> bounds = [113.6,22.4,114.8,22.9]
>>> tbd.grid_params(bounds,accuracy = 500)
(113.6, 22.4, 0.004872390756896538, 0.004496605206422906)
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
deltaLon = accuracy * 360 / (2 * math.pi * 6371004 * math.cos((lat1 + lat2) * math.pi / 360));
deltaLat = accuracy * 360 / (2 * math.pi * 6371004);
return (lonStart,latStart,deltaLon,deltaLat)
def GPS_to_grids(lon,lat,params):
'''
Match the GPS data to the grids. The input is the columns of longitude, latitude, and the grids parameter. The output is the grid ID.
Parameters
-------
lon : Series
The column of longitude
lat : Series
The column of latitude
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
'''
(lonStart,latStart,deltaLon,deltaLat) = params
import numpy as np
loncol = np.floor(((lon - (lonStart - deltaLon / 2))/deltaLon)).astype('int')
latcol = np.floor(((lat - (latStart - deltaLat / 2))/deltaLat)).astype('int')
return loncol,latcol
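# Illustrative sketch (assumes a hypothetical DataFrame `df` with 'lon'/'lat'
# columns; the column names are not taken from this module).
def _example_gps_to_grids(df, params):
    df = df.copy()
    df['LONCOL'], df['LATCOL'] = GPS_to_grids(df['lon'], df['lat'], params)
    return df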
def grids_centre(loncol,latcol,params):
'''
The center location of the grid. The input is the grid ID and parameters, the output is the grid center location.
Parameters
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
HBLON : Series
The longitude of the grid center
HBLAT : Series
The latitude of the grid center
'''
(lonStart,latStart,deltaLon,deltaLat) = params
hblon = loncol*deltaLon + lonStart
hblat = latcol*deltaLat + latStart
return hblon,hblat
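# Round-trip sketch: snap points to the centre of the grid cell they fall in
# (same hypothetical 'lon'/'lat' columns as in the sketch above).
def _example_snap_to_grid_centre(df, params):
    df = df.copy()
    loncol, latcol = GPS_to_grids(df['lon'], df['lat'], params)
    df['HBLON'], df['HBLAT'] = grids_centre(loncol, latcol, params)
    return df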
def gridid_to_polygon(loncol,latcol,params):
'''
Generate the geometry column based on the grid ID. The input is the grid ID, the output is the geometry.
Parameters
-------
LONCOL : Series
The index of the grid longitude. The two columns LONCOL and LATCOL together can specify a grid.
LATCOL : Series
The index of the grid latitude. The two columns LONCOL and LATCOL together can specify a grid.
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
Returns
-------
geometry : Series
The column of grid geographic polygon
'''
(lonStart,latStart,deltaLon,deltaLat) = params
HBLON = loncol*deltaLon + lonStart
HBLAT = latcol*deltaLat + latStart
HBLON_1 = (loncol+1)*deltaLon + lonStart
HBLAT_1 = (latcol+1)*deltaLat + latStart
df = pd.DataFrame()
df['HBLON'] = HBLON
df['HBLAT'] = HBLAT
df['HBLON_1'] = HBLON_1
df['HBLAT_1'] = HBLAT_1
return df.apply(lambda r:Polygon([
(r['HBLON']-deltaLon/2,r['HBLAT']-deltaLat/2),
(r['HBLON_1']-deltaLon/2,r['HBLAT']-deltaLat/2),
(r['HBLON_1']-deltaLon/2,r['HBLAT_1']-deltaLat/2),
(r['HBLON']-deltaLon/2,r['HBLAT_1']-deltaLat/2)]),axis = 1)
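# Sketch: attach polygons to a table that already carries LONCOL/LATCOL (for
# example aggregated counts per grid cell), so the result can be mapped.
def _example_grid_polygons(agg, params):
    agg = gpd.GeoDataFrame(agg.copy())
    agg['geometry'] = gridid_to_polygon(agg['LONCOL'], agg['LATCOL'], params)
    return agg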
def hexagon_grids(bounds,accuracy = 500):
'''
Generate hexagonal grids in the bounds
Parameters
-------
bounds : List
Create the bounds, [lon1, lat1, lon2, lat2](WGS84), where lon1 , lat1 are the lower-left coordinates, lon2 , lat2 are the upper-right coordinates
accuracy : number
Side length of hexagon (m)
Returns
-------
hexagon : GeoDataFrame
hexagon grid’s geographic polygon
'''
lon1,lat1,lon2,lat2 = bounds
if (lon1>lon2)|(lat1>lat2)|(abs(lat1)>90)|(abs(lon1)>180)|(abs(lat2)>90)|(abs(lon2)>180):
raise Exception('Bounds error. The input bounds should be in the order of [lon1,lat1,lon2,lat2]. (lon1,lat1) is the lower left corner and (lon2,lat2) is the upper right corner.')
latStart = min(lat1, lat2);
lonStart = min(lon1, lon2);
latEnd = max(lat1, lat2);
lonEnd = max(lon1, lon2);
origin = gpd.GeoDataFrame([Point(lonStart,latStart),Point(lonEnd,latEnd)],columns = ['geometry'])
origin.crs = {'init':'epsg:4326'}
origin = origin.to_crs(epsg = 3857)
x_o = origin['geometry'].iloc[0].x
y_o = origin['geometry'].iloc[0].y
x_d = origin['geometry'].iloc[1].x
y_d = origin['geometry'].iloc[1].y
lonsnum = (x_d-x_o)/accuracy
latsnum = (y_d-y_o)/accuracy
#1
xs = np.arange(0,lonsnum,3)
ys = np.arange(0,latsnum,2*(3/4)**0.5)
xs = pd.DataFrame(xs,columns = ['x'])
xs['tmp'] = 1
ys = pd.DataFrame(ys,columns = ['y'])
ys['tmp'] = 1
df1 = pd.merge(xs,ys)
#2
xs = np.arange(1.5,lonsnum,3)
ys = np.arange((3/4)**0.5,latsnum,2*(3/4)**0.5)
xs = pd.DataFrame(xs,columns = ['x'])
xs['tmp'] = 1
ys = pd.DataFrame(ys,columns = ['y'])
ys['tmp'] = 1
df2 = pd.merge(xs,ys)
df = pd.concat([df1,df2])
df['x'],df['y'] = x_o+df['x']*accuracy,y_o+df['y']*accuracy
def get_hexagon(x,y,accuracy):
return Polygon([(x-accuracy,y),
(x-accuracy/2,y+accuracy*(3/4)**0.5),
(x+accuracy/2,y+accuracy*(3/4)**0.5),
(x+accuracy,y),
(x+accuracy/2,y-accuracy*(3/4)**0.5),
(x-accuracy/2,y-accuracy*(3/4)**0.5),
(x-accuracy,y)
])
df['geometry'] = df.apply(lambda r:get_hexagon(r['x'],r['y'],accuracy),axis = 1)
df = gpd.GeoDataFrame(df)
df.crs = {'init':'epsg:3857'}
df = df.to_crs(epsg = 4326)
df = df[['geometry']]
df['ID'] = range(len(df))
return df
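# Illustrative sketch: hexagonal grid over the same made-up bounds as above,
# with a 500 m side length.
def _example_hexagon_grids():
    bounds = [113.6, 22.4, 114.8, 22.9]
    hexagon = hexagon_grids(bounds, accuracy=500)
    # hexagon is a GeoDataFrame with a 'geometry' polygon and an integer 'ID'
    return hexagon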
def gridid_sjoin_shape(data,shape,params,col = ['LONCOL','LATCOL']):
'''
    Input the two columns of grid ID, the geographic polygon and the gridding parameters. The output is the gridded data joined to the polygon.
Parameters
-------
data : DataFrame
Data, with two columns of grid ID
shape : GeoDataFrame
Geographic polygon
params : List
Gridding parameters (lonStart,latStart,deltaLon,deltaLat), lonStart and latStart are the lower-left coordinates, deltaLon, deltaLat are the length and width of a single grid
col : List
Column names [LONCOL,LATCOL]
Returns
-------
data1 : DataFrame
Data gridding and mapping to the corresponding geographic polygon
'''
LONCOL,LATCOL = col
data1 = data.copy()
data1 = gpd.GeoDataFrame(data1)
data1['geometry'] = gridid_to_polygon(data1[LONCOL],data1[LATCOL],params)
data1 = gpd.sjoin(data1,shape)
return data1
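# End-to-end sketch (hypothetical 'lon'/'lat' point table and a study-area
# GeoDataFrame `shape`): count points per grid cell, then keep only the cells
# that intersect the study area.
def _example_points_per_grid(df, shape, params):
    df = df.copy()
    df['LONCOL'], df['LATCOL'] = GPS_to_grids(df['lon'], df['lat'], params)
    counts = df.groupby(['LONCOL', 'LATCOL']).size().rename('count').reset_index()
    return gridid_sjoin_shape(counts, shape, params)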
def grid_params_gini(data,col = ['lon','lat'],accuracy = 500,gini = 'max',gap = 10,sample = 10000):
'''
    Obtain the best gridding parameters
Parameters
----------
data : DataFrame
data
col : List
Column names [lon,lat]
accuracy : number
Size of the grids
    gini : str
        'min', 'max', or 'median'
gap : number
the step of the algorithm
sample : number
        maximum number of rows to sample
Returns
----------
params : List
calculated params
'''
trajdata = data.copy()
if len(trajdata)>sample:
trajdata = trajdata.sample(sample)
lon,lat = col
lon1 = trajdata[lon].mean()
lat1 = trajdata[lat].mean()
bounds = [lon1,lat1,lon1,lat1]
params = grid_params(bounds,accuracy = accuracy)
lonstart,latstart,deltalon,deltalat = params
x = np.linspace(lonstart,lonstart+deltalon,gap)
y = np.linspace(latstart,latstart+deltalat,gap)
xx,yy = np.meshgrid(x,y)
tmp1 = pd.DataFrame()
xx=xx.reshape(1,-1)
yy=yy.reshape(1,-1)
tmp1['lon'] = xx[0]
tmp1['lat'] = yy[0]
r = tmp1.iloc[0]
def GiniIndex(p):
cum = np.cumsum(sorted(np.append(p, 0)))
sum = cum[-1]
x = np.array(range(len(cum))) / len(p)
y = cum / sum
B = np.trapz(y, x=x)
A = 0.5 - B
G = A / (A + B)
return G
def getgini(r):
lon1,lat1 = r['lon'], r['lat']
params_tmp=[lon1,lat1,deltalon,deltalat]
tmp =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 26 17:00:35 2018
@author: a002028
"""
import os
import pandas as pd
from ctdpy.core.data_handlers import DataFrameHandler
from ctdpy.core.data_handlers import SeriesHandler
from ctdpy.core.writers.txt_writer import TxtWriter
from ctdpy.core import utils
class StandardCTDWriter(SeriesHandler, DataFrameHandler):
"""Writer to write CTD data to the standrad SMHI CTD format."""
def __init__(self, settings):
"""Initialize and store the settings object."""
super().__init__(settings)
self._file_name = None
self.collection_folder = True
# self.setup_standard_format()
self.writer = self._get_writer_settings()
self.txt_writer = TxtWriter()
self._sensorinfo_boolean = False
self.std_format = False
self.meta_translation = {
'Förklaring': 'delivery_note',
'Metadata': 'metadata',
'Sensorinfo': 'sensorinfo',
'Information': 'information',
}
def write(self, datasets, keep_original_file_names=False,
collection_folder=True, **kwargs):
"""Conduct the writing process.
        Call methods in order to structure data according to the standard and then write to the standard output format
Args:
datasets: All loaded datasets [pd.DataFrame(s), pd.Series]
keep_original_file_names (bool): False or True
collection_folder (bool):
"""
self.collection_folder = collection_folder
self._check_dataset_format(datasets)
if self.std_format:
self._redirect_to_data_update(datasets)
else:
meta = self.get_metadata_sets(datasets) # dict
data = self.get_datasets(datasets) # list
self.setup_metadata_information(meta)
for dataset in data:
for fid, item in dataset.items():
self.sensorinfo_boolean = item['metadata'].get('INSTRUMENT_SERIE') # @sensorinfo_boolean.setter
instrument_metadata = self._get_instrument_metadata(item.get('raw_format'),
separator=self.writer['separator_metadata'],
data_identifier=item.get('identifier_data'))
metadata = self.extract_metadata(fid, separator=self.writer['separator_metadata'])
metadata_df = self.extract_metadata_dataframe(fid)
self.reset_index(metadata_df)
self._update_visit_info(metadata_df)
data_df = self._get_data_columns(item['data'], metadata_df)
data_series = self._get_data_serie(data_df, separator=self.writer['separator_data'])
data_series = self._append_information(self.template_format,
self.delimiters['meta'],
self.delimiters['data'],
metadata,
self.sensorinfo[item['metadata'].get('INSTRUMENT_SERIE')],
self.information,
instrument_metadata,
pd.Series(item['metadata'].get('COMNTS')),
data_series)
self._write(fid, data_series, keep_original_file_names=keep_original_file_names)
self._write_delivery_note()
self._write_metadata()
self._write_sensorinfo()
self._write_information()
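    # Usage sketch (illustrative; the variable names are hypothetical): a
    # caller would typically do
    #     writer = StandardCTDWriter(settings)
    #     writer.write(datasets, keep_original_file_names=False)
    # where `datasets` is the list of loaded deliveries described in the
    # docstring above.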
def _write_delivery_note(self):
"""Write delivery information to text file."""
serie = pd.Series(self.writer['standard_delivery_note_header'])
info = []
for key in serie:
info.append(self.delivery_note.get(self.writer['mapper_delivery_note'].get(key), ''))
info = pd.Series(info)
serie = serie.str.cat(
info,
join=None,
sep=self.writer['separator_delivery_note'],
)
self._write('delivery_note', serie)
def _write_metadata(self):
"""Write metadata information to text file."""
save_path = self._get_save_path('metadata')
self.txt_writer.write_with_pandas(data=self.df_metadata,
header=True,
save_path=save_path)
def _write_sensorinfo(self):
"""Write sensor information to text file."""
save_path = self._get_save_path('sensorinfo')
self.txt_writer.write_with_pandas(data=self.df_sensorinfo,
header=True, save_path=save_path)
def _write_information(self):
"""Write "other" information to text file."""
exclude_str = self.writer['prefix_info'] + self.writer['separator_metadata']
self._write('information', self.information.str.replace(exclude_str, ''))
def _write(self, fid, data_series, **kwargs):
"""Write CTD-cast text file according to standard format to file."""
save_path = self._get_save_path(fid, **kwargs)
self.txt_writer.write_with_numpy(data=data_series, save_path=save_path)
def _add_new_information_to_metadata(self):
"""Add information to metadata (originally loaded from excel-template (delivery))"""
prefix = self.writer.get('filename')
suffix = self.writer.get('extension_filename')
new_column = self.df_metadata[['SDATE', 'SHIPC', 'SERNO']].apply(
lambda x: prefix + '_'.join(x) + suffix,
axis=1,
)
new_column = new_column.str.replace('-', '')
if 'FILE_NAME_DATABASE' in self.df_metadata:
self.df_metadata['FILE_NAME_DATABASE'] = new_column
else:
self.df_metadata.insert(self.df_metadata.columns.get_loc('FILE_NAME') + 1,
'FILE_NAME_DATABASE', new_column)
def _adjust_data_formats(self):
"""Adjust foramts."""
self.file_name = self.df_metadata['FILE_NAME'] # property.setter, returns upper case filenames
self.df_metadata['SDATE'] = self.df_metadata['SDATE'].str.replace(' 00:00:00', '')
self.df_metadata['STIME'] = self.df_metadata['STIME'].apply(lambda x: x[:5])
self.df_metadata['SERNO'] = self.df_metadata['SERNO'].apply(lambda x: x.zfill(4))
def _append_information(self, *args):
"""Append and return information as pd.Serie."""
out_serie = pd.Series([])
out_serie = out_serie.append([serie for serie in args if serie.any()])
return out_serie
def setup_metadata_information(self, meta):
"""Set up standard metadata formats that later is writable.
Args:
meta: Contains pd.DataFrame(s) of metadata delivered to datahost
in excel spreadsheets [Metadata, Sensorinfo, Information]
"""
# TODO should be able to handle multiple metadatasets? (.xlsx delivery files)
self.delimiters = self._get_delimiters()
self.df_metadata = self._get_reduced_dataframe(meta.get('metadata'))
self.delivery_note = self._get_delivery_note(meta.get('delivery_note'))
self.template_format = self._get_template_format()
self.df_sensorinfo = self._get_reduced_dataframe(meta.get('sensorinfo'))
self.sensorinfo = self._get_sensorinfo_serie(separator=self.writer['separator_metadata'])
self.information = self._get_information_serie(meta.get('information', pd.DataFrame(columns=[''])),
separator=self.writer['separator_metadata'])
self._adjust_data_formats()
self._add_new_information_to_metadata()
def _get_template_format(self):
"""Get standard format of template output."""
        # TODO adjust reader, get format from template ('Förklaring').. ö..
return pd.Series([''.join([self.writer['prefix_format'],
'=', self.delivery_note['DTYPE']])])
def _get_delimiters(self):
"""Return delimiters of data and meta."""
# FIXME Redo and delete hardcoded parts.. Get better solution than '\ t'.replace(' ','')
# for tabb-sign self.writer['separator_data']])}
return {'meta': pd.Series([''.join([self.writer['prefix_metadata_delimiter'],
'=', self.writer['separator_metadata']])]),
'data': pd.Series([''.join([self.writer['prefix_data_delimiter'],
'=', '\ t'.replace(' ', '')])])} # noqa: W605
def _get_delivery_note(self, delivery_info):
"""Return dictionary with information taken from the delivery_note."""
info = {}
if delivery_info.columns[0] == 0:
if delivery_info[0][0].startswith('MYEAR'):
for key, value in delivery_info[0].str.split(':'):
info.setdefault(key.strip(), value.strip())
else:
xy_index = self._get_index(delivery_info, check_values=['MYEAR', 'DTYPE'])
for key, value in zip(delivery_info.iloc[xy_index[0][0]:, xy_index[1][0]],
delivery_info.iloc[xy_index[0][0]:, xy_index[1][0] + 2]):
if not pd.isnull(key):
info[key] = value
else:
break
# Check for info in self.df_metadata
for key in ['MYEAR', 'ORDERER', 'PROJ']:
if key not in info:
info[key] = ','.join(self.df_metadata[key].unique())
return info
@staticmethod
def _get_index(df, check_values=None):
"""Return numpy.array containing indices.
Args:
df (pd.DataFrame): data
check_values (list): Checklist in order of importance
Returns:
(array([15], dtype=int64), array([0], dtype=int64))
"""
for value in check_values:
if any(df == value):
return utils.get_index_where_df_equals_x(df, value)
return None
def extract_metadata(self, filename, separator=None):
"""Return pandas.Serie with metadata according to template standards.
Takes columns from dataframe and merges with dataframe values together with a seperator.
Example of one pd.Serie value: '//METADATA;SDATE;2020-08-25'
Args:
filename (str): filename of raw data
separator (str): separator to separate row values
"""
meta = self.extract_metadata_dataframe(filename)
self.reset_index(meta)
meta = meta.iloc[0].to_list()
serie = pd.Series(self.df_metadata.columns)
serie = serie.str.cat(meta, sep=separator)
serie = serie.radd(self.writer['prefix_metadata'] + separator)
return serie
def extract_metadata_dataframe(self, filename):
"""Return one row from self.df_metadata (pd.DataFrame)."""
boolean = self.get_index(self.file_name, filename.upper(),
equals=True, as_boolean=True)
return self.df_metadata.loc[boolean, :]
@staticmethod
def _get_reduced_dataframe(df):
"""Exclude empty column "Tabellhuvud:"."""
if 'Tabellhuvud:' in df:
df.pop('Tabellhuvud:')
return df
def _get_sensorinfo_serie(self, separator=None):
"""Get dictionary with sensorinfo.
Args:
separator (str): separator to separate row values
"""
out_info = {}
for inst_serno in self.df_sensorinfo['INSTRUMENT_SERIE'].unique():
out_info[inst_serno] = [
|
pd.Series(self.df_sensorinfo.columns)
|
pandas.Series
|
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import warnings
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, cast
import numpy as np # type: ignore
import pandas as pd # type: ignore
from elasticsearch import Elasticsearch
# Default number of rows displayed (different to pandas where ALL could be displayed)
DEFAULT_NUM_ROWS_DISPLAYED = 60
DEFAULT_CHUNK_SIZE = 10000
DEFAULT_CSV_BATCH_OUTPUT_SIZE = 10000
DEFAULT_PROGRESS_REPORTING_NUM_ROWS = 10000
DEFAULT_ES_MAX_RESULT_WINDOW = 10000 # index.max_result_window
DEFAULT_PAGINATION_SIZE = 5000 # for composite aggregations
PANDAS_VERSION: Tuple[int, ...] = tuple(
int(part) for part in pd.__version__.split(".") if part.isdigit()
)[:2]
with warnings.catch_warnings():
warnings.simplefilter("ignore")
EMPTY_SERIES_DTYPE = pd.Series().dtype
def build_pd_series(
data: Dict[str, Any], dtype: Optional[np.dtype] = None, **kwargs: Any
) -> pd.Series:
"""Builds a pd.Series while squelching the warning
for unspecified dtype on empty series
"""
dtype = dtype or (EMPTY_SERIES_DTYPE if not data else dtype)
if dtype is not None:
kwargs["dtype"] = dtype
return pd.Series(data, **kwargs)
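# Illustrative sketch (not part of the original module): the helper above lets
# callers create an empty Series without pandas' unspecified-dtype warning.
def _example_build_pd_series() -> pd.Series:
    build_pd_series({})  # empty input: dtype falls back to EMPTY_SERIES_DTYPE
    return build_pd_series({"a": 1, "b": 2}, dtype=np.int64)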
def docstring_parameter(*sub: Any) -> Callable[[Any], Any]:
def dec(obj: Any) -> Any:
obj.__doc__ = obj.__doc__.format(*sub)
return obj
return dec
class SortOrder(Enum):
ASC = 0
DESC = 1
@staticmethod
def reverse(order: "SortOrder") -> "SortOrder":
if order == SortOrder.ASC:
return SortOrder.DESC
return SortOrder.ASC
@staticmethod
def to_string(order: "SortOrder") -> str:
if order == SortOrder.ASC:
return "asc"
return "desc"
@staticmethod
def from_string(order: str) -> "SortOrder":
if order == "asc":
return SortOrder.ASC
return SortOrder.DESC
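# Illustrative sketch: round trip between the enum and its Elasticsearch
# string form.
def _example_sort_order() -> str:
    order = SortOrder.from_string("asc")
    return SortOrder.to_string(SortOrder.reverse(order))  # -> "desc"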
def elasticsearch_date_to_pandas_date(
value: Union[int, str], date_format: Optional[str]
) -> pd.Timestamp:
"""
    Given a specific Elasticsearch date format, parse the given value with the
    matching `pd.to_datetime` format and return it as a pandas timestamp
**Date Formats: https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-date-format.html#built-in-date-formats
Parameters
----------
value: Union[int, str]
The date value.
date_format: str
The Elasticsearch date format (ex. 'epoch_millis', 'epoch_second', etc.)
Returns
-------
datetime: pd.Timestamp
From https://www.elastic.co/guide/en/elasticsearch/reference/current/date.html
Date formats can be customised, but if no format is specified then it uses the default:
"strict_date_optional_time||epoch_millis"
Therefore if no format is specified we assume either strict_date_optional_time
or epoch_millis.
"""
if date_format is None or isinstance(value, (int, float)):
try:
return pd.to_datetime(
value, unit="s" if date_format == "epoch_second" else "ms"
)
except ValueError:
return pd.to_datetime(value)
elif date_format == "epoch_millis":
return pd.to_datetime(value, unit="ms")
elif date_format == "epoch_second":
return pd.to_datetime(value, unit="s")
elif date_format == "strict_date_optional_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "basic_date":
return pd.to_datetime(value, format="%Y%m%d")
elif date_format == "basic_date_time":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S.%f", exact=False)
elif date_format == "basic_date_time_no_millis":
return pd.to_datetime(value, format="%Y%m%dT%H%M%S%z")
elif date_format == "basic_ordinal_date":
return pd.to_datetime(value, format="%Y%j")
elif date_format == "basic_ordinal_date_time":
return pd.to_datetime(value, format="%Y%jT%H%M%S.%f%z", exact=False)
elif date_format == "basic_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y%jT%H%M%S%z")
elif date_format == "basic_time":
return pd.to_datetime(value, format="%H%M%S.%f%z", exact=False)
elif date_format == "basic_time_no_millis":
return pd.to_datetime(value, format="%H%M%S%z")
elif date_format == "basic_t_time":
return pd.to_datetime(value, format="T%H%M%S.%f%z", exact=False)
elif date_format == "basic_t_time_no_millis":
return pd.to_datetime(value, format="T%H%M%S%z")
elif date_format == "basic_week_date":
return pd.to_datetime(value, format="%GW%V%u")
elif date_format == "basic_week_date_time":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S.%f%z", exact=False)
elif date_format == "basic_week_date_time_no_millis":
return pd.to_datetime(value, format="%GW%V%uT%H%M%S%z")
elif date_format == "strict_date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "date":
return pd.to_datetime(value, format="%Y-%m-%d")
elif date_format == "strict_date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "date_hour":
return pd.to_datetime(value, format="%Y-%m-%dT%H")
elif date_format == "strict_date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "date_hour_minute":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M")
elif date_format == "strict_date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "date_hour_minute_second":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S")
elif date_format == "strict_date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_fraction":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "date_hour_minute_second_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f", exact=False)
elif date_format == "strict_date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "date_time":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "date_time_no_millis":
return pd.to_datetime(value, format="%Y-%m-%dT%H:%M:%S%z")
elif date_format == "strict_hour":
return pd.to_datetime(value, format="%H")
elif date_format == "hour":
return pd.to_datetime(value, format="%H")
elif date_format == "strict_hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "hour_minute":
return pd.to_datetime(value, format="%H:%M")
elif date_format == "strict_hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "hour_minute_second":
return pd.to_datetime(value, format="%H:%M:%S")
elif date_format == "strict_hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_fraction":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "hour_minute_second_millis":
return pd.to_datetime(value, format="%H:%M:%S.%f", exact=False)
elif date_format == "strict_ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "ordinal_date":
return pd.to_datetime(value, format="%Y-%j")
elif date_format == "strict_ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "ordinal_date_time":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "ordinal_date_time_no_millis":
return pd.to_datetime(value, format="%Y-%jT%H:%M:%S%z")
elif date_format == "strict_time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "time":
return pd.to_datetime(value, format="%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "time_no_millis":
return pd.to_datetime(value, format="%H:%M:%S%z")
elif date_format == "strict_t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "t_time":
return pd.to_datetime(value, format="T%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "t_time_no_millis":
return pd.to_datetime(value, format="T%H:%M:%S%z")
elif date_format == "strict_week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "week_date":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "week_date_time":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S.%f%z", exact=False)
elif date_format == "strict_week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "week_date_time_no_millis":
return pd.to_datetime(value, format="%G-W%V-%uT%H:%M:%S%z")
elif date_format == "strict_weekyear" or date_format == "weekyear":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week" or date_format == "weekyear_week":
# TODO investigate if there is a way of converting this
raise NotImplementedError(
"strict_weekyear_week is not implemented due to support in pandas"
)
return pd.to_datetime(value, format="%G-W%V")
# Not supported in pandas
# ValueError: ISO year directive '%G' must be used with the ISO week directive '%V'
# and a weekday directive '%A', '%a', '%w', or '%u'.
elif date_format == "strict_weekyear_week_day":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "weekyear_week_day":
return pd.to_datetime(value, format="%G-W%V-%u")
elif date_format == "strict_year":
return pd.to_datetime(value, format="%Y")
elif date_format == "year":
return pd.to_datetime(value, format="%Y")
elif date_format == "strict_year_month":
return pd.to_datetime(value, format="%Y-%m")
elif date_format == "year_month":
return
|
pd.to_datetime(value, format="%Y-%m")
|
pandas.to_datetime
|
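# Hedged aside on the date-format mapping above: pandas only accepts the ISO week-date
# directives when %G, %V and %u appear together, which is why the weekyear-only branches
# raise NotImplementedError. A standalone sketch, not part of any record in this dump:
import pandas as pd
# Monday of ISO week 1 of 2009 should resolve to 2008-12-29.
print(pd.to_datetime("2009-W01-1", format="%G-W%V-%u"))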
import numpy as np
import pandas as pd
from pandas import read_csv
import matplotlib.pyplot as plt
#%matplotlib inline
#%%
attrib = read_csv('attributes.csv', delim_whitespace = True)
data = read_csv('communities.data', names = attrib['attributes'])
print(data.shape)
#%%
data.head()
#%%
'''
Remove non-predictive features
state: US state (by number) - not counted as predictive above, but if considered, should be considered nominal (nominal)
county: numeric code for county - not predictive, and many missing values (numeric)
community: numeric code for community - not predictive and many missing values (numeric)
communityname: community name - not predictive - for information only (string)
fold: fold number for non-random 10 fold cross validation, potentially useful for debugging, paired tests - not predictive (numeric)
'''
data = data.drop(columns=['state','county',
'community','communityname',
'fold'], axis=1)
#%%
data.head()
#%%
'''
Remove columns with NA values
Some of the features contained many missing values as some surveys were not conducted in some communities,
so they were removed from the data:
'OtherPerCap', 'LemasSwornFT', 'LemasSwFTPerPop', 'LemasSwFTFieldOps',
'LemasSwFTFieldPerPop', 'LemasTotalReq', 'LemasTotReqPerPop', 'PolicReqPerOffic', 'PolicPerPop',
'RacialMatchCommPol', 'PctPolicWhite', 'PctPolicBlack', 'PctPolicHisp', 'PctPolicAsian', 'PctPolicMinor',
'OfficAssgnDrugUnits', 'NumKindsDrugsSeiz', 'PolicAveOTWorked', 'PolicCars', 'PolicOperBudg', 'LemasPctPolicOnPatr',
'LemasGangUnitDeploy', 'PolicBudgPerPop'
'''
from pandas import DataFrame
data = data.replace('?', np.nan)
feat_miss = data.columns[data.isnull().any()]
print(feat_miss)
data = data.drop(columns=list(feat_miss), axis=1)
#%%
print(data.shape)
data.head()
#%%
data.describe()
#%%
# ViolentCrimesPerPop: total number of violent crimes per 100K popuation (numeric - decimal)
# GOAL attribute (to be predicted)
data.hist(column = ['ViolentCrimesPerPop'], bins = 30, color = 'red', alpha = 0.8)
plt.show()
#%%
# TODO Correlations
import seaborn as sns
corrmat = data.corr()
fig = plt.figure(figsize = (16, 12))
sns.heatmap(corrmat, vmax = 0.8)
plt.show()
#%%
corrT = data.corr(method = 'pearson').round(4)
corrT = corrT.sort_values(by=['ViolentCrimesPerPop'])
corrT_VCPP = corrT['ViolentCrimesPerPop']
#%%
'''
Remove Multicollinearity
set VIF = 5, R^2 = 0.8 to remove attributes
'''
'''Dimensionality Reduction - Principal Component Analysis (PCA)
The dataset contains many highly correlated
variables. Multicollinearity will increase the model variance. Dimensionality reduction using PCA can provide an
optimal set of orthogonal features. Let's adopt the criterion in which we select those principal components
responsible for explaining more than a unit variance ("eigenvalue one criterion"). '''
X_DF = data.iloc[:, 0:99]
# data.to_csv("data_removed.csv")
# Detecting Multicollinearity using VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
def calc_vif(X_DF):
# X_DF = pd.DataFrame(X)
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X_DF.columns
vif["VIF"] = [variance_inflation_factor(X_DF.values, i) for i in range(X_DF.shape[1])]
return(vif)
VIF = calc_vif(X_DF)
#%%
data_to_dump = VIF.where(VIF['VIF'] > 30)
data_to_dump = data_to_dump.dropna(how='any')
columns_to_dump = list(data_to_dump.iloc[:, 0])
X_DF = data.drop(columns=columns_to_dump, axis=1)
#%%
# VIF_2 = calc_vif(X_DF)
'''
Now we have two racePct*** remain, consider corrT_VCPP['racePctAsian'] = 0.0376, corrT_VCPP['racePctHisp'] = 0.2931,
which means racePctAsian is not very related to ViolentCrimesPerPop, so to simplify
the model, we only keep racePctWhite as our sensitive variable.
'''
X_DF = X_DF.drop(columns=['racePctAsian', 'racePctHisp'], axis=1)
print("Removed columns(", len(columns_to_dump) + 2, "):\n", (columns_to_dump + ['racePctAsian', 'racePctHisp']))
#%%
from sklearn.model_selection import train_test_split
X = X_DF.values
y = data.iloc[:, 99].values
seed = 0
# Train test split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = seed)
print(X.shape)
print(y.shape)
#%%
from sklearn.preprocessing import StandardScaler
# Standardize features by removing the mean and scaling to unit variance
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#%%
# Perform PCA
# from sklearn.decomposition import PCA
#
# c = 14
# pca = PCA(n_components = c)
# X_train = pca.fit_transform(X_train)
# X_test = pca.transform(X_test)
#
# print("Amount of variance: %s" % pca.explained_variance_)
# print("Sum of the variance: %s" % sum(pca.explained_variance_).round(2))
#
# print("Percentage of variance: %s" % pca.explained_variance_ratio_)
# print("Sum of the percentage of variance: %s" % sum(pca.explained_variance_ratio_).round(2))
#
#
# plt.scatter(np.arange(1,(c+1)),pca.explained_variance_, c = 'red')
# plt.plot((0,15), (1,1), color = 'black', linestyle = 'dashed')
# plt.xlabel('PC')
# plt.ylabel('Amount of variance explained')
# plt.show()
# print(X_train.shape)
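# A minimal sketch of the "eigenvalue one criterion" described above, kept out of the
# pipeline (X_train is not modified; the pca_demo names are illustrative only).
from sklearn.decomposition import PCA
pca_demo = PCA().fit(X_train)
n_keep_demo = int((pca_demo.explained_variance_ > 1).sum())
print("Principal components with explained variance > 1:", n_keep_demo)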
#%%
pd.DataFrame(X_train).to_csv('X_train.csv')
pd.DataFrame(X_test).to_csv('X_test.csv')
pd.DataFrame(y_train).to_csv('y_train.csv')
|
pd.DataFrame(y_test)
|
pandas.DataFrame
|
import numpy as np
import pandas as pd
from sklearn.decomposition import PCA
from scipy import stats as st
def uniques(data, max_length=0, max_shown=10, ascending=False):
"""
Ranks the unique values in a dataframe's columns
Args:
data: Pandas dataframe/series to be analysed
max_length: maximum display length of unique values (0 = no limit)
max_shown: maximum number of unique values shown (0 = no limit)
ascending: show values from least to most common, or vice versa
Returns:
A table that describes for each column in the dataframe:
* The total number of values
* The type of the values
* The number of unique values, excluding NaN entries
* The unique non-NaN values, ordered by count
* The number of NaN entries (if any) and their percentage
"""
dataframe = pd.DataFrame(data)
cols = list(dataframe)
min_shown = 0
# Determine the maximum number of unique values that will be shown
# Then create the dataframe
for col_name in cols:
min_shown = np.maximum(
min_shown, len(dataframe[col_name].value_counts(dropna=True)))
if max_shown == 0 or max_shown is False:
max_shown = min_shown
else:
max_shown = np.minimum(min_shown, max_shown)
idx_arr = list(range(1, max_shown + 1))
row_index = ['type', 'count', 'unique', 'NaN'] + idx_arr
df = pd.DataFrame(index=row_index, columns=cols)
# Fill the dataframe
for col_name in cols:
col = dataframe[col_name]
count = col.value_counts(dropna=True)
vals = count.index.tolist()
if ascending:
count = count[::-1]
vals = vals[::-1]
nans = col.isnull().sum()
length = len(col)
number_values_shown = np.minimum(max_shown, len(vals))
df.at['type', col_name] = col.dtype
df.at['count', col_name] = len(col)
df.at['unique', col_name] = len(vals)
for i in list(range(number_values_shown)):
val = str(vals[i])
val_count = count.iloc[i] / length
if max_length > 0 and len(val) > max_length:
val = val[:max_length] + u'\u2026'
df.at[i + 1, col_name] = ('{}<br>{}<br>{:.1%}'.format(
val, count.iloc[i], val_count))
if nans > 0:
df.at['NaN', col_name] = ('{}<br>{:.1%}'.format(
nans, nans / length))
else:
df.at['NaN', col_name] = ""
return df.fillna('').style.set_properties(
**{'word-wrap': 'break-word', 'line-height': '125%'})
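# A minimal usage sketch for `uniques` (the toy frame is illustrative and kept as a
# comment so nothing runs at import time):
# demo = pd.DataFrame({'city': ['Oslo', 'Oslo', 'Bergen', None]})
# uniques(demo, max_shown=5)  # styled table with type/count/unique/NaN plus ranked values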
def outliers(sr, threshold=5, robust=False, verbose=True):
"""
Finds the outliers in a Pandas series and returns a list with their indices.
Args:
sr: Pandas series
threshold: maximum deviation to be tolerated
robust: use median absolute deviation instead of standard deviation
verbose: print the number of columns dropped
Returns:
A list with the indices of the outliers
"""
method = 'MAD/MeanAd' if robust else 'SD'
x = sr
try:
if robust:
x = (x - x.median()).abs() # Absolute distances from the median
mad = x.median() # Median absolute deviation (MAD)
if mad == 0:
x /= 1.253314 * x.mean() # fall back to the mean absolute deviation
else:
x /= 1.4826 * mad
else:
x = (x - x.mean()).abs() / x.std()
except Exception:
print('Could not compute deviations for {}'.format(sr.name))
mask = x > threshold
if verbose: print('Found {:,} outliers ({} > {}) in {}'.format(
len(x[mask]), method, threshold, sr.name))
return sr[mask].index
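# A minimal usage sketch for `outliers` (illustrative series, kept as a comment so nothing
# runs at import time): drop the robust outliers from a toy series.
# demo_sr = pd.Series([1.0, 1.1, 0.9, 1.2, 50.0], name='demo')
# clean_sr = demo_sr.drop(outliers(demo_sr, threshold=5, robust=True))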
def polytrans(x, y, order=2):
"""
Fits values in Pandas series x to the values in series y using an
nth-order polynomial.
Args:
x: Pandas series to be fitted
y: Pandas series against which x will be fitted
order: the maximum order polynomial used in the fitting
Returns:
The transformed Pandas series x
"""
def transform_x(x, coef):
result = .0
for c in range(len(coef)):
result += coef[c] * x**(len(coef) - c - 1)
return result
data =
|
pd.concat([x, y], axis=1)
|
pandas.concat
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.feature_selection import RFECV
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC
col_names = [
"duration", "protocol_type", "service", "flag", "src_bytes", "dst_bytes", "land",
"wrong_fragment", "urgent", "hot", "num_failed_logins", "logged_in", "num_compromised",
"root_shell", "su_attempted", "num_root", "num_file_creations", "num_shells", "num_access_files",
"num_outbound_cmds", "is_host_login", "is_guest_login", "count", "srv_count", "serror_rate",
"srv_serror_rate", "rerror_rate", "srv_rerror_rate", "same_srv_rate", "diff_srv_rate",
"srv_diff_host_rate", "dst_host_count", "dst_host_srv_count", "dst_host_same_srv_rate",
"dst_host_diff_srv_rate", "dst_host_same_src_port_rate", "dst_host_srv_diff_host_rate",
"dst_host_serror_rate", "dst_host_srv_serror_rate", "dst_host_rerror_rate", "dst_host_srv_rerror_rate", "label"]
protocol_type_list = ['tcp', 'udp', 'icmp']
service_list = ['aol', 'auth', 'bgp', 'courier', 'csnet_ns', 'ctf', 'daytime', 'discard', 'domain', 'domain_u',
'echo', 'eco_i', 'ecr_i', 'efs', 'exec', 'finger', 'ftp', 'ftp_data', 'gopher', 'harvest',
'hostnames','http', 'http_2784', 'http_443', 'http_8001', 'imap4', 'IRC', 'iso_tsap', 'klogin', 'kshell',
'ldap', 'link', 'login', 'mtp', 'name', 'netbios_dgm', 'netbios_ns', 'netbios_ssn', 'netstat', 'nnsp',
'nntp', 'ntp_u', 'other', 'pm_dump', 'pop_2', 'pop_3', 'printer', 'private', 'red_i', 'remote_job', 'rje',
'shell', 'smtp', 'sql_net', 'ssh', 'sunrpc', 'supdup', 'systat', 'telnet', 'tftp_u', 'tim_i', 'time',
'urh_i', 'urp_i', 'uucp', 'uucp_path', 'vmnet', 'whois', 'X11', 'Z39_50']
flag_list = ['OTH', 'REJ', 'RSTO', 'RSTOS0', 'RSTR', 'S0', 'S1', 'S2', 'S3', 'SF', 'SH']
label_list = ['normal.', 'buffer_overflow.', 'loadmodule.', 'perl.', 'neptune.', 'smurf.',
'guess_passwd.', 'pod.', 'teardrop.', 'portsweep.', 'ipsweep.', 'land.', 'ftp_write.',
'back.', 'imap.', 'satan.', 'phf.', 'nmap.', 'multihop.', 'warezmaster.', 'warezclient.',
'spy.', 'rootkit.']
# Build the path to a data file
def get_file_path(data_loc_path, file_name, file_type):
return '{}/{}.{}'.format(data_loc_path, file_name, file_type)
# Read the raw data file and save it as a CSV file
def read_file_to_csv(data_loc_path, from_file_name, to_file_name):
from_path = data_loc_path + '/{}'.format(from_file_name)
to_path = data_loc_path + '/{}'.format(to_file_name)
print("# Start reading the file")
data = pd.read_csv(from_path, header=None, names=col_names)
print(data[:2])
print("# File read successfully, converting to CSV")
data.to_csv(to_path, columns=None, index=False)
print("# Conversion finished")
# Find the index of a string in an array
def find_index(string, array):
for i in range(len(array)):
if string == array[i]:
return i
# Standardization (z-score) of a single value
def compute_regression_value(val, std, mean):
if std == 0 or mean == 0:
return 0
return (val - mean) / std
# Min-max normalization of a single value
def compute_normalization_value(val, min_val, max_val):
if min_val == max_val or val == min_val:
return 0
return (val - min_val) / (max_val - min_val)
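# Hedged illustration of the two helpers above as vectorised pandas operations (without
# the zero guards used above; demo_df is a toy frame used only for this illustration):
demo_df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
demo_zscore = (demo_df - demo_df.mean()) / demo_df.std()
demo_minmax = (demo_df - demo_df.min()) / (demo_df.max() - demo_df.min())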
# Main data-processing routine
def process_data(file_path):
# Read the data
data_frame = pd.read_csv(file_path)
data_num = data_frame.shape[0]
# Convert the DataFrame to a list
print("# Preparing to make the data dimensionless")
data_list = data_frame.values.tolist()
'''
1. Making the data dimensionless:
unify the data types so the later steps can operate on them
'''
print("# Preparation done, starting to make the data dimensionless")
# Walk through the array and replace strings with numeric codes
for idx in range(len(data_list)):
row = data_list[idx]
# Convert string fields in the row to numbers
row[1] = find_index(row[1], protocol_type_list)
row[2] = find_index(row[2], service_list)
row[3] = find_index(row[3], flag_list)
row[41] = find_index(row[41], label_list)
print("## 已完成{}%".format((idx / data_num) * 100))
print("# 数据无量纲化完成, 正在处理数据...")
# 转化为DataFrame
data_frame = pd.DataFrame(data_list, columns=col_names)
print("# 数据处理完成")
print(data_list[:3])
'''
2. Data standardization
x' = [x - mean(x)] / std(x)
** the last column (the label) must be excluded from this step **
'''
print("# Preparing to standardize the data")
# Compute the standard deviation of each column
std = data_frame.std(axis='rows').tolist()
# Compute the mean of each column
mean = data_frame.mean(axis='rows').tolist()
# Convert the DataFrame to a list
data_list = data_frame.values.tolist()
print("# Preparation done, starting standardization")
# Walk through the array and compute the standardized values
for idx in range(len(data_list)):
row = data_list[idx]
for col_idx in range(len(row)-1):
row[col_idx] = abs(compute_regression_value(row[col_idx], std[col_idx], mean[col_idx]))
print("## 已完成{}%".format((idx / data_num) * 100))
print("# 数据标准化完成, 正在处理数据...")
data_frame =
|
pd.DataFrame(data_list, columns=col_names)
|
pandas.DataFrame
|
__author__ = "<NAME>"
__date__ = "Nov 11, 2019"
from threading import Thread
from pathlib import Path
from numpy import string_, array, NaN, empty
from pandas import DataFrame
import json
from corems.encapsulation.constant import Atoms
from corems.encapsulation.constant import Labels
from corems.encapsulation.output import parameter_to_dict
from corems.mass_spectrum.factory.MassSpectrumClasses import MassSpecfromFreq
class HighResMassSpecExport(Thread):
def __init__(self, out_file_path, mass_spectrum, output_type='excel'):
'''
output_type:str
'excel', 'csv', 'hdf5' or 'pandas'
'''
Thread.__init__(self)
self.output_file = Path(out_file_path)
# 'excel', 'csv' or 'pandas'
self.output_type = output_type
self.mass_spectrum = mass_spectrum
# collect all assigned atoms and order them accordingly to the Atoms.atoms_order list
self.atoms_order_list = self.get_all_used_atoms_in_order(self.mass_spectrum)
self._init_columns()
def _init_columns(self):
# column labels in order
self.columns_label = ['Index',
'm/z',
'Calibrated m/z',
'Calculated m/z',
'Peak Height',
'Peak Area',
'Resolving Power',
'S/N',
'Ion Charge',
'm/z Error (ppm)',
'm/z Error Score',
'Isotopologue Similarity',
'Confidence Score',
'DBE',
'H/C',
'O/C',
'Heteroatom Class',
'Ion Type',
'Is Isotopologue',
'Mono Isotopic Index',
'Molecular Formula'
]
@property
def output_type(self):
return self._output_type
@output_type.setter
def output_type(self, output_type):
output_types = ['excel', 'csv', 'pandas', 'hdf5']
if output_type in output_types:
self._output_type = output_type
else:
raise TypeError(
'Supported types are "excel", "csv", "pandas" or "hdf5", %s entered' % output_type)
def save(self):
'''wrapper to run in a separated thread'''
if self.output_type == 'excel':
self.to_excel()
elif self.output_type == 'csv':
self.to_csv()
elif self.output_type == 'pandas':
self.to_pandas()
elif self.output_type == 'hdf5':
self.to_hdf()
else:
raise ValueError(
"Unkown output type: %s; it can be 'excel', 'csv' or 'pandas'" % self.output_type)
def run(self):
''' run is called when the Thread starts;
call exportMS.start() '''
self.save()
def get_pandas_df(self):
columns = self.columns_label + self.get_all_used_atoms_in_order(self.mass_spectrum)
dict_data_list = self.get_list_dict_data(self.mass_spectrum)
df = DataFrame(dict_data_list, columns=columns)
df.name = self.output_file
return df
def write_settings(self, output_path, mass_spectrum):
import json
dict_setting = parameter_to_dict.get_dict_data_ms(mass_spectrum)
dict_setting['MassSpecAttrs'] = self.get_mass_spec_attrs(mass_spectrum)
dict_setting['analyzer'] = mass_spectrum.analyzer
dict_setting['instrument_label'] = mass_spectrum.instrument_label
dict_setting['sample_name'] = mass_spectrum.sample_name
with open(output_path.with_suffix('.json'), 'w', encoding='utf8', ) as outfile:
output = json.dumps(dict_setting, sort_keys=True, indent=4, separators=(',', ': '))
outfile.write(output)
def to_pandas(self, write_metadata=True):
columns = self.columns_label + self.get_all_used_atoms_in_order(self.mass_spectrum)
dict_data_list = self.get_list_dict_data(self.mass_spectrum)
df =
|
DataFrame(dict_data_list, columns=columns)
|
pandas.DataFrame
|
#!/usr/bin/env bin
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas import DataFrame
from sklearn import preprocessing
from scipy.signal import savgol_filter
from copy import deepcopy
import pywt
'''
Reference blog posts:
https://blog.csdn.net/Joseph__Lagrange/article/details/95302398
https://blog.csdn.net/Joseph__Lagrange/article/details/95302953
'''
class Pretreatment:
def PlotSpectrum(self, spec, title='Raw spectra', x=0, m=5):
"""
:param spec: shape (n_samples, n_features)
:return: plt
"""
if isinstance(spec, pd.DataFrame):
spec = spec.values
spec = spec[:, :(spec.shape[1]-1)]
plt.rcParams['font.sans-serif'] = ['SimHei'] # so that Chinese labels render correctly
plt.rcParams['axes.unicode_minus'] = False # so that minus signs render correctly
wl = np.linspace(x, x+(spec.shape[1]-1)*m,spec.shape[1])
with plt.style.context(('ggplot')):
fonts = 6
plt.figure(figsize=(5.2, 3.1), dpi=200)
plt.plot(wl, spec.T)
plt.xlabel('Wavelength (nm)', fontsize=fonts)
plt.ylabel('reabsorbance', fontsize=fonts)
plt.title(title, fontsize=fonts)
return plt
def mean_centralization(self, sdata):
"""
Mean centering
"""
sdata = deepcopy(sdata)
temp1 = np.mean(sdata, axis=0)
temp2 = np.tile(temp1, sdata.shape[0]).reshape(
(sdata.shape[0], sdata.shape[1]))
return sdata - temp2
def standardlize(self, sdata):
"""
Standardization (zero mean, unit variance)
"""
sdata = deepcopy(sdata)
if isinstance(sdata, pd.DataFrame):
sdata = sdata.values
sdata = preprocessing.scale(sdata)
return sdata
def msc(self, sdata):
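"""
Multiplicative scatter correction (MSC): each sample spectrum is regressed against
the mean spectrum, then corrected with the fitted slope and intercept.
"""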
sdata = deepcopy(sdata)
if isinstance(sdata, pd.DataFrame):
sdata = sdata.values
n = sdata.shape[0] # number of samples
k = np.zeros(sdata.shape[0])
b = np.zeros(sdata.shape[0])
M = np.array(np.mean(sdata, axis=0))
from sklearn.linear_model import LinearRegression
for i in range(n):
y = sdata[i, :]
y = y.reshape(-1, 1)
M = M.reshape(-1, 1)
model = LinearRegression()
model.fit(M, y)
k[i] = model.coef_
b[i] = model.intercept_
spec_msc = np.zeros_like(sdata)
for i in range(n):
bb = np.repeat(b[i], sdata.shape[1])
kk = np.repeat(k[i], sdata.shape[1])
temp = (sdata[i, :] - bb) / kk
spec_msc[i, :] = temp
return spec_msc
# def msc(self, data_x):
#
# absorbances = data_x.columns.values
# from sklearn.linear_model import LinearRegression
# ## use the mean spectrum as the reference spectrum
# mean = np.mean(data_x,axis = 0)
#
# n,p = data_x.shape
# msc_x = np.ones((n,p))
#
# for i in range(n):
# y = data_x.values[i,:]
# lin = LinearRegression()
# lin.fit(mean.reshape(-1,1),y.reshape(-1,1))
# k = lin.coef_
# b = lin.intercept_
# msc_x[i,:] = (y - b) / k
#
# msc_x = DataFrame(msc_x, columns=absorbances)
# return msc_x
def D1(self, sdata):
"""
First-order difference
"""
sdata = deepcopy(sdata)
if isinstance(sdata, pd.DataFrame):
sdata = sdata.values
temp1 = pd.DataFrame(sdata)
temp2 = temp1.diff(axis=1)
temp3 = temp2.values
return np.delete(temp3, 0, axis=1)
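# Hedged note: for a 2-D float array the first-order difference above is equivalent to
# np.diff(sdata, n=1, axis=1); e.g. np.diff(np.array([[1., 4., 9.]]), axis=1) -> [[3., 5.]].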
def D2(self, sdata):
"""
Second-order difference
"""
sdata = deepcopy(sdata)
if isinstance(sdata, pd.DataFrame):
sdata = sdata.values
temp2 = (
|
pd.DataFrame(sdata)
|
pandas.DataFrame
|
from botbuilder.dialogs import (
ComponentDialog,
WaterfallDialog,
WaterfallStepContext,
DialogTurnResult,
)
from botbuilder.dialogs.prompts import TextPrompt, PromptOptions, ChoicePrompt, ConfirmPrompt
from botbuilder.core import MessageFactory, TurnContext, CardFactory, UserState
from botbuilder.schema import InputHints, CardAction, ActionTypes, SuggestedActions
import orderApp
import pandas as pd
class ViewOrderDialog(ComponentDialog):
def __init__(self, dialog_id: str = None):
super(ViewOrderDialog, self).__init__(dialog_id or ViewOrderDialog.__name__)
self.add_dialog(TextPrompt(TextPrompt.__name__))
self.add_dialog(ChoicePrompt(ChoicePrompt.__name__))
self.add_dialog(ConfirmPrompt(ConfirmPrompt.__name__))
self.add_dialog(
WaterfallDialog(
WaterfallDialog.__name__, [self.view_step,]
)
)
self.initial_dialog_id = WaterfallDialog.__name__
async def view_step(self, step_context: WaterfallStepContext) -> DialogTurnResult:
user_details = step_context.options
user_id = user_details.user_id
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import re
from unittest.mock import Mock, call, patch
import numpy as np
import pandas as pd
import pytest
from rdt.transformers.categorical import (
CategoricalFuzzyTransformer, CategoricalTransformer, LabelEncodingTransformer,
OneHotEncodingTransformer)
RE_SSN = re.compile(r'\d\d\d-\d\d-\d\d\d\d')
class TestCategoricalTransformer:
def test___setstate__(self):
"""Test the ``__set_state__`` method.
Validate that the ``__dict__`` attribute is correctly udpdated when
Setup:
- create an instance of a ``CategoricalTransformer``.
Side effect:
- it updates the ``__dict__`` attribute of the object.
"""
# Setup
transformer = CategoricalTransformer()
# Run
transformer.__setstate__({
'intervals': {
None: 'abc'
}
})
# Assert
assert transformer.__dict__['intervals'][np.nan] == 'abc'
def test___init__(self):
"""Passed arguments must be stored as attributes."""
# Run
transformer = CategoricalTransformer(
fuzzy='fuzzy_value',
clip='clip_value',
)
# Asserts
assert transformer.fuzzy == 'fuzzy_value'
assert transformer.clip == 'clip_value'
def test_is_transform_deterministic(self):
"""Test the ``is_transform_deterministic`` method.
Validate that this method returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_transform_deterministic()
# Assert
assert output is False
def test_is_composition_identity(self):
"""Test the ``is_composition_identity`` method.
Since ``COMPOSITION_IS_IDENTITY`` is True, just validates that the method
returns the opposite boolean value of the ``fuzzy`` parameter.
Setup:
- initialize a ``CategoricalTransformer`` with ``fuzzy = True``.
Output:
- the boolean value which is the opposite of ``fuzzy``.
"""
# Setup
transformer = CategoricalTransformer(fuzzy=True)
# Run
output = transformer.is_composition_identity()
# Assert
assert output is False
def test__get_intervals(self):
"""Test the ``_get_intervals`` method.
Validate that the intervals for each categorical value are correct.
Input:
- a pandas series containing categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
result = CategoricalTransformer._get_intervals(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__get_intervals_nans(self):
"""Test the ``_get_intervals`` method when data contains nan's.
Validate that the intervals for each categorical value are correct, when passed
data containing nan values.
Input:
- a pandas series containing nan values and categorical values.
Output:
- a tuple, where the first element describes the intervals for each
categorical value (start, end).
"""
# Setup
data = pd.Series(['foo', np.nan, None, 'foo', 'foo', 'tar'])
# Run
result = CategoricalTransformer._get_intervals(data)
# Assert
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
np.nan: (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
np.nan: 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', np.nan, 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert result[0] == expected_intervals
pd.testing.assert_series_equal(result[1], expected_means)
pd.testing.assert_frame_equal(result[2], expected_starts)
def test__fit_intervals(self):
# Setup
transformer = CategoricalTransformer()
# Run
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
transformer._fit(data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
expected_means = pd.Series({
'foo': 0.25,
'bar': 0.6666666666666666,
'tar': 0.9166666666666666
})
expected_starts = pd.DataFrame({
'category': ['foo', 'bar', 'tar'],
'start': [0, 0.5, 0.8333333333333333]
}).set_index('start')
assert transformer.intervals == expected_intervals
pd.testing.assert_series_equal(transformer.means, expected_means)
pd.testing.assert_frame_equal(transformer.starts, expected_starts)
def test__get_value_no_fuzzy(self):
# Setup
transformer = CategoricalTransformer(fuzzy=False)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
np.nan: (0.5, 1.0, 0.75, 0.5 / 6),
}
# Run
result_foo = transformer._get_value('foo')
result_nan = transformer._get_value(np.nan)
# Asserts
assert result_foo == 0.25
assert result_nan == 0.75
@patch('rdt.transformers.categorical.norm')
def test__get_value_fuzzy(self, norm_mock):
# setup
norm_mock.rvs.return_value = 0.2745
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
'foo': (0, 0.5, 0.25, 0.5 / 6),
}
# Run
result = transformer._get_value('foo')
# Asserts
assert result == 0.2745
def test__normalize_no_clip(self):
"""Test normalize data"""
# Setup
transformer = CategoricalTransformer(clip=False)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.57, 0.1234, 0.5, 0.69], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__normalize_clip(self):
"""Test normalize data with clip=True"""
# Setup
transformer = CategoricalTransformer(clip=True)
# Run
data = pd.Series([-0.43, 0.1234, 1.5, -1.31])
result = transformer._normalize(data)
# Asserts
expect = pd.Series([0.0, 0.1234, 1.0, 0.0], dtype=float)
pd.testing.assert_series_equal(result, expect)
def test__reverse_transform_array(self):
"""Test reverse_transform a numpy.array"""
# Setup
data = pd.Series(['foo', 'bar', 'bar', 'foo', 'foo', 'tar'])
rt_data = np.array([-0.6, 0.5, 0.6, 0.2, 0.1, -0.2])
transformer = CategoricalTransformer()
# Run
transformer._fit(data)
result = transformer._reverse_transform(rt_data)
# Asserts
expected_intervals = {
'foo': (
0,
0.5,
0.25,
0.5 / 6
),
'bar': (
0.5,
0.8333333333333333,
0.6666666666666666,
0.05555555555555555
),
'tar': (
0.8333333333333333,
0.9999999999999999,
0.9166666666666666,
0.027777777777777776
)
}
assert transformer.intervals == expected_intervals
expect = pd.Series(data)
pd.testing.assert_series_equal(result, expect)
def test__transform_by_category_called(self):
"""Test that the `_transform_by_category` method is called.
When the number of rows is greater than the number of categories, expect
that the `_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 5 rows.
Output:
- the output of `_transform_by_category`.
Side effects:
- `_transform_by_category` will be called once.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_category.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_category.return_value
def test__transform_by_category(self):
"""Test the `_transform_by_category` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 5 rows.
Output:
- the transformed data.
"""
# Setup
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
def test__transform_by_category_nans(self):
"""Test the ``_transform_by_category`` method with data containing nans.
Validate that the data is transformed correctly when it contains nan's.
Setup:
- the categorical transformer is instantiated, and the appropriate ``intervals``
attribute is set.
Input:
- a pandas series containing nan's.
Output:
- a numpy array containing the transformed data.
"""
# Setup
data = pd.Series([np.nan, 3, 3, 2, np.nan])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
np.nan: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Asserts
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
@patch('rdt.transformers.categorical.norm')
def test__transform_by_category_fuzzy_true(self, norm_mock):
"""Test the ``_transform_by_category`` method when ``fuzzy`` is True.
Validate that the data is transformed correctly when ``fuzzy`` is True.
Setup:
- the categorical transformer is instantiated with ``fuzzy`` as True,
and the appropriate ``intervals`` attribute is set.
- the ``intervals`` attribute is set to a a dictionary of intervals corresponding
to the elements of the passed data.
- set the ``side_effect`` of the ``rvs_mock`` to the appropriate function.
Input:
- a pandas series.
Output:
- a numpy array containing the transformed data.
Side effect:
- ``rvs_mock`` should be called four times, one for each element of the
intervals dictionary.
"""
# Setup
def rvs_mock_func(loc, scale, **kwargs):
return loc
norm_mock.rvs.side_effect = rvs_mock_func
data = pd.Series([1, 3, 3, 2, 1])
transformer = CategoricalTransformer(fuzzy=True)
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_category(data)
# Assert
expected = np.array([0.875, 0.375, 0.375, 0.625, 0.875])
assert (transformed == expected).all()
norm_mock.rvs.assert_has_calls([
call(0.125, 0.041666666666666664, size=0),
call(0.375, 0.041666666666666664, size=2),
call(0.625, 0.041666666666666664, size=1),
call(0.875, 0.041666666666666664, size=2),
])
def test__transform_by_row_called(self):
"""Test that the `_transform_by_row` method is called.
When the number of rows is less than or equal to the number of categories,
expect that the `_transform_by_row` method is called.
Setup:
The categorical transformer is instantiated with 4 categories.
Input:
- data with 4 rows
Output:
- the output of `_transform_by_row`
Side effects:
- `_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
# Run
transformed = CategoricalTransformer._transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._transform_by_row.assert_called_once_with(data)
assert transformed == categorical_transformer_mock._transform_by_row.return_value
def test__transform_by_row(self):
"""Test the `_transform_by_row` method with numerical data.
Expect that the correct transformed data is returned.
Setup:
The categorical transformer is instantiated with 4 categories and intervals.
Input:
- data with 4 rows
Output:
- the transformed data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformer = CategoricalTransformer()
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
# Run
transformed = transformer._transform_by_row(data)
# Asserts
expected = np.array([0.875, 0.625, 0.375, 0.125])
assert (transformed == expected).all()
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_matrix` method is called.
When there is enough virtual memory, expect that the
`_reverse_transform_by_matrix` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_matrix`
Side effects:
- `_reverse_transform_by_matrix` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_matrix.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_matrix.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_matrix(self, psutil_mock):
"""Test the _reverse_transform_by_matrix method with numerical data
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories and means. Also patch
the `psutil.virtual_memory` function to return a large enough `available_memory`.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 4 * 4 * 8 * 3 + 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform_by_matrix(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_category` method is called.
When there is not enough virtual memory and the number of rows is greater than the
number of categories, expect that the `_reverse_transform_by_category` method is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 5 rows
Output:
- the output of `_reverse_transform_by_category`
Side effects:
- `_reverse_transform_by_category` will be called once
"""
# Setup
transform_data = pd.Series([1, 3, 3, 2, 1])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock._normalize.return_value = transform_data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(
categorical_transformer_mock, transform_data)
# Asserts
categorical_transformer_mock._reverse_transform_by_category.assert_called_once_with(
transform_data)
assert reverse == categorical_transformer_mock._reverse_transform_by_category.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_category(self, psutil_mock):
"""Test the _reverse_transform_by_category method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 5 rows
Output:
- the original data
"""
data = pd.Series([1, 3, 3, 2, 1])
transformed = pd.Series([0.875, 0.375, 0.375, 0.625, 0.875])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
reverse = transformer._reverse_transform_by_category(transformed)
pd.testing.assert_series_equal(data, reverse)
def test__get_category_from_start(self):
"""Test the ``_get_category_from_start`` method.
Setup:
- instantiate a ``CategoricalTransformer``, and set the attribute ``starts``
to a pandas dataframe with ``set_index`` as ``'start'``.
Input:
- an integer, an index from data.
Output:
- a category from the data.
"""
# Setup
transformer = CategoricalTransformer()
transformer.starts = pd.DataFrame({
'start': [0.0, 0.5, 0.7],
'category': ['a', 'b', 'c']
}).set_index('start')
# Run
category = transformer._get_category_from_start(2)
# Assert
assert category == 'c'
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row_called(self, psutil_mock):
"""Test that the `_reverse_transform_by_row` method is called.
When there is not enough virtual memory and the number of rows is less than or equal
to the number of categories, expect that the `_reverse_transform_by_row` method
is called.
Setup:
The categorical transformer is instantiated with 4 categories. Also patch the
`psutil.virtual_memory` function to return an `available_memory` of 1.
Input:
- numerical data with 4 rows
Output:
- the output of `_reverse_transform_by_row`
Side effects:
- `_reverse_transform_by_row` will be called once
"""
# Setup
data = pd.Series([1, 2, 3, 4])
categorical_transformer_mock = Mock()
categorical_transformer_mock.means = pd.Series([0.125, 0.375, 0.625, 0.875])
categorical_transformer_mock.starts = pd.DataFrame(
[0., 0.25, 0.5, 0.75], index=[4, 3, 2, 1], columns=['category'])
categorical_transformer_mock._normalize.return_value = data
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = CategoricalTransformer._reverse_transform(categorical_transformer_mock, data)
# Asserts
categorical_transformer_mock._reverse_transform_by_row.assert_called_once_with(data)
assert reverse == categorical_transformer_mock._reverse_transform_by_row.return_value
@patch('psutil.virtual_memory')
def test__reverse_transform_by_row(self, psutil_mock):
"""Test the _reverse_transform_by_row method with numerical data.
Expect that the transformed data is correctly reverse transformed.
Setup:
The categorical transformer is instantiated with 4 categories, and the means, starts,
and intervals are set for those categories. Also patch the `psutil.virtual_memory`
function to return an `available_memory` of 1.
Input:
- transformed data with 4 rows
Output:
- the original data
"""
# Setup
data = pd.Series([1, 2, 3, 4])
transformed = pd.Series([0.875, 0.625, 0.375, 0.125])
transformer = CategoricalTransformer()
transformer.means = pd.Series([0.125, 0.375, 0.625, 0.875], index=[4, 3, 2, 1])
transformer.starts = pd.DataFrame(
[4, 3, 2, 1], index=[0., 0.25, 0.5, 0.75], columns=['category'])
transformer.intervals = {
4: (0, 0.25, 0.125, 0.041666666666666664),
3: (0.25, 0.5, 0.375, 0.041666666666666664),
2: (0.5, 0.75, 0.625, 0.041666666666666664),
1: (0.75, 1.0, 0.875, 0.041666666666666664),
}
transformer.dtype = data.dtype
virtual_memory = Mock()
virtual_memory.available = 1
psutil_mock.return_value = virtual_memory
# Run
reverse = transformer._reverse_transform(transformed)
# Assert
pd.testing.assert_series_equal(data, reverse)
class TestOneHotEncodingTransformer:
def test___init__(self):
"""Test the ``__init__`` method.
Validate that the passed arguments are stored as attributes.
Input:
- a string passed to the ``error_on_unknown`` parameter.
Side effect:
- the ``error_on_unknown`` attribute is set to the passed string.
"""
# Run
transformer = OneHotEncodingTransformer(error_on_unknown='error_value')
# Asserts
assert transformer.error_on_unknown == 'error_value'
def test__prepare_data_empty_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[], [], []]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_nested_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
data = [[[]]]
# Assert
with pytest.raises(ValueError, match='Unexpected format.'):
ohet._prepare_data(data)
def test__prepare_data_list_of_lists(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = [['a'], ['b'], ['c']]
out = ohet._prepare_data(data)
# Assert
expected = np.array(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test__prepare_data_pandas_series(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
out = ohet._prepare_data(data)
# Assert
expected = pd.Series(['a', 'b', 'c'])
np.testing.assert_array_equal(out, expected)
def test_get_output_types(self):
"""Test the ``get_output_types`` method.
Validate that the ``_add_prefix`` method is properly applied to the ``output_types``
dictionary. For this class, the ``output_types`` dictionary is described as:
{
'value1': 'float',
'value2': 'float',
...
}
The number of items in the dictionary is defined by the ``dummies`` attribute.
Setup:
- initialize a ``OneHotEncodingTransformer`` and set:
- the ``dummies`` attribute to a list.
- the ``column_prefix`` attribute to a string.
Output:
- the ``output_types`` dictionary, but with ``self.column_prefix``
added to the beginning of the keys of the ``output_types`` dictionary.
"""
# Setup
transformer = OneHotEncodingTransformer()
transformer.column_prefix = 'abc'
transformer.dummies = [1, 2]
# Run
output = transformer.get_output_types()
# Assert
expected = {
'abc.value0': 'float',
'abc.value1': 'float'
}
assert output == expected
def test__fit_dummies_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` does not
contain nans.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c'])
def test__fit_dummies_nans(self):
"""Test the ``_fit`` method without nans.
Check that ``self.dummies`` contain ``np.nan``.
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 2, 'c', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 2, 'c', np.nan])
def test__fit_no_nans(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be activated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', 'c'])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b', 'c'])
assert ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_no_nans_numeric(self):
"""Test the ``_fit`` method without nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated
Input:
- Series with values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, 3])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, 3])
np.testing.assert_array_equal(ohet._uniques, [1, 2, 3])
assert not ohet._dummy_encoded
assert not ohet._dummy_na
def test__fit_nans(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
and NA should be activated.
Input:
- Series with containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a', 'b', np.nan])
np.testing.assert_array_equal(ohet._uniques, ['a', 'b'])
assert ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_nans_numeric(self):
"""Test the ``_fit`` method with nans.
Check that the settings of the transformer
are properly set based on the input. Encoding
should be deactivated and NA activated.
Input:
- Series with containing nan values
"""
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series([1, 2, np.nan])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, [1, 2, np.nan])
np.testing.assert_array_equal(ohet._uniques, [1, 2])
assert not ohet._dummy_encoded
assert ohet._dummy_na
def test__fit_single(self):
# Setup
ohet = OneHotEncodingTransformer()
# Run
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Assert
np.testing.assert_array_equal(ohet.dummies, ['a'])
def test__transform_no_nan(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation.
Input:
- Series with values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._num_dummies = 3
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nan_categorical(self):
"""Test the ``_transform`` method without nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._uniques = ['a', 'b', 'c']
ohet._indexer = [0, 1, 2]
ohet._num_dummies = 3
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_encoded(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation. Null
values should be represented by the same encoding.
Input:
- Series with values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet._uniques = ['a', 'b']
ohet._dummy_na = True
ohet._num_dummies = 2
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans_categorical(self):
"""Test the ``_transform`` method with nans.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation using
the categorical branch. Null values should be
represented by the same encoding.
Input:
- Series with categorical values containing nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([np.nan, None, 'a', 'b'])
ohet._uniques = ['a', 'b']
ohet._indexer = [0, 1]
ohet._dummy_na = True
ohet._num_dummies = 2
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[0, 0, 1],
[0, 0, 1],
[1, 0, 0],
[0, 1, 0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_column(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._uniques = ['a']
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_categorical(self):
"""Test the ``_transform`` with one category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a single column.
Input:
- Series with a single category
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._uniques = ['a']
ohet._indexer = [0]
ohet._num_dummies = 1
ohet._dummy_encoded = True
# Run
out = ohet._transform_helper(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
where it should be a column of zeros.
Input:
- Series with unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet._uniques = ['a']
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_zeros_categorical(self):
"""Test the ``_transform`` with unknown category.
The values passed to ``_transform`` should be
returned in a one-hot encoding representation
using the categorical branch where it should
be a column of zeros.
Input:
- Series with categorical and unknown values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet._uniques = ['a']
ohet._indexer = [0]
ohet._num_dummies = 1
ohet.dummy_encoded = True
# Run
out = ohet._transform_helper(pd.Series(['b', 'b', 'b']))
# Assert
expected = np.array([
[0],
[0],
[0]
])
np.testing.assert_array_equal(out, expected)
def test__transform_unknown_nan(self):
"""Test the ``_transform`` with unknown and nans.
This is an edge case for ``_transform`` where
unknowns should be zeros and nans should be
the last entry in the column.
Input:
- Series with unknown and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
pd.Series(['a'])
ohet._uniques = ['a']
ohet._dummy_na = True
ohet._num_dummies = 1
# Run
out = ohet._transform_helper(pd.Series(['b', 'b', np.nan]))
# Assert
expected = np.array([
[0, 0],
[0, 0],
[0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_no_nans(self):
"""Test the ``transform`` without nans.
In this test ``transform`` should return an identity
matrix representing each item in the input.
Input:
- Series with categorical values
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_nans(self):
"""Test the ``transform`` with nans.
In this test ``transform`` should return an identity matrix
representing each item in the input as well as nans.
Input:
- Series with categorical values and nans
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_single_column_filled_with_ones(self):
"""Test the ``transform`` on a single category.
In this test ``transform`` should return a column
filled with ones.
Input:
- Series with a single categorical value
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Run
out = ohet._transform(data)
# Assert
expected = np.array([
[1],
[1],
[1]
])
np.testing.assert_array_equal(out, expected)
def test__transform_unknown(self):
"""Test the ``transform`` with unknown data.
In this test ``transform`` should raise an error
due to the attempt of transforming data with previously
unseen categories.
Input:
- Series with unknown categorical values
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a'])
ohet._fit(data)
# Assert
with np.testing.assert_raises(ValueError):
ohet._transform(['b'])
def test__transform_numeric(self):
"""Test the ``transform`` on numeric input.
In this test ``transform`` should return a matrix
representing each item in the input as one-hot encodings.
Input:
- Series with numeric input
Output:
- one-hot encoding of the input
"""
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series([1, 2])
ohet._fit(data)
expected = np.array([
[1, 0],
[0, 1],
])
# Run
out = ohet._transform(data)
# Assert
assert not ohet._dummy_encoded
np.testing.assert_array_equal(out, expected)
def test__reverse_transform_no_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', 'c'])
ohet._fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', 'c'])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_nans(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'b', None])
ohet._fit(data)
# Run
transformed = np.array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'b', None])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_single(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Run
transformed = np.array([
[1],
[1],
[1]
])
out = ohet._reverse_transform(transformed)
# Assert
expected = pd.Series(['a', 'a', 'a'])
pd.testing.assert_series_equal(out, expected)
def test__reverse_transform_1d(self):
# Setup
ohet = OneHotEncodingTransformer()
data = pd.Series(['a', 'a', 'a'])
ohet._fit(data)
# Run
transformed = pd.Series([1, 1, 1])
out = ohet._reverse_transform(transformed)
# Assert
expected =
|
pd.Series(['a', 'a', 'a'])
|
pandas.Series
|
import pandas as pd;
data = {
'수학': [90, 80, 70],  # math
'영어': [91, 81, 71],  # English
'과학': [92, 82, 72],  # science
'국어': [93, 83, 73],  # Korean
};
df = pd.DataFrame(data, index=['A', 'B', 'C']);
class P001:
def series01(self):
list_data = ['202007',3.14,'ABC',100, True];
sr =
|
pd.Series(list_data)
|
pandas.Series
|
#-----------------------------------------------------------------------------
# Copyright (c) 2017 - 2020, <NAME>, and CV_CONSA_Tools Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import cv2
import time
import argparse
import glob
from datetime import datetime
#############################################################################################################################################
# USAGE: python3 ./tavideo2frames.py -if ./path/to/my/video/folder -of ./data/images/output -fr "CSV" -sr 0.25
#############################################################################################################################################
########################
# MAIN FUNCTIONS
########################
def doVideo2Frames(video_path, output_folder, sample_rate, save_results, show_output, format_results):
# Starting point
print("Starting to sample video: " + video_path)
# For saving results in HDF5 or CSV
dateTime = datetime.now()
timestampStr = dateTime.strftime("%d-%b-%Y_(%H:%M:%S.%f)")
if format_results == "HDF5":
store_hdf5 = pd.HDFStore(output_folder + '/ta_video2frames_' + timestampStr + '.h5')
# Record data video. Containers for saving the information.
df = pd.DataFrame()
frames_names = []
time_stamps = []
######################
# Reading input data #
######################
video = video_path.split('/')
video_name = video[-1].split('.')
# Video Reader
cap = cv2.VideoCapture(video_path)
hasFrame, frame = cap.read()
if not hasFrame:
exit(-1)
# General info from videos
main_height, main_width, channels_main = frame.shape
    recording_fps = cap.get(cv2.CAP_PROP_FPS)  # Same as the input video, but it can be changed.
    total_number_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # Total number of frames reported for the input video.
# General Variables
frame_count = 1 # We start from 1 because the frame 0 was used for getting the resolution and other meta-info.
sampling_distance = int(recording_fps / sample_rate)
# Visual settings
if show_output:
cv2.namedWindow("Video Output " + str(video_name[0]), cv2.WINDOW_NORMAL)
#########################
# MAIN LOOP #
#########################
print(" >> INFO:")
print(" - Processing video: " + str(video_name[0]))
print(" - Number of frames in the video: " + str(total_number_frames))
print(" - Sampling 1 frame each " + str(sampling_distance) + " frames.")
number_sample_frame = 0
while (1):
hasFrame, frame = cap.read()
if not hasFrame:
break
output_frame = frame.copy() # Frame for drawing results
if frame_count % sampling_distance == 0:
if number_sample_frame < 10:
name_frame = video_name[0] + "_00" + str(number_sample_frame)
elif number_sample_frame > 0 and number_sample_frame < 100:
name_frame = video_name[0] + "_0" + str(number_sample_frame)
else:
name_frame = video_name[0] + "_" + str(number_sample_frame)
cv2.imwrite(output_folder + "/" + name_frame + ".jpg", output_frame)
frames_names.append(name_frame + ".jpg")
time_stamps.append(frame_count / recording_fps)
number_sample_frame += 1
if show_output:
cv2.imshow("Video Output " + str(video_name[0]), output_frame)
print(" -> Sampled frame number " + str(frame_count) + " of video " + str(video_name[0]))
frame_count += 1
k = cv2.waitKey(1)
if k == 27:
break
cv2.destroyAllWindows()
# Save Dataframe results.
if save_results:
print(" >> Saving " + str(len(frames_names)) + " frames of the videos in the input folder...")
df["Frame name"] = frames_names
df["Timestamp (seconds)"] = time_stamps
if format_results == "HDF5":
            store_hdf5.append('frames', df)  # HDFStore.append requires a key; 'frames' is an assumed table name.
elif format_results == "CSV":
df.to_csv(output_folder + '/ta_video2frames_' + timestampStr + '.csv', index=False, header=True)
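# Illustrative example (assumed values: a 30 fps recording sampled with -sr 0.25):
# sampling_distance = int(30 / 0.25) = 120, so one frame is written every 120
# frames; a 10-minute clip therefore yields about 150 JPEGs plus a CSV/HDF5
# table pairing each frame name with its timestamp in seconds.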
def doVideoFolder2Frames(videos, output_folder, sample_rate, save_results, show_output, format_results):
# For saving results in HDF5 or CSV
dateTime = datetime.now()
timestampStr = dateTime.strftime("%d-%b-%Y_(%H:%M:%S.%f)")
if format_results == "HDF5":
store_hdf5 = pd.HDFStore(output_folder + '/ta_videos2frames_' + timestampStr + '.h5')
video_number = 1
videos_n = len(videos)
# Record data video. Containers for saving the information.
df =
| pd.DataFrame() | pandas.DataFrame |
import pandas
pibs = pandas.read_csv('pibs_ibge.csv', sep=',', encoding='utf-8')
tse =
| pandas.read_csv('codigos_tse.csv', sep=',', encoding='utf-8') | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
@created: 01/29/21
@modified: 01/29/21
@author: <NAME>
CentraleSupelec
MICS laboratory
9 rue <NAME>, Gif-Sur-Yvette, 91190 France
Defines internal classes and user-level functions for building and plotting double heatmaps.
"""
import copy
from dataclasses import dataclass, field
import numpy as np
import pandas as pd
import os
from typing import Callable, Dict, List, Tuple, Union
from tqdm import tqdm
import sys
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import statsmodels.sandbox.stats.multicomp as mp
from scipy.stats import fisher_exact
# type aliases
DataFrame = pd.core.frame.DataFrame
Vector = Union[List[float], np.ndarray, pd.core.series.Series]
Table = Union[List[List[float]], np.ndarray, pd.core.frame.DataFrame]
def _cont_table(A: Vector, B: Vector) -> Table:
"""
    Compute a 2 x 2 contingency table from binary vectors A and B.
Parameters
----------
A: array-like
A vector of binary entries
B: array-like
A vector of binary entries
Returns
-------
tab: array-like
A 2x2 contigency table.
"""
tab = np.zeros((2, 2))
A_anti = np.where(A==1, 0, 1)
B_anti = np.where(B==1, 0, 1)
tab[0,0] = np.sum(A*B)
tab[0,1] = np.sum(A*B_anti)
tab[1,0] = np.sum(A_anti*B)
tab[1,1] = np.sum(A_anti*B_anti)
return tab
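# Quick sketch of how _cont_table fills the table (doctest-style, assumed inputs):
# >>> _cont_table(np.array([1, 1, 0]), np.array([1, 0, 0]))
# array([[1., 1.],
#        [0., 1.]])
# i.e. one sample where both A and B are 1, one where only A is 1, none where
# only B is 1, and one where both are 0.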
def _odds_ratio(tab: Table) -> float:
"""
Computes the odds ratio of a contigency table
-------------------
a b
c d
-------------------
as (a/b)/(c/d) or ad/bc
Parameters
----------
tab: array-like
The table.
Returns
-------
_odds_ratio: float
"""
if tab[0,1] == 0 or tab[1,0] == 0:
_odds_ratio = tab[0,0] * tab[1,1] / max(tab[1,0], tab[0,1], 1)
else:
_odds_ratio = tab[0,0] * tab[1,1] / (tab[1,0] * tab[0,1])
return _odds_ratio
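# Worked example (assumed tables): for [[6, 2], [1, 3]] the odds ratio is
# (6 * 3) / (2 * 1) = 9. When an off-diagonal cell is zero the ratio is kept
# finite by dividing by the larger off-diagonal count (or 1): [[6, 0], [1, 3]]
# gives 6 * 3 / max(1, 0, 1) = 18.0 instead of a division by zero.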
class _DoubleHeatmapBuild(object):
def __init__(self, pair_count="cooccurrence", pair_ratio="odds", pair_test="fisher_exact"):
"""
Parameters
----------
pair_count: str, default="cooccurrence"
Either a string or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_ratio: str, default="odds"
Either a string, a dataframe or a callable taking as input two iterables of the same size (lists or arrays)
and that returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_test: str, default="fisher_exact"
            Either a string, None, or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a p-value. Pairs that have a significant test will have a star above their cell.
"""
self.pair_count = pair_count
self.pair_ratio = pair_ratio
self.pair_test = pair_test
def _pair_count(self, A, B):
        if isinstance(self.pair_count, str) and self.pair_count == "cooccurrence":
assert set(A).issubset(set([0,1]))
assert set(B).issubset(set([0,1]))
return sum((A==1) & (B==1))
elif isinstance(self.pair_count, Callable):
return self.pair_count(A,B)
else:
raise ValueError("Invalid value for parameter 'pair_count'. Specify a Callable or one of 'cooccurrence'")
def _pair_ratio(self, A, B):
if isinstance(self.pair_ratio, str) and self.pair_ratio == "odds":
c_tab = _cont_table(A, B)
ratio = _odds_ratio(c_tab)
return ratio
elif isinstance(self.pair_ratio, Callable):
return self.pair_ratio(A,B)
else:
raise ValueError("Invalid value for parameter 'pair_ratio'. Specify a Callable or one of 'cooccurrence'")
def _pair_test(self, A, B):
if self.pair_test is None:
return None
if type(self.pair_test) == str and self.pair_test == "fisher_exact":
c_tab = _cont_table(A, B)
_, pval = fisher_exact(c_tab)
return pval
else:
return self.pair_test(A,B)
def _build_half_matrix(self, df, pair, use_diagonal=True):
"""
Builds a half matrix of size (n_var, n_var) from a matrix of size (n_obs, n_var).
Parameters
----------
df: array-like, (n_obs, n_var)
It defines the values used to build the half matrix
pair:
A callable function taking as input two iterables of the same size (lists or arrays) and that returns a
float. For each pair of variables, the float will be fill the half-matrix.
Returns
-------
half_df:
Half-filled matrix
"""
vars = df.columns.tolist()
n_vars = len(vars)
m_half = []
if use_diagonal:
for i in tqdm(range(n_vars)):
l_half = [np.nan for _ in range(n_vars)]
for j in range(0, i + 1):
l_half[j] = pair(df[vars[i]], df[vars[j]])
m_half.append(l_half)
else:
m_half.append([np.nan for _ in range(n_vars)])
for i in tqdm(range(1, n_vars)):
l_half = [np.nan for _ in range(n_vars)]
for j in range(0, i):
l_half[j] = pair(df[vars[i]], df[vars[j]])
m_half.append(l_half)
df_half = pd.DataFrame(m_half, vars)
df_half.columns = df.columns
return df_half
def build_half_matrices(self, df_values, df_active=None):
"""
Builds one, two or three half-matrices from a matrix of activation and a matrix of values of size (n_obs, n_var).
Each half-matrix is a square matrix of size (n_var, n_var).
Parameters
----------
df_values: array-like, (n_obs, n_var)
It defines the values used to build the half matrices of ratios and tests in observations x variables
format.
df_active: array-like, (n_obs, n_var) default=None
If None, df_active=df_values. It defines the binary activation indicator of variables in sample used to
build the half matrix of counts in observations x variables format.
Returns
-------
dfs: dict of dataframe
Dict containing the half-matrices of "count", "ratio" and "test"
"""
if df_active is None:
df_active = df_values
if self.pair_count is None:
df_count = None
else:
df_count = self._build_half_matrix(df_active, self._pair_count)
if self.pair_ratio is None:
df_ratio = None
else:
df_ratio = self._build_half_matrix(df_values, self._pair_ratio, use_diagonal=False)
if self.pair_test is None:
df_test = None
else:
df_test = self._build_half_matrix(df_values, self._pair_test, use_diagonal=False)
return {"count": df_count, "ratio": df_ratio, "test": df_test}
def build_double_heatmap(df_values, df_active=None, pair_count="cooccurrence", pair_ratio="odds",
pair_test="fisher_exact"):
"""
Builds one, two or three half-matrices from a matrix of activation and a matrix of values of size (n_obs, n_var).
Each half-matrix is a square matrix of size (n_var, n_var).
Parameters
----------
pair_count: str, default="cooccurrence"
Either a string or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_ratio: str, default="odds"
Either a string, a dataframe or a callable taking as input two iterables of the same size (lists or arrays)
and that returns a float. For each pair of variables, this will be plotted in one half of the heatmap.
pair_test: str, default="fisher_exact"
        Either a string, None, or a callable taking as input two iterables of the same size (lists or arrays) and that
returns a p-value. Pairs that have a significant test will have a star above their cell.
df_values: array-like, (n_obs, n_var)
It defines the values used to build the half matrices of ratios and tests in observations x variables
format.
df_active: array-like, (n_obs, n_var) default=None
If None, df_active=df_values. It defines the binary activation indicator of variables in sample used to
build the half matrix of counts in observations x variables format.
Returns
-------
dfs: dict of dataframe
Dict containing the half-matrices of "count", "ratio" and "test"
"""
builder = _DoubleHeatmapBuild(pair_count, pair_ratio, pair_test)
return builder.build_half_matrices(df_values, df_active)
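# Minimal usage sketch (assumes `df` is a binary samples x variables DataFrame;
# the variable name is illustrative):
# halves = build_double_heatmap(df)
# halves["count"], halves["ratio"] and halves["test"] are half-filled
# (n_var, n_var) DataFrames holding co-occurrence counts, odds ratios and
# Fisher exact p-values, ready to be passed to the plotting helpers below.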
def default_field(obj):
return field(default_factory=lambda: obj)
@dataclass
class DoubleHeatmapConfig:
    figure: Dict[str, Union[int, tuple]] = default_field({
"figsize": (8,8),
"dpi": 300,
"n_grid": 10,
})
heatmap: Dict[str, Union[int, str, bool, float]] = default_field({
"orientation" : "antidiagonal",
"xticklabels" : True,
"yticklabels" : True,
"ticks_labelsize" : 8,
"xticks_labelrotation" : 90,
"yticks_labelrotation" : 0,
"linecolor" : "white",
"linewidths" : 0.5,
"square" : True,
})
legend: Dict[str, Union[int, float, str]] = default_field({
'edgecolor': 'k',
'fancybox': False,
'facecolor': 'w',
'fontsize': 10,
'framealpha': 1,
'frameon': False,
'handle_length': 1,
'handle_height': 1.125,
'title_fontsize': 12,
})
count: Dict[str, Union[int, float, str, bool]] = default_field({
'boundaries' : [1,5,10,15,20,50,200,500],
'auto_boundaries' : {"n": 7, "decimals": 0, "middle": None, "regular": True},
'cmap' : sns.color_palette("Blues", n_colors=7, as_cmap=True),
'cbar_fraction' : 0.25,
'cbar_aspect' : None,
'cbar_reverse' : True,
'cbar_xy' : (0, 0.5),
'cbar_title' : "Counts",
'cbar_title_fontsize' : 12,
'cbar_title_pad' : 6,
'cbar_ticks_rotation' : 0,
'cbar_ticks_length' : 5,
'cbar_ticks_labelsize': 8,
'cbar_ticks_pad' : 4,
})
ratio: Dict[str, Union[int, float, str]] = default_field({
'boundaries' : [0.001, 0.01, 0.1, 1, 10, 100, 1000],
'auto_boundaries' : {"n": 7, "decimals": 0, "middle": None, "regular": True},
'cmap' : sns.diverging_palette(50, 200, s=90, l=50, sep=1, as_cmap=True),
'cbar_fraction' : 0.25,
'cbar_aspect' : None,
'cbar_reverse' : False,
'cbar_xy' : (0.5, 0.1),
'cbar_title' : "Ratios",
'cbar_title_pad' : 6,
'cbar_title_fontsize' : 12,
'cbar_ticks_rotation' : 0,
'cbar_ticks_length' : 5,
'cbar_ticks_labelsize': 8,
'cbar_ticks_pad' : 4,
})
test: Dict[str, Union[int, float, str]] = default_field({
'pval_level': 0.05,
'fwer_level': 0.05,
'fdr_level': 0.1,
'fwer_size': 10,
'fwer_marker': '*',
'fwer_color': 'black',
'fdr_size': 1,
'fdr_marker': 's',
'fdr_color': 'black',
})
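# Configuration sketch (illustrative): the dataclass is tweaked field by field,
# e.g. to relabel the count colour bar while keeping every other default:
# config = DoubleHeatmapConfig()
# config.count["cbar_title"] = "Co-occurrences"
# config.heatmap["orientation"] = "diagonal"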
class _DoubleHeatmapPlot(object):
def __init__(self, df_count: DataFrame, df_ratio: DataFrame, df_test: DataFrame, config: DoubleHeatmapConfig):
"""
Plots double heatmap.
Parameters
----------
df_count: pandas.core.frame.DataFrame
Pandas half-filled dataframe of counts.
df_ratio: pandas.core.frame.DataFrame
Pandas half-filled dataframe of ratios.
df_test: pandas.core.frame.DataFrame
Pandas half-filled dataframe of p-values.
config: DoubleHeatmapConfig
Graphical parameters.
"""
self.df_count = df_count.copy()
self.df_ratio = df_ratio.copy()
self.df_test = df_test.copy()
self.n_var = self.df_count.shape[0]
self.config = config
self._check_config(config)
self._automatic_config()
def _check_config(self, config):
for cmap in [self.config.ratio["cmap"], self.config.count["cmap"]]:
            if not isinstance(cmap, cm.colors.LinearSegmentedColormap):
raise ValueError("""Please specify color maps of that are instances of LinearSegmentedColormap
as produced by the sns.color_palette with cmap=True function for instance""")
if self.config.heatmap["orientation"] not in ["diagonal", "antidiagonal"]:
raise ValueError("%s is invalid for heatmap orientation. Choose 'diagonal' or 'antidiagonal'" %
self.config.heatmap["orientation"] == "antidiagonal")
def _automatic_boundaries(self, df, use_diagonal=True, n=9, middle=None, decimals=1):
        if use_diagonal:
            vals = np.array([df.iloc[i, j] for i in range(self.n_var) for j in range(i)])
        else:
            vals = np.array([df.iloc[i, j] for i in range(1, self.n_var) for j in range(i - 1)])
min_val = np.round(min(vals), decimals=decimals)
max_val = np.round(max(vals), decimals=decimals)
if middle is not None:
below_middle = pd.qcut(vals[vals < middle], q=(n-1)//2).categories.mid.values
below_middle = np.round(below_middle, decimals=decimals)
above_middle =
| pd.qcut(vals[vals > middle], q=(n-1)//2) | pandas.qcut |
'''
Tests for bipartitepandas
DATE: March 2021
'''
import pytest
import numpy as np
import pandas as pd
import bipartitepandas as bpd
import pickle
###################################
##### Tests for BipartiteBase #####
###################################
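# The refactor tests below share one pipeline (sketch of the calls they make):
#   bdf = bpd.BipartiteLong(data=df).clean_data()  # long format, contiguous ids
#   bdf = bdf.get_collapsed_long()                 # collapse repeated firm spells
#   bdf = bdf.get_es()                             # convert to event-study format
# after which stayers are the rows with m == 0 and movers the rows with m > 0.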
def test_refactor_1():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_2():
# 2 movers between firms 0 and 1, and 1 stayer at firm 2. Time has jumps.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_3():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 2
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 2
def test_refactor_4():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2.
worker_data = []
# Firm 0 -> 1 -> 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 3})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_5():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 1 -> 0
# Time 1 -> 2 -> 4
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_6():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 2 -> 3 -> 5
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 5})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_7():
# 1 mover between firms 0 and 1, and 2 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 2
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_8():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 0 -> 1
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 0
assert movers.iloc[2]['j2'] == 1
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_9():
# 2 movers between firms 0 and 1, and 1 between firms 1 and 2. Time has jumps.
worker_data = []
# Firm 0 -> 0 -> 1 -> 0
# Time 1 -> 3 -> 4 -> 6
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 3})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 4})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 6})
# Firm 1 -> 0
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 0, 'y': 1., 't': 2})
# Firm 2 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 1, 'y': 2., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 0
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j1'] == 1
assert movers.iloc[2]['j2'] == 0
assert movers.iloc[2]['y1'] == 1
assert movers.iloc[2]['y2'] == 1
assert movers.iloc[3]['i'] == 2
assert movers.iloc[3]['j1'] == 2
assert movers.iloc[3]['j2'] == 1
assert movers.iloc[3]['y1'] == 1
assert movers.iloc[3]['y2'] == 2
def test_refactor_10():
# 1 mover between firms 0 and 1, 1 between firms 1 and 2, and 1 stayer at firm 2.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_refactor_11():
# 1 mover between firms 0 and 1 and 2 and 3, 1 between firms 1 and 2, and 1 stayer at firm 2.
# Check going to event study and back to long, for data where movers have extended periods where they stay at the same firm
worker_data = []
# Firm 0 -> 1 -> 2 -> 3
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 3})
worker_data.append({'i': 0, 'j': 2, 'y': 0.5, 't': 4})
worker_data.append({'i': 0, 'j': 2, 'y': 0.75, 't': 5})
worker_data.append({'i': 0, 'j': 3, 'y': 1.5, 't': 6})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Firm 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df).clean_data().get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 0
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 0.5
assert stayers.iloc[0]['y2'] == 0.5
assert stayers.iloc[0]['t1'] == 4
assert stayers.iloc[0]['t2'] == 4
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['j1'] == 2
assert stayers.iloc[1]['j2'] == 2
assert stayers.iloc[1]['y1'] == 1.
assert stayers.iloc[1]['y2'] == 1.
assert stayers.iloc[1]['t1'] == 1
assert stayers.iloc[1]['t2'] == 1
assert stayers.iloc[2]['i'] == 2
assert stayers.iloc[2]['j1'] == 2
assert stayers.iloc[2]['j2'] == 2
assert stayers.iloc[2]['y1'] == 1.
assert stayers.iloc[2]['y2'] == 1.
assert stayers.iloc[2]['t1'] == 2
assert stayers.iloc[2]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2.
assert movers.iloc[0]['y2'] == 1.
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1.
assert movers.iloc[1]['y2'] == 0.5
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 3
assert movers.iloc[2]['i'] == 0
assert movers.iloc[2]['j1'] == 2
assert movers.iloc[2]['j2'] == 3
assert movers.iloc[2]['y1'] == 0.75
assert movers.iloc[2]['y2'] == 1.5
assert movers.iloc[2]['t1'] == 5
assert movers.iloc[2]['t2'] == 6
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j1'] == 1
assert movers.iloc[3]['j2'] == 2
assert movers.iloc[3]['y1'] == 1.
assert movers.iloc[3]['y2'] == 1.
assert movers.iloc[3]['t1'] == 1
assert movers.iloc[3]['t2'] == 2
bdf = bdf.get_long()
for row in range(len(bdf)):
df_row = df.iloc[row]
bdf_row = bdf.iloc[row]
for col in ['i', 'j', 'y', 't']:
assert df_row[col] == bdf_row[col]
def test_refactor_12():
# Check going to event study and back to long
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
assert len(bdf) == len(bdf.get_es().get_long())
def test_contiguous_fids_11():
# Check contiguous_ids() with firm ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 3, 'y': 1., 't': 2})
# Firm 3 -> 3
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 1})
worker_data.append({'i': 2, 'j': 3, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_wids_12():
# Check contiguous_ids() with worker ids.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_contiguous_cids_13():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 2})
# Firm 1 -> 2
# Cluster 2 -> 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 1})
# Firm 2 -> 2
# Cluster 1 -> 1
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
def test_contiguous_cids_14():
# Check contiguous_ids() with cluster ids.
worker_data = []
# Firm 0 -> 1
# Cluster 2 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Firm 1 -> 2
# Cluster 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Firm 2 -> 2
# Cluster 2 -> 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es().original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['original_g1'] == 2
assert movers.iloc[0]['original_g2'] == 1
assert movers.iloc[0]['g1'] == 0
assert movers.iloc[0]['g2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
assert movers.iloc[1]['original_g1'] == 1
assert movers.iloc[1]['original_g2'] == 2
assert movers.iloc[1]['g1'] == 1
assert movers.iloc[1]['g2'] == 0
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert stayers.iloc[0]['original_g1'] == 2
assert stayers.iloc[0]['original_g2'] == 2
assert stayers.iloc[0]['g1'] == 0
assert stayers.iloc[0]['g2'] == 0
def test_col_dict_15():
# Check that col_dict works properly.
worker_data = []
# Firm 0 -> 1
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
# Firm 2 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)]).rename({'j': 'firm', 'i': 'worker'}, axis=1)
bdf = bpd.BipartiteLong(data=df, col_dict={'j': 'firm', 'i': 'worker'})
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j1'] == 2
assert stayers.iloc[0]['j2'] == 2
assert stayers.iloc[0]['y1'] == 1
assert stayers.iloc[0]['y2'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[1]['i'] == 1
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['y2'] == 1
def test_worker_year_unique_16_1():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum', and 'mean' options, where options should not have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 3
# Time 1 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_2():
# Workers with multiple jobs in the same year, keep the highest paying, with long format. Testing 'max', 'sum' and 'mean' options, where options should have an effect.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
# Firm 2 -> 1 -> 2
# Time 1 -> 1 -> 2
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLong(data=df.copy())
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_worker_year_unique_16_3():
# Workers with multiple jobs in the same year, keep the highest paying, with collapsed long format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. Using collapsed long data.
worker_data = []
# Firm 0 -> 1
# Time 1 -> 2
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Firm 1 -> 2 -> 2 -> 3
# Time 1 -> 2 -> 2 -> 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
# Firm 2 -> 1
# Time 1 -> 1
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteLongCollapsed(data=df)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how}))
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 1
assert stayers.iloc[0]['y'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 2
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t1'] == 2
assert movers.iloc[1]['t2'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t1'] == 1
assert movers.iloc[2]['t2'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
if how == 'max':
assert movers.iloc[3]['y'] == 1.5
elif how == 'sum':
assert movers.iloc[3]['y'] == 2.5
elif how == 'mean':
assert movers.iloc[3]['y'] == 1.25
assert movers.iloc[3]['t1'] == 2
assert movers.iloc[3]['t2'] == 2
def test_worker_year_unique_16_4():
# Workers with multiple jobs in the same year, keep the highest paying, with event study format. Testing 'max', 'sum', and 'mean' options, where options should have an effect. NOTE: because of how data converts from event study to long (it only shifts period 2 (e.g. j2, y2) for the last row, as it assumes observations zigzag), it will only correct duplicates for period 1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j1': 0, 'j2': 1, 'y1': 2., 'y2': 1., 't1': 1, 't2': 2})
# Worker 1
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.5, 'y2': 1.5, 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 1, 'j2': 2, 'y1': 0.75, 'y2': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j1': 2, 'j2': 1, 'y1': 1., 'y2': 2., 't1': 1, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 't1': 1, 't2': 1, 'y1': 1., 'y2': 1.})
worker_data.append({'i': 3, 'j1': 2, 'j2': 2, 'y1': 1., 'y2': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 1, 't2': 1})
worker_data.append({'i': 3, 'j1': 1, 'j2': 1, 'y1': 1.5, 'y2': 1.5, 't1': 2, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
for how in ['max', 'sum', 'mean']:
bdf = bpd.BipartiteEventStudy(data=df.copy(), include_id_reference_dict=True)
bdf = bdf.clean_data(bpd.clean_params({'i_t_how': how})).original_ids()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['original_i'] == 3
assert stayers.iloc[0]['j1'] == 1
assert stayers.iloc[0]['j2'] == 1
assert stayers.iloc[0]['y1'] == 1.5
assert stayers.iloc[0]['y2'] == 1.5
assert stayers.iloc[0]['t1'] == 1
assert stayers.iloc[0]['t2'] == 1
assert stayers.iloc[1]['i'] == 2
assert stayers.iloc[1]['original_i'] == 3
assert stayers.iloc[1]['j1'] == 1
assert stayers.iloc[1]['j2'] == 1
assert stayers.iloc[1]['y1'] == 1.5
assert stayers.iloc[1]['y2'] == 1.5
assert stayers.iloc[1]['t1'] == 2
assert stayers.iloc[1]['t2'] == 2
assert movers.iloc[0]['original_i'] == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j1'] == 0
assert movers.iloc[0]['j2'] == 1
assert movers.iloc[0]['y1'] == 2
assert movers.iloc[0]['y2'] == 1
assert movers.iloc[0]['t1'] == 1
assert movers.iloc[0]['t2'] == 2
assert movers.iloc[1]['original_i'] == 1
assert movers.iloc[1]['i'] == 1
if how == 'max':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
elif how == 'sum':
assert movers.iloc[1]['j1'] == 1
assert movers.iloc[1]['y1'] == 1.25
assert movers.iloc[1]['j2'] == 2
assert movers.iloc[1]['y2'] == 2.5
elif how == 'mean':
assert movers.iloc[1]['j1'] == 2
assert movers.iloc[1]['y1'] == 1
assert movers.iloc[1]['j2'] == 1
assert movers.iloc[1]['y2'] == 2
assert movers.iloc[1]['t1'] == 1
assert movers.iloc[1]['t2'] == 2
def test_string_ids_17():
# String worker and firm ids.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_general_methods_18():
# Test some general methods, like n_workers/n_firms/n_clusters, included_cols(), drop(), and rename().
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1, 'g': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2, 'g': 1})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1, 'g': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2, 'g': 2})
# Worker 2
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 1, 'g': 2})
worker_data.append({'i': 2, 'j': 2, 'y': 1., 't': 2, 'g': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
bdf = bdf.get_collapsed_long()
bdf = bdf.get_es()
assert bdf.n_workers() == 3
assert bdf.n_firms() == 3
assert bdf.n_clusters() == 2
correct_cols = True
all_cols = bdf._included_cols()
for col in ['i', 'j', 'y', 't', 'g']:
if col not in all_cols:
correct_cols = False
break
assert correct_cols
bdf.drop('g1', axis=1, inplace=True)
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.drop('g', axis=1, inplace=True)
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
bdf.rename({'i': 'w'})
assert 'i' in bdf.columns
bdf['g1'] = 1
bdf['g2'] = 1
bdf.col_dict['g1'] = 'g1'
bdf.col_dict['g2'] = 'g2'
assert 'g1' in bdf.columns and 'g2' in bdf.columns
bdf.rename({'g': 'r'})
assert 'g1' not in bdf.columns and 'g2' not in bdf.columns
def test_save_19():
# Make sure changing attributes in a saved version does not overwrite values in the original.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
# Long
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed long
bdf = bdf.gen_m(copy=False).get_long().get_collapsed_long()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
# Collapsed event study
bdf = bdf.gen_m(copy=False).get_es()
bdf = bdf.clean_data().drop('m', axis=1, inplace=True)
bdf2 = bdf.copy()
bdf2 = bdf2.gen_m(copy=False)
assert 'm' in bdf2._included_cols() and 'm' not in bdf._included_cols()
def test_id_reference_dict_20():
# String worker and firm ids, link with id_reference_dict.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
id_reference_dict = bdf.id_reference_dict
merge_df = bdf.merge(id_reference_dict['i'], how='left', left_on='i', right_on='adjusted_ids_1').rename({'original_ids': 'original_i'})
merge_df = merge_df.merge(id_reference_dict['j'], how='left', left_on='j', right_on='adjusted_ids_1').rename({'original_ids': 'original_j'})
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_22():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method.
worker_data = []
# Worker 'a'
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
# Worker 'b'
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'c', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'd', 'y': 0.5, 't': 2})
# Worker 'd'
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'c', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] == 1]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'a'
assert movers.iloc[0]['y'] == 2
assert movers.iloc[0]['t'] == 1
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'b'
assert movers.iloc[1]['y'] == 1
assert movers.iloc[1]['t'] == 2
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'c'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 1
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'c'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_id_reference_dict_23():
# String worker and firm ids, link with id_reference_dict. Testing original_ids() method where there are multiple steps of references.
worker_data = []
# Worker 'a'
# Firm a -> b -> c turns into 0 -> 1 -> 2 turns into 0 -> 1
worker_data.append({'i': 'a', 'j': 'a', 'y': 2., 't': 1})
worker_data.append({'i': 'a', 'j': 'b', 'y': 1., 't': 2})
worker_data.append({'i': 'a', 'j': 'c', 'y': 1.5, 't': 3})
# Worker 'b'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'b', 'j': 'b', 'y': 1., 't': 1})
worker_data.append({'i': 'b', 'j': 'd', 'y': 1., 't': 2})
worker_data.append({'i': 'b', 'j': 'c', 'y': 0.5, 't': 2})
# Worker 'd'
# Firm b -> d turns into 1 -> 3 turns into 0 -> 2
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 1})
worker_data.append({'i': 'd', 'j': 'b', 'y': 1.5, 't': 1})
worker_data.append({'i': 'd', 'j': 'd', 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df, include_id_reference_dict=True)
bdf = bdf.clean_data()
bdf = bdf[bdf['j'] > 0]
bdf = bdf.clean_data(bpd.clean_params({'connectedness': None}))
merge_df = bdf.original_ids()
stayers = merge_df[merge_df['m'] == 0]
movers = merge_df[merge_df['m'] > 0]
assert len(stayers) == 0
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['original_i'] == 'a'
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['original_j'] == 'b'
assert movers.iloc[0]['y'] == 1
assert movers.iloc[0]['t'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['original_i'] == 'a'
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['original_j'] == 'c'
assert movers.iloc[1]['y'] == 1.5
assert movers.iloc[1]['t'] == 3
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['original_i'] == 'b'
assert movers.iloc[2]['j'] == 0
assert movers.iloc[2]['original_j'] == 'b'
assert movers.iloc[2]['y'] == 1
assert movers.iloc[2]['t'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['original_i'] == 'b'
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['original_j'] == 'd'
assert movers.iloc[3]['y'] == 1
assert movers.iloc[3]['t'] == 2
assert movers.iloc[4]['i'] == 2
assert movers.iloc[4]['original_i'] == 'd'
assert movers.iloc[4]['j'] == 0
assert movers.iloc[4]['original_j'] == 'b'
assert movers.iloc[4]['y'] == 1.5
assert movers.iloc[4]['t'] == 1
assert movers.iloc[5]['i'] == 2
assert movers.iloc[5]['original_i'] == 'd'
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['original_j'] == 'd'
assert movers.iloc[5]['y'] == 1
assert movers.iloc[5]['t'] == 2
def test_fill_time_24_1():
# Test .fill_periods() method for long format, with no data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df['m'] == 0]
movers = new_df[new_df['m'] == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
def test_fill_time_24_2():
# Test .fill_periods() method for long format, with 1 row of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 3
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == 2
assert movers.iloc[4]['y'] == 1
def test_fill_time_24_3():
# Test .fill_periods() method for long format, with 2 rows of data to fill in.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
# Time 1 -> 4
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 4})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
new_df = bdf.fill_periods()
stayers = new_df[new_df.groupby('i')['m'].transform('max') == 0]
movers = new_df[new_df.groupby('i')['m'].transform('max') == 1]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == - 1
assert np.isnan(movers.iloc[3]['y'])
assert np.isnan(movers.iloc[3]['m'])
assert movers.iloc[4]['i'] == 1
assert movers.iloc[4]['j'] == - 1
assert np.isnan(movers.iloc[4]['y'])
assert np.isnan(movers.iloc[4]['m'])
assert movers.iloc[5]['i'] == 1
assert movers.iloc[5]['j'] == 2
assert movers.iloc[5]['y'] == 1
def test_uncollapse_25():
# Convert from collapsed long to long format.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't1': 1, 't2': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't1': 2, 't2': 2})
# Worker 1
# Worker 1 has multiple overlapping spells in period 2
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 1.5, 't1': 2, 't2': 2})
worker_data.append({'i': 1, 'j': 3, 'y': 0.5, 't1': 2, 't2': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't1': 1, 't2': 2})
worker_data.append({'i': 3, 'j': 1, 'y': 1.5, 't1': 1, 't2': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLongCollapsed(data=df).uncollapse()
assert bdf.iloc[0]['i'] == 0
assert bdf.iloc[0]['j'] == 0
assert bdf.iloc[0]['y'] == 2
assert bdf.iloc[0]['t'] == 1
assert bdf.iloc[1]['i'] == 0
assert bdf.iloc[1]['j'] == 1
assert bdf.iloc[1]['y'] == 1
assert bdf.iloc[1]['t'] == 2
assert bdf.iloc[2]['i'] == 1
assert bdf.iloc[2]['j'] == 1
assert bdf.iloc[2]['y'] == 1
assert bdf.iloc[2]['t'] == 1
assert bdf.iloc[3]['i'] == 1
assert bdf.iloc[3]['j'] == 1
assert bdf.iloc[3]['y'] == 1
assert bdf.iloc[3]['t'] == 2
assert bdf.iloc[4]['i'] == 1
assert bdf.iloc[4]['j'] == 2
assert bdf.iloc[4]['y'] == 1
assert bdf.iloc[4]['t'] == 2
assert bdf.iloc[5]['i'] == 1
assert bdf.iloc[5]['j'] == 2
assert bdf.iloc[5]['y'] == 1.5
assert bdf.iloc[5]['t'] == 2
assert bdf.iloc[6]['i'] == 1
assert bdf.iloc[6]['j'] == 3
assert bdf.iloc[6]['y'] == 0.5
assert bdf.iloc[6]['t'] == 2
assert bdf.iloc[7]['i'] == 3
assert bdf.iloc[7]['j'] == 2
assert bdf.iloc[7]['y'] == 1
assert bdf.iloc[7]['t'] == 1
assert bdf.iloc[8]['i'] == 3
assert bdf.iloc[8]['j'] == 2
assert bdf.iloc[8]['y'] == 1
assert bdf.iloc[8]['t'] == 2
assert bdf.iloc[9]['i'] == 3
assert bdf.iloc[9]['j'] == 1
assert bdf.iloc[9]['y'] == 1.5
assert bdf.iloc[9]['t'] == 1
assert bdf.iloc[10]['i'] == 3
assert bdf.iloc[10]['j'] == 1
assert bdf.iloc[10]['y'] == 1.5
assert bdf.iloc[10]['t'] == 2
def test_keep_ids_26():
# Keep only given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_keep = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().keep_ids('j', ids_to_keep).get_long()
assert set(bdf_keep['j']) == set(ids_to_keep)
# Make sure long and es give same results
bdf_keep2 = bdf.keep_ids('j', ids_to_keep)
assert len(bdf_keep) == len(bdf_keep2)
def test_drop_ids_27():
# Drop given ids.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
all_fids = bdf['j'].unique()
ids_to_drop = all_fids[: len(all_fids) // 2]
bdf_keep = bdf.get_es().drop_ids('j', ids_to_drop).get_long()
assert set(bdf_keep['j']) == set(all_fids).difference(set(ids_to_drop))
# Make sure long and es give same results
bdf_keep2 = bdf.drop_ids('j', ids_to_drop)
assert len(bdf_keep) == len(bdf_keep2)
def test_min_obs_firms_28_1():
# List only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_firms_28_2():
# List only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the valid set of firms
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_obs_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_obs_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_obs_frame_29_1():
# Keep only firms that meet a minimum threshold of observations.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 250
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
new_frame.reset_index(drop=True, inplace=True)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_obs_frame_29_2():
# Keep only firms that meet a minimum threshold of observations.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
n_moves = frame.groupby('j')['i'].size()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_obs_frame(threshold)
new_frame3 = bdf.get_es().min_obs_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col]
def test_min_workers_firms_30():
# List only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 40
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_workers[n_workers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_workers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_workers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_workers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_workers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_workers_frame_31():
# Keep only firms that meet a minimum threshold of workers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 60
# First, manually estimate the new frame
frame = bdf.copy()
# Count workers
n_workers = frame.groupby('j')['i'].nunique()
valid_firms = n_workers[n_workers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms).get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_workers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_workers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_workers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_workers_frame(threshold).get_long()
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
def test_min_moves_firms_32_1():
# List only firms that meet a minimum threshold of moves.
# Using long/event study.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_firms_32_2():
# List only firms that meet a minimum threshold of moves.
# Using long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data().get_collapsed_long()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_moves_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_moves_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i]
def test_min_moves_frame_33():
# Keep only firms that meet a minimum threshold of moves.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the valid set of firms
frame = bdf.copy()
frame.loc[frame.loc[:, 'm'] == 2, 'm'] = 1
n_moves = frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
prev_frame.loc[prev_frame.loc[:, 'm'] == 2, 'm'] = 1
# Keep firms with sufficiently many moves
n_moves = prev_frame.groupby('j')['m'].sum()
valid_firms = sorted(n_moves[n_moves >= threshold].index)
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_moves_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_moves_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_moves_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_moves_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
def test_min_movers_firms_34():
# List only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 20
# First, manually estimate the valid set of firms
frame = bdf.copy()
# Keep movers
frame = frame[frame['m'] > 0]
n_movers = frame.groupby('j')['i'].nunique()
valid_firms = sorted(n_movers[n_movers >= threshold].index)
# Next, estimate the set of valid firms using the built-in function
valid_firms2 = sorted(bdf.min_movers_firms(threshold))
valid_firms3 = sorted(bdf.get_es().min_movers_firms(threshold))
valid_firms4 = sorted(bdf.get_collapsed_long().min_movers_firms(threshold))
valid_firms5 = sorted(bdf.get_collapsed_long().get_es().min_movers_firms(threshold))
assert (0 < len(valid_firms) < df['j'].nunique())
assert len(valid_firms) == len(valid_firms2) == len(valid_firms3) == len(valid_firms4) == len(valid_firms5)
for i in range(len(valid_firms)):
assert valid_firms[i] == valid_firms2[i] == valid_firms3[i] == valid_firms4[i] == valid_firms5[i]
def test_min_movers_frame_35():
# Keep only firms that meet a minimum threshold of movers.
# Using long/event study/long collapsed/event study collapsed.
df = bpd.SimBipartite(bpd.sim_params({'p_move': 0.05, 'rng': np.random.default_rng(1234)})).sim_network()
bdf = bpd.BipartiteLong(df).clean_data()
threshold = 12
# First, manually estimate the new frame
frame = bdf.copy()
# Keep movers
frame_movers = frame[frame['m'] > 0]
n_movers = frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = frame.keep_ids('j', valid_firms)
# Iterate until set of firms stays the same between loops
loop = True
n_loops = 0
while loop:
n_loops += 1
prev_frame = new_frame
# Keep movers
prev_frame_movers = prev_frame[prev_frame['m'] > 0]
n_movers = prev_frame_movers.groupby('j')['i'].nunique()
valid_firms = n_movers[n_movers >= threshold].index
new_frame = prev_frame.keep_ids('j', valid_firms)
loop = (len(new_frame) != len(prev_frame))
new_frame = new_frame.get_collapsed_long()
# Next, estimate the new frame using the built-in function
new_frame2 = bdf.min_movers_frame(threshold).get_collapsed_long()
new_frame3 = bdf.get_es().min_movers_frame(threshold).get_long().get_collapsed_long()
new_frame4 = bdf.get_collapsed_long().min_movers_frame(threshold)
new_frame5 = bdf.get_collapsed_long().get_es().min_movers_frame(threshold).get_long()
assert n_loops > 1
assert (0 < len(new_frame) < len(bdf))
assert len(new_frame) == len(new_frame2) == len(new_frame3) == len(new_frame4) == len(new_frame5)
for i in range(100): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
for i in range(len(new_frame) - 100, len(new_frame)): # range(len(new_frame)): # It takes too long to go through all rows
for col in ['i', 'j', 'y', 't1', 't2']:
# Skip 'm' since we didn't recompute it
assert new_frame.iloc[i][col] == new_frame2.iloc[i][col] == new_frame3.iloc[i][col] == new_frame4.iloc[i][col] == new_frame5.iloc[i][col]
###################################
##### Tests for BipartiteLong #####
###################################
def test_long_get_es_extended_1():
# Test get_es_extended() by making sure it is generating the event study correctly for periods_pre=2 and periods_post=1
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 3})
worker_data.append({'i': 0, 'j': 0, 'y': 1., 't': 4})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 1, 'j': 2, 'y': 2., 't': 3})
worker_data.append({'i': 1, 'j': 5, 'y': 1., 't': 3})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
worker_data.append({'i': 3, 'j': 3, 'y': 1.5, 't': 3})
# Worker 4
worker_data.append({'i': 4, 'j': 0, 'y': 1., 't': 1})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
df['g'] = df['j'] # Fill in g column as j
bdf = bpd.BipartiteLong(df)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=2, periods_post=1, include=['j', 'y'], transition_col='g')
assert es_extended.iloc[0]['i'] == 0
assert es_extended.iloc[0]['j_l2'] == 1
assert es_extended.iloc[0]['j_l1'] == 1
assert es_extended.iloc[0]['j_f1'] == 0
assert es_extended.iloc[0]['y_l2'] == 1
assert es_extended.iloc[0]['y_l1'] == 1
assert es_extended.iloc[0]['y_f1'] == 1
assert es_extended.iloc[0]['t'] == 4
assert es_extended.iloc[1]['i'] == 2
assert es_extended.iloc[1]['j_l2'] == 2
assert es_extended.iloc[1]['j_l1'] == 2
assert es_extended.iloc[1]['j_f1'] == 3
assert es_extended.iloc[1]['y_l2'] == 1
assert es_extended.iloc[1]['y_l1'] == 1
assert es_extended.iloc[1]['y_f1'] == 1.5
assert es_extended.iloc[1]['t'] == 3
def test_long_get_es_extended_2():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, include=['j', 'y'])
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
def test_long_get_es_extended_3_1():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_pre works
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=2, periods_post=3, stable_pre='j', include=['j', 'y'])
assert np.sum(es_extended['j_l2'] != es_extended['j_l1']) == 0
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
def test_long_get_es_extended_3_2():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_post works
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, stable_post='j', include=['j', 'y'])
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
assert np.sum(es_extended['j_f1'] != es_extended['j_f2']) == 0
def test_long_get_es_extended_3_3():
# Test get_es_extended() by making sure workers move firms at the fulcrum of the event study and stable_post and stable_pre work together
sim_data = bpd.SimBipartite().sim_network()
bdf = bpd.BipartiteLong(sim_data)
bdf = bdf.clean_data()
es_extended = bdf.get_es_extended(periods_pre=3, periods_post=2, stable_pre='j', stable_post='j', include=['j', 'y'])
assert len(es_extended) > 0 # Make sure something is left
assert np.sum(es_extended['j_l3'] != es_extended['j_l2']) == 0
assert np.sum(es_extended['j_l2'] != es_extended['j_l1']) == 0
assert np.sum(es_extended['j_l1'] == es_extended['j_f1']) == 0
assert np.sum(es_extended['j_f1'] != es_extended['j_f2']) == 0
# Only uncomment for manual testing - this produces a graph which pauses the testing
# def test_long_plot_es_extended_4():
# # Test plot_es_extended() by making sure it doesn't crash
# sim_data = bpd.SimBipartite().sim_network()
# bdf = bpd.BipartiteLong(sim_data).clean_data().cluster(grouping=bpd.grouping.kmeans(n_clusters=2))
# bdf.plot_es_extended()
# assert True # Just making sure it doesn't crash
############################################
##### Tests for BipartiteLongCollapsed #####
############################################
def test_long_collapsed_1():
# Test constructor for BipartiteLongCollapsed.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 2})
df = pd.concat([pd.DataFrame(worker, index=[i]) for i, worker in enumerate(worker_data)])
bdf = bpd.BipartiteLong(data=df)
bdf = bdf.clean_data()
df = pd.DataFrame(bdf.get_collapsed_long()).rename({'y': 'y'}, axis=1)
bdf = bpd.BipartiteLongCollapsed(df, col_dict={'y': 'y'})
bdf = bdf.clean_data()
stayers = bdf[bdf['m'] == 0]
movers = bdf[bdf['m'] > 0]
assert stayers.iloc[0]['i'] == 2
assert stayers.iloc[0]['j'] == 2
assert stayers.iloc[0]['y'] == 1
assert movers.iloc[0]['i'] == 0
assert movers.iloc[0]['j'] == 0
assert movers.iloc[0]['y'] == 2
assert movers.iloc[1]['i'] == 0
assert movers.iloc[1]['j'] == 1
assert movers.iloc[1]['y'] == 1
assert movers.iloc[2]['i'] == 1
assert movers.iloc[2]['j'] == 1
assert movers.iloc[2]['y'] == 1
assert movers.iloc[3]['i'] == 1
assert movers.iloc[3]['j'] == 2
assert movers.iloc[3]['y'] == 1
#########################################
##### Tests for BipartiteEventStudy #####
#########################################
def test_event_study_1():
# Test constructor for BipartiteEventStudy.
worker_data = []
# Worker 0
worker_data.append({'i': 0, 'j': 0, 'y': 2., 't': 1})
worker_data.append({'i': 0, 'j': 1, 'y': 1., 't': 2})
# Worker 1
worker_data.append({'i': 1, 'j': 1, 'y': 1., 't': 1})
worker_data.append({'i': 1, 'j': 2, 'y': 1., 't': 2})
# Worker 3
worker_data.append({'i': 3, 'j': 2, 'y': 1., 't': 1})
worker_data.append({'i': 3, 'j': 2, 'y': 2., 't': 2})
df = pd.concat([
|
pd.DataFrame(worker, index=[i])
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
out_dir = '../../plots/time-series/'
in_dir_bases = ['re-simulations','re-simulations-nonsocial','re-processed']
non_social_dir = 're-simulations-nonsocial'
games = ['2015-01-30-20-58-46-738_5_3-1en01_490778852021.csv',
'2015-01-30-12-1-4-89_5_2-1en01_578523323172.csv',
'2015-01-30-14-13-5-238_5_0-1en01_850912750000.csv',
'2015-01-30-19-13-16-753_5_1-1en01_254379808437.csv',
'2015-01-29-20-50-9-4_5_2-1en01_619311067974.csv',
'2015-01-30-20-58-46-738_5_3-1en01_490778852021.csv']
quantities = ['Score','Uncertainty']
copies = ['copying','copying_exploiting']
results = []
for in_dir_base in in_dir_bases:
in_dir = '../../' + in_dir_base + '/'
for copying in copies:
for game in games:
df = pd.read_csv(in_dir + game)
players = list(set(df['pid']))
for player in range(len(players)):
pid = players[player]
sub = df.loc[df['pid'] == pid].copy()
if len(sub) == 0:
continue
results += [[in_dir_base, copying, game, game+'-'+str(pid), np.mean(sub[copying])]]
df = pd.DataFrame(results)
df.columns = ['in_dir','copying','game','pid','amount']
diffs = []
# TODO: switch this to per PID
for game in set(df['game']):
for copying in copies:
sub = df.loc[(df['game'] == game)&(df['copying'] == copying)&(df['in_dir'] == non_social_dir)].copy()
nonsocial_amount = np.mean(sub['amount'])
for in_dir in in_dir_bases:
if in_dir == non_social_dir:
continue
sub = df.loc[(df['game'] == game)&(df['copying'] == copying)&(df['in_dir'] == in_dir)].copy()
norm_amount = np.mean(sub['amount']) - nonsocial_amount
diffs += [[in_dir, copying, norm_amount]]
df =
|
pd.DataFrame(diffs)
|
pandas.DataFrame
|
"""
Routes and views for the flask application.
"""
import os
from datetime import datetime
from flask import render_template, request
from werkzeug import secure_filename
from terror_ai import app
import numpy as np
import pandas as pd
from terror_ai.decision_tree_ml import decision_main
from terror_ai.logistic_regression_ml import logi_main
# Get the real output from the predicted label
def get_real_output(pred_label, filepath, flag):
temp_df = pd.DataFrame.from_csv(filepath, header=0)
temp_df = temp_df.reset_index()
val1 = "NA"
val2 = "NA"
if flag == 1:
for i, row in temp_df.iterrows():
if row["targtype1_txt_code"] == pred_label[0]:
val1 = row["targtype1_txt"]
return val1
if flag == 2:
for i, row in temp_df.iterrows():
if row["attacktype1_txt_code"] == pred_label[0]:
val2 = row["attacktype1_txt"]
return val2
@app.route('/')
@app.route('/home')
def home():
"""Renders the home page."""
return render_template(
'index.html'
)
@app.route("/form")
def form():
return render_template(
"form.html"
)
@app.route("/uploader", methods=["GET", "POST"])
def uploader():
month_val=int(request.form["monthnumber"])
casualty_val=int(request.form["ncasualty"])
country_val=int(request.form["countryname"])
region_val=int(request.form["regionname"])
target_val=int(request.form["targetname"])
file1 = "/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/target_type.csv"
file2 = "/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/attack_type.csv"
if int(request.form["algo"]) == 1:
algorithm_to_use = "Decision Tree"
df1 = pd.DataFrame.from_csv("/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/final_train1.csv", header=0)
# form a test vector
test_df1 = pd.DataFrame([month_val, casualty_val, country_val, region_val, target_val])
test_df1 = test_df1.transpose()
# train and predict using model1
pred_label1, accuracy_value1 = decision_main(df1, test_df1)
#pred_label1, accuracy_value1 = decision_main(train1X, train1y, test_df1)
out1 = get_real_output(pred_label1, file1, 1)
df2 = pd.DataFrame.from_csv("/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/final_train2.csv", header=0)
# form a test vector
test_df2 = pd.DataFrame([month_val, casualty_val, country_val, region_val, target_val, pred_label1])
test_df2 = test_df2.transpose()
# train and predict using model1
pred_label2, accuracy_value2 = decision_main(df2, test_df2)
out2 = get_real_output(pred_label2, file2, 2)
else:
algorithm_to_use = "Logistic Regression"
df1 = pd.DataFrame.from_csv("/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/final_train1.csv", header=0)
# form a test vector
test_df1 = pd.DataFrame([month_val, casualty_val, country_val, region_val, target_val])
test_df1 = test_df1.transpose()
# train and predict using model1
pred_label1, accuracy_value1 = logi_main(df1, test_df1)
out1 = get_real_output(pred_label1, file1, 1)
df2 =
|
pd.DataFrame.from_csv("/mnt/d/COLLEGE/DONE/terror_ai/terror_ai/static/data/final_train2.csv", header=0)
|
pandas.DataFrame.from_csv
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
"""
CUSUM stands for cumulative sum, it is a changepoint detection algorithm.
In the Kats implementation, it has two main components:
1. Locate the change point: The algorithm iteratively estimates the means
before and after the change point and finds the change point
maximizing/minimizing the cusum value until the change point has
converged. The starting point for the change point is at the middle.
2. Hypothesis testing: Conducting log likelihood ratio test where the null
hypothesis has no change point with one mean and the alternative
hypothesis has a change point with two means.
And here are a few things worth mentioning:
* We assume there is only one increase/decrease change point;
* We use Gaussian distribution as the underlying model to calculate the cusum
value and conduct the hypothesis test;
Typical usage example:
>>> # Univariate CUSUM
>>> timeseries = TimeSeriesData(...)
>>> detector = CusumDetector(timeseries)
>>> #Run detector
>>> changepoints = detector.detector()
>>> # Plot the results
>>> detector.plot(changepoints)
The usage is the same for multivariate CUSUM except that the time series needs
to be multivariate and that the plotting functions are not yet supported for
this use case.
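>>> # Multivariate CUSUM (illustrative sketch; MultiCUSUMDetector is the
>>> # multivariate counterpart, and plotting is not yet supported for it)
>>> multi_timeseries = TimeSeriesData(...)
>>> detector = MultiCUSUMDetector(multi_timeseries)
>>> changepoints = detector.detector()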
"""
import logging
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from kats.consts import (
TimeSeriesChangePoint,
TimeSeriesData,
)
from kats.detectors.detector import Detector
from scipy.stats import chi2 # @manual
pd.options.plotting.matplotlib.register_converters = True
# Constants
CUSUM_DEFAULT_ARGS = {
"threshold": 0.01,
"max_iter": 10,
"delta_std_ratio": 1.0,
"min_abs_change": 0,
"start_point": None,
"change_directions": None,
"interest_window": None,
"magnitude_quantile": None,
"magnitude_ratio": 1.3,
"magnitude_comparable_day": 0.5,
"return_all_changepoints": False,
"remove_seasonality": False,
}
def _get_arg(name: str, **kwargs) -> Any:
return kwargs.get(name, CUSUM_DEFAULT_ARGS[name])
class CUSUMChangePoint(TimeSeriesChangePoint):
"""CUSUM change point.
This is a changepoint detected by CUSUMDetector.
Attributes:
start_time: Start time of the change.
end_time: End time of the change.
confidence: The confidence of the change point.
direction: a str stand for the changepoint change direction 'increase'
or 'decrease'.
cp_index: an int for changepoint index.
mu0: a float indicates the mean before changepoint.
mu1: a float indicates the mean after changepoint.
delta: mu1 - mu0.
llr: log likelihood ratio.
llr_int: log likelihood ratio in the interest window.
regression_detected: a bool indicates if regression detected.
stable_changepoint: a bool indicates if we have a stable changepoint
when locating the changepoint.
p_value: p_value of the changepoint.
p_value_int: p_value of the changepoint in the interest window.
"""
def __init__(
self,
start_time: pd.Timestamp,
end_time: pd.Timestamp,
confidence: float,
direction: str,
cp_index: int,
mu0: Union[float, np.ndarray],
mu1: Union[float, np.ndarray],
delta: Union[float, np.ndarray],
llr_int: float,
llr: float,
regression_detected: bool,
stable_changepoint: bool,
p_value: float,
p_value_int: float,
) -> None:
super().__init__(start_time, end_time, confidence)
self._direction = direction
self._cp_index = cp_index
self._mu0 = mu0
self._mu1 = mu1
self._delta = delta
self._llr_int = llr_int
self._llr = llr
self._regression_detected = regression_detected
self._stable_changepoint = stable_changepoint
self._p_value = p_value
self._p_value_int = p_value_int
@property
def direction(self) -> str:
return self._direction
@property
def cp_index(self) -> int:
return self._cp_index
@property
def mu0(self) -> Union[float, np.ndarray]:
return self._mu0
@property
def mu1(self) -> Union[float, np.ndarray]:
return self._mu1
@property
def delta(self) -> Union[float, np.ndarray]:
return self._delta
@property
def llr(self) -> float:
return self._llr
@property
def llr_int(self) -> float:
return self._llr_int
@property
def regression_detected(self) -> bool:
return self._regression_detected
@property
def stable_changepoint(self) -> bool:
return self._stable_changepoint
@property
def p_value(self) -> float:
return self._p_value
@property
def p_value_int(self) -> float:
return self._p_value_int
def __repr__(self) -> str:
return (
f"CUSUMChangePoint(start_time: {self._start_time}, end_time: "
f"{self._end_time}, confidence: {self._confidence}, direction: "
f"{self._direction}, index: {self._cp_index}, delta: {self._delta}, "
f"regression_detected: {self._regression_detected}, "
f"stable_changepoint: {self._stable_changepoint}, mu0: {self._mu0}, "
f"mu1: {self._mu1}, llr: {self._llr}, llr_int: {self._llr_int}, "
f"p_value: {self._p_value}, p_value_int: {self._p_value_int})"
)
class CUSUMDetector(Detector):
interest_window: Optional[Tuple[int, int]] = None
magnitude_quantile: Optional[float] = None
magnitude_ratio: Optional[float] = None
changes_meta: Optional[Dict[str, Dict[str, Any]]] = None
def __init__(
self,
data: TimeSeriesData,
is_multivariate: bool = False,
is_vectorized: bool = False,
) -> None:
"""Univariate CUSUM detector for level shifts
Use cusum to detect changes, the algorithm is based on likelihood ratio
cusum. See https://www.fs.isy.liu.se/Edu/Courses/TSFS06/PDFs/Basseville.pdf
for details. This detector is used to detect mean changes in Normal
Distribution.
Args:
data: :class:`kats.consts.TimeSeriesData`; The input time series data.
is_multivariate: Optional; bool; should be False unless running
MultiCUSUMDetector,
"""
super(CUSUMDetector, self).__init__(data=data)
if not self.data.is_univariate() and not is_multivariate and not is_vectorized:
msg = (
"CUSUMDetector only supports univariate time series, but got "
f"{type(self.data.value)}. For multivariate time series, use "
"MultiCUSUMDetector or VectorizedCUSUMDetector"
)
logging.error(msg)
raise ValueError(msg)
def _get_change_point(
self, ts: np.ndarray, max_iter: int, start_point: int, change_direction: str
) -> Dict[str, Any]:
"""
Find change point in the timeseries.
"""
interest_window = self.interest_window
# locate the change point using cusum method
if change_direction == "increase":
changepoint_func = np.argmin
logging.debug("Detecting increase changepoint.")
else:
assert change_direction == "decrease"
changepoint_func = np.argmax
logging.debug("Detecting decrease changepoint.")
n = 0
# use the middle point as initial change point to estimate mu0 and mu1
if interest_window is not None:
ts_int = ts[interest_window[0] : interest_window[1]]
else:
ts_int = ts
if start_point is None:
cusum_ts = np.cumsum(ts_int - np.mean(ts_int))
changepoint = min(changepoint_func(cusum_ts), len(ts_int) - 2)
else:
changepoint = start_point
mu0 = mu1 = None
# iterate until the changepoint converges
while n < max_iter:
n += 1
mu0 = np.mean(ts_int[: (changepoint + 1)])
mu1 = np.mean(ts_int[(changepoint + 1) :])
mean = (mu0 + mu1) / 2
# here is where the CUSUM step happens: cumulative sum of deviations from the
# pooled mean; its argmin/argmax gives the next changepoint estimate
cusum_ts = np.cumsum(ts_int - mean)
next_changepoint = max(1, min(changepoint_func(cusum_ts), len(ts_int) - 2))
if next_changepoint == changepoint:
break
changepoint = next_changepoint
if n == max_iter:
logging.info("Max iteration reached and no stable changepoint found.")
stable_changepoint = False
else:
stable_changepoint = True
# llr in interest window
if interest_window is None:
llr_int = np.inf
pval_int = np.NaN
delta_int = None
else:
llr_int = self._get_llr(
ts_int,
{"mu0": mu0, "mu1": mu1, "changepoint": changepoint},
)
pval_int = 1 - chi2.cdf(llr_int, 2)
delta_int = mu1 - mu0
changepoint += interest_window[0]
# full time changepoint and mean
mu0 = np.mean(ts[: (changepoint + 1)])
mu1 = np.mean(ts[(changepoint + 1) :])
return {
"changepoint": changepoint,
"mu0": mu0,
"mu1": mu1,
"changetime": self.data.time[changepoint],
"stable_changepoint": stable_changepoint,
"delta": mu1 - mu0,
"llr_int": llr_int,
"p_value_int": pval_int,
"delta_int": delta_int,
}
def _get_llr(self, ts: np.ndarray, change_meta: Dict[str, Any]):
"""
Calculate the log likelihood ratio
"""
mu0: float = change_meta["mu0"]
mu1: float = change_meta["mu1"]
changepoint: int = change_meta["changepoint"]
scale = np.sqrt(
(
np.sum((ts[: (changepoint + 1)] - mu0) ** 2)
+ np.sum((ts[(changepoint + 1) :] - mu1) ** 2)
)
/ (len(ts) - 2)
)
mu_tilde, sigma_tilde = np.mean(ts), np.std(ts)
if scale == 0:
scale = sigma_tilde * 0.01
llr = -2 * (
self._log_llr(ts[: (changepoint + 1)], mu_tilde, sigma_tilde, mu0, scale)
+ self._log_llr(ts[(changepoint + 1) :], mu_tilde, sigma_tilde, mu1, scale)
)
return llr
def _log_llr(
self, x: np.ndarray, mu0: float, sigma0: float, mu1: float, sigma1: float
) -> float:
"""Helper function to calculate log likelihood ratio.
This function calculate the log likelihood ratio of two Gaussian
distribution log(l(0)/l(1)).
Args:
x: the data value.
mu0: mean of model 0.
sigma0: std of model 0.
mu1: mean of model 1.
sigma1: std of model 1.
Returns:
the value of log likelihood ratio.
"""
return np.sum(
np.log(sigma1 / sigma0)
+ 0.5 * (((x - mu1) / sigma1) ** 2 - ((x - mu0) / sigma0) ** 2)
)
def _magnitude_compare(self, ts: np.ndarray) -> float:
"""
Compare daily magnitude to avoid daily seasonality false positives.
"""
time = self.data.time
interest_window = self.interest_window
magnitude_ratio = self.magnitude_ratio
if interest_window is None:
raise ValueError("detect must be called first")
assert magnitude_ratio is not None
# get number of days in historical window
days = (time.max() - time.min()).days
# get interest window magnitude
mag_int = self._get_time_series_magnitude(
ts[interest_window[0] : interest_window[1]]
)
comparable_mag = 0
for i in range(days):
start_time = time[interest_window[0]] -
|
pd.Timedelta(f"{i}D")
|
pandas.Timedelta
|
# coding: utf-8
# Seismic data is a neat thing. You can imagine it like an ultra-sound of the subsurface. However, in an ultra-sound, we use much smaller wavelengths to image our body. Seismic data usually has wavelengths around 1m to 100m. That has some physical implications, but for now, we don't have to deal with that. It's just something to keep in mind while thinking about resolution.
#
# Imaging salt has been a huge topic in the seismic industry, basically since they imaged salt the first time. The Society of Exploration Geophysicists alone has over 10,000 publications with the [keyword salt](https://library.seg.org/action/doSearch?AllField=salt). Salt bodies are important for the hydrocarbon industry, as they usually form nice oil traps. So there's a clear motivation to delineate salt bodies in the subsurface. If you would like to do a deep dive, you can see [this publication](https://www.iongeo.com/content/documents/Resource%20Center/Articles/INT_Imaging_Salt_tutorial_141101.pdf)
#
# Seismic data interpreters are used to interpreting on 2D or 3D images that have been heavily processed. The standard work of [seismic data analysis](https://wiki.seg.org/wiki/Seismic_Data_Analysis) is open access.
# You'll find sections on Salt in there as well (https://wiki.seg.org/wiki/Salt-flank_reflections and https://wiki.seg.org/wiki/Salt_flanks). The seismic itself is pretty "old" in the publication, and you're dealing with data that is less noisy here, which is nice.
#
# [](https://wiki.seg.org/wiki/Salt-flank_reflections#/media/File:Ch05_fig0-1.png)
# Caption: Figure 5.0-1 Conflicting dips associated with salt flanks: (a) CMP stack without dip-moveout correction; (b) time migration of the stack in (a); (c) the stack with dip-moveout correction; (d) time migration of the stack in (c). CC-BY-SA Yilmaz.
#
# Interpretation on seismic images has long used texture attributes to better identify and highlight areas of interest. These can be seen as feature maps of the texture of the seismic. For salt, you will notice that the texture in the salt masks is rather chaotic, whereas the surrounding seismic is more "striped". You can think of Earth as layered. Sand gets deposited on top of existing sand. In comes salt, which behaves very much unlike other rocks. There is an entire research branch dedicated to salt tectonics, that is, the movement of salt in the subsurface. To give you the gist, these salt diapirs form from salt layers somewhere else that were under much pressure. These started to flow (behave ductilely) and find a way into other layers above. I have written a bit about salt on [my blog](http://the-geophysicist.com/the-showroom-data-for-my-thesis).
#
# One common seismic attribute is called "chaos" or "seismic disorder". So if you talk to cynical geophysicists, you'll hear "that deep learning better outperform the Chaos attribute". A good starting point is [this publication](http://www.chopraseismic.com/wp-content/uploads/2016/08/Chopra_Marfurt_TLE_Aug2016-LowRes.pdf).
#
# Recently, geoscience has started to adopt deep learning, and it has seen a clear boom, particularly in imaging salt. Code for automatic seismic interpretation can be found here:
#
# + https://github.com/waldeland/CNN-for-ASI
# + https://github.com/bolgebrygg/MalenoV
# + https://github.com/crild/facies_net
#
# You will notice that these solutions load a specific SEG-Y file, which luckily we don't have to bother with. TGS provided some nice PNG files instead. However, you can glean some information from them how to approach seismic data. If you find you need some geophysical helpers, you can [import Bruges](https://github.com/agile-geoscience/bruges)
#
# Let's dive in for now.
#
# In[ ]:
import os
import sys
import random
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
get_ipython().magic(u'matplotlib inline')
import cv2
from tqdm import tqdm_notebook, tnrange
from itertools import chain
from skimage.io import imread, imshow, concatenate_images
from skimage.transform import resize
from skimage.morphology import label
from keras.models import Model, load_model
from keras.layers import Input
from keras.layers.core import Lambda
from keras.layers.convolutional import Conv2D, Conv2DTranspose
from keras.layers.pooling import MaxPooling2D
from keras.layers.merge import concatenate
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras import backend as K
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
# In[ ]:
# Set some parameters
im_width = 128
im_height = 128
im_chan = 1
path_train = '../input/train/'
path_test = '../input/test/'
# # Data Exploration
# Let's look at some data. By inspecting a few samples, we can see that TGS chose to use very varied data. That is great and addresses a problem in deep learning geoscience at the moment: we build models on one type of seismic and have no idea whether they generalize.
# In[ ]:
ids= ['1f1cc6b3a4','5b7c160d0d','6c40978ddf','7dfdf6eeb8','7e5a6e5013']
plt.figure(figsize=(20,10))
for j, img_name in enumerate(ids):
q = j+1
img = load_img('../input/train/images/' + img_name + '.png')
img_mask = load_img('../input/train/masks/' + img_name + '.png')
plt.subplot(1,2*(1+len(ids)),q*2-1)
plt.imshow(img)
plt.subplot(1,2*(1+len(ids)),q*2)
plt.imshow(img_mask)
plt.show()
# We have many examples without salt, as you can see by the masks that are entirely dark. That's great, an algorithm we build will then know that patches exist entirely without salt. Talk about biasing your data.
#
# We can draw heavily on other work, instead of regurgitating the geophysics work that has been done before. I mentioned that seismic is kind of like ultrasound. So I had a look at https://www.kaggle.com/keegil/keras-u-net-starter-lb-0-277
#
# Let's throw a U-Net at our data. I am blatantly stealing from Ketil at this point. All credit goes to him and his nice code.
# First we'll need to get our data into a shape that works for U-Nets. That means it should be a power of 2. Let's do it quick and dirty for now, but eventually consider aliasing and all that fun.
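# (Aside: the power-of-2 requirement comes from the four 2x2 max-pooling steps in the model below, which halve the 128x128 input to 64, 32, 16 and finally 8; the matching strides-2 Conv2DTranspose layers then double it back up to 128.)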
# In[ ]:
train_ids = next(os.walk(path_train+"images"))[2]
test_ids = next(os.walk(path_test+"images"))[2]
# In[ ]:
# Get and resize train images and masks
X_train = np.zeros((len(train_ids), im_height, im_width, im_chan), dtype=np.uint8)
Y_train = np.zeros((len(train_ids), im_height, im_width, 1), dtype=np.bool)
print('Getting and resizing train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm_notebook(enumerate(train_ids), total=len(train_ids)):
path = path_train
img = load_img(path + '/images/' + id_)
x = img_to_array(img)[:,:,1]
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_train[n] = x
mask = img_to_array(load_img(path + '/masks/' + id_))[:,:,1]
Y_train[n] = resize(mask, (128, 128, 1), mode='constant', preserve_range=True)
print('Done!')
# In[ ]:
# Check if training data looks all right
ix = random.randint(0, len(train_ids) - 1)
plt.imshow(np.dstack((X_train[ix],X_train[ix],X_train[ix])))
plt.show()
tmp = np.squeeze(Y_train[ix]).astype(np.float32)
plt.imshow(np.dstack((tmp,tmp,tmp)))
plt.show()
# # Train Model
# Our task, just like the segmentation task for nuclei, is evaluated on the mean IoU metric. This one isn't in keras, but obviously, we're stealing this one too from Ketil.
# In[ ]:
# Define IoU metric
def mean_iou(y_true, y_pred):
prec = []
for t in np.arange(0.5, 1.0, 0.05):
y_pred_ = tf.to_int32(y_pred > t)
score, up_opt = tf.metrics.mean_iou(y_true, y_pred_, 2)
K.get_session().run(tf.local_variables_initializer())
with tf.control_dependencies([up_opt]):
score = tf.identity(score)
prec.append(score)
return K.mean(K.stack(prec), axis=0)
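# (Aside on the metric above: the loop binarizes the predictions at probability
# thresholds 0.5, 0.55, ..., 0.95, computes the two-class mean IoU at each
# threshold with tf.metrics.mean_iou, and averages the results, which the kernel
# uses as a stand-in for the competition's mean-IoU-over-thresholds score.)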
# This is the fun part: building the model. The U-Net basically looks like an auto-encoder with shortcuts.
#
# We're also sprinkling in some early stopping to prevent overfitting. If you're running this on Kaggle, this is the point where you want to have GPU support.
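# (Aside: the "shortcuts" mentioned above are the concatenate() calls in the decoder below, which merge the encoder feature maps c1-c4 back into the matching upsampling stages so that fine spatial detail survives the bottleneck.)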
# In[ ]:
# Build U-Net model
inputs = Input((im_height, im_width, im_chan))
s = Lambda(lambda x: x / 255) (inputs)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (s)
c1 = Conv2D(8, (3, 3), activation='relu', padding='same') (c1)
p1 = MaxPooling2D((2, 2)) (c1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (p1)
c2 = Conv2D(16, (3, 3), activation='relu', padding='same') (c2)
p2 = MaxPooling2D((2, 2)) (c2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (p2)
c3 = Conv2D(32, (3, 3), activation='relu', padding='same') (c3)
p3 = MaxPooling2D((2, 2)) (c3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (p3)
c4 = Conv2D(64, (3, 3), activation='relu', padding='same') (c4)
p4 = MaxPooling2D(pool_size=(2, 2)) (c4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same') (p4)
c5 = Conv2D(128, (3, 3), activation='relu', padding='same') (c5)
u6 = Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same') (c5)
u6 = concatenate([u6, c4])
c6 = Conv2D(64, (3, 3), activation='relu', padding='same') (u6)
c6 = Conv2D(64, (3, 3), activation='relu', padding='same') (c6)
u7 = Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same') (c6)
u7 = concatenate([u7, c3])
c7 = Conv2D(32, (3, 3), activation='relu', padding='same') (u7)
c7 = Conv2D(32, (3, 3), activation='relu', padding='same') (c7)
u8 = Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same') (c7)
u8 = concatenate([u8, c2])
c8 = Conv2D(16, (3, 3), activation='relu', padding='same') (u8)
c8 = Conv2D(16, (3, 3), activation='relu', padding='same') (c8)
u9 = Conv2DTranspose(8, (2, 2), strides=(2, 2), padding='same') (c8)
u9 = concatenate([u9, c1], axis=3)
c9 = Conv2D(8, (3, 3), activation='relu', padding='same') (u9)
c9 = Conv2D(8, (3, 3), activation='relu', padding='same') (c9)
outputs = Conv2D(1, (1, 1), activation='sigmoid') (c9)
model = Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[mean_iou])
model.summary()
# In[ ]:
earlystopper = EarlyStopping(patience=5, verbose=1)
checkpointer = ModelCheckpoint('model-tgs-salt-1.h5', verbose=1, save_best_only=True)
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=8, epochs=30,
callbacks=[earlystopper, checkpointer])
# # Test Data
# First we'll get the test data. This takes a while, since it's 18000 samples.
# In[ ]:
# Get and resize test images
X_test = np.zeros((len(test_ids), im_height, im_width, im_chan), dtype=np.uint8)
sizes_test = []
print('Getting and resizing test images ... ')
sys.stdout.flush()
for n, id_ in tqdm_notebook(enumerate(test_ids), total=len(test_ids)):
path = path_test
img = load_img(path + '/images/' + id_)
x = img_to_array(img)[:,:,1]
sizes_test.append([x.shape[0], x.shape[1]])
x = resize(x, (128, 128, 1), mode='constant', preserve_range=True)
X_test[n] = x
print('Done!')
# In[ ]:
# Predict on train, val and test
model = load_model('model-tgs-salt-1.h5', custom_objects={'mean_iou': mean_iou})
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
# In[ ]:
# Create list of upsampled test masks
preds_test_upsampled = []
for i in tnrange(len(preds_test)):
preds_test_upsampled.append(resize(np.squeeze(preds_test[i]),
(sizes_test[i][0], sizes_test[i][1]),
mode='constant', preserve_range=True))
# In[ ]:
preds_test_upsampled[0].shape
# We'll look at it again, just to be sure.
# In[ ]:
# Perform a sanity check on some random training samples
ix = random.randint(0, len(preds_train_t) - 1)
plt.imshow(np.dstack((X_train[ix],X_train[ix],X_train[ix])))
plt.show()
tmp = np.squeeze(Y_train[ix]).astype(np.float32)
plt.imshow(np.dstack((tmp,tmp,tmp)))
plt.show()
tmp = np.squeeze(preds_train_t[ix]).astype(np.float32)
plt.imshow(np.dstack((tmp,tmp,tmp)))
plt.show()
# # Prepare Submission
# We need to prepare the submission: a nice CSV with predictions. All of this is taken one-to-one from Ketil and does not differ from any of the other segmentation tasks. Check them out to improve on this.
# In[ ]:
def RLenc(img, order='F', format=True):
"""
img is binary mask image, shape (r,c)
order is down-then-right, i.e. Fortran
    format determines whether the output is preformatted as a string (per the submission rules) or returned as raw runs
    returns the run-length encoding as a string (if format is True) or as a list of (start, length) tuples
"""
bytes = img.reshape(img.shape[0] * img.shape[1], order=order)
runs = [] ## list of run lengths
r = 0 ## the current run length
pos = 1 ## count starts from 1 per WK
for c in bytes:
if (c == 0):
if r != 0:
runs.append((pos, r))
pos += r
r = 0
pos += 1
else:
r += 1
# if last run is unsaved (i.e. data ends with 1)
if r != 0:
runs.append((pos, r))
pos += r
r = 0
if format:
z = ''
for rr in runs:
z += '{} {} '.format(rr[0], rr[1])
return z[:-1]
else:
return runs
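# Quick sanity check of the encoding on a toy mask (made up for illustration, not part of the
# original kernel): in down-then-right (Fortran) order, a 2x2 mask with ones in its second column
# flattens to [0, 0, 1, 1], i.e. a single run starting at pixel 3 with length 2.
assert RLenc(np.array([[0, 1], [0, 1]])) == '3 2'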
pred_dict = {fn[:-4]:RLenc(np.round(preds_test_upsampled[i])) for i,fn in tqdm_notebook(enumerate(test_ids))}
# In[ ]:
sub =
|
pd.DataFrame.from_dict(pred_dict,orient='index')
|
pandas.DataFrame.from_dict
|
"""Analysis.
This script executes the models that find arbitrage opportunities.
Currently two arbitrage models are used:
1. Triangular arbitrage.
2. Bellman-Ford optimization.
"""
import sqlite3
import pandas as pd
import numpy as np
import datetime as dt
import logging
import logging.config
from pathlib import Path
import yaml
import os
from bellmanford import (
bellman_ford_exec,
print_profit_opportunity_for_path_store_db,
load_exchange_graph,
draw_graph_to_png,
)
# Logging
path = Path(os.getcwd())
Path("log").mkdir(parents=True, exist_ok=True)
log_config = Path(path, "log_config.yaml")
timestamp = "{:%Y_%m_%d_%H_%M_%S}".format(dt.datetime.now())
with open(log_config, "r") as log_file:
config_dict = yaml.safe_load(log_file.read())
# Append date stamp to the file name
log_filename = config_dict["handlers"]["file"]["filename"]
base, extension = os.path.splitext(log_filename)
base2 = "_" + os.path.splitext(os.path.basename(__file__))[0] + "_"
log_filename = "{}{}{}{}".format(base, base2, timestamp, extension)
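# e.g. a handler filename of "log/app.log" would become "log/app_analysis_2021_04_23_12_00_00.log"
# (hypothetical names: the real base comes from log_config.yaml and this script's own filename).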
config_dict["handlers"]["file"]["filename"] = log_filename
logging.config.dictConfig(config_dict)
logger = logging.getLogger(__name__)
# pandas display options controlling how much data is shown
pd.set_option("display.max_rows", None)
|
pd.set_option("display.max_columns", None)
|
pandas.set_option
|
# -*- coding: utf-8 -*-
"""
These the test the public routines exposed in types/common.py
related to inference and not otherwise tested in types/test_common.py
"""
from warnings import catch_warnings, simplefilter
import collections
import re
from datetime import datetime, date, timedelta, time
from decimal import Decimal
from numbers import Number
from fractions import Fraction
import numpy as np
import pytz
import pytest
import pandas as pd
from pandas._libs import lib, iNaT, missing as libmissing
from pandas import (Series, Index, DataFrame, Timedelta,
DatetimeIndex, TimedeltaIndex, Timestamp,
Panel, Period, Categorical, isna, Interval,
DateOffset)
from pandas import compat
from pandas.compat import u, PY2, StringIO, lrange
from pandas.core.dtypes import inference
from pandas.core.dtypes.common import (
is_timedelta64_dtype,
is_timedelta64_ns_dtype,
is_datetime64_dtype,
is_datetime64_ns_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_number,
is_integer,
is_float,
is_bool,
is_scalar,
is_scipy_sparse,
ensure_int32,
ensure_categorical)
from pandas.util import testing as tm
import pandas.util._test_decorators as td
@pytest.fixture(params=[True, False], ids=str)
def coerce(request):
return request.param
# collect all objects to be tested for list-like-ness; use tuples of objects,
# whether they are list-like or not (special casing for sets), and their ID
ll_params = [
([1], True, 'list'), # noqa: E241
([], True, 'list-empty'), # noqa: E241
((1, ), True, 'tuple'), # noqa: E241
(tuple(), True, 'tuple-empty'), # noqa: E241
({'a': 1}, True, 'dict'), # noqa: E241
(dict(), True, 'dict-empty'), # noqa: E241
({'a', 1}, 'set', 'set'), # noqa: E241
(set(), 'set', 'set-empty'), # noqa: E241
(frozenset({'a', 1}), 'set', 'frozenset'), # noqa: E241
(frozenset(), 'set', 'frozenset-empty'), # noqa: E241
(iter([1, 2]), True, 'iterator'), # noqa: E241
(iter([]), True, 'iterator-empty'), # noqa: E241
((x for x in [1, 2]), True, 'generator'), # noqa: E241
((x for x in []), True, 'generator-empty'), # noqa: E241
(Series([1]), True, 'Series'), # noqa: E241
(Series([]), True, 'Series-empty'), # noqa: E241
(Series(['a']).str, True, 'StringMethods'), # noqa: E241
(Series([], dtype='O').str, True, 'StringMethods-empty'), # noqa: E241
(Index([1]), True, 'Index'), # noqa: E241
(Index([]), True, 'Index-empty'), # noqa: E241
(DataFrame([[1]]), True, 'DataFrame'), # noqa: E241
(DataFrame(), True, 'DataFrame-empty'), # noqa: E241
(np.ndarray((2,) * 1), True, 'ndarray-1d'), # noqa: E241
(np.array([]), True, 'ndarray-1d-empty'), # noqa: E241
(np.ndarray((2,) * 2), True, 'ndarray-2d'), # noqa: E241
(np.array([[]]), True, 'ndarray-2d-empty'), # noqa: E241
(np.ndarray((2,) * 3), True, 'ndarray-3d'), # noqa: E241
(np.array([[[]]]), True, 'ndarray-3d-empty'), # noqa: E241
(np.ndarray((2,) * 4), True, 'ndarray-4d'), # noqa: E241
(np.array([[[[]]]]), True, 'ndarray-4d-empty'), # noqa: E241
(np.array(2), False, 'ndarray-0d'), # noqa: E241
(1, False, 'int'), # noqa: E241
(b'123', False, 'bytes'), # noqa: E241
(b'', False, 'bytes-empty'), # noqa: E241
('123', False, 'string'), # noqa: E241
('', False, 'string-empty'), # noqa: E241
(str, False, 'string-type'), # noqa: E241
(object(), False, 'object'), # noqa: E241
(np.nan, False, 'NaN'), # noqa: E241
(None, False, 'None') # noqa: E241
]
objs, expected, ids = zip(*ll_params)
@pytest.fixture(params=zip(objs, expected), ids=ids)
def maybe_list_like(request):
return request.param
def test_is_list_like(maybe_list_like):
obj, expected = maybe_list_like
expected = True if expected == 'set' else expected
assert inference.is_list_like(obj) == expected
def test_is_list_like_disallow_sets(maybe_list_like):
obj, expected = maybe_list_like
expected = False if expected == 'set' else expected
assert inference.is_list_like(obj, allow_sets=False) == expected
def test_is_sequence():
is_seq = inference.is_sequence
assert (is_seq((1, 2)))
assert (is_seq([1, 2]))
assert (not is_seq("abcd"))
assert (not is_seq(u("abcd")))
assert (not is_seq(np.int64))
class A(object):
def __getitem__(self):
return 1
assert (not is_seq(A()))
def test_is_array_like():
assert inference.is_array_like(Series([]))
assert inference.is_array_like(Series([1, 2]))
assert inference.is_array_like(np.array(["a", "b"]))
assert inference.is_array_like(Index(["2016-01-01"]))
class DtypeList(list):
dtype = "special"
assert inference.is_array_like(DtypeList())
assert not inference.is_array_like([1, 2, 3])
assert not inference.is_array_like(tuple())
assert not inference.is_array_like("foo")
assert not inference.is_array_like(123)
@pytest.mark.parametrize('inner', [
[], [1], (1, ), (1, 2), {'a': 1}, {1, 'a'}, Series([1]),
Series([]), Series(['a']).str, (x for x in range(5))
])
@pytest.mark.parametrize('outer', [
list, Series, np.array, tuple
])
def test_is_nested_list_like_passes(inner, outer):
result = outer([inner for _ in range(5)])
assert inference.is_list_like(result)
@pytest.mark.parametrize('obj', [
'abc', [], [1], (1,), ['a'], 'a', {'a'},
[1, 2, 3], Series([1]), DataFrame({"A": [1]}),
([1, 2] for _ in range(5)),
])
def test_is_nested_list_like_fails(obj):
assert not inference.is_nested_list_like(obj)
@pytest.mark.parametrize(
"ll", [{}, {'A': 1}, Series([1])])
def test_is_dict_like_passes(ll):
assert inference.is_dict_like(ll)
@pytest.mark.parametrize(
"ll", ['1', 1, [1, 2], (1, 2), range(2), Index([1])])
def test_is_dict_like_fails(ll):
assert not inference.is_dict_like(ll)
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
class DictLike(object):
def __init__(self, d):
self.d = d
if has_keys:
def keys(self):
return self.d.keys()
if has_getitem:
def __getitem__(self, key):
return self.d.__getitem__(key)
if has_contains:
def __contains__(self, key):
return self.d.__contains__(key)
d = DictLike({1: 2})
result = inference.is_dict_like(d)
expected = has_keys and has_getitem and has_contains
assert result is expected
def test_is_file_like(mock):
class MockFile(object):
pass
is_file = inference.is_file_like
data = StringIO("data")
assert is_file(data)
# No read / write attributes
# No iterator attributes
m = MockFile()
assert not is_file(m)
MockFile.write = lambda self: 0
# Write attribute but not an iterator
m = MockFile()
assert not is_file(m)
# gh-16530: Valid iterator just means we have the
# __iter__ attribute for our purposes.
MockFile.__iter__ = lambda self: self
# Valid write-only file
m = MockFile()
assert is_file(m)
del MockFile.write
MockFile.read = lambda self: 0
# Valid read-only file
m = MockFile()
assert is_file(m)
# Iterator but no read / write attributes
data = [1, 2, 3]
assert not is_file(data)
assert not is_file(mock.Mock())
@pytest.mark.parametrize(
"ll", [collections.namedtuple('Test', list('abc'))(1, 2, 3)])
def test_is_names_tuple_passes(ll):
assert inference.is_named_tuple(ll)
@pytest.mark.parametrize(
"ll", [(1, 2, 3), 'a', Series({'pi': 3.14})])
def test_is_names_tuple_fails(ll):
assert not inference.is_named_tuple(ll)
def test_is_hashable():
# all new-style classes are hashable by default
class HashableClass(object):
pass
class UnhashableClass1(object):
__hash__ = None
class UnhashableClass2(object):
def __hash__(self):
raise TypeError("Not hashable")
hashable = (1,
3.14,
np.float64(3.14),
'a',
tuple(),
(1, ),
HashableClass(), )
not_hashable = ([], UnhashableClass1(), )
abc_hashable_not_really_hashable = (([], ), UnhashableClass2(), )
for i in hashable:
assert inference.is_hashable(i)
for i in not_hashable:
assert not inference.is_hashable(i)
for i in abc_hashable_not_really_hashable:
assert not inference.is_hashable(i)
# numpy.array is no longer collections.Hashable as of
# https://github.com/numpy/numpy/pull/5326, just test
# is_hashable()
assert not inference.is_hashable(np.array([]))
# old-style classes in Python 2 don't appear hashable to
# collections.Hashable but also seem to support hash() by default
if PY2:
class OldStyleClass():
pass
c = OldStyleClass()
assert not isinstance(c, compat.Hashable)
assert inference.is_hashable(c)
hash(c) # this will not raise
@pytest.mark.parametrize(
"ll", [re.compile('ad')])
def test_is_re_passes(ll):
assert inference.is_re(ll)
@pytest.mark.parametrize(
"ll", ['x', 2, 3, object()])
def test_is_re_fails(ll):
assert not inference.is_re(ll)
@pytest.mark.parametrize(
"ll", [r'a', u('x'),
r'asdf',
re.compile('adsf'),
u(r'\u2233\s*'),
re.compile(r'')])
def test_is_recompilable_passes(ll):
assert inference.is_re_compilable(ll)
@pytest.mark.parametrize(
"ll", [1, [], object()])
def test_is_recompilable_fails(ll):
assert not inference.is_re_compilable(ll)
class TestInference(object):
def test_infer_dtype_bytes(self):
compare = 'string' if PY2 else 'bytes'
# string array of bytes
arr = np.array(list('abc'), dtype='S1')
assert lib.infer_dtype(arr) == compare
# object array of bytes
arr = arr.astype(object)
assert lib.infer_dtype(arr) == compare
# object array of bytes with missing values
assert lib.infer_dtype([b'a', np.nan, b'c'], skipna=True) == compare
def test_isinf_scalar(self):
# GH 11352
assert libmissing.isposinf_scalar(float('inf'))
assert libmissing.isposinf_scalar(np.inf)
assert not libmissing.isposinf_scalar(-np.inf)
assert not libmissing.isposinf_scalar(1)
assert not libmissing.isposinf_scalar('a')
assert libmissing.isneginf_scalar(float('-inf'))
assert libmissing.isneginf_scalar(-np.inf)
assert not libmissing.isneginf_scalar(np.inf)
assert not libmissing.isneginf_scalar(1)
assert not libmissing.isneginf_scalar('a')
def test_maybe_convert_numeric_infinities(self):
# see gh-13274
infinities = ['inf', 'inF', 'iNf', 'Inf',
'iNF', 'InF', 'INf', 'INF']
na_values = {'', 'NULL', 'nan'}
pos = np.array(['inf'], dtype=np.float64)
neg = np.array(['-inf'], dtype=np.float64)
msg = "Unable to parse string"
for infinity in infinities:
for maybe_int in (True, False):
out = lib.maybe_convert_numeric(
np.array([infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['-' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, neg)
out = lib.maybe_convert_numeric(
np.array([u(infinity)], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
out = lib.maybe_convert_numeric(
np.array(['+' + infinity], dtype=object),
na_values, maybe_int)
tm.assert_numpy_array_equal(out, pos)
# too many characters
with pytest.raises(ValueError, match=msg):
lib.maybe_convert_numeric(
np.array(['foo_' + infinity], dtype=object),
na_values, maybe_int)
def test_maybe_convert_numeric_post_floatify_nan(self, coerce):
# see gh-13314
data = np.array(['1.200', '-999.000', '4.500'], dtype=object)
expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
nan_values = {-999, -999.0}
out = lib.maybe_convert_numeric(data, nan_values, coerce)
tm.assert_numpy_array_equal(out, expected)
def test_convert_infs(self):
arr = np.array(['inf', 'inf', 'inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
arr = np.array(['-inf', '-inf', '-inf'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False)
assert result.dtype == np.float64
def test_scientific_no_exponent(self):
# See PR 12215
arr = np.array(['42E', '2E', '99e', '6e'], dtype='O')
result = lib.maybe_convert_numeric(arr, set(), False, True)
assert np.all(np.isnan(result))
def test_convert_non_hashable(self):
# GH13324
# make sure that we are handing non-hashables
arr = np.array([[10.0, 2], 1.0, 'apple'])
result = lib.maybe_convert_numeric(arr, set(), False, True)
tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))
def test_convert_numeric_uint64(self):
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([str(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set()), exp)
@pytest.mark.parametrize("arr", [
np.array([2**63, np.nan], dtype=object),
np.array([str(2**63), np.nan], dtype=object),
np.array([np.nan, 2**63], dtype=object),
np.array([np.nan, str(2**63)], dtype=object)])
def test_convert_numeric_uint64_nan(self, coerce, arr):
expected = arr.astype(float) if coerce else arr.copy()
result = lib.maybe_convert_numeric(arr, set(),
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
def test_convert_numeric_uint64_nan_values(self, coerce):
arr = np.array([2**63, 2**63 + 1], dtype=object)
na_values = {2**63}
expected = (np.array([np.nan, 2**63 + 1], dtype=float)
if coerce else arr.copy())
result = lib.maybe_convert_numeric(arr, na_values,
coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("case", [
np.array([2**63, -1], dtype=object),
np.array([str(2**63), -1], dtype=object),
np.array([str(2**63), str(-1)], dtype=object),
np.array([-1, 2**63], dtype=object),
np.array([-1, str(2**63)], dtype=object),
np.array([str(-1), str(2**63)], dtype=object)])
def test_convert_numeric_int64_uint64(self, case, coerce):
expected = case.astype(float) if coerce else case.copy()
result = lib.maybe_convert_numeric(case, set(), coerce_numeric=coerce)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("value", [-2**63 - 1, 2**64])
def test_convert_int_overflow(self, value):
# see gh-18584
arr = np.array([value], dtype=object)
result = lib.maybe_convert_objects(arr)
tm.assert_numpy_array_equal(arr, result)
def test_maybe_convert_objects_uint64(self):
# see gh-4471
arr = np.array([2**63], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
# NumPy bug: can't compare uint64 to int64, as that
# results in both casting to float64, so we should
# make sure that this function is robust against it
arr = np.array([np.uint64(2**63)], dtype=object)
exp = np.array([2**63], dtype=np.uint64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2, -1], dtype=object)
exp = np.array([2, -1], dtype=np.int64)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
arr = np.array([2**63, -1], dtype=object)
exp = np.array([2**63, -1], dtype=object)
tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)
def test_mixed_dtypes_remain_object_array(self):
# GH14956
array = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1],
dtype=object)
result = lib.maybe_convert_objects(array, convert_datetime=1)
tm.assert_numpy_array_equal(result, array)
class TestTypeInference(object):
# Dummy class used for testing with Python objects
class Dummy():
pass
def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
# see pandas/conftest.py
inferred_dtype, values = any_skipna_inferred_dtype
# make sure the inferred dtype of the fixture is as requested
assert inferred_dtype == lib.infer_dtype(values, skipna=True)
def test_length_zero(self):
result = lib.infer_dtype(np.array([], dtype='i4'))
assert result == 'integer'
result = lib.infer_dtype([])
assert result == 'empty'
# GH 18004
arr = np.array([np.array([], dtype=object),
np.array([], dtype=object)])
result = lib.infer_dtype(arr)
assert result == 'empty'
def test_integers(self):
arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'integer'
arr = np.array([1, 2, 3, np.int64(4), np.int32(5), 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='i4')
result = lib.infer_dtype(arr)
assert result == 'integer'
def test_bools(self):
arr = np.array([True, False, True, True, True], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([np.bool_(True), np.bool_(False)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, False, True, 'foo'], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([True, False, True], dtype=bool)
result = lib.infer_dtype(arr)
assert result == 'boolean'
arr = np.array([True, np.nan, False], dtype='O')
result = lib.infer_dtype(arr, skipna=True)
assert result == 'boolean'
def test_floats(self):
arr = np.array([1., 2., 3., np.float64(4), np.float32(5)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, np.float64(4), np.float32(5), 'foo'],
dtype='O')
result = lib.infer_dtype(arr)
assert result == 'mixed-integer'
arr = np.array([1, 2, 3, 4, 5], dtype='f4')
result = lib.infer_dtype(arr)
assert result == 'floating'
arr = np.array([1, 2, 3, 4, 5], dtype='f8')
result = lib.infer_dtype(arr)
assert result == 'floating'
def test_decimals(self):
# GH15690
arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([1.0, 2.0, Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = np.array([Decimal(1), Decimal('NaN'), Decimal(3)])
result = lib.infer_dtype(arr)
assert result == 'decimal'
arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype='O')
result = lib.infer_dtype(arr)
assert result == 'decimal'
def test_string(self):
pass
def test_unicode(self):
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr)
assert result == 'mixed'
arr = [u'a', np.nan, u'c']
result = lib.infer_dtype(arr, skipna=True)
expected = 'unicode' if PY2 else 'string'
assert result == expected
@pytest.mark.parametrize('dtype, missing, skipna, expected', [
(float, np.nan, False, 'floating'),
(float, np.nan, True, 'floating'),
(object, np.nan, False, 'floating'),
(object, np.nan, True, 'empty'),
(object, None, False, 'mixed'),
(object, None, True, 'empty')
])
@pytest.mark.parametrize('box', [pd.Series, np.array])
def test_object_empty(self, box, missing, dtype, skipna, expected):
# GH 23421
arr = box([missing, missing], dtype=dtype)
result = lib.infer_dtype(arr, skipna=skipna)
assert result == expected
def test_datetime(self):
dates = [datetime(2012, 1, x) for x in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'datetime64'
def test_infer_dtype_datetime(self):
arr = np.array([Timestamp('2011-01-01'),
Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.datetime64('2011-01-01'),
np.datetime64('2011-01-01')], dtype=object)
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)])
assert lib.infer_dtype(arr) == 'datetime'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1)])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, pd.Timestamp('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([n, np.datetime64('2011-01-02'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([n, datetime(2011, 1, 1), n])
assert lib.infer_dtype(arr) == 'datetime'
# different type of nat
arr = np.array([np.timedelta64('nat'),
np.datetime64('2011-01-02')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.datetime64('2011-01-02'),
np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
# mixed datetime
arr = np.array([datetime(2011, 1, 1),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'datetime'
# should be datetime?
arr = np.array([np.datetime64('2011-01-01'),
pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Timestamp('2011-01-02'),
np.datetime64('2011-01-01')])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1])
assert lib.infer_dtype(arr) == 'mixed-integer'
arr = np.array([np.nan, pd.Timestamp('2011-01-02'), 1.1])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.nan, '2011-01-01', pd.Timestamp('2011-01-02')])
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_timedelta(self):
arr = np.array([pd.Timedelta('1 days'),
pd.Timedelta('2 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([np.timedelta64(1, 'D'),
np.timedelta64(2, 'D')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([timedelta(1), timedelta(2)])
assert lib.infer_dtype(arr) == 'timedelta'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, Timedelta('1 days')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D')])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1)])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, pd.Timedelta('1 days'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, np.timedelta64(1, 'D'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([n, timedelta(1), n])
assert lib.infer_dtype(arr) == 'timedelta'
# different type of nat
arr = np.array([np.datetime64('nat'), np.timedelta64(1, 'D')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64(1, 'D'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_infer_dtype_period(self):
# GH 13664
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([pd.Period('2011-01', freq='D'),
pd.Period('2011-02', freq='M')])
assert lib.infer_dtype(arr) == 'period'
# starts with nan
for n in [pd.NaT, np.nan]:
arr = np.array([n, pd.Period('2011-01', freq='D')])
assert lib.infer_dtype(arr) == 'period'
arr = np.array([n, pd.Period('2011-01', freq='D'), n])
assert lib.infer_dtype(arr) == 'period'
# different type of nat
arr = np.array([np.datetime64('nat'), pd.Period('2011-01', freq='M')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([pd.Period('2011-01', freq='M'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
@pytest.mark.parametrize(
"data",
[
[datetime(2017, 6, 12, 19, 30), datetime(2017, 3, 11, 1, 15)],
[Timestamp("20170612"), Timestamp("20170311")],
[Timestamp("20170612", tz='US/Eastern'),
Timestamp("20170311", tz='US/Eastern')],
[date(2017, 6, 12),
Timestamp("20170311", tz='US/Eastern')],
[np.datetime64("2017-06-12"), np.datetime64("2017-03-11")],
[np.datetime64("2017-06-12"), datetime(2017, 3, 11, 1, 15)]
]
)
def test_infer_datetimelike_array_datetime(self, data):
assert lib.infer_datetimelike_array(data) == "datetime"
@pytest.mark.parametrize(
"data",
[
[timedelta(2017, 6, 12), timedelta(2017, 3, 11)],
[timedelta(2017, 6, 12), date(2017, 3, 11)],
[np.timedelta64(2017, "D"), np.timedelta64(6, "s")],
[np.timedelta64(2017, "D"), timedelta(2017, 3, 11)]
]
)
def test_infer_datetimelike_array_timedelta(self, data):
assert lib.infer_datetimelike_array(data) == "timedelta"
def test_infer_datetimelike_array_date(self):
arr = [date(2017, 6, 12), date(2017, 3, 11)]
assert lib.infer_datetimelike_array(arr) == "date"
@pytest.mark.parametrize(
"data",
[
["2017-06-12", "2017-03-11"],
[20170612, 20170311],
[20170612.5, 20170311.8],
[Dummy(), Dummy()],
[Timestamp("20170612"), Timestamp("20170311", tz='US/Eastern')],
[Timestamp("20170612"), 20170311],
[timedelta(2017, 6, 12), Timestamp("20170311", tz='US/Eastern')]
]
)
def test_infer_datetimelike_array_mixed(self, data):
assert lib.infer_datetimelike_array(data) == "mixed"
@pytest.mark.parametrize(
"first, expected",
[
[[None], "mixed"],
[[np.nan], "mixed"],
[[pd.NaT], "nat"],
[[datetime(2017, 6, 12, 19, 30), pd.NaT], "datetime"],
[[np.datetime64("2017-06-12"), pd.NaT], "datetime"],
[[date(2017, 6, 12), pd.NaT], "date"],
[[timedelta(2017, 6, 12), pd.NaT], "timedelta"],
[[np.timedelta64(2017, "D"), pd.NaT], "timedelta"]
]
)
@pytest.mark.parametrize("second", [None, np.nan])
def test_infer_datetimelike_array_nan_nat_like(self, first, second,
expected):
first.append(second)
assert lib.infer_datetimelike_array(first) == expected
def test_infer_dtype_all_nan_nat_like(self):
arr = np.array([np.nan, np.nan])
assert lib.infer_dtype(arr) == 'floating'
# nan and None mix are result in mixed
arr = np.array([np.nan, np.nan, None])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([None, np.nan, np.nan])
assert lib.infer_dtype(arr) == 'mixed'
# pd.NaT
arr = np.array([pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([np.nan, pd.NaT, np.nan])
assert lib.infer_dtype(arr) == 'datetime'
arr = np.array([None, pd.NaT, None])
assert lib.infer_dtype(arr) == 'datetime'
# np.datetime64(nat)
arr = np.array([np.datetime64('nat')])
assert lib.infer_dtype(arr) == 'datetime64'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([pd.NaT, n, np.datetime64('nat'), n])
assert lib.infer_dtype(arr) == 'datetime64'
arr = np.array([np.timedelta64('nat')], dtype=object)
assert lib.infer_dtype(arr) == 'timedelta'
for n in [np.nan, pd.NaT, None]:
arr = np.array([n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
arr = np.array([pd.NaT, n, np.timedelta64('nat'), n])
assert lib.infer_dtype(arr) == 'timedelta'
# datetime / timedelta mixed
arr = np.array([pd.NaT, np.datetime64('nat'),
np.timedelta64('nat'), np.nan])
assert lib.infer_dtype(arr) == 'mixed'
arr = np.array([np.timedelta64('nat'), np.datetime64('nat')],
dtype=object)
assert lib.infer_dtype(arr) == 'mixed'
def test_is_datetimelike_array_all_nan_nat_like(self):
arr = np.array([np.nan, pd.NaT, np.datetime64('nat')])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT, np.datetime64('nat'),
np.timedelta64('nat')])
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, pd.NaT])
assert lib.is_datetime_array(arr)
assert lib.is_datetime64_array(arr)
assert lib.is_timedelta_or_timedelta64_array(arr)
arr = np.array([np.nan, np.nan], dtype=object)
assert not lib.is_datetime_array(arr)
assert not lib.is_datetime64_array(arr)
assert not lib.is_timedelta_or_timedelta64_array(arr)
assert lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='US/Eastern')],
dtype=object))
assert not lib.is_datetime_with_singletz_array(
np.array([pd.Timestamp('20130101', tz='US/Eastern'),
pd.Timestamp('20130102', tz='CET')],
dtype=object))
@pytest.mark.parametrize(
"func",
[
'is_datetime_array',
'is_datetime64_array',
'is_bool_array',
'is_timedelta_or_timedelta64_array',
'is_date_array',
'is_time_array',
'is_interval_array',
'is_period_array'])
def test_other_dtypes_for_array(self, func):
func = getattr(lib, func)
arr = np.array(['foo', 'bar'])
assert not func(arr)
arr = np.array([1, 2])
assert not func(arr)
def test_date(self):
dates = [date(2012, 1, day) for day in range(1, 20)]
index = Index(dates)
assert index.inferred_type == 'date'
dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
result = lib.infer_dtype(dates)
assert result == 'mixed'
result =
|
lib.infer_dtype(dates, skipna=True)
|
pandas._libs.lib.infer_dtype
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 23 12:34:01 2018
@author: <NAME>
"""
import pandas as pd
import numpy as np
from scipy import sparse
from aikit.enums import DataTypes
def get_type(data):
"""Retrieve the type of a data
Parameters
----------
    data : pd.DataFrame, np.array, ...
        the object whose type we want to retrieve
Returns
-------
one data_type or None
Example
-------
>>> df = pd.DataFrame({"a":np.arange(10)})
>>> dfs = pd.SparseDataFrame({"a":[0,0,0,1,1]})
>>> assert get_type(df) == DataTypes.DataFrame
>>> assert get_type(df["a"]) == DataTypes.Serie
>>> assert get_type(df.values ) == DataTypes.NumpyArray
>>> assert get_type(sparse.coo_matrix(df.values)) == DataTypes.SparseArray
>>> assert get_type(dfs) == DataTypes.SparseDataFrame
"""
type_of_data = type(data)
if type_of_data == pd.DataFrame:
return DataTypes.DataFrame
elif type_of_data == pd.Series:
return DataTypes.Serie
elif type_of_data == np.ndarray:
return DataTypes.NumpyArray
elif type_of_data == pd.SparseDataFrame:
return DataTypes.SparseDataFrame
elif sparse.issparse(data):
return DataTypes.SparseArray
else:
return None
def convert_to_dataframe(xx, mapped_type=None):
""" convert something to a DataFrame """
if mapped_type is None:
mapped_type = get_type(xx)
if mapped_type is None:
return pd.DataFrame(xx) # try to create a DataFrame no matter what
if mapped_type == DataTypes.DataFrame:
return xx
elif mapped_type == DataTypes.Serie:
return pd.DataFrame(xx, index=xx.index)
elif mapped_type == DataTypes.NumpyArray:
return pd.DataFrame(xx)
elif mapped_type == DataTypes.SparseDataFrame:
return xx.to_dense()
elif mapped_type == DataTypes.SparseArray:
return pd.DataFrame(xx.todense())
else:
raise TypeError("I don't know that type : %s" % type(xx))
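# A minimal usage sketch in the same doctest style as get_type above (toy values only):
# >>> convert_to_dataframe(np.arange(6).reshape(3, 2)).shape
# (3, 2)
# >>> convert_to_array(pd.Series([1, 2, 3])).shape
# (3, 1)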
def convert_to_array(xx, mapped_type=None):
""" convert something to a Numpy Array """
if mapped_type is None:
mapped_type = get_type(xx)
if mapped_type is None:
return convert_to_array(convert_to_dataframe(xx))
if mapped_type == DataTypes.DataFrame:
return xx.values
elif mapped_type == DataTypes.Serie:
return xx.values.reshape((xx.shape[0], 1))
elif mapped_type == DataTypes.NumpyArray:
if xx.ndim == 1:
return xx.reshape((xx.shape[0], 1))
else:
return xx
elif mapped_type == DataTypes.SparseArray:
return np.array(xx.todense()) # np.array to prevent type 'matrix'
elif mapped_type == DataTypes.SparseDataFrame:
return xx.to_dense().values
else:
raise TypeError("I don't know how to convert that %s" % type(xx))
def convert_to_sparsearray(xx, mapped_type=None):
""" convert something to a Sparse Array """
if mapped_type is None:
mapped_type = get_type(xx)
if mapped_type is None:
return convert_to_sparsearray(convert_to_dataframe(xx))
if mapped_type == DataTypes.DataFrame:
return sparse.csr_matrix(xx.values)
elif mapped_type == DataTypes.Serie:
        return sparse.csr_matrix(xx.values[:, np.newaxis])  # np.newaxis to make sure I have 2 dimensions
elif mapped_type == DataTypes.NumpyArray:
if xx.ndim == 1:
return sparse.csr_matrix(xx.reshape((xx.shape[0], 1)))
else:
return sparse.csr_matrix(xx)
elif mapped_type == DataTypes.SparseArray:
return xx
elif mapped_type == DataTypes.SparseDataFrame:
return xx.to_coo() # maybe convert to csr ?
else:
raise TypeError("I don't know how to convert that %s" % type(xx))
def convert_to_sparsedataframe(xx, mapped_type=None):
""" convert something to a Sparse DataFrame """
if mapped_type is None:
mapped_type = get_type(xx)
if mapped_type is None:
return convert_to_sparsedataframe(convert_to_dataframe(xx))
if mapped_type == DataTypes.DataFrame:
return pd.SparseDataFrame(xx, default_fill_value=0)
elif mapped_type == DataTypes.Serie:
return pd.SparseDataFrame(pd.DataFrame(xx), default_fill_value=0)
elif mapped_type == DataTypes.NumpyArray:
if xx.ndim == 1:
return pd.SparseDataFrame(xx.reshape((xx.shape[0], 1)), default_fill_value=0)
else:
return pd.SparseDataFrame(xx, default_fill_value=0)
elif mapped_type == DataTypes.SparseArray:
return
|
pd.SparseDataFrame(xx, default_fill_value=0)
|
pandas.SparseDataFrame
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 7 11:17:50 2018
@author: nmei
plot cross experiment generalization
"""
import os
import pandas as pd
import numpy as np
import seaborn as sns
sns.set_style('whitegrid')
sns.set_context('poster')
import matplotlib.pyplot as plt
working_dir = '../results/cross_experiment_generalization'
df = pd.read_csv(os.path.join(working_dir,'cross experiment generalization.csv'))
df_full = pd.read_csv(os.path.join(working_dir,'cross experiment generalization (folds).csv'))
df_corrected = pd.read_csv(os.path.join(working_dir,'cross experimnet validation post test.csv'))
df_plot = df.copy()
resample = []
n = 500
for (model_name,experiment_name),df_sub in df_plot.groupby(['model','test']):
df_sub
temp_ = []
for window, df_sub_sub in df_sub.groupby(['window']):
temp = pd.concat([df_sub_sub]*n,ignore_index=True)
np.random.seed(12345)
scores = np.random.normal(loc = df_sub_sub['score_mean'].values[0],
scale = df_sub_sub['score_std'].values[0],
size = n)
temp['scores'] = scores
temp_.append(temp)
resample.append(pd.concat(temp_))
resample =
|
pd.concat(resample)
|
pandas.concat
|
import pandas as pd
import numpy as np
import gc
import os
# read data
col_dict = {'mjd': np.float64, 'flux': np.float32, 'flux_err': np.float32, 'object_id': np.int32, 'passband': np.int8,
'detected': np.int8}
train_meta = pd.read_csv(os.path.join('data', 'training_set_metadata.csv'))
train = pd.read_csv(os.path.join('data', 'training_set.csv'), dtype=col_dict)
def calc_aggs(all_data, exact):
# Normalise the flux, following the Bayesian approach here:
# https://www.statlect.com/fundamentals-of-statistics/normal-distribution-Bayesian-estimation
# Similar idea (but not the same) as the normalisation done in the Starter Kit
# https://www.kaggle.com/michaelapers/the-plasticc-astronomy-starter-kit?scriptVersionId=6040398
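    # Concretely, the update below is a precision-weighted average: the observed flux (with
    # flux_err as its standard deviation) is combined with the per-(object, passband) prior mean
    # and std, so noisy observations get pulled toward the band-level mean.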
prior_mean = all_data.groupby(['object_id', 'passband'])['flux'].transform('mean')
prior_std = all_data.groupby(['object_id', 'passband'])['flux'].transform('std')
prior_std.loc[prior_std.isnull()] = all_data.loc[prior_std.isnull(), 'flux_err']
obs_std = all_data['flux_err'] # since the above kernel tells us that the flux error is the 68% confidence interval
all_data['bayes_flux'] = (all_data['flux'] / obs_std**2 + prior_mean / prior_std**2) \
/ (1 / obs_std**2 + 1 / prior_std**2)
all_data.loc[all_data['bayes_flux'].notnull(), 'flux'] \
= all_data.loc[all_data['bayes_flux'].notnull(), 'bayes_flux']
# Estimate the flux at source, using the fact that light is proportional
# to inverse square of distance from source.
# This is hinted at here: https://www.kaggle.com/c/PLAsTiCC-2018/discussion/70725#417195
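    # Redshift is used as a rough proxy for distance here, so multiplying the observed flux by
    # redshift**2 (only where redshift > 0, below) approximately undoes the inverse-square dimming.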
redshift = all_meta.set_index('object_id')[['hostgal_specz', 'hostgal_photoz']]
if exact:
redshift['redshift'] = redshift['hostgal_specz']
redshift.loc[redshift['redshift'].isnull(), 'redshift'] \
= redshift.loc[redshift['redshift'].isnull(), 'hostgal_photoz']
else:
redshift['redshift'] = redshift['hostgal_photoz']
all_data = pd.merge(all_data, redshift, 'left', 'object_id')
nonzero_redshift = all_data['redshift'] > 0
all_data.loc[nonzero_redshift, 'flux'] = all_data.loc[nonzero_redshift, 'flux'] \
* all_data.loc[nonzero_redshift, 'redshift']**2
# aggregate features
band_aggs = all_data.groupby(['object_id', 'passband'])['flux'].agg(['mean', 'std', 'max', 'min']).unstack(-1)
band_aggs.columns = [x + '_' + str(y) for x in band_aggs.columns.levels[0]
for y in band_aggs.columns.levels[1]]
all_data.sort_values(['object_id', 'passband', 'flux'], inplace=True)
# this way of calculating quantiles is faster than using the pandas quantile builtin on the groupby object
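    # After the sort above, cumcount gives each row's rank within its (object, passband) group, so
    # the row whose rank equals int(group_size * q) is (approximately) the q-quantile observation;
    # the groupby max() afterwards just collapses the single non-null value per group.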
all_data['group_count'] = all_data.groupby(['object_id', 'passband']).cumcount()
all_data['group_size'] = all_data.groupby(['object_id', 'passband'])['flux'].transform('size')
q_list = [0.25, 0.75]
for q in q_list:
all_data['q_' + str(q)] = all_data.loc[
(all_data['group_size'] * q).astype(int) == all_data['group_count'], 'flux']
quantiles = all_data.groupby(['object_id', 'passband'])[['q_' + str(q) for q in q_list]].max().unstack(-1)
quantiles.columns = [str(x) + '_' + str(y) + '_quantile' for x in quantiles.columns.levels[0]
for y in quantiles.columns.levels[1]]
# max detected flux
max_detected = all_data.loc[all_data['detected'] == 1].groupby('object_id')['flux'].max().to_frame('max_detected')
def most_extreme(df_in, k, positive=True, suffix='', include_max=True, include_dur=True, include_interval=False):
# find the "most extreme" time for each object, and for each band, retrieve the k data points on either side
# k points before
df = df_in.copy()
df['object_passband_mean'] = df.groupby(['object_id', 'passband'])['flux'].transform('median')
if positive:
df['dist_from_mean'] = (df['flux'] - df['object_passband_mean'])
else:
df['dist_from_mean'] = -(df['flux'] - df['object_passband_mean'])
max_time = df.loc[df['detected'] == 1].groupby('object_id')['dist_from_mean'].idxmax().to_frame(
'max_ind')
max_time['mjd_max' + suffix] = df.loc[max_time['max_ind'].values, 'mjd'].values
df = pd.merge(df, max_time[['mjd_max' + suffix]], 'left', left_on=['object_id'], right_index=True)
df['time_after_mjd_max'] = df['mjd'] - df['mjd_max' + suffix]
df['time_before_mjd_max'] = -df['time_after_mjd_max']
# first k after event
df.sort_values(['object_id', 'passband', 'time_after_mjd_max'], inplace=True)
df['row_num_after'] = df.loc[df['time_after_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_after = df.loc[(df['row_num_after'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_after']]
first_k_after.set_index(['object_id', 'passband', 'row_num_after'], inplace=True)
first_k_after = first_k_after.unstack(level=-1).unstack(level=-1)
first_k_after.columns = [str(x) + '_' + str(y) + '_after' for x in first_k_after.columns.levels[1]
for y in first_k_after.columns.levels[2]]
extreme_data = first_k_after
time_bands = [[-50, -20], [-20, -10], [-10, 0], [0, 10], [10, 20], [20, 50], [50, 100], [100, 200], [200, 500]]
if include_interval:
interval_arr = []
for start, end in time_bands:
band_data = df.loc[(start <= df['time_after_mjd_max']) & (df['time_after_mjd_max'] <= end)]
interval_agg = band_data.groupby(['object_id', 'passband'])['flux'].mean().unstack(-1)
interval_agg.columns = ['{}_start_{}_end_{}'.format(c, start, end) for c in interval_agg.columns]
interval_arr.append(interval_agg)
interval_data = pd.concat(interval_arr, axis=1)
extreme_data = pd.concat([extreme_data, interval_data], axis=1)
if include_dur:
# detection duration in each passband after event
duration_after = df.loc[(df['time_after_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_after_mjd_max'].first().unstack(-1)
duration_after.columns = ['dur_after_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_after], axis=1)
# last k before event
df.sort_values(['object_id', 'passband', 'time_before_mjd_max'], inplace=True)
df['row_num_before'] = df.loc[df['time_before_mjd_max'] >= 0].groupby(
['object_id', 'passband']).cumcount()
first_k_before = df.loc[(df['row_num_before'] < k) & (df['time_after_mjd_max'] <= 50),
['object_id', 'passband', 'flux', 'row_num_before']]
first_k_before.set_index(['object_id', 'passband', 'row_num_before'], inplace=True)
first_k_before = first_k_before.unstack(level=-1).unstack(level=-1)
first_k_before.columns = [str(x) + '_' + str(y) + '_before' for x in first_k_before.columns.levels[1]
for y in first_k_before.columns.levels[2]]
extreme_data = pd.concat([extreme_data, first_k_before], axis=1)
if include_dur:
# detection duration in each passband before event
duration_before = df.loc[(df['time_before_mjd_max'] >= 0) & (df['detected'] == 0)] \
.groupby(['object_id', 'passband'])['time_before_mjd_max'].first().unstack(-1)
duration_before.columns = ['dur_before_' + str(c) for c in range(6)]
extreme_data = pd.concat([extreme_data, duration_before], axis=1)
if include_max:
# passband with maximum detected flux for each object
max_pb = df.loc[max_time['max_ind'].values].groupby('object_id')['passband'].max().to_frame(
'max_passband')
# time of max in each passband, relative to extreme max
band_max_ind = df.groupby(['object_id', 'passband'])['flux'].idxmax()
band_mjd_max = df.loc[band_max_ind.values].groupby(['object_id', 'passband'])['mjd'].max().unstack(-1)
cols = ['max_time_' + str(i) for i in range(6)]
band_mjd_max.columns = cols
band_mjd_max = pd.merge(band_mjd_max, max_time, 'left', 'object_id')
for c in cols:
band_mjd_max[c] -= band_mjd_max['mjd_max' + suffix]
band_mjd_max.drop(['mjd_max' + suffix, 'max_ind'], axis=1, inplace=True)
extreme_data = pd.concat([extreme_data, max_pb, band_mjd_max], axis=1)
extreme_data.columns = [c + suffix for c in extreme_data.columns]
return extreme_data
extreme_max = most_extreme(all_data, 1, positive=True, suffix='', include_max=True, include_dur=True,
include_interval=True)
extreme_min = most_extreme(all_data, 1, positive=False, suffix='_min', include_max=False, include_dur=True)
# add the feature mentioned here, attempts to identify periodicity:
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/69696#410538
time_between_detections = all_data.loc[all_data['detected'] == 1].groupby('object_id')['mjd'].agg(['max', 'min'])
time_between_detections['det_period'] = time_between_detections['max'] - time_between_detections['min']
# same feature but grouped by passband
time_between_detections_pb \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id', 'passband'])['mjd'].agg(['max', 'min'])
time_between_detections_pb['det_period'] = time_between_detections_pb['max'] - time_between_detections_pb['min']
time_between_detections_pb = time_between_detections_pb['det_period'].unstack(-1)
time_between_detections_pb.columns = ['det_period_pb_' + str(i) for i in range(6)]
# similar feature based on high values
all_data['threshold'] = all_data.groupby(['object_id'])['flux'].transform('max') * 0.75
all_data['high'] = ((all_data['flux'] >= all_data['threshold']) & (all_data['detected'] == 1)).astype(int)
time_between_highs = all_data.loc[all_data['high'] == 1].groupby('object_id')['mjd'].agg(['max', 'min'])
time_between_highs['det_period_high'] = time_between_highs['max'] - time_between_highs['min']
# aggregate values of the features during the detection period
all_data = pd.merge(all_data, time_between_detections, 'left', 'object_id')
det_data = all_data.loc[(all_data['mjd'] >= all_data['min']) & (all_data['mjd'] <= all_data['max'])]
det_aggs = det_data.groupby(['object_id', 'passband'])['flux'].agg(['min', 'max', 'std', 'median'])
det_aggs['prop_detected'] = det_data.groupby(['object_id', 'passband'])['detected'].mean()
det_aggs = det_aggs.unstack(-1)
det_aggs.columns = [x + '_' + str(y) + '_det_period' for x in det_aggs.columns.levels[0]
for y in det_aggs.columns.levels[1]]
# time distribution of detections in each band
detection_time_dist \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id', 'passband'])['mjd'].std().unstack(-1)
detection_time_dist.columns = ['time_dist_' + str(i) for i in range(6)]
detection_time_dist_all \
= all_data.loc[all_data['detected'] == 1].groupby(['object_id'])['mjd'].std().to_frame('time_dist')
# scale data and recalculate band aggs
all_data['abs_flux'] = all_data['flux'].abs()
all_data['flux'] = (all_data['flux']) / all_data.groupby('object_id')['abs_flux'].transform('max')
band_aggs_s = all_data.groupby(['object_id', 'passband'])['flux'].agg(['mean', 'std', 'max', 'min']).unstack(-1)
band_aggs_s.columns = [x + '_' + str(y) + '_scaled' for x in band_aggs_s.columns.levels[0]
for y in band_aggs_s.columns.levels[1]]
all_data.sort_values(['object_id', 'passband', 'flux'], inplace=True)
for q in q_list:
all_data['q_' + str(q)] = all_data.loc[
(all_data['group_size'] * q).astype(int) == all_data['group_count'], 'flux']
quantiles_s = all_data.groupby(['object_id', 'passband'])[['q_' + str(q) for q in q_list]].max().unstack(-1)
quantiles_s.columns = [str(x) + '_' + str(y) + '_quantile_s' for x in quantiles_s.columns.levels[0]
for y in quantiles_s.columns.levels[1]]
extreme_max_s = most_extreme(all_data, 1, positive=True, suffix='_s', include_max=False, include_dur=False,
include_interval=True)
extreme_min_s = most_extreme(all_data, 1, positive=False, suffix='_min_s', include_max=False, include_dur=False)
new_data = pd.concat([band_aggs, quantiles, band_aggs_s, max_detected, time_between_detections[['det_period']],
time_between_detections_pb, extreme_max, extreme_min, extreme_max_s, extreme_min_s,
time_between_highs[['det_period_high']], quantiles_s, detection_time_dist,
detection_time_dist_all, det_aggs], axis=1)
return new_data
# get the metadata
test_meta = pd.read_csv(os.path.join('data', 'test_set_metadata.csv'))
all_meta =
|
pd.concat([train_meta, test_meta], axis=0, ignore_index=True, sort=True)
|
pandas.concat
|
"""
How do citation counts differ across datasets, when a paper appears in more than one dataset?
And how many citation counts are zero, by dataset?
Our motivation is understanding how dataset choice (or aggregation choice) might affect results.
"""
from itertools import permutations
from pathlib import Path
import numpy as np
import pandas as pd
from analysis import write_latest
from bq import read_sql
from settings import PROJECT_ID, DATASET
CITATION_EXPORT_PATH = Path(__file__).parent / 'citation_counts.pkl.gz'
PREDICTION_EXPORT_PATH = Path(__file__).parent / 'predictions.pkl.gz'
DATASET_ABBR = ['mag', 'ds', 'wos']
def summarize_zero_citation_counts() -> None:
df = pd.read_gbq(read_sql('../analysis/zero_citation_counts_by_dataset.sql'), project_id=PROJECT_ID)
df['Pct'] = df.groupby('dataset', as_index=False).apply(lambda x: x['count'] / x['count'].sum()).reset_index(
drop=True)
df = df.pivot_table(index='has_zero_citations', columns='dataset')
write_latest(df, 'analysis/zero_citation_counts_by_dataset.csv', index=True)
def summarize_citation_count_differences() -> None:
if CITATION_EXPORT_PATH.exists():
df = pd.read_pickle(CITATION_EXPORT_PATH, compression='gzip')
else:
df = pd.read_gbq(read_sql('../analysis/citation_count_export.sql'), project_id=PROJECT_ID)
df.to_pickle(CITATION_EXPORT_PATH, compression='gzip')
# Join in SciBERT predictions
if PREDICTION_EXPORT_PATH.exists():
hits = pd.read_pickle(PREDICTION_EXPORT_PATH, compression='gzip')
else:
hits = pd.read_gbq(f'select cset_id from {DATASET}.comparison where arxiv_scibert_hit is true', project_id=PROJECT_ID)
hits.to_pickle(PREDICTION_EXPORT_PATH, compression='gzip')
hits['scibert_hit'] = True
assert not hits['cset_id'].duplicated().any()
assert not df['cset_id'].duplicated().any()
df = pd.merge(df, hits, on='cset_id', how='left')
df['scibert_hit'] = df['scibert_hit'].fillna(False)
for a, b in permutations(DATASET_ABBR, 2):
df[f'{a}_{b}_diff'] = df[f'{a}_times_cited'] - df[f'{b}_times_cited']
df[f'{a}_{b}_diff_pct'] = df[f'{a}_{b}_diff'] / df[f'{b}_times_cited']
percentiles = calculate_percentiles(df)
write_latest(percentiles, 'analysis/citation_count_pct_diff_quantiles.csv', index=True)
zero_diff_counts = count_zeroes(df)
write_latest(zero_diff_counts, 'analysis/citation_zero_diff_counts.csv', index=True)
ai_percentiles = calculate_percentiles(df.query('scibert_hit == True'))
write_latest(ai_percentiles, 'analysis/ai_citation_count_pct_diff_quantiles.csv', index=True)
ai_zero_diff_counts = count_zeroes(df.query('scibert_hit == True'))
write_latest(ai_zero_diff_counts, 'analysis/ai_citation_zero_diff_counts.csv', index=True)
ai_percent_greater = calculate_percent_greater(df.query('scibert_hit == True'))
write_latest(ai_percent_greater, 'analysis/ai_citation_percent_greater_counts.csv', index=False)
def iter_diff_pct_cols():
for a, b in permutations(DATASET_ABBR, 2):
yield f'{a}_{b}_diff_pct'
def calculate_percent_greater(df):
pct_greater = {}
for k in ['mag_ds_diff', 'mag_wos_diff', 'ds_wos_diff']:
# Drop null differences to restrict to papers for which we observe citation counts in both datasets
is_positive = df[k].dropna() > 0
counts = is_positive.value_counts()
pct_greater[k] = \
pd.DataFrame({'count_greater': counts, 'pct': counts / counts.sum(), 'total': counts.sum()}).loc[True]
pct_greater =
|
pd.concat(pct_greater)
|
pandas.concat
|
# set similarity join
from six import iteritems
import pandas as pd
import pyprind
from py_stringsimjoin.filter.position_filter import PositionFilter
from py_stringsimjoin.index.position_index import PositionIndex
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
find_output_attribute_indices, get_output_header_from_tables, \
get_output_row_from_tables, COMP_OP_MAP
from py_stringsimjoin.utils.simfunctions import get_sim_function
from py_stringsimjoin.utils.token_ordering import \
gen_token_ordering_for_tables, order_using_token_ordering
def set_sim_join(ltable, rtable,
l_columns, r_columns,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, sim_measure_type, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress):
"""Perform set similarity join for a split of ltable and rtable"""
# find column indices of key attr, join attr and output attrs in ltable
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# generate token ordering using tokens in l_join_attr
# and r_join_attr
token_ordering = gen_token_ordering_for_tables(
[ltable, rtable],
[l_join_attr_index, r_join_attr_index],
tokenizer, sim_measure_type)
# Build position index on l_join_attr
position_index = PositionIndex(ltable, l_join_attr_index,
tokenizer, sim_measure_type,
threshold, token_ordering)
# While building the index, we cache the tokens and the empty records.
# We cache the tokens so that we need not tokenize each string in
# l_join_attr multiple times when we need to compute the similarity measure.
# Further we cache the empty record ids to handle the allow_empty flag.
cached_data = position_index.build(allow_empty, cache_tokens=True)
l_empty_records = cached_data['empty_records']
cached_l_tokens = cached_data['cached_tokens']
pos_filter = PositionFilter(tokenizer, sim_measure_type, threshold)
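    # The position filter is what keeps the join tractable: it uses prefix/positional overlap
    # bounds to discard candidate pairs that cannot reach the similarity threshold, so the exact
    # similarity below is only computed for the surviving candidates.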
sim_fn = get_sim_function(sim_measure_type)
comp_fn = COMP_OP_MAP[comp_op]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
prog_bar = pyprind.ProgBar(len(rtable))
for r_row in rtable:
r_string = r_row[r_join_attr_index]
# order the tokens using the token ordering.
r_ordered_tokens = order_using_token_ordering(
tokenizer.tokenize(r_string), token_ordering)
# If allow_empty flag is set and the current rtable record has empty set
# of tokens in the join attribute, then generate output pairs joining
# the current rtable record with those records in ltable with empty set
# of tokens in the join attribute. These ltable record ids are cached in
# l_empty_records list which was constructed when building the position
# index.
if allow_empty and len(r_ordered_tokens) == 0:
for l_id in l_empty_records:
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable[l_id], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [ltable[l_id][l_key_attr_index],
r_row[r_key_attr_index]]
if out_sim_score:
output_row.append(1.0)
output_rows.append(output_row)
continue
# obtain candidates by applying position filter.
candidate_overlap = pos_filter.find_candidates(r_ordered_tokens,
position_index)
for cand, overlap in iteritems(candidate_overlap):
if overlap > 0:
l_ordered_tokens = cached_l_tokens[cand]
# compute the actual similarity score
sim_score = round(sim_fn(l_ordered_tokens, r_ordered_tokens), 4)
if comp_fn(sim_score, threshold):
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable[cand], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [ltable[cand][l_key_attr_index],
r_row[r_key_attr_index]]
# if out_sim_score flag is set, append the similarity score
# to the output record.
if out_sim_score:
output_row.append(sim_score)
output_rows.append(output_row)
if show_progress:
prog_bar.update()
output_header = get_output_header_from_tables(
l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix)
if out_sim_score:
output_header.append("_sim_score")
# generate a dataframe from the list of output rows
output_table =
|
pd.DataFrame(output_rows, columns=output_header)
|
pandas.DataFrame
|
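# A minimal, stand-alone sketch (not part of the join routine above) of the
# output-assembly pattern it uses: collect result rows as plain lists and build
# the DataFrame once at the end. Column names and rows here are illustrative only.
import pandas as pd

example_rows = [["l1", "r7", 0.8235], ["l3", "r2", 0.9167]]
example_header = ["l_id", "r_id", "_sim_score"]
example_table = pd.DataFrame(example_rows, columns=example_header)
print(example_table)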
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
Series,
)
import pandas._testing as tm
import pandas.core.common as com
class TestSample:
@pytest.fixture(params=[Series, DataFrame])
def obj(self, request):
klass = request.param
if klass is Series:
arr = np.random.randn(10)
else:
arr = np.random.randn(10, 10)
return klass(arr, dtype=None)
@pytest.mark.parametrize("test", list(range(10)))
def test_sample(self, test, obj):
# Fixes issue: 2419
# Check behavior of random_state argument
# Check for stability when receives seed or random state -- run 10
# times.
seed = np.random.randint(0, 100)
tm.assert_equal(
obj.sample(n=4, random_state=seed), obj.sample(n=4, random_state=seed)
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=seed),
obj.sample(frac=0.7, random_state=seed),
)
tm.assert_equal(
obj.sample(n=4, random_state=np.random.RandomState(test)),
obj.sample(n=4, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
obj.sample(frac=0.7, random_state=np.random.RandomState(test)),
)
tm.assert_equal(
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
obj.sample(frac=2, replace=True, random_state=np.random.RandomState(test)),
)
os1, os2 = [], []
for _ in range(2):
np.random.seed(test)
os1.append(obj.sample(n=4))
os2.append(obj.sample(frac=0.7))
tm.assert_equal(*os1)
tm.assert_equal(*os2)
def test_sample_lengths(self, obj):
# Check lengths are right
        assert len(obj.sample(n=4)) == 4
        assert len(obj.sample(frac=0.34)) == 3
        assert len(obj.sample(frac=0.36)) == 4
def test_sample_invalid_random_state(self, obj):
# Check for error when random_state argument invalid.
msg = (
"random_state must be an integer, array-like, a BitGenerator, Generator, "
"a numpy RandomState, or None"
)
with pytest.raises(ValueError, match=msg):
obj.sample(random_state="a_string")
def test_sample_wont_accept_n_and_frac(self, obj):
# Giving both frac and N throws error
msg = "Please enter a value for `frac` OR `n`, not both"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, frac=0.3)
def test_sample_requires_positive_n_frac(self, obj):
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `n` >= 0",
):
obj.sample(n=-3)
with pytest.raises(
ValueError,
match="A negative number of rows requested. Please provide `frac` >= 0",
):
obj.sample(frac=-0.3)
def test_sample_requires_integer_n(self, obj):
# Make sure float values of `n` give error
with pytest.raises(ValueError, match="Only integers accepted as `n` values"):
obj.sample(n=3.2)
def test_sample_invalid_weight_lengths(self, obj):
# Weight length must be right
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=[0, 1])
with pytest.raises(ValueError, match=msg):
bad_weights = [0.5] * 11
obj.sample(n=3, weights=bad_weights)
with pytest.raises(ValueError, match="Fewer non-zero entries in p than size"):
bad_weight_series = Series([0, 0, 0.2])
obj.sample(n=4, weights=bad_weight_series)
def test_sample_negative_weights(self, obj):
# Check won't accept negative weights
bad_weights = [-0.1] * 10
msg = "weight vector many not include negative values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=bad_weights)
def test_sample_inf_weights(self, obj):
# Check inf and -inf throw errors:
weights_with_inf = [0.1] * 10
weights_with_inf[0] = np.inf
msg = "weight vector may not include `inf` values"
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_inf)
weights_with_ninf = [0.1] * 10
weights_with_ninf[0] = -np.inf
with pytest.raises(ValueError, match=msg):
obj.sample(n=3, weights=weights_with_ninf)
def test_sample_zero_weights(self, obj):
# All zeros raises errors
zero_weights = [0] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=zero_weights)
def test_sample_missing_weights(self, obj):
# All missing weights
nan_weights = [np.nan] * 10
with pytest.raises(ValueError, match="Invalid weights: weights sum to zero"):
obj.sample(n=3, weights=nan_weights)
def test_sample_none_weights(self, obj):
# Check None are also replaced by zeros.
weights_with_None = [None] * 10
weights_with_None[5] = 0.5
tm.assert_equal(
obj.sample(n=1, axis=0, weights=weights_with_None), obj.iloc[5:6]
)
@pytest.mark.parametrize(
"func_str,arg",
[
("np.array", [2, 3, 1, 0]),
("np.random.MT19937", 3),
("np.random.PCG64", 11),
],
)
def test_sample_random_state(self, func_str, arg, frame_or_series):
# GH#32503
obj = DataFrame({"col1": range(10, 20), "col2": range(20, 30)})
if frame_or_series is Series:
obj = obj["col1"]
result = obj.sample(n=3, random_state=eval(func_str)(arg))
expected = obj.sample(n=3, random_state=com.random_state(eval(func_str)(arg)))
tm.assert_equal(result, expected)
def test_sample_generator(self, frame_or_series):
# GH#38100
obj = frame_or_series(np.arange(100))
rng = np.random.default_rng()
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=rng)
result2 = obj.sample(n=50, random_state=rng)
assert not (result1.index.values == result2.index.values).all()
# Matching generator initialization must give same result
# Consecutive calls should advance the seed
result1 = obj.sample(n=50, random_state=np.random.default_rng(11))
result2 = obj.sample(n=50, random_state=np.random.default_rng(11))
tm.assert_equal(result1, result2)
def test_sample_upsampling_without_replacement(self, frame_or_series):
# GH#27451
obj = DataFrame({"A": list("abc")})
if frame_or_series is Series:
obj = obj["A"]
msg = (
"Replace has to be set to `True` when "
"upsampling the population `frac` > 1."
)
with pytest.raises(ValueError, match=msg):
obj.sample(frac=2, replace=False)
class TestSampleDataFrame:
# Tests which are relevant only for DataFrame, so these are
# as fully parametrized as they can get.
def test_sample(self):
# GH#2419
# additional specific object based tests
# A few dataframe test with degenerate weights.
easy_weight_list = [0] * 10
easy_weight_list[5] = 1
df = DataFrame(
{
"col1": range(10, 20),
"col2": range(20, 30),
"colString": ["a"] * 10,
"easyweights": easy_weight_list,
}
)
sample1 = df.sample(n=1, weights="easyweights")
tm.assert_frame_equal(sample1, df.iloc[5:6])
# Ensure proper error if string given as weight for Series or
# DataFrame with axis = 1.
ser = Series(range(10))
msg = "Strings cannot be passed as weights when sampling from a Series."
with pytest.raises(ValueError, match=msg):
ser.sample(n=3, weights="weight_column")
msg = (
"Strings can only be passed to weights when sampling from rows on a "
"DataFrame"
)
with pytest.raises(ValueError, match=msg):
df.sample(n=1, weights="weight_column", axis=1)
# Check weighting key error
with pytest.raises(
KeyError, match="'String passed to weights not a valid column'"
):
df.sample(n=3, weights="not_a_real_column_name")
# Check that re-normalizes weights that don't sum to one.
weights_less_than_1 = [0] * 10
weights_less_than_1[0] = 0.5
tm.assert_frame_equal(df.sample(n=1, weights=weights_less_than_1), df.iloc[:1])
        ###
        # Test axis argument
        ###
df = DataFrame({"col1": range(10), "col2": ["a"] * 10})
second_column_weight = [0, 1]
tm.assert_frame_equal(
df.sample(n=1, axis=1, weights=second_column_weight), df[["col2"]]
)
# Different axis arg types
tm.assert_frame_equal(
df.sample(n=1, axis="columns", weights=second_column_weight), df[["col2"]]
)
weight = [0] * 10
weight[5] = 0.5
tm.assert_frame_equal(df.sample(n=1, axis="rows", weights=weight), df.iloc[5:6])
tm.assert_frame_equal(
df.sample(n=1, axis="index", weights=weight), df.iloc[5:6]
)
# Check out of range axis values
msg = "No axis named 2 for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=2)
msg = "No axis named not_a_name for object type DataFrame"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis="not_a_name")
ser = Series(range(10))
with pytest.raises(ValueError, match="No axis named 1 for object type Series"):
ser.sample(n=1, axis=1)
# Test weight length compared to correct axis
msg = "Weights and axis to be sampled must be of same length"
with pytest.raises(ValueError, match=msg):
df.sample(n=1, axis=1, weights=[0.5] * 10)
def test_sample_axis1(self):
# Check weights with axis = 1
easy_weight_list = [0] * 3
easy_weight_list[2] = 1
df = DataFrame(
{"col1": range(10, 20), "col2": range(20, 30), "colString": ["a"] * 10}
)
sample1 = df.sample(n=1, axis=1, weights=easy_weight_list)
tm.assert_frame_equal(sample1, df[["colString"]])
# Test default axes
tm.assert_frame_equal(
df.sample(n=3, random_state=42), df.sample(n=3, axis=0, random_state=42)
)
def test_sample_aligns_weights_with_frame(self):
# Test that function aligns weights with frame
df = DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
ser =
|
Series([1, 0, 0], index=[3, 5, 9])
|
pandas.Series
|
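# Stand-alone illustration (not part of the test file) of the behaviour exercised by
# test_sample_aligns_weights_with_frame: weights given as a Series are aligned on the
# index before sampling, so all of the weight below lands on row label 3.
import pandas as pd

df = pd.DataFrame({"col1": [5, 6, 7], "col2": ["a", "b", "c"]}, index=[9, 5, 3])
weights = pd.Series([1, 0, 0], index=[3, 5, 9])
picked = df.sample(n=1, weights=weights)
assert picked.index.tolist() == [3]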
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from numpy import mean, var, median
from scipy import stats
from collections import Counter
def find_gene_index(gene_list,gene):
j = [i for i,x in enumerate(gene_list) if x == gene]
return j
def find_patients_index(patients, p):
j = [i for i,x in enumerate(patients) if x == p]
return j[0]
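# Quick sanity check of the two helpers above (values are illustrative, not real data):
#   find_gene_index(['TP53', 'HOXA9', 'GATA2'], 'HOXA9')  -> [1]
#   find_patients_index([2803, 2806, 2810], 2806)         -> 1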
filename = "log_modified_LAML_TPM.csv"
filename2 = "patients.txt"
filename3 = "FAB.txt"
filename4 = "sex.txt"
filename5 = "age.txt"
filename6 = "BM_blasts.txt"
filename7 = "WBC.txt"
#filename = "modified_raw_counts.csv"
data = pd.read_csv(filename)
patients = pd.read_csv(filename2)
FAB = pd.read_csv(filename3)
sex = pd.read_csv(filename4)
age = pd.read_csv(filename5)
blasts = pd.read_csv(filename6)
WBC = pd.read_csv(filename7)
gene = 'HOXA9'
gene_list = data['Hybridization REF']
# find the index of HOXA9 in the data
i_HOXA9 = find_gene_index(gene_list, gene)
HOXA9_exp = data.iloc[i_HOXA9,2:]
peak1_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 1 and x >= 0.005] # +2 maps back to column positions in data, since the first two columns were skipped by the iloc[:, 2:] slice
peak2_indexes = [i+2 for i,x in enumerate(HOXA9_exp.values[0]) if x <= 5.5 and x >= 4]
peak1_patients = data.iloc[:,peak1_indexes]
peak2_patients = data.iloc[:,peak2_indexes]
print(len(peak1_indexes))
print(len(peak2_indexes))
peak1_patients.index = gene_list
peak2_patients.index = gene_list
# only keep the patient number
low_patients_names = [item.split('-')[2] for item in peak1_patients.columns]
high_patients_names = [item.split('-')[2] for item in peak2_patients.columns]
index_cohort1 =[find_patients_index(patients['patients'],int(item)) for item in low_patients_names]
index_cohort2 =[find_patients_index(patients['patients'],int(item)) for item in high_patients_names]
sex_list_cohort1 = sex['sex'][index_cohort1].values.tolist()
sex_list_cohort2 = sex['sex'][index_cohort2].values.tolist()
age_list_cohort1 = age['age'][index_cohort1].values.tolist()
age_list_cohort2 = age['age'][index_cohort2].values.tolist()
FAB_list_cohort1 = FAB['FAB'][index_cohort1].values.tolist()
FAB_list_cohort2 = FAB['FAB'][index_cohort2].values.tolist()
#age_class_low = ['old' if x > median(age_list_cohort1) else 'young' for x in age_list_cohort1]
#age_class_high = ['old' if x > median(age_list_cohort2) else 'young' for x in age_list_cohort2]
print("Age\n")
print(stats.shapiro(age_list_cohort1)) # normal distribution (p>0.05)
print(stats.shapiro(age_list_cohort2)) # NOT
print(stats.levene(age_list_cohort1,age_list_cohort2)) # SAME VARIANCE
print(stats.ttest_ind(age_list_cohort1,age_list_cohort2)) # CANT APPLY T TEST
# multiply pvalue by 2 to have two sided test
print(stats.mannwhitneyu(age_list_cohort1,age_list_cohort2)) # significant
WBC_list_cohort1 = WBC['WBC'][index_cohort1].values.tolist()
WBC_list_cohort2 = WBC['WBC'][index_cohort2].values.tolist()
print("WBC\n")
print(stats.shapiro(WBC_list_cohort1)) # NOT
print(stats.shapiro(WBC_list_cohort2)) # NOT
print(stats.levene(WBC_list_cohort1,WBC_list_cohort2)) # NOT SAME VARIANCE
print(stats.ttest_ind(WBC_list_cohort1,WBC_list_cohort2)) # CANT APPLY T TEST
print(stats.mannwhitneyu(WBC_list_cohort1,WBC_list_cohort2)) # significant
blasts_list_cohort1 = blasts['blasts'][index_cohort1].values.tolist()
blasts_list_cohort2 = blasts['blasts'][index_cohort2].values.tolist()
print("blasts")
print(stats.shapiro(blasts_list_cohort1)) # maybe
print(stats.shapiro(blasts_list_cohort2)) # NOT
print(stats.levene(blasts_list_cohort1,blasts_list_cohort2)) # SAME VARIANCE
print(stats.ttest_ind(blasts_list_cohort1,blasts_list_cohort2)) # CANT APPLY T TEST
print(stats.mannwhitneyu(blasts_list_cohort1,blasts_list_cohort2)) # pvalue x 2 = 0.05 - significant?
d_low = {'age': age_list_cohort1, 'FAB': FAB_list_cohort1, 'blast': blasts_list_cohort1, 'WBC': WBC_list_cohort1, 'HOXA9': ['Low']*len(age_list_cohort1)}
low = pd.DataFrame(data=d_low)
d_high = {'age': age_list_cohort2, 'FAB': FAB_list_cohort2, 'blast': blasts_list_cohort2, 'WBC': WBC_list_cohort2, 'HOXA9': ['High']*len(age_list_cohort2)}
high =
|
pd.DataFrame(data=d_high)
|
pandas.DataFrame
|
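# Side note on the two-sided tests above (assumes SciPy >= 0.17): rather than doubling
# the one-sided p-value by hand, mannwhitneyu accepts alternative="two-sided" directly.
# The data below are made-up numbers, not cohort values.
from scipy import stats

group_a = [23, 31, 45, 52, 60, 68]
group_b = [35, 40, 47, 55, 62, 70]
stat, p_two_sided = stats.mannwhitneyu(group_a, group_b, alternative="two-sided")
print(stat, p_two_sided)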
import unittest
import pandas as pd
import numpy as np
import datetime
import pytz
from variable_explorer_helpers import describe_pd_dataframe
class TestDataframeDescribe(unittest.TestCase):
def test_dataframe(self):
df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
result = describe_pd_dataframe(df)
self.assertEqual(result['row_count'], 2)
self.assertEqual(result['column_count'], 2)
self.assertEqual(len(result['rows_top']), 2)
self.assertEqual(result['rows_bottom'], None)
self.assertEqual(result['columns'][0]['name'], 'col1')
def test_dataframe_sort(self):
df =
|
pd.DataFrame(data={'col1': [3, 1, 2]})
|
pandas.DataFrame
|
# %%
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pandas as pd
import sys
from allensdk.api.queries.image_download_api import ImageDownloadApi
from allensdk.config.manifest import Manifest
import logging
import os
from tqdm import tqdm
# %%
def get_gene_by_id(results_df, ExperimentID):
gene_name = results_df["Gene Symbol"][
results_df["ExperimentID"] == ExperimentID
].iloc[0]
print(
"You are requesting for downloading brain lices of "
+ gene_name
+ " ("
+ ExperimentID
+ ")",
file=sys.stderr,
flush=True,
)
print(
        'The downloaded brain slices will be placed in the dir "' + gene_name + '".',
file=sys.stderr,
flush=True,
)
return gene_name
# %%
def search_by_keywords(keywords, outfile):
# create a browser
driver = webdriver.Chrome()
# create a result DataFrame to store results
result = pd.DataFrame()
# the index of necessary columns in the table
column_index = [1, 2, 3, 6]
for ii, keyword in enumerate(keywords):
url = "https://mouse.brain-map.org/search/show?search_term=" + keyword
driver.get(url)
        # make sure the page is correctly loaded using an explicit wait
try:
element = WebDriverWait(driver, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, "slick-column-name"))
)
        except Exception:
print(
"An exception occurred: an element could not be found.\nThe Internet speed may be too slow."
)
driver.quit()
exit()
# get header at the first loop
# if ii == 0:
# use selenium to find the header
elements = driver.find_elements_by_class_name("slick-column-name")
header = []
for element in elements:
header.append(element.text)
if len(header) == 8:
header = [header[i] for i in column_index]
else:
raise Exception("Something went wrong when accessing the header.")
        # use selenium to find the search results in the cells of the table
elements = driver.find_elements_by_tag_name("div[row]")
rows = []
for element in elements:
if element.text:
rows.append([element.text.split("\n")[i - 1] for i in column_index])
# If the search result is present, make it a dataframe
if rows:
table =
|
pd.DataFrame(rows, columns=header)
|
pandas.DataFrame
|
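# Hedged sketch of how the per-keyword tables built above might be accumulated into the
# single `result` frame (the actual append step falls outside the snippet, so this is an
# assumption about intent; the headers and rows are invented for illustration).
import pandas as pd

tables = [
    pd.DataFrame([["Gene A", "exp-1", "coronal", "yes"]],
                 columns=["Gene Symbol", "ExperimentID", "Plane", "Has Data"]),
    pd.DataFrame([["Gene B", "exp-2", "sagittal", "no"]],
                 columns=["Gene Symbol", "ExperimentID", "Plane", "Has Data"]),
]
result = pd.concat(tables, ignore_index=True)
print(result)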
# Copyright 2013-2021 The Salish Sea MEOPAR contributors
# and The University of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A collection of Python functions to produce model residual calculations and
visualizations.
"""
import datetime
import io
import arrow
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import netCDF4 as nc
import numpy as np
import pandas as pd
import pytz
import requests
from dateutil import tz
from salishsea_tools import geo_tools, stormtools, tidetools, nc_tools
from nowcast import analyze
from nowcast.figures import shared
# Module constants
paths = {
"nowcast": "/results/SalishSea/nowcast/",
"forecast": "/results/SalishSea/forecast/",
"forecast2": "/results/SalishSea/forecast2/",
"tides": "/data/nsoontie/MEOPAR/tools/SalishSeaNowcast/tidal_predictions/",
}
colours = {
"nowcast": "DodgerBlue",
"forecast": "ForestGreen",
"forecast2": "MediumVioletRed",
"observed": "Indigo",
"predicted": "ForestGreen",
"model": "blue",
"residual": "DimGray",
}
SITES = {
# Constant with station information: mean sea level, latitude,
# longitude, station number, historical extreme ssh, etc.
# Extreme ssh from DFO website
    # Mean sea level from CHS tidal constituents.
# VENUS coordinates from the VENUS website. Depth is in meters.
"Nanaimo": {"lat": 49.16, "lon": -123.93, "msl": 3.08, "extreme_ssh": 5.47},
"Halibut Bank": {"lat": 49.34, "lon": -123.72},
"Dungeness": {"lat": 48.15, "lon": -123.117},
"La Perouse Bank": {"lat": 48.83, "lon": -126.0},
"<NAME>": {
"lat": 49.33,
"lon": -123.25,
"msl": 3.09,
"stn_no": 7795,
"extreme_ssh": 5.61,
},
"Victoria": {
"lat": 48.41,
"lon": -123.36,
"msl": 1.8810,
"stn_no": 7120,
"extreme_ssh": 3.76,
},
"<NAME>": {
"lat": 50.04,
"lon": -125.24,
"msl": 2.916,
"stn_no": 8074,
"extreme_ssh": 5.35,
},
"<NAME>": {"lat": 48.4, "lon": -124.6, "stn_no": 9443090},
"<NAME>": {"lat": 48.55, "lon": -123.016667, "stn_no": 9449880},
"<NAME>": {
"lat": 48.866667,
"lon": -122.766667,
"stn_no": 9449424,
"msl": 3.543,
"extreme_ssh": 5.846,
},
"SandHeads": {"lat": 49.10, "lon": -123.30},
"Tofino": {"lat": 49.15, "lon": -125.91, "stn_no": 8615},
"Bamfield": {"lat": 48.84, "lon": -125.14, "stn_no": 8545},
"VENUS": {
"East": {"lat": 49.0419, "lon": -123.3176, "depth": 170},
"Central": {"lat": 49.0401, "lon": -123.4261, "depth": 300},
},
}
# Module functions
def plot_residual_forcing(ax, runs_list, t_orig):
"""Plots the observed water level residual at Neah Bay against
forced residuals from existing ssh*.txt files for Neah Bay.
Function may produce none, any, or all (nowcast, forecast, forecast 2)
forced residuals depending on availability for specified date (runs_list).
:arg ax: The axis where the residuals are plotted.
:type ax: axis object
:arg runs_list: Runs that are verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
# retrieve observations, tides and residual
tides = shared.get_tides("Neah Bay", path=paths["tides"])
res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
# truncate and plot
res_obs_trun, time_trun = analyze.truncate_data(
np.array(res_obs), np.array(obs.time), sdt, edt
)
ax.plot(time_trun, res_obs_trun, colours["observed"], label="observed", lw=2.5)
# plot forcing for each simulation
for mode in runs_list:
filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
if filename_NB:
dates, surge, fflag = NeahBay_forcing_anom(
filename_NB, run_date, paths["tides"]
)
surge_t, dates_t = analyze.truncate_data(
np.array(surge), np.array(dates), sdt, edt
)
ax.plot(dates_t, surge_t, label=mode, lw=2.5, color=colours[mode])
ax.set_title(
"Comparison of observed and forced sea surface"
" height residuals at Neah Bay:"
"{t_forcing:%d-%b-%Y}".format(t_forcing=t_orig)
)
def plot_residual_model(axs, names, runs_list, grid_B, t_orig):
"""Plots the observed sea surface height residual against the
sea surface height model residual (calculate_residual) at
specified stations. Function may produce none, any, or all
(nowcast, forecast, forecast 2) model residuals depending on
availability for specified date (runs_list).
:arg ax: The axis where the residuals are plotted.
:type ax: list of axes
:arg names: Names of station.
:type names: list of names
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
bathy, X, Y = tidetools.get_bathy_data(grid_B)
t_orig_obs = t_orig + datetime.timedelta(days=-1)
t_final_obs = t_orig + datetime.timedelta(days=1)
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
for ax, name in zip(axs, names):
# Identify model grid point
lat = SITES[name]["lat"]
lon = SITES[name]["lon"]
j, i = geo_tools.find_closest_model_point(lon, lat, X, Y, land_mask=bathy.mask)
# Observed residuals and wlevs and tides
ttide = shared.get_tides(name, path=paths["tides"])
res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
# truncate and plot
res_obs_trun, time_obs_trun = analyze.truncate_data(
np.array(res_obs), np.array(wlev_meas.time), sdt, edt
)
ax.plot(
time_obs_trun, res_obs_trun, c=colours["observed"], lw=2.5, label="observed"
)
for mode in runs_list:
filename, run_date = analyze.create_path(
mode, t_orig, "SalishSea_1h_*_grid_T.nc"
)
grid_T = nc.Dataset(filename)
res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
grid_T, j, i, ttide
)
# truncate and plot
res_mod_trun, t_mod_trun = analyze.truncate_data(res_mod, t_model, sdt, edt)
ax.plot(t_mod_trun, res_mod_trun, label=mode, c=colours[mode], lw=2.5)
ax.set_title(
"Comparison of modelled sea surface height residuals at"
" {station}: {t:%d-%b-%Y}".format(station=name, t=t_orig)
)
def get_error_model(names, runs_list, grid_B, t_orig):
"""Sets up the calculation for the model residual error.
:arg names: Names of station.
:type names: list of strings
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
:returns: error_mod_dict, t_mod_dict, t_orig_dict
"""
bathy, X, Y = tidetools.get_bathy_data(grid_B)
t_orig_obs = t_orig + datetime.timedelta(days=-1)
t_final_obs = t_orig + datetime.timedelta(days=1)
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
error_mod_dict = {}
t_mod_dict = {}
for name in names:
error_mod_dict[name] = {}
t_mod_dict[name] = {}
# Look up model grid
lat = SITES[name]["lat"]
lon = SITES[name]["lon"]
j, i = geo_tools.find_closest_model_point(lon, lat, X, Y, land_mask=bathy.mask)
# Observed residuals and wlevs and tides
ttide = shared.get_tides(name, path=paths["tides"])
res_obs, wlev_meas = obs_residual_ssh(name, ttide, t_orig_obs, t_final_obs)
res_obs_trun, time_obs_trun = analyze.truncate_data(
np.array(res_obs), np.array(wlev_meas.time), sdt, edt
)
for mode in runs_list:
filename, run_date = analyze.create_path(
mode, t_orig, "SalishSea_1h_*_grid_T.nc"
)
grid_T = nc.Dataset(filename)
res_mod, t_model, ssh_corr, ssh_mod = model_residual_ssh(
grid_T, j, i, ttide
)
# Truncate
res_mod_trun, t_mod_trun = analyze.truncate_data(res_mod, t_model, sdt, edt)
# Error
error_mod = analyze.calculate_error(
res_mod_trun, t_mod_trun, res_obs_trun, time_obs_trun
)
error_mod_dict[name][mode] = error_mod
t_mod_dict[name][mode] = t_mod_trun
return error_mod_dict, t_mod_dict
def get_error_forcing(runs_list, t_orig):
"""Sets up the calculation for the forcing residual error.
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
:returns: error_frc_dict, t_frc_dict
"""
# truncation times
sdt = t_orig.replace(tzinfo=tz.tzutc())
edt = sdt + datetime.timedelta(days=1)
# retrieve observed residual
tides = shared.get_tides("Neah Bay", path=paths["tides"])
res_obs, obs = obs_residual_ssh_NOAA("Neah Bay", tides, sdt, sdt)
res_obs_trun, time_trun = analyze.truncate_data(
np.array(res_obs), np.array(obs.time), sdt, edt
)
# calculate forcing error
error_frc_dict = {}
t_frc_dict = {}
for mode in runs_list:
filename_NB, run_date = analyze.create_path(mode, t_orig, "ssh*.txt")
if filename_NB:
dates, surge, fflag = NeahBay_forcing_anom(
filename_NB, run_date, paths["tides"]
)
surge_t, dates_t = analyze.truncate_data(
np.array(surge), np.array(dates), sdt, edt
)
error_frc = analyze.calculate_error(
surge_t, dates_t, res_obs_trun, obs.time
)
error_frc_dict[mode] = error_frc
t_frc_dict[mode] = dates_t
return error_frc_dict, t_frc_dict
def plot_error_model(axs, names, runs_list, grid_B, t_orig):
"""Plots the model residual error.
:arg axs: The axis where the residual errors are plotted.
:type axs: list of axes
:arg names: Names of station.
:type names: list of strings
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list of strings
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
error_mod_dict, t_mod_dict = get_error_model(names, runs_list, grid_B, t_orig)
for ax, name in zip(axs, names):
ax.set_title(
"Comparison of modelled residual errors at {station}:"
" {t:%d-%b-%Y}".format(station=name, t=t_orig)
)
for mode in runs_list:
ax.plot(
t_mod_dict[name][mode],
error_mod_dict[name][mode],
label=mode,
c=colours[mode],
lw=2.5,
)
def plot_error_forcing(ax, runs_list, t_orig):
"""Plots the forcing residual error.
:arg ax: The axis where the residual errors are plotted.
:type ax: axis object
:arg runs_list: Runs that have been verified as complete.
:type runs_list: list
:arg t_orig: Date being considered.
:type t_orig: datetime object
"""
error_frc_dict, t_frc_dict = get_error_forcing(runs_list, t_orig)
for mode in runs_list:
ax.plot(
t_frc_dict[mode], error_frc_dict[mode], label=mode, c=colours[mode], lw=2.5
)
ax.set_title(
"Comparison of observed and forced residual errors at "
"Neah Bay: {t_forcing:%d-%b-%Y}".format(t_forcing=t_orig)
)
def plot_residual_error_all(subject, grid_B, t_orig, figsize=(20, 16)):
"""Sets up and combines the plots produced by plot_residual_forcing
and plot_residual_model or plot_error_forcing and plot_error_model.
This function specifies the stations for which the nested functions
apply. Figure formatting except x-axis limits and titles are included.
:arg subject: Subject of figure, either 'residual' or 'error'.
:type subject: string
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset`
:arg t_orig: Date being considered.
:type t_orig: datetime object
:arg figsize: Figure size (width, height) in inches.
:type figsize: 2-tuple
:returns: fig
"""
# set up axis limits - based on full 24 hour period 0000 to 2400
sax = t_orig
eax = t_orig + datetime.timedelta(days=1)
runs_list = analyze.verified_runs(t_orig)
fig, axes = plt.subplots(4, 1, figsize=figsize)
axs_mod = [axes[1], axes[2], axes[3]]
names = ["<NAME>", "Victoria", "<NAME>"]
if subject == "residual":
plot_residual_forcing(axes[0], runs_list, t_orig)
plot_residual_model(axs_mod, names, runs_list, grid_B, t_orig)
elif subject == "error":
plot_error_forcing(axes[0], runs_list, t_orig)
plot_error_model(axs_mod, names, runs_list, grid_B, t_orig)
for ax in axes:
ax.set_ylim([-0.4, 0.4])
ax.set_xlabel("[hrs UTC]")
ax.set_ylabel("[m]")
hfmt = mdates.DateFormatter("%m/%d %H:%M")
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=4)
ax.grid()
ax.set_xlim([sax, eax])
return fig
def combine_errors(name, mode, dates, grid_B):
"""Combine model and forcing errors for a simulaion mode over several days.
returns time series of both model and forcing error and daily means.
Treats each simulation over 24 hours.
:arg name: name of station for model calculation
:type name: string, example 'Point Atkinson', 'Victoria'
:arg mode: simulation mode: nowcast, forecast, or forecast2
:type mode: string
:arg dates: list of dates to combine
:type dates: list of datetime objects
:arg grid_B: Bathymetry dataset for the Salish Sea NEMO model.
:type grid_B: :class:`netCDF4.Dataset
:returns: force, model, time, daily_time.
model and force are dictionaries with keys 'error' and 'daily'.
Each key corresponds to array of error time series and daily means.
    time is an array of times corresponding to error calculations
    daily_time is an array of times corresponding to daily means
"""
model = {"error": np.array([]), "daily": np.array([])}
force = {"error": np.array([]), "daily": np.array([])}
time = np.array([])
daily_time = np.array([])
for t_sim in dates:
# check if the run happened
if mode in analyze.verified_runs(t_sim):
# retrieve forcing and model error
e_frc_tmp, t_frc_tmp = get_error_forcing([mode], t_sim)
e_mod_tmp, t_mod_tmp = get_error_model([name], [mode], grid_B, t_sim)
e_frc_tmp = shared.interp_to_model_time(
t_mod_tmp[name][mode], e_frc_tmp[mode], t_frc_tmp[mode]
)
# append to larger array
force["error"] = np.append(force["error"], e_frc_tmp)
model["error"] = np.append(model["error"], e_mod_tmp[name][mode])
time = np.append(time, t_mod_tmp[name][mode])
# append daily mean error
force["daily"] = np.append(force["daily"], np.nanmean(e_frc_tmp))
model["daily"] = np.append(
model["daily"], np.nanmean(e_mod_tmp[name][mode])
)
daily_time = np.append(daily_time, t_sim + datetime.timedelta(hours=12))
else:
print("{} simulation for {} did not occur".format(mode, t_sim))
return force, model, time, daily_time
def compare_errors(name, mode, start, end, grid_B, figsize=(20, 12)):
"""compares the model and forcing error at a station
between dates start and end for a simulation mode."""
# array of dates for iteration
numdays = (end - start).days
dates = [start + datetime.timedelta(days=num) for num in range(0, numdays + 1)]
dates.sort()
    # initialize figure and arrays
fig, axs = plt.subplots(3, 1, figsize=figsize)
force, model, time, daily_time = combine_errors(name, mode, dates, grid_B)
ttide = shared.get_tides(name, path=paths["tides"])
# Plotting time series
ax = axs[0]
ax.plot(time, force["error"], "b", label="Forcing error", lw=2)
ax.plot(time, model["error"], "g", lw=2, label="Model error")
ax.set_title("Comparison of {mode} error at" " {name}".format(mode=mode, name=name))
ax.set_ylim([-0.4, 0.4])
hfmt = mdates.DateFormatter("%m/%d %H:%M")
# Plotting daily mean
ax = axs[1]
ax.plot(daily_time, force["daily"], "b", label="Forcing daily mean error", lw=2)
ax.plot(
[time[0], time[-1]],
[np.nanmean(force["error"]), np.nanmean(force["error"])],
"--b",
label="Mean forcing error",
lw=2,
)
ax.plot(daily_time, model["daily"], "g", lw=2, label="Model daily mean error")
ax.plot(
[time[0], time[-1]],
[np.nanmean(model["error"]), np.nanmean(model["error"])],
"--g",
label="Mean model error",
lw=2,
)
ax.set_title(
"Comparison of {mode} daily mean error at"
" {name}".format(mode=mode, name=name)
)
ax.set_ylim([-0.4, 0.4])
# Plot tides
ax = axs[2]
ax.plot(ttide.time, ttide.pred_all, "k", lw=2, label="tides")
ax.set_title("Tidal predictions")
ax.set_ylim([-3, 3])
# format axes
hfmt = mdates.DateFormatter("%m/%d %H:%M")
for ax in axs:
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=4)
ax.grid()
ax.set_xlim([start, end + datetime.timedelta(days=1)])
ax.set_ylabel("[m]")
return fig
def model_residual_ssh(grid_T, j, i, tides):
"""Calcuates the model residual at coordinate j, i.
:arg grid_T: hourly model results file
:type grid_T: netCDF file
:arg j: model y-index
:type j: integer 0<=j<898
:arg i: model i-index
:type i: integer 0<=i<398
:arg tides: tidal predictions at grid point
:type tides: pandas DataFrame
:returns: res_mod, t_model, ssh_corr, ssh_mod
The model residual, model times, model corrected ssh, and
unmodified model ssh"""
ssh_mod = grid_T.variables["sossheig"][:, j, i]
t_s, t_f, t_model = get_model_time_variables(grid_T)
ssh_corr = shared.correct_model_ssh(ssh_mod, t_model, tides)
res_mod = compute_residual(ssh_corr, t_model, tides)
return res_mod, t_model, ssh_corr, ssh_mod
def obs_residual_ssh(name, tides, sdt, edt):
"""Calculates the observed residual at Point Atkinson, Campbell River,
or Victoria.
:arg name: Name of station.
:type name: string
:arg sdt: The beginning of the date range of interest.
:type sdt: datetime object
:arg edt: The end of the date range of interest.
:type edt: datetime object
    :returns: residual (calculated residual), obs (observed water levels)"""
msl = SITES[name]["msl"]
obs = load_archived_observations(
name, sdt.strftime("%d-%b-%Y"), edt.strftime("%d-%b-%Y")
)
residual = compute_residual(obs.wlev - msl, obs.time, tides)
return residual, obs
def obs_residual_ssh_NOAA(name, tides, sdt, edt, product="hourly_height"):
"""Calculates the residual of the observed water levels with respect
to the predicted tides at a specific NOAA station and for a date range.
:arg name: Name of station.
:type name: string
:arg sdt: The beginning of the date range of interest.
:type sdt: datetime object
:arg edt: The end of the date range of interest.
:type edt: datetime object
:arg product: defines frequency of observed water levels
'hourly_height' for hourly or 'water_levels' for 6 min
:type product: string
    :returns: residual (calculated residual), obs (observed water levels)
"""
sites = SITES
start_date = sdt.strftime("%d-%b-%Y")
end_date = edt.strftime("%d-%b-%Y")
obs = get_NOAA_wlevels(sites[name]["stn_no"], start_date, end_date, product=product)
# Prepare to find residual
residual = compute_residual(obs.wlev, obs.time, tides)
return residual, obs
def plot_wlev_residual_NOAA(t_orig, elements, figsize=(20, 6)):
"""Plots the water level residual as calculated by the function
calculate_wlev_residual_NOAA and has the option to also plot the
observed water levels and predicted tides over the course of one day.
:arg t_orig: The beginning of the date range of interest.
:type t_orig: datetime object
:arg elements: Elements included in figure.
'residual' for residual only and 'all' for residual,
observed water level, and predicted tides.
:type elements: string
:arg figsize: Figure size (width, height) in inches.
:type figsize: 2-tuple
:returns: fig
"""
tides = shared.get_tides("Neah Bay", path=paths["tides"])
residual, obs = obs_residual_ssh_NOAA("Neah Bay", tides, t_orig, t_orig)
# Figure
fig, ax = plt.subplots(1, 1, figsize=figsize)
# Plot
ax.plot(
obs.time,
residual,
colours["residual"],
label="Observed Residual",
linewidth=2.5,
)
if elements == "all":
ax.plot(
obs.time,
obs.wlev,
colours["observed"],
label="Observed Water Level",
lw=2.5,
)
ax.plot(
tides.time,
tides.pred[tides.time == obs.time],
colours["predicted"],
label="Tidal Predictions",
linewidth=2.5,
)
if elements == "residual":
pass
ax.set_title(
"Residual of the observed water levels at"
" Neah Bay: {t:%d-%b-%Y}".format(t=t_orig)
)
ax.set_ylim([-3.0, 3.0])
ax.set_xlabel("[hrs]")
hfmt = mdates.DateFormatter("%m/%d %H:%M")
ax.xaxis.set_major_formatter(hfmt)
ax.legend(loc=2, ncol=3)
ax.grid()
return fig
def NeahBay_forcing_anom(textfile, run_date, tide_file, archive=False, fromtar=False):
"""Calculate the Neah Bay forcing anomaly for the data stored in textfile.
:arg textfile: the textfile containing forecast/observations
:type textfile: string
:arg run_date: date of the simulation
:type run_date: datetime object
:arg tide_file: path and name for the tide file
:type tide_file: string
:returns: dates, surge, forecast_flag
The dates, surges and a flag specifying if each point was a forecast
"""
if fromtar:
data = pd.read_csv(textfile, parse_dates=[0], index_col=0).rename(
columns={" OB": "obs", " TWL": "fcst"}
)
data["Date"] = pd.to_datetime(data.index)
# clean up 00:00 obs
datesat00 = data.resample("1d").nearest().index.array
data["offset"] = data.obs - data[" TIDE"]
data.loc[data.Date.isin(datesat00), "obs"] = (
data[(data.Date.shift(1).isin(datesat00))].offset.values
+ data[data.Date.isin(datesat00)][" TIDE"].values
)
data.loc[data.obs > 9000, "obs"] = 9999
data = data.resample("1h").nearest()
data["Date"] = pd.to_datetime(data.index, utc=True)
dates = data.Date.array
else:
data = _load_surge_data(textfile, archive)
if archive:
data.Date =
|
pd.to_datetime(data["Date"] + " " + data["Time"], utc=True)
|
pandas.to_datetime
|
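# Stand-alone illustration of the archive-parsing idiom used in the completion above:
# pandas combines separate date and time text columns and localises them to UTC in one
# call. The sample values are invented.
import pandas as pd

archive = pd.DataFrame({"Date": ["01/01/2016", "01/01/2016"], "Time": ["00:00", "01:00"]})
archive["Date"] = pd.to_datetime(archive["Date"] + " " + archive["Time"], utc=True)
print(archive["Date"].dt.tz)  # UTC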
import logging
from abc import ABC, abstractmethod
import numpy as np
import pandas as pd
from hdrbp._util import (
basic_repr,
basic_str,
compute_correlation,
compute_diversification_ratio,
compute_drawdowns,
compute_gini,
compute_prices,
compute_risk_contributions,
compute_turnover,
compute_variance,
count_dates_per_year,
count_years,
)
logger = logging.getLogger(__name__)
@basic_str
@basic_repr
class MetricCalculator(ABC):
@property
def name(self):
return repr(self)
@abstractmethod
def calculate(self, result: pd.DataFrame) -> float:
pass
class GeometricMeanReturn(MetricCalculator):
def __init__(self, annualized: bool = False) -> None:
self._annualized = annualized
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_valid_returns(result)
returns = result["return"].values
log_returns = np.log1p(returns)
mean_log_return = np.mean(log_returns)
if self._annualized:
dates = pd.to_datetime(result["date"].values)
dates_per_year = count_dates_per_year(dates)
mean_log_return = dates_per_year * mean_log_return
geometric_mean_return = np.expm1(mean_log_return)
return geometric_mean_return
class MeanReturn(MetricCalculator):
def __init__(self, annualized: bool = False) -> None:
self._annualized = annualized
def calculate(self, result: pd.DataFrame) -> float:
logger.debug(f"{self}: Calculating metric")
result = _filter_valid_returns(result)
returns = result["return"].values
mean_return = np.mean(returns)
if self._annualized:
dates =
|
pd.to_datetime(result["date"].values)
|
pandas.to_datetime
|
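# Worked numeric sketch (not part of hdrbp) of the annualisation step used by
# GeometricMeanReturn above: scale the mean log-return by the number of dates per year,
# then transform back with expm1. The returns and the 252-day convention are assumptions.
import numpy as np

daily_returns = np.array([0.001, -0.002, 0.0015, 0.0005])
dates_per_year = 252
annualized_geometric = np.expm1(dates_per_year * np.mean(np.log1p(daily_returns)))
print(round(annualized_geometric, 4))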
# Cleaning/merging deposits and withdrawals
import pandas as pd
from datetime import datetime
for file in ['withdrawals', 'deposits']:
df = pd.read_csv('../data/' + file + '.csv')
df.dropna(inplace=True)
# Re-order dataframe from oldest to newest
df = df[::-1]
# Convert to datetime
df['Date'] = [datetime.strptime(day, '%Y-%m-%d %H:%M:%S') for day in df['Date']]
if file == 'withdrawals':
df['Type'] = 'WITHDRAWAL'
else:
df['Type'] = 'DEPOSIT'
df.to_csv('../data/' + file + '.csv', index=False)
d = pd.read_csv('../data/deposits.csv')
w =
|
pd.read_csv('../data/withdrawals.csv')
|
pandas.read_csv
|
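# A plausible next step for the cleaned files above (an assumption, since the snippet
# ends here): stack deposits and withdrawals and order them chronologically. The column
# names and values below are illustrative stand-ins for the real CSV contents.
import pandas as pd

d = pd.DataFrame({"Date": ["2021-01-02 10:00:00"], "Amount": [100.0], "Type": ["DEPOSIT"]})
w = pd.DataFrame({"Date": ["2021-01-03 09:30:00"], "Amount": [40.0], "Type": ["WITHDRAWAL"]})
transfers = pd.concat([d, w], ignore_index=True)
transfers["Date"] = pd.to_datetime(transfers["Date"])
transfers = transfers.sort_values("Date").reset_index(drop=True)
print(transfers)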
import optuna
import pandas as pd
import numpy as np
from scipy.stats import rankdata
import pandas_ta as pta
from finta import TA as fta
import talib as tta
import re
import warnings
import pareto
warnings.filterwarnings("ignore")
from timeit import default_timer as timer
def col_name(function, study_best_params):
"""
Create consistent column names given string function and params
:param function: Function represented as string
:param study_best_params: Params for function
    :return: column name string built from the function name and its parameters
"""
# Optuna string of indicator
function_name = function.split("(")[0].replace(".", "_")
# Optuna string of parameters
params = re.sub('[^0-9a-zA-Z_:,]', '', str(study_best_params)).replace(",", "_").replace(":", "_")
# Concatenate name and params to define
col = f"{function_name}_{params}"
return col
def _weighted_pearson(y, y_pred, w=None, pearson=True):
"""Calculate the weighted Pearson correlation coefficient."""
if pearson:
if w is None:
w = np.ones(len(y))
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
with np.errstate(divide='ignore', invalid='ignore'):
y_pred_demean = y_pred - np.average(y_pred, weights=w)
y_demean = y - np.average(y, weights=w)
corr = ((np.sum(w * y_pred_demean * y_demean) / np.sum(w)) /
np.sqrt((np.sum(w * y_pred_demean ** 2) *
np.sum(w * y_demean ** 2)) /
(np.sum(w) ** 2)))
if np.isfinite(corr):
return np.abs(corr)
return 0.
def _weighted_spearman(y, y_pred, w=None):
"""Calculate the weighted Spearman correlation coefficient."""
# idx = ~np.logical_or(np.isnan(y_pred), np.isnan(y)) # Drop NAs w/boolean mask
# y = np.compress(idx, np.array(y))
# y_pred = np.compress(idx, np.array(y_pred))
# w = np.compress(idx, w)
y_pred_ranked = np.apply_along_axis(rankdata, 0, y_pred)
y_ranked = np.apply_along_axis(rankdata, 0, y)
return _weighted_pearson(y_pred_ranked, y_ranked, w, pearson=False)
def _trial(self, trial, X):
"""
Calculate indicator using best fitted trial over X
:param self: Optuna study
:param trial: Optuna trial
:param X: dataset
:return:
"""
# Evaluate TA defined as optuna trial string
res = eval(self.function)
# If return is tuple, convert to DF
if isinstance(res, tuple):
res =
|
pd.DataFrame(res)
|
pandas.DataFrame
|
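# Quick usage sketch for the col_name helper defined above; the indicator string and
# params dict are made up for illustration.
example_function = "pta.rsi(X.close, length=trial.suggest_int('length', 2, 30))"
example_params = {"length": 14}
print(col_name(example_function, example_params))  # -> pta_rsi_length_14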
import matplotlib.image as mpimg
import matplotlib.style as style
import matplotlib.pyplot as plt
from matplotlib import rcParams
from simtk.openmm.app import *
from simtk.openmm import *
from simtk.unit import *
from sys import stdout
import seaborn as sns
from math import exp
import pandas as pd
import mdtraj as md
import pickle as pk
import numpy as np
import statistics
import itertools
import fileinput
import fnmatch
import shutil
import random
import math
import os
import re
def fix_cap_remove_ace(pdb_file):
"""
Removes the H atoms of the capped ACE residue.
"""
remove_words = [
"H1 ACE",
"H2 ACE",
"H3 ACE",
"H31 ACE",
"H32 ACE",
"H33 ACE",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_ace(pdb_file):
"""
Replaces the alpha carbon atom of the
capped ACE residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA ACE", "CH3 ACE")
data = data.replace("C ACE", "CH3 ACE")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def fix_cap_remove_nme(pdb_file):
"""
Removes the H atoms of the capped NME residue.
"""
remove_words = [
"H1 NME",
"H2 NME",
"H3 NME",
"H31 NME",
"H32 NME",
"H33 NME",
]
with open(pdb_file) as oldfile, open("intermediate.pdb", "w") as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
command = "rm -rf " + pdb_file
os.system(command)
command = "mv intermediate.pdb " + pdb_file
os.system(command)
def fix_cap_replace_nme(pdb_file):
"""
Replaces the alpha carbon atom of the
capped NME residue with a standard name.
"""
fin = open(pdb_file, "rt")
data = fin.read()
data = data.replace("CA NME", "CH3 NME")
data = data.replace("C NME", "CH3 NME")
fin.close()
fin = open(pdb_file, "wt")
fin.write(data)
fin.close()
def prepare_alanine_dipeptide():
"""
Prepares the alanine dipeptide system for Gaussian
Accelerated Molecular Dynamics (GaMD) simulations.
Downloads the pdb structure from
https://markovmodel.github.io/mdshare/ALA2/ and
parameterizes it using General Amber Force Field
(GAFF).
"""
os.system(
"curl -O http://ftp.imp.fu-berlin.de/pub/cmb-data/alanine-dipeptide-nowater.pdb"
)
os.system(
"rm -rf system_inputs"
) # Removes any existing directory named system_inputs
os.system("mkdir system_inputs") # Creates a directory named system_inputs
cwd = os.getcwd()
target_dir = cwd + "/" + "system_inputs"
os.system("pdb4amber -i alanine-dipeptide-nowater.pdb -o intermediate.pdb")
# Delete HH31, HH32 and HH33 from the ACE residue (tleap adds them later)
remove_words = ["HH31 ACE", "HH32 ACE", "HH33 ACE"]
with open("intermediate.pdb") as oldfile, open(
"system.pdb", "w"
) as newfile:
for line in oldfile:
if not any(word in line for word in remove_words):
newfile.write(line)
os.system("rm -rf intermediate*")
# save the tleap script to file
with open("input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system.pdb
solvateBox pdb TIP3PBOX 15
saveamberparm pdb system_TIP3P.prmtop system_TIP3P.inpcrd
saveamberparm pdb system_TIP3P.parm7 system_TIP3P.rst7
savepdb pdb system_TIP3P.pdb
quit
"""
)
os.system("tleap -f input_TIP3P.leap")
os.system("rm -rf leap.log")
shutil.copy(
cwd + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_TIP3P.pdb", target_dir + "/" + "system_TIP3P.pdb"
)
shutil.copy(
cwd + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_TIP3P.rst7", target_dir + "/" + "system_TIP3P.rst7"
)
shutil.copy(cwd + "/" + "system.pdb", target_dir + "/" + "system.pdb")
shutil.copy(
cwd + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "input_TIP3P.leap", target_dir + "/" + "input_TIP3P.leap"
)
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf input_TIP3P.leap")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
def create_vectors(x):
"""
    Extracts periodic box information from the
given line.
"""
x = str(x)
x = x.replace("Vec3", "")
    x = re.findall(r"\d*\.?\d+", x)
for i in range(0, len(x)):
x[i] = float(x[i])
x = tuple(x)
n = int(len(x) / 3)
x = [x[i * n : (i + 1) * n] for i in range((len(x) + n - 1) // n)]
return x
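# Illustrative example of what create_vectors returns (the input string below is an
# assumed OpenMM-style box-vector repr, not captured from a real run):
#   create_vectors("(Vec3(x=2.5, y=0.0, z=0.0), Vec3(x=0.0, y=2.5, z=0.0), Vec3(x=0.0, y=0.0, z=2.5)) nm")
#   -> [(2.5, 0.0, 0.0), (0.0, 2.5, 0.0), (0.0, 0.0, 2.5)]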
def simulated_annealing(
parm="system_TIP3P.prmtop",
rst="system_TIP3P.inpcrd",
annealing_output_pdb="system_annealing_output.pdb",
annealing_steps=100000,
pdb_freq=100000,
starting_temp=0,
target_temp=300,
temp_incr=3,
):
"""
Performs simulated annealing of the system from
0K to 300 K (default) using OpenMM MD engine and
saves the last frame of the simulation to be
accessed by the next simulation.
Parameters
----------
parm: str
System's topology file
rst: str
System's coordinate file
annealing_output_pdb: str
System's output trajectory file
annealing_steps: int
        Annealing steps at each temperature jump
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
starting_temp: int
Initial temperature of Simulated Annealing
target_temp: int
Final temperature of Simulated Annealing
temp_incr: int
        Temperature increase for every step
"""
prmtop = AmberPrmtopFile(parm)
inpcrd = AmberInpcrdFile(rst)
annealing_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
annealing_integrator = LangevinIntegrator(
0 * kelvin, 1 / picosecond, 2 * femtoseconds
)
total_steps = ((target_temp / temp_incr) + 1) * annealing_steps
annealing_temp_range = int((target_temp / temp_incr) + 1)
annealing_platform = Platform.getPlatformByName("CUDA")
annealing_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
annealing_simulation = Simulation(
prmtop.topology,
annealing_system,
annealing_integrator,
annealing_platform,
annealing_properties,
)
annealing_simulation.context.setPositions(inpcrd.positions)
if inpcrd.boxVectors is not None:
annealing_simulation.context.setPeriodicBoxVectors(*inpcrd.boxVectors)
annealing_simulation.minimizeEnergy()
annealing_simulation.reporters.append(
PDBReporter(annealing_output_pdb, pdb_freq)
)
simulated_annealing_last_frame = (
annealing_output_pdb[:-4] + "_last_frame.pdb"
)
annealing_simulation.reporters.append(
PDBReporter(simulated_annealing_last_frame, total_steps)
)
annealing_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=total_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
temp = starting_temp
while temp <= target_temp:
annealing_integrator.setTemperature(temp * kelvin)
if temp == starting_temp:
annealing_simulation.step(annealing_steps)
annealing_simulation.saveState("annealing.state")
else:
annealing_simulation.loadState("annealing.state")
annealing_simulation.step(annealing_steps)
temp += temp_incr
state = annealing_simulation.context.getState()
print(state.getPeriodicBoxVectors())
annealing_simulation_box_vectors = state.getPeriodicBoxVectors()
print(annealing_simulation_box_vectors)
with open("annealing_simulation_box_vectors.pkl", "wb") as f:
pk.dump(annealing_simulation_box_vectors, f)
print("Finshed NVT Simulated Annealing Simulation")
def npt_equilibration(
parm="system_TIP3P.prmtop",
npt_output_pdb="system_npt_output.pdb",
pdb_freq=500000,
npt_steps=5000000,
target_temp=300,
npt_pdb="system_annealing_output_last_frame.pdb",
):
"""
Performs NPT equilibration MD of the system
using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
npt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
npt_steps: int
NPT simulation steps
target_temp: int
Temperature for MD simulation
npt_pdb: str
Last frame of the simulation
"""
npt_init_pdb = PDBFile(npt_pdb)
prmtop = AmberPrmtopFile(parm)
npt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
barostat = MonteCarloBarostat(25.0 * bar, target_temp * kelvin, 25)
npt_system.addForce(barostat)
npt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
npt_platform = Platform.getPlatformByName("CUDA")
npt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
npt_simulation = Simulation(
prmtop.topology,
npt_system,
npt_integrator,
npt_platform,
npt_properties,
)
npt_simulation.context.setPositions(npt_init_pdb.positions)
npt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("annealing_simulation_box_vectors.pkl", "rb") as f:
annealing_simulation_box_vectors = pk.load(f)
annealing_simulation_box_vectors = create_vectors(
annealing_simulation_box_vectors
)
npt_simulation.context.setPeriodicBoxVectors(
annealing_simulation_box_vectors[0],
annealing_simulation_box_vectors[1],
annealing_simulation_box_vectors[2],
)
npt_last_frame = npt_output_pdb[:-4] + "_last_frame.pdb"
npt_simulation.reporters.append(PDBReporter(npt_output_pdb, pdb_freq))
npt_simulation.reporters.append(PDBReporter(npt_last_frame, npt_steps))
npt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=npt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
npt_simulation.minimizeEnergy()
npt_simulation.step(npt_steps)
npt_simulation.saveState("npt_simulation.state")
state = npt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
npt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(npt_simulation_box_vectors)
with open("npt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(npt_simulation_box_vectors, f)
print("Finished NPT Simulation")
def nvt_equilibration(
parm="system_TIP3P.prmtop",
nvt_output_pdb="system_nvt_output.pdb",
pdb_freq=500000,
nvt_steps=5000000,
target_temp=300,
nvt_pdb="system_npt_output_last_frame.pdb",
):
"""
Performs NVT equilibration MD of the system
    using OpenMM MD engine and saves the last
frame of the simulation to be accessed by
the next simulation.
Parameters
----------
parm: str
System's topology file
nvt_output_pdb: str
System's output trajectory file
pdb_freq: int
Trajectory to be saved after every pdb_freq steps
nvt_steps: int
NVT simulation steps
target_temp: int
Temperature for MD simulation
nvt_pdb: str
Last frame of the simulation
"""
nvt_init_pdb = PDBFile(nvt_pdb)
prmtop = AmberPrmtopFile(parm)
nvt_system = prmtop.createSystem(
nonbondedMethod=PME, nonbondedCutoff=1 * nanometer, constraints=HBonds
)
nvt_integrator = LangevinIntegrator(
target_temp * kelvin, 1 / picosecond, 2 * femtoseconds
)
nvt_platform = Platform.getPlatformByName("CUDA")
nvt_properties = {"CudaDeviceIndex": "0", "CudaPrecision": "mixed"}
nvt_simulation = Simulation(
prmtop.topology,
nvt_system,
nvt_integrator,
nvt_platform,
nvt_properties,
)
nvt_simulation.context.setPositions(nvt_init_pdb.positions)
nvt_simulation.context.setVelocitiesToTemperature(target_temp * kelvin)
with open("npt_simulation_box_vectors.pkl", "rb") as f:
npt_simulation_box_vectors = pk.load(f)
npt_simulation_box_vectors = create_vectors(npt_simulation_box_vectors)
nvt_simulation.context.setPeriodicBoxVectors(
npt_simulation_box_vectors[0],
npt_simulation_box_vectors[1],
npt_simulation_box_vectors[2],
)
nvt_last_frame = nvt_output_pdb[:-4] + "_last_frame.pdb"
nvt_simulation.reporters.append(PDBReporter(nvt_output_pdb, pdb_freq))
nvt_simulation.reporters.append(PDBReporter(nvt_last_frame, nvt_steps))
nvt_simulation.reporters.append(
StateDataReporter(
stdout,
pdb_freq,
step=True,
time=True,
potentialEnergy=True,
totalSteps=nvt_steps,
temperature=True,
progress=True,
remainingTime=True,
speed=True,
separator="\t",
)
)
nvt_simulation.minimizeEnergy()
nvt_simulation.step(nvt_steps)
nvt_simulation.saveState("nvt_simulation.state")
state = nvt_simulation.context.getState()
print(state.getPeriodicBoxVectors())
nvt_simulation_box_vectors = state.getPeriodicBoxVectors()
print(nvt_simulation_box_vectors)
with open("nvt_simulation_box_vectors.pkl", "wb") as f:
pk.dump(nvt_simulation_box_vectors, f)
print("Finished NVT Simulation")
def run_equilibration():
"""
Runs systematic simulated annealing followed by
NPT and NVT equilibration MD simulation.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "equilibration"
os.system("rm -rf equilibration")
os.system("mkdir equilibration")
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.inpcrd",
target_dir + "/" + "system_TIP3P.inpcrd",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.parm7",
target_dir + "/" + "system_TIP3P.parm7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.pdb",
target_dir + "/" + "system_TIP3P.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.prmtop",
target_dir + "/" + "system_TIP3P.prmtop",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system_TIP3P.rst7",
target_dir + "/" + "system_TIP3P.rst7",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "system.pdb",
target_dir + "/" + "system.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "alanine-dipeptide-nowater.pdb",
target_dir + "/" + "alanine-dipeptide-nowater.pdb",
)
shutil.copy(
cwd + "/" + "system_inputs" + "/" + "input_TIP3P.leap",
target_dir + "/" + "input_TIP3P.leap",
)
os.chdir(target_dir)
simulated_annealing()
npt_equilibration()
nvt_equilibration()
os.system("rm -rf system_TIP3P.inpcrd")
os.system("rm -rf system_TIP3P.parm7")
os.system("rm -rf system_TIP3P.pdb")
os.system("rm -rf system_TIP3P.rst7")
os.system("rm -rf system_TIP3P.prmtop")
os.system("rm -rf system.pdb")
os.system("rm -rf alanine-dipeptide-nowater.pdb")
os.system("rm -rf input_TIP3P.leap")
os.chdir(cwd)
def create_starting_structures():
"""
Prepares starting structures for Amber GaMD simulations.
All input files required to run Amber GaMD simulations are
placed in the starting_structures directory.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
os.system("rm -rf starting_structures")
os.system("mkdir starting_structures")
shutil.copy(
cwd + "/" + "equilibration" + "/" + "system_nvt_output_last_frame.pdb",
target_dir + "/" + "system_nvt_output_last_frame.pdb",
)
os.chdir(target_dir)
fix_cap_remove_nme("system_nvt_output_last_frame.pdb")
fix_cap_replace_nme("system_nvt_output_last_frame.pdb")
# Save the tleap script to file
with open("final_input_TIP3P.leap", "w") as f:
f.write(
"""
source leaprc.protein.ff14SB
source leaprc.water.tip3p
set default FlexibleWater on
set default PBRadii mbondi2
pdb = loadpdb system_nvt_output_last_frame.pdb
saveamberparm pdb system_final.prmtop system_final.inpcrd
saveamberparm pdb system_final.parm7 system_final.rst7
savepdb pdb system_final.pdb
quit
"""
)
os.system("tleap -f final_input_TIP3P.leap")
os.system("rm -rf leap.log")
os.system("rm -rf system_nvt_output_last_frame.pdb")
os.chdir(cwd)
def add_vec_inpcrd():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the inpcrd file.
Only to be used when the box dimensions are not
present in the inpcrd file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
(nvt_simulation_box_vectors[0][0]) * 10,
(nvt_simulation_box_vectors[1][1]) * 10,
(nvt_simulation_box_vectors[2][2]) * 10,
)
vectors = (
round(vectors[0], 7),
round(vectors[1], 7),
round(vectors[2], 7),
)
last_line = (
" "
+ str(vectors[0])
+ " "
+ str(vectors[1])
+ " "
+ str(vectors[2])
+ " 90.0000000"
+ " 90.0000000"
+ " 90.0000000"
)
with open("system_final.inpcrd", "a+") as f:
f.write(last_line)
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def add_vec_prmtop():
"""
Adds box dimensions captured from the last saved
frame of the NVT simulations to the prmtop file.
Only to be used when the box dimensions are not
present in the prmtop file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "starting_structures"
shutil.copy(
cwd + "/" + "equilibration" + "/" + "nvt_simulation_box_vectors.pkl",
target_dir + "/" + "nvt_simulation_box_vectors.pkl",
)
os.chdir(target_dir)
with open("nvt_simulation_box_vectors.pkl", "rb") as f:
nvt_simulation_box_vectors = pk.load(f)
nvt_simulation_box_vectors = create_vectors(nvt_simulation_box_vectors)
vectors = (
nvt_simulation_box_vectors[0][0],
nvt_simulation_box_vectors[1][1],
nvt_simulation_box_vectors[2][2],
)
vectors = round(vectors[0], 7), round(vectors[1], 7), round(vectors[2], 7)
oldbeta = "9.00000000E+01"
x = str(vectors[0]) + str(0) + "E+" + "01"
y = str(vectors[1]) + str(0) + "E+" + "01"
z = str(vectors[2]) + str(0) + "E+" + "01"
line1 = "%FLAG BOX_DIMENSIONS"
line2 = "%FORMAT(5E16.8)"
line3 = " " + oldbeta + " " + x + " " + y + " " + z
with open("system_final.prmtop") as i, open(
"system_intermediate_final.prmtop", "w"
) as f:
for line in i:
if line.startswith("%FLAG RADIUS_SET"):
line = line1 + "\n" + line2 + "\n" + line3 + "\n" + line
f.write(line)
os.system("rm -rf system_final.prmtop")
os.system("mv system_intermediate_final.prmtop system_final.prmtop")
os.system("rm -rf nvt_simulation_box_vectors.pkl")
os.chdir(cwd)
def create_filetree(
nst_lim=26000000,
ntw_x=1000,
nt_cmd=1000000,
n_teb=1000000,
n_tave=50000,
ntcmd_prep=200000,
nteb_prep=200000,
):
"""
Creates a directory named gamd_simulations. Inside
this directory, there are subdirectories for dihedral,
dual and total potential-boosted GaMD with upper and
lower threshold boosts separately.
Parameters
----------
nst_lim: int
        Total number of simulation steps, including the preparatory
        steps. For example, if nst_lim = 26000000, we may have 2 ns
        of preparatory simulation (1000000 preparation steps) and
        50 ns of GaMD simulation (25000000 simulation steps)
ntw_x: int
        Interval, in timesteps, at which simulation coordinates are
        saved. For example, saving every 2 ps corresponds to 1000 timesteps
nt_cmd: int
        Number of initial conventional MD simulation steps; 2 ns of
preparatory simulation requires 1000000 preparation
timesteps
n_teb: int
Number of biasing MD simulation steps
n_tave: int
Number of simulation steps used to calculate the
average and standard deviation of potential energies
ntcmd_prep: int
Number of preparation conventional molecular dynamics
        steps. This is used for system equilibration and
potential energies are not collected for statistics
nteb_prep: int
Number of preparation biasing molecular dynamics
simulation steps. This is used for system
equilibration
"""
cwd = os.getcwd()
os.system("rm -rf gamd_simulations")
os.system("mkdir gamd_simulations")
os.chdir(cwd + "/" + "gamd_simulations")
source_dir = cwd + "/" + "starting_structures"
target_dir = cwd + "/" + "gamd_simulations"
dir_list = [
"dihedral_threshold_lower",
"dihedral_threshold_upper",
"dual_threshold_lower",
"dual_threshold_upper",
"total_threshold_lower",
"total_threshold_upper",
]
for i in range(len(dir_list)):
os.mkdir(dir_list[i])
os.chdir(target_dir + "/" + dir_list[i])
shutil.copy(
source_dir + "/" + "system_final.inpcrd",
target_dir + "/" + dir_list[i] + "/" + "system_final.inpcrd",
)
shutil.copy(
source_dir + "/" + "system_final.prmtop",
target_dir + "/" + dir_list[i] + "/" + "system_final.prmtop",
)
if "lower" in dir_list[i]:
i_E = 1
if "upper" in dir_list[i]:
i_E = 2
if "total" in dir_list[i]:
i_gamd = 1
if "dihedral" in dir_list[i]:
i_gamd = 2
if "dual" in dir_list[i]:
i_gamd = 3
with open("md.in", "w") as f:
f.write("&cntrl" + "\n")
f.write(" imin = 0, irest = 0, ntx = 1," + "\n")
f.write(" nstlim = " + str(nst_lim) + ", dt = 0.002," + "\n")
f.write(" ntc = 2, ntf = 2, tol = 0.000001," + "\n")
f.write(" iwrap = 1, ntb = 1, cut = 8.0," + "\n")
f.write(" ntt = 3, temp0 = 300.0, gamma_ln = 1.0, " + "\n")
f.write(
" ntpr = 500, ntwx = " + str(ntw_x) + ", ntwr = 500," + "\n"
)
f.write(" ntxo = 2, ioutfm = 1, ig = -1, ntwprt = 0," + "\n")
f.write(
" igamd = "
+ str(i_gamd)
+ ", iE = "
+ str(i_E)
+ ", irest_gamd = 0,"
+ "\n"
)
f.write(
" ntcmd = "
+ str(nt_cmd)
+ ", nteb = "
+ str(n_teb)
+ ", ntave = "
+ str(n_tave)
+ ","
+ "\n"
)
f.write(
" ntcmdprep = "
+ str(ntcmd_prep)
+ ", ntebprep = "
+ str(nteb_prep)
+ ","
+ "\n"
)
f.write(" sigma0D = 6.0, sigma0P = 6.0" + " \n")
f.write("&end" + "\n")
os.chdir(target_dir)
os.chdir(cwd)
def run_simulations():
"""
Runs GaMD simulations for each of the dihedral, dual and total
potential boosts for both thresholds i.e. upper and lower potential
thresholds. (Remember to check md.in files for further details and
flag information).
"""
    cwd = os.getcwd()
    dir_list = [
        "dihedral_threshold_lower",
        "dihedral_threshold_upper",
        "dual_threshold_lower",
        "dual_threshold_upper",
        "total_threshold_lower",
        "total_threshold_upper",
    ]
    pmemd_command = (
        "pmemd.cuda -O -i md.in -o system_final.out"
        " -p system_final.prmtop -c system_final.inpcrd"
        " -r system_final.rst -x system_final.nc"
    )
    # Run the same pmemd.cuda command in each GaMD subdirectory.
    for dir_name in dir_list:
        os.chdir(cwd + "/" + "gamd_simulations" + "/" + dir_name)
        os.system(pmemd_command)
    os.chdir(cwd)
def create_data_files(
jump=10,
traj="system_final.nc",
topology="system_final.prmtop",
T=300,
):
"""
    Extracts data from GaMD log files and saves it as
    weights.dat, Psi.dat and Phi_Psi.dat. The gamd.log file
    contains data excluding the initial equilibration MD
    simulation steps, while the trajectory output file contains
    all frames, including the initial equilibration MD steps.
    This function accounts for that offset so the two sources
    remain consistent.
Parameters
----------
jump: int
Every nth frame to be considered for reweighting
traj: str
System's trajectory file
topology: str
System's topology file
T: int
MD simulation temperature
"""
# To make data consistent with gamd.log and .nc file
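    # 0.001987 kcal/(mol*K) is the Boltzmann constant, so factor = kB*T is
    # used below to convert boost energies from kcal/mol into kBT units.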
factor = 0.001987 * T
with open("md.in") as f:
lines = f.readlines()
for i in lines:
if "nstlim =" in i:
nstlim_line = i
if "ntcmd =" in i:
ntcmd_line = i
if "ntwx =" in i:
ntwx_line = i
x = re.findall(r"\b\d+\b", ntcmd_line)
ntcmd = int(x[0])
x = re.findall(r"\b\d+\b", nstlim_line)
nstlim = int(x[0])
x = re.findall(r"\b\d+\b", ntwx_line)
ntwx = int(x[1])
# From the .nc trajectory files, we will not consider ntcmd trajectories
leave_frames = int(ntcmd / ntwx)
no_frames = int(nstlim / ntwx)
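    # e.g. with the md.in defaults written by create_filetree
    # (ntcmd = 1,000,000, nstlim = 26,000,000, ntwx = 1,000):
    # leave_frames = 1,000 and no_frames = 26,000.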
# Recheck conditions
file = open("gamd.log", "r")
number_of_lines = 0
for line in file:
line = line.strip("\n")
number_of_lines += 1
file.close()
f = open("gamd.log")
fourth_line = f.readlines()[3]
if str(ntcmd) in fourth_line:
datapoints = number_of_lines - 4
if not str(ntcmd) in fourth_line:
datapoints = number_of_lines - 3
print(datapoints == int((nstlim - ntcmd) / ntwx))
# Creating Psi.dat and Phi_Psi.dat
traj = md.load(traj, top=topology)
traj = traj[leave_frames:no_frames:jump]
phi = md.compute_phi(traj)
phi = phi[1] # 0:indices, 1:phi angles
phi = np.array([math.degrees(i) for i in phi]) # radians to degrees
psi = md.compute_psi(traj)
psi = psi[1] # 0:indices, 1:psi angles
psi = np.array([math.degrees(i) for i in psi]) # radians to degrees
    df_psi = pd.DataFrame(psi, columns=["Psi"])
    df_psi = df_psi.tail(int(datapoints))
    df_psi.to_csv("Psi.dat", sep="\t", index=False, header=False)
    df_phi = pd.DataFrame(phi, columns=["Phi"])
df_phi = df_phi.tail(int(datapoints))
df_phi_psi = pd.concat([df_phi, df_psi], axis=1)
df_phi_psi.to_csv("Phi_Psi.dat", sep="\t", index=False, header=False)
# Creating weights.dat
with open("gamd.log") as f:
lines = f.readlines()
column_names = lines[2]
column_names = column_names.replace("#", "")
column_names = column_names.replace("\n", "")
column_names = column_names.replace(" ", "")
column_names = column_names.split(",")
list_words = ["#"]
with open("gamd.log") as oldfile, open("data.log", "w") as newfile:
for line in oldfile:
if not any(word in line for word in list_words):
newfile.write(line)
df = pd.read_csv("data.log", delim_whitespace=True, header=None)
df.columns = column_names
df["dV(kcal/mol)"] = (
df["Boost-Energy-Potential"] + df["Boost-Energy-Dihedral"]
)
df["dV(kbT)"] = df["dV(kcal/mol)"] / factor
df_ = df[["dV(kbT)", "total_nstep", "dV(kcal/mol)"]]
df_ = df_[::jump]
df_.to_csv("weights.dat", sep="\t", index=False, header=False)
os.system("rm -rf data.log")
print(df_phi_psi.shape)
print(df_phi.shape)
print(df_.shape)
def create_bins(lower_bound, width, upper_bound):
"""
    Creates bins between the given lower and upper bounds
    with the specified width.
"""
bins = []
for low in range(lower_bound, upper_bound, width):
bins.append([low, low + width])
return bins
def find_bin(value, bins):
"""
    Finds the index of the bin that a given value belongs to;
    returns -1 if the value lies outside all bins.
"""
for i in range(0, len(bins)):
if bins[i][0] <= value < bins[i][1]:
return i
return -1
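# Illustrative example of the two helpers above:
#   create_bins(-180, 10, 180) returns 36 bins [[-180, -170], ..., [170, 180]]
#   find_bin(-175.3, create_bins(-180, 10, 180)) returns 0
#   find_bin(200.0, create_bins(-180, 10, 180)) returns -1 (outside all bins)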
def reweight_1d(
binspace=10, n_structures=4, Xdim=[-180, 180], T=300.0, min_prob=0.000001
):
"""
    Reweights boosted potential energies in one dimension using a
    Maclaurin series expansion to first, second and third order.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Psi = pd.read_csv("Psi.dat", delim_whitespace=True, header=None)
df_Psi.columns = ["Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
hist, hist_edges = np.histogram(df_Psi[["Psi"]], bins=binsX, weights=None)
pstarA = [i / sum_total for i in list(hist)]
bins = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
data = df_Psi["Psi"].values.tolist()
binned_weights = []
for value in data:
bin_index = find_bin(value, bins)
binned_weights.append(bin_index)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df = pd.concat([df_index, df_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
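    # exp() of a double overflows for arguments above ~709, so the summed
    # cumulant terms are capped at 700 before exponentiation below.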
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
####c1
df_c1.to_csv("c1_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_1d.txt", "r") as f1, open("pA_c1_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_1d.txt")
####c12
df_c12.to_csv("c12_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_1d.txt", "r") as f1, open("pA_c12_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_1d.txt")
####c123
df_c123.to_csv("c123_1d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_1d.txt", "r") as f1, open("pA_c123_1d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_1d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_1d.txt", "r") as f1, open(
"pA_c1_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_1d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_1d.txt", "r") as f1, open(
"pA_c12_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_1d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_1d.txt", "r") as f1, open(
"pA_c123_arranged_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_1d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_1d.txt", "r") as f1, open(
"c1_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_1d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_1d.txt", "r") as f1, open(
"c12_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_1d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_1d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_1d.txt", "r") as f1, open(
"c123_frame_1d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_1d.txt")
####c1
indices_c1_1d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_1d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_1d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_1d.pickle", "wb") as f:
pk.dump(frames_c1_1d, f)
with open("indices_c1_1d.pickle", "wb") as f:
pk.dump(indices_c1_1d, f)
####c12
indices_c12_1d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_1d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_1d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_1d.pickle", "wb") as f:
pk.dump(frames_c12_1d, f)
with open("indices_c12_1d.pickle", "wb") as f:
pk.dump(indices_c12_1d, f)
####c123
indices_c123_1d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_1d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_1d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_1d.pickle", "wb") as f:
pk.dump(frames_c123_1d, f)
with open("indices_c123_1d.pickle", "wb") as f:
pk.dump(indices_c123_1d, f)
##saving probabilities for each selected frame
####c1
prob_c1_1d_list = []
for i in indices_c1_1d:
prob_c1_1d_list.append(df_c1["pA_c1"][i])
prob_c1_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_1d_list
)
)
prob_c1_1d_list = [x / n_structures for x in prob_c1_1d_list]
with open("prob_c1_1d_list.pickle", "wb") as f:
pk.dump(prob_c1_1d_list, f)
####c12
prob_c12_1d_list = []
for i in indices_c12_1d:
prob_c12_1d_list.append(df_c12["pA_c12"][i])
prob_c12_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_1d_list
)
)
prob_c12_1d_list = [x / n_structures for x in prob_c12_1d_list]
with open("prob_c12_1d_list.pickle", "wb") as f:
pk.dump(prob_c12_1d_list, f)
####c123
prob_c123_1d_list = []
for i in indices_c123_1d:
prob_c123_1d_list.append(df_c123["pA_c123"][i])
prob_c123_1d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_1d_list
)
)
prob_c123_1d_list = [x / n_structures for x in prob_c123_1d_list]
with open("prob_c123_1d_list.pickle", "wb") as f:
pk.dump(prob_c123_1d_list, f)
ref_df_1d = pd.DataFrame(bins, columns=["dim0", "dim1"])
ref_df_1d["bins"] = ref_df_1d.agg(
lambda x: f"[{x['dim0']} , {x['dim1']}]", axis=1
)
ref_df_1d = ref_df_1d[["bins"]]
index_ref_1d = []
for i in range(len(bins)):
index_ref_1d.append(i)
index_ref_df_1d = pd.DataFrame(index_ref_1d, columns=["index"])
df_ref_1d = pd.concat([ref_df_1d, index_ref_df_1d], axis=1)
df_ref_1d.to_csv("ref_1d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_1d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def reweight_2d(
binspace=10,
n_structures=4,
Xdim=[-180, 180],
Ydim=[-180, 180],
T=300.0,
min_prob=0.000001,
):
"""
    Reweights boosted potential energies in two dimensions using a
    Maclaurin series expansion to first, second and third order.
Parameters
----------
binspace: int
Spacing between the bins
n_structures: int
Number of structures per bin chosen
for Weighted Ensemble (WE) simulations
Xdim: list
Range of dihedral angles (1st dimension)
Ydim: list
Range of dihedral angles (2nd dimension)
T: float
MD simulation temperature
min_prob: float
minimum probability threshold
"""
beta = 1.0 / (0.001987 * float(T))
df_Phi_Psi = pd.read_csv("Phi_Psi.dat", delim_whitespace=True, header=None)
df_Phi_Psi.columns = ["Phi", "Psi"]
df_weight = pd.read_csv("weights.dat", delim_whitespace=True, header=None)
df_weight.columns = ["dV_kBT", "timestep", "dVkcalmol"]
sum_total = df_Phi_Psi.shape[0]
binsX = np.arange(float(Xdim[0]), (float(Xdim[1]) + binspace), binspace)
binsY = np.arange(float(Ydim[0]), (float(Ydim[1]) + binspace), binspace)
hist2D, hist_edgesX, hist_edgesY = np.histogram2d(
df_Phi_Psi["Phi"].values.tolist(),
df_Phi_Psi["Psi"].values.tolist(),
bins=(binsX, binsY),
weights=None,
)
pstarA_2D = [i / sum_total for i in list(hist2D)]
bins_tuple_X = create_bins(
lower_bound=int(Xdim[0]), width=binspace, upper_bound=int(Xdim[1])
)
bins_tuple_Y = create_bins(
lower_bound=int(Ydim[0]), width=binspace, upper_bound=int(Ydim[1])
)
bins = []
for i in range(len(bins_tuple_X)):
for j in range(len(bins_tuple_Y)):
bins.append([bins_tuple_X[i], bins_tuple_Y[j]])
pstarA = [item for elem in pstarA_2D for item in elem]
hist = [item for elem in hist2D for item in elem]
hist = [int(i) for i in hist]
data_X = df_Phi_Psi["Phi"].values.tolist()
binned_weights_X = []
for value in data_X:
bin_index_X = find_bin(value, bins_tuple_X)
binned_weights_X.append(bin_index_X)
data_Y = df_Phi_Psi["Psi"].values.tolist()
binned_weights_Y = []
for value in data_Y:
bin_index_Y = find_bin(value, bins_tuple_Y)
binned_weights_Y.append(bin_index_Y)
binned_weights_2D = []
for i in range(len(binned_weights_X)):
binned_weights_2D.append([binned_weights_X[i], binned_weights_Y[i]])
binned_weights = []
for i in range(len(binned_weights_2D)):
binned_weights.append(
(binned_weights_2D[i][0] * len(bins_tuple_Y))
+ (binned_weights_2D[i][1] + 1)
)
df_index = pd.DataFrame(binned_weights)
df_index.columns = ["index"]
df_index["index"] = df_index["index"] - 1
df = pd.concat([df_index, df_Phi_Psi, df_weight], axis=1)
dV_c1 = []
dV_c2 = []
dV_c3 = []
dV = []
for i in range(len(bins)):
df_i = df.loc[(df["index"] == i)]
dV_list = df_i["dVkcalmol"].values.tolist()
if len(dV_list) >= 10:
dV_c1.append(statistics.mean(dV_list))
dV_c2.append(
statistics.mean([i ** 2 for i in dV_list])
- (statistics.mean(dV_list)) ** 2
)
dV_c3.append(
statistics.mean([i ** 3 for i in dV_list])
- 3
* (statistics.mean([i ** 2 for i in dV_list]))
* (statistics.mean(dV_list))
+ 2 * (statistics.mean(dV_list)) ** 3
)
if len(dV_list) < 10:
dV_c1.append(0)
dV_c2.append(0)
dV_c3.append(0)
dV.append(dV_list)
c1 = [i * beta for i in dV_c1]
c2 = [i * ((beta ** 2) / 2) for i in dV_c2]
c3 = [i * ((beta ** 3) / 6) for i in dV_c3]
c1 = c1
c12 = [a + b for a, b in zip(c1, c2)]
c123 = [a + b for a, b in zip(c12, c3)]
for i in range(len(c1)):
if c1[i] >= 700:
c1[i] = 700
for i in range(len(c12)):
if c12[i] >= 700:
c12[i] = 700
for i in range(len(c123)):
if c123[i] >= 700:
c123[i] = 700
ensemble_average_c1 = [exp(i) for i in c1]
ensemble_average_c12 = [exp(i) for i in c12]
ensemble_average_c123 = [exp(i) for i in c123]
numerator_c1 = [a * b for a, b in zip(pstarA, ensemble_average_c1)]
numerator_c12 = [a * b for a, b in zip(pstarA, ensemble_average_c12)]
numerator_c123 = [a * b for a, b in zip(pstarA, ensemble_average_c123)]
#### c1
denominatorc1 = []
for i in range(len(bins)):
product_c1 = pstarA[i] * ensemble_average_c1[i]
denominatorc1.append(product_c1)
denominator_c1 = sum(denominatorc1)
pA_c1 = [i / denominator_c1 for i in numerator_c1]
#### c12
denominatorc12 = []
for i in range(len(bins)):
product_c12 = pstarA[i] * ensemble_average_c12[i]
denominatorc12.append(product_c12)
denominator_c12 = sum(denominatorc12)
pA_c12 = [i / denominator_c12 for i in numerator_c12]
#### c123
denominatorc123 = []
for i in range(len(bins)):
product_c123 = pstarA[i] * ensemble_average_c123[i]
denominatorc123.append(product_c123)
denominator_c123 = sum(denominatorc123)
pA_c123 = [i / denominator_c123 for i in numerator_c123]
data_c1 = list(zip(bins, pA_c1))
data_c12 = list(zip(bins, pA_c12))
data_c123 = list(zip(bins, pA_c123))
df_c1 = pd.DataFrame(data_c1, columns=["bins", "pA_c1"])
df_c12 = pd.DataFrame(data_c12, columns=["bins", "pA_c12"])
df_c123 = pd.DataFrame(data_c123, columns=["bins", "pA_c123"])
df_c1.to_csv("c1_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c1_2d.txt", "r") as f1, open("pA_c1_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_2d.txt")
####c12
df_c12.to_csv("c12_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c12_2d.txt", "r") as f1, open("pA_c12_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_2d.txt")
####c123
df_c123.to_csv("c123_2d.txt", header=True, index=None, sep=" ", mode="w")
with open("c123_2d.txt", "r") as f1, open("pA_c123_2d.txt", "w") as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_2d.txt")
####c1_arranged
df_c1_arranged = df_c1.sort_values(by="pA_c1", ascending=False)
df_c1_arranged = df_c1_arranged[df_c1_arranged.pA_c1 > min_prob]
df_c1_arranged.to_csv(
"c1_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_arranged_2d.txt", "r") as f1, open(
"pA_c1_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_arranged_2d.txt")
####c12_arranged
df_c12_arranged = df_c12.sort_values(by="pA_c12", ascending=False)
df_c12_arranged = df_c12_arranged[df_c12_arranged.pA_c12 > min_prob]
df_c12_arranged.to_csv(
"c12_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_arranged_2d.txt", "r") as f1, open(
"pA_c12_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_arranged_2d.txt")
####c123_arranged
df_c123_arranged = df_c123.sort_values(by="pA_c123", ascending=False)
df_c123_arranged = df_c123_arranged[df_c123_arranged.pA_c123 > min_prob]
df_c123_arranged.to_csv(
"c123_arranged_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_arranged_2d.txt", "r") as f1, open(
"pA_c123_arranged_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_arranged_2d.txt")
####c1_arranged
df_c1_arranged["index"] = df_c1_arranged.index
index_list_c1 = df_c1_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c1 = []
index_indces_c1 = []
for i in index_list_c1:
df_index_list_c1 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c1 = df_index_list_c1["frame_index"].tolist()
frame_indices_c1.append(frame_c1)
index_c1 = [i] * len(frame_c1)
index_indces_c1.append(index_c1)
frame_indices_c1 = [item for elem in frame_indices_c1 for item in elem]
index_indces_c1 = [item for elem in index_indces_c1 for item in elem]
df_c1_frame = pd.DataFrame(frame_indices_c1, columns=["frame_index"])
df_c1_index = pd.DataFrame(index_indces_c1, columns=["index"])
df_c1_frame_index = pd.concat([df_c1_frame, df_c1_index], axis=1)
df_c1_frame_index = df_c1_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c1_frame_index.to_csv(
"c1_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c1_frame_index_2d.txt", "r") as f1, open(
"c1_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c1_frame_index_2d.txt")
####c12_arranged
df_c12_arranged["index"] = df_c12_arranged.index
index_list_c12 = df_c12_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c12 = []
index_indces_c12 = []
for i in index_list_c12:
df_index_list_c12 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c12 = df_index_list_c12["frame_index"].tolist()
frame_indices_c12.append(frame_c12)
index_c12 = [i] * len(frame_c12)
index_indces_c12.append(index_c12)
frame_indices_c12 = [item for elem in frame_indices_c12 for item in elem]
index_indces_c12 = [item for elem in index_indces_c12 for item in elem]
df_c12_frame = pd.DataFrame(frame_indices_c12, columns=["frame_index"])
df_c12_index = pd.DataFrame(index_indces_c12, columns=["index"])
df_c12_frame_index = pd.concat([df_c12_frame, df_c12_index], axis=1)
df_c12_frame_index = df_c12_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c12_frame_index.to_csv(
"c12_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c12_frame_index_2d.txt", "r") as f1, open(
"c12_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c12_frame_index_2d.txt")
####c123_arranged
df_c123_arranged["index"] = df_c123_arranged.index
df_c123_arranged["index"] = df_c123_arranged.index
index_list_c123 = df_c123_arranged["index"].tolist()
df["frame_index"] = df.index
df_frame_index = df[["frame_index", "index"]]
frame_indices_c123 = []
index_indces_c123 = []
for i in index_list_c123:
df_index_list_c123 = df_frame_index.loc[df_frame_index["index"] == i]
frame_c123 = df_index_list_c123["frame_index"].tolist()
frame_indices_c123.append(frame_c123)
index_c123 = [i] * len(frame_c123)
index_indces_c123.append(index_c123)
frame_indices_c123 = [item for elem in frame_indices_c123 for item in elem]
index_indces_c123 = [item for elem in index_indces_c123 for item in elem]
df_c123_frame = pd.DataFrame(frame_indices_c123, columns=["frame_index"])
df_c123_index = pd.DataFrame(index_indces_c123, columns=["index"])
df_c123_frame_index = pd.concat([df_c123_frame, df_c123_index], axis=1)
df_c123_frame_index = df_c123_frame_index.groupby("index").filter(
lambda x: len(x) >= 10
)
df_c123_frame_index.to_csv(
"c123_frame_index_2d.txt", header=True, index=None, sep=" ", mode="w"
)
with open("c123_frame_index_2d.txt", "r") as f1, open(
"c123_frame_2d.txt", "w"
) as f2:
for line in f1:
f2.write(line.replace('"', "").replace("'", ""))
os.system("rm -rf c123_frame_index_2d.txt")
####c1
indices_c1_2d = df_c1_frame_index["index"].unique()
frames_c1 = []
for i in indices_c1_2d:
x = df_c1_frame_index.loc[df_c1_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c1.append(z)
frames_c1_2d = [item for elem in frames_c1 for item in elem]
with open("frames_c1_2d.pickle", "wb") as f:
pk.dump(frames_c1_2d, f)
with open("indices_c1_2d.pickle", "wb") as f:
pk.dump(indices_c1_2d, f)
####c12
indices_c12_2d = df_c12_frame_index["index"].unique()
frames_c12 = []
for i in indices_c12_2d:
x = df_c12_frame_index.loc[df_c12_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c12.append(z)
frames_c12_2d = [item for elem in frames_c12 for item in elem]
with open("frames_c12_2d.pickle", "wb") as f:
pk.dump(frames_c12_2d, f)
with open("indices_c12_2d.pickle", "wb") as f:
pk.dump(indices_c12_2d, f)
####c123
indices_c123_2d = df_c123_frame_index["index"].unique()
frames_c123 = []
for i in indices_c123_2d:
x = df_c123_frame_index.loc[df_c123_frame_index["index"] == i]
y = x["frame_index"].values.tolist()
z = random.sample(y, n_structures)
frames_c123.append(z)
frames_c123_2d = [item for elem in frames_c123 for item in elem]
with open("frames_c123_2d.pickle", "wb") as f:
pk.dump(frames_c123_2d, f)
with open("indices_c123_2d.pickle", "wb") as f:
pk.dump(indices_c123_2d, f)
##saving probabilities for each selected frame
####c1
prob_c1_2d_list = []
for i in indices_c1_2d:
prob_c1_2d_list.append(df_c1["pA_c1"][i])
prob_c1_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c1_2d_list
)
)
prob_c1_2d_list = [x / n_structures for x in prob_c1_2d_list]
with open("prob_c1_2d_list.pickle", "wb") as f:
pk.dump(prob_c1_2d_list, f)
####c12
prob_c12_2d_list = []
for i in indices_c12_2d:
prob_c12_2d_list.append(df_c12["pA_c12"][i])
prob_c12_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c12_2d_list
)
)
prob_c12_2d_list = [x / n_structures for x in prob_c12_2d_list]
with open("prob_c12_2d_list.pickle", "wb") as f:
pk.dump(prob_c12_2d_list, f)
####c123
prob_c123_2d_list = []
for i in indices_c123_2d:
prob_c123_2d_list.append(df_c123["pA_c123"][i])
prob_c123_2d_list = list(
itertools.chain.from_iterable(
itertools.repeat(x, n_structures) for x in prob_c123_2d_list
)
)
prob_c123_2d_list = [x / n_structures for x in prob_c123_2d_list]
with open("prob_c123_2d_list.pickle", "wb") as f:
pk.dump(prob_c123_2d_list, f)
ref_df_2d = pd.DataFrame(bins, columns=["binsX", "binsY"])
ref_df_2d["XY"] = ref_df_2d.agg(
lambda x: f"{x['binsX']} , {x['binsX']}", axis=1
)
ref_df_2d = ref_df_2d[["XY"]]
index_ref_2d = []
for i in range(len(bins_tuple_X) * len(bins_tuple_Y)):
index_ref_2d.append(i)
index_ref_df_2d = pd.DataFrame(index_ref_2d, columns=["index"])
df_ref_2d = pd.concat([ref_df_2d, index_ref_df_2d], axis=1)
df_ref_2d.to_csv("ref_2d.txt", header=True, index=None, sep=" ", mode="w")
df.to_csv("df_2d.csv", index=False)
os.system("rm -rf __pycache__")
print("Successfully Completed Reweighing")
def save_frames():
"""
Creates a directory named we_structures. Inside this
directory, there are six subdirectories (three for
    one-dimensional reweighting and three for two-dimensional
    reweighting). Frames selected by the one-, two- and
    three-term Maclaurin series reweighting are placed in
    their respective folders.
"""
cwd = os.getcwd()
os.system("rm -rf we_structures")
os.system("mkdir we_structures")
os.chdir(cwd + "/" + "we_structures")
os.system("mkdir 1d_c1")
os.system("mkdir 1d_c12")
os.system("mkdir 1d_c123")
os.system("mkdir 2d_c1")
os.system("mkdir 2d_c12")
os.system("mkdir 2d_c123")
os.chdir(cwd)
df1 = pd.read_csv("df_1d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_1d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["bins"].tolist()
index_bins = dict(zip(index_, bins))
#### 1d
with open("frames_c1_1d.pickle", "rb") as input_file:
frames_c1_1d = pk.load(input_file)
for i in frames_c1_1d:
j = index_frame[i]
frame_index = frames_c1_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_1d.pickle", "rb") as input_file:
frames_c12_1d = pk.load(input_file)
for i in frames_c12_1d:
j = index_frame[i]
frame_index = frames_c12_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_1d.pickle", "rb") as input_file:
frames_c123_1d = pk.load(input_file)
for i in frames_c123_1d:
j = index_frame[i]
frame_index = frames_c123_1d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace(" , ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_1d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "1d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
df1 = pd.read_csv("df_2d.csv")
index = df1["index"].tolist()
frame = df1["frame_index"].tolist()
index_frame = dict(zip(frame, index))
df2 = pd.read_csv("ref_2d.txt", sep=" ", delimiter=None, header="infer")
index_ = df2["index"].tolist()
bins = df2["XY"].tolist()
index_bins = dict(zip(index_, bins))
#### 2d
with open("frames_c1_2d.pickle", "rb") as input_file:
frames_c1_2d = pk.load(input_file)
for i in frames_c1_2d:
j = index_frame[i]
frame_index = frames_c1_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c1_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c1"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c12_2d.pickle", "rb") as input_file:
frames_c12_2d = pk.load(input_file)
for i in frames_c12_2d:
j = index_frame[i]
frame_index = frames_c12_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c12_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c12"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
with open("frames_c123_2d.pickle", "rb") as input_file:
frames_c123_2d = pk.load(input_file)
for i in frames_c123_2d:
j = index_frame[i]
frame_index = frames_c123_2d.index(i)
k = index_bins[j]
k = k.strip("[]")
k = k.replace("] , [", "_")
k = k.replace(", ", "_")
# traj = pt.load("system_final.nc", top="system_final.prmtop", frame_indices=[i])
traj = md.load_frame(
"system_final.nc", top="system_final.prmtop", index=i
)
frame_pdb = str(frame_index) + "_" + k + "_2d_c123_" + str(i) + ".pdb"
# pt.save(frame_pdb, traj, overwrite=True)
traj.save_pdb(frame_pdb, force_overwrite=True)
target_dir = cwd + "/" + "we_structures" + "/" + "2d_c123"
shutil.move(cwd + "/" + frame_pdb, target_dir + "/" + frame_pdb)
def save_we_inputs():
"""
    Writes a Weighted Ensemble input file for each reweighting
    scheme. Each line of an input file contains an index, the
    probability of the structure, and the name of its PDB file.
"""
cwd = os.getcwd()
target_dir = cwd + "/" + "we_structures"
dir_list = ["1d_c1", "1d_c12", "1d_c123", "2d_c1", "2d_c12", "2d_c123"]
for i in dir_list:
os.chdir(target_dir + "/" + i)
pdbs = os.listdir(".")
pickle_file = "pdb_" + i + ".pickle"
with open(pickle_file, "wb") as f:
pk.dump(pdbs, f)
shutil.move(
target_dir + "/" + i + "/" + pickle_file, cwd + "/" + pickle_file
)
os.chdir(cwd)
# c1_1d
with open("prob_c1_1d_list.pickle", "rb") as input_file:
prob_c1_1d_list = pk.load(input_file)
prob_c1_1d_list = [i / min(prob_c1_1d_list) for i in prob_c1_1d_list]
prob_c1_1d_list = [i / sum(prob_c1_1d_list) for i in prob_c1_1d_list]
with open("pdb_1d_c1.pickle", "rb") as input_file:
pdb_1d_c1 = pk.load(input_file)
pdb_1d_c1_index = []
for i in range(len(pdb_1d_c1)):
pdb_1d_c1_index.append(int(re.findall(r"\d+", pdb_1d_c1[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c1, prob_c1_1d_list, pdb_1d_c1_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
df_merged = pd.concat([df_index, df], axis=1)
df_merged.to_csv(
"we_input_c1_1d.txt", header=False, index=None, sep=" ", mode="w"
)
# c12_1d
with open("prob_c12_1d_list.pickle", "rb") as input_file:
prob_c12_1d_list = pk.load(input_file)
prob_c12_1d_list = [i / min(prob_c12_1d_list) for i in prob_c12_1d_list]
prob_c12_1d_list = [i / sum(prob_c12_1d_list) for i in prob_c12_1d_list]
with open("pdb_1d_c12.pickle", "rb") as input_file:
pdb_1d_c12 = pk.load(input_file)
pdb_1d_c12_index = []
for i in range(len(pdb_1d_c12)):
pdb_1d_c12_index.append(int(re.findall(r"\d+", pdb_1d_c12[i])[0]))
df = pd.DataFrame(
list(zip(pdb_1d_c12, prob_c12_1d_list, pdb_1d_c12_index)),
columns=["pdb_name", "probability", "pdb_index"],
)
df = df.sort_values(by=["pdb_index"])
df = df[["probability", "pdb_name"]]
index_row = []
for i in range(df.shape[0]):
index_row.append(i)
df_index = pd.DataFrame(index_row, columns=["index_"])
    df_merged = pd.concat([df_index, df], axis=1)
    df_merged.to_csv(
        "we_input_c12_1d.txt", header=False, index=None, sep=" ", mode="w"
    )
# RCS14_entrainment_naive.py
# Generate timeseries analysis and power estimate
# Author: maria.olaru@
"""
Created on Mon May 3 18:22:44 2021
@author: mariaolaru
"""
import numpy as np
import os
import scipy.signal as signal
import pandas as pd
import math
from preproc.preprocess_funcs import convert_unix2dt
from fooof import FOOOF
def name_file(mscs, gp):
out_plot_dir = gp + '/' + 'tables/'
if not os.path.isdir(out_plot_dir):
os.mkdir(out_plot_dir)
subj_id = mscs['subj_id'].iloc[0]
session_id = str(int(mscs['session_id'].iloc[0]))
out_name = subj_id + '_' + session_id
out_fp = out_plot_dir + out_name
return out_fp
def qc_msc(mscs):
num_fs = len(mscs['ch1_sr'].unique())
num_sesh = len(mscs['session_id'].unique())
num_amps = len(mscs['amplitude_ma'].unique())
mscs_qc = mscs.iloc[:, 5:len(mscs.columns)-3].drop(['stim_contact_an'], axis = 1)
if ('timestamp' in mscs_qc.columns):
mscs_qc = mscs_qc.drop(['timestamp'], axis = 1)
mscs_qc_avg = mscs_qc.mean()
if (num_fs > 1):
raise Exception("Timestamp range cannot have multiple sampling rates")
if (num_sesh > 1):
raise Exception("Timestamp range cannot have multiple session ids... code still inp")
if (num_amps > 1):
raise Exception("Timestamp range cannot have multiple stim amplitudes... code still inp")
if ((round((mscs_qc_avg - mscs_qc), 3)!=0).all().any()):
raise Exception("A setting was not held constant in current session... code still inp")
return
def subset_msc(msc, ts_range):
mscs_start = msc[(msc['timestamp_unix'] <= ts_range[0])].tail(1)
mscs_end = msc[(msc['timestamp_unix'] >= ts_range[0]) & (msc['timestamp_unix'] <= ts_range[1])]
if mscs_start.equals(mscs_end):
return mscs_start
else:
mscs = pd.concat([mscs_start, mscs_end])
return mscs
def subset_md(md, mscs, fs, ts_range, ts_int = None):
"""
Parameters
----------
md : meta data output from preprocess_data()
ts_range: vector of UNIX timestamps with min and max values
Returns
-------
a subset of the meta data with modified timestamps
"""
print("Subsetting the meta data")
ts_min = md['timestamp_unix'].iloc[md['timestamp_unix'].sub(ts_range[0]).abs().idxmin()]
ts_max = md['timestamp_unix'].iloc[md['timestamp_unix'].sub(ts_range[1]).abs().idxmin()]
md_i_min = md[md['timestamp_unix'] == ts_min].index[0]
md_i_max = md[md['timestamp_unix'] == ts_max].index[0]
mds = md.iloc[md_i_min:md_i_max, :]
if (ts_int != None):
i_int = mscs[mscs['timestamp'] == ts_int].index[0]
amp1 = mscs['amplitude_ma'].iloc[i_int-1]
amp2 = mscs['amplitude_ma'].iloc[i_int]
mds = mds.assign(amp=np.where(mds['timestamp_unix'] < ts_int, amp1, amp2))
mds = md.loc[md_i_min:md_i_max, :]
mds = mds.reset_index(drop=True)
ts_dt = convert_unix2dt(mds['timestamp_unix'])
mds.insert(1, 'timestamp', ts_dt)
tt = len(mds)/fs/60
return [mds, tt]
def convert_psd_montage(df, sr):
"""
Parameters
----------
df : linked dataframe
Returns
-------
df_psd : dataframe, power spectra using Welch's in long-form
"""
    print('Creating PSDs for sr ' + str(sr))
cols = df['sense_contacts'].unique()
labels = np.array(df['label'].unique())
df_collection = {}
for label in labels:
df_collection[label] = pd.DataFrame(columns = ['f_0'])
for i in range(len(cols)):
dfs = df[(df['label'] == label) & (df['sr'] == sr) & (df['sense_contacts'] == cols[i])]
df_curr = convert_psd(dfs['voltage'], sr, cols[i])
df_collection[label] = df_collection[label].merge(df_curr, how = 'outer')
return df_collection
def convert_psd(voltage, fs, col_header):
f_0, Pxx_den = signal.welch(voltage, fs, average = 'median', window = 'hann', nperseg=fs)
psd_ind = pd.DataFrame(np.array([f_0, Pxx_den]).T, columns=['f_0', col_header])
return psd_ind
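# Minimal usage sketch for convert_psd (the 20 Hz sine and fs = 250 Hz are
# illustrative assumptions, not data from this project):
#   t = np.arange(0, 10, 1 / 250)
#   demo = convert_psd(np.sin(2 * np.pi * 20 * t), 250, 'demo_channel')
#   # demo has columns ['f_0', 'demo_channel'] with a peak near f_0 = 20 Hz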
def convert_psd_wide(df, sr):
#df is a linked df
stim_freqs = np.sort(df['stim_freq'].unique())
sense_contacts = df['sense_contacts'].unique()
df_collection = {}
for stim_freq in stim_freqs:
df_collection[stim_freq] = {}
dfs = df[df['stim_freq'] == stim_freq]
stim_amps = np.sort(dfs['stim_amp'].unique())
for stim_amp in stim_amps:
df_collection[stim_freq][stim_amp] = pd.DataFrame(columns = ['f_0'])
for i in range(len(stim_amps)):
dfss = dfs[dfs['stim_amp'] == stim_amp]
for j in range(len(sense_contacts)):
dfsss = dfss[dfss['sense_contacts'] == sense_contacts[j]]
df_psd_ind = convert_psd(dfsss['voltage'], sr, sense_contacts[j])
df_collection[stim_freq][stim_amp] = df_collection[stim_freq][stim_amp].merge(df_psd_ind, how = 'outer')
return df_collection
def convert_psd_long_old(df, gp, contacts, time_duration, time_overlap, sr, spec):
"""
Parameters
----------
df : timedomain data (not linked)
spec: type of psd (period, aperiodic, gross)
Returns
-------
df_psd : dataframe, power spectra using Welch's in long-form
#Assumes sense contacts don't change
"""
out_name = os.path.basename(gp)
df_psd = pd.DataFrame() #initialize metadata table of settings information
nperseg = time_duration * sr
overlapseg = time_overlap * sr
channels = df.columns[1:5]
for i in range(len(channels)):
ch = channels[i]
df_ch = df.loc[:,ch]
df_ch.name = contacts[i]
indx_start = int(0)
indx_stop = int(nperseg)
num_spectra = math.floor((len(df_ch)-nperseg)/(nperseg - overlapseg))
# ts_sec = df['timestamp'].round(-3)/1000
# ts_wholemin = ts_sec.iloc[np.where(ts_sec % 60 == 0)[0]]
for j in range(num_spectra):
ts_start = int(df.iloc[indx_start, 0])
ts_stop = int(df.iloc[indx_stop, 0])
ts_sec = round(ts_stop, -3)/1000
if (ts_sec % 60 != 0):
indx_start = int(indx_start + (nperseg - overlapseg))
indx_stop = int(indx_stop + (nperseg - overlapseg))
continue
ts_diff = ts_stop - ts_start
if (ts_diff > (time_duration*1000)*1.1):
indx_start = int(indx_start + (nperseg - overlapseg))
indx_stop = int(indx_stop + (nperseg - overlapseg))
continue
else:
print("ch: " + contacts[i] + " (" + str(j) + "/" + str(num_spectra) + ")")
voltage = df_ch[indx_start:indx_stop]
nan_indx = np.where(np.isnan(voltage) == True)[0]
if (len(nan_indx) > 0):
if (len(nan_indx) > 0.1*len(voltage)):
continue
else:
voltage = voltage.drop(voltage.index[nan_indx])
#print(indx_start)
#print(indx_stop)
#print(ts_tail)
df_psd_ind = convert_psd(voltage, sr, 'spectra')
if (spec == 'aperiodic'):
spectrum = df_psd_ind[df_psd_ind.columns[len(df_psd_ind.columns)-1]]
freq_range = [4, 100]
fooof_psd = flatten_psd(df_psd_ind['f_0'], spectrum, freq_range)
df_psd_ind = fooof_psd.loc[:, ['f_0', 'fooof_peak_rm']]
if (spec == 'periodic'):
spectrum = df_psd_ind[df_psd_ind.columns[len(df_psd_ind.columns)-1]]
freq_range = [4, 100]
fooof_psd = flatten_psd(df_psd_ind['f_0'], spectrum, freq_range)
df_psd_ind = fooof_psd.loc[:, ['f_0', 'fooof_flat']]
df_psd_ind['contacts'] = df_ch.name
df_psd_ind['timestamp'] = int(ts_sec*1000)
                df_psd = pd.concat([df_psd, df_psd_ind])
            indx_start = int(indx_start + (nperseg - overlapseg))
            indx_stop = int(indx_stop + (nperseg - overlapseg))
    return df_psd
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from datetime import timedelta, date
import pandas as pd
import numpy as np
import warnings
import requests
import sqlalchemy
import airports
def configure_engine():
DB_TYPE = 'mysql'
DB_DRIV = 'pymysql'
DB_USER = 'DBUSER'
DB_PASS = 'PASSWORD'
DB_HOST = 'IP'
DB_NAME = 'DBNAME'
SQLALCHEMY_DATABASE_URI = '%s+%s://%s:%s@%s/%s' % (DB_TYPE, DB_DRIV, DB_USER, DB_PASS, DB_HOST, DB_NAME)
return SQLALCHEMY_DATABASE_URI
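# With the placeholders above the returned URI has the form
#   mysql+pymysql://DBUSER:PASSWORD@IP/DBNAME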
def create_table():
POOL_SIZE = 50
metadata = sqlalchemy.MetaData()
TABLENAME = 'weather'
ENGINE = sqlalchemy.create_engine(
configure_engine(), pool_size=POOL_SIZE, max_overflow=0)
t = sqlalchemy.Table(TABLENAME, metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=True),
sqlalchemy.Column('DateTime', sqlalchemy.DATETIME),
sqlalchemy.Column('Airport-code', sqlalchemy.VARCHAR(10)),
sqlalchemy.Column('Temperature', sqlalchemy.DECIMAL(3,1)),
sqlalchemy.Column('Dew-Point', sqlalchemy.DECIMAL(3,1)),
sqlalchemy.Column('Humidity', sqlalchemy.VARCHAR(10)),
sqlalchemy.Column('Pressure-hPa', sqlalchemy.INTEGER),
sqlalchemy.Column('Visibility-km', sqlalchemy.DECIMAL(3,1)),
sqlalchemy.Column('Wind-Dir', sqlalchemy.VARCHAR(10)),
sqlalchemy.Column('Conditions', sqlalchemy.TEXT),
sqlalchemy.Column('Wind-Speed-kmh', sqlalchemy.DECIMAL(3,1)),
sqlalchemy.Column('Wind-Speed-ms', sqlalchemy.DECIMAL(3,1))
)
t.create(ENGINE)
def read_text(link):
page = requests.get(link)
return page.text
def get_data_between_texts(raw_text, text1, text2, include_texts=True):
index1 = raw_text.find(text1)
index2 = raw_text.find(text2)
if(include_texts):
filtered = raw_text[index1:index2+len(text2)]
else:
filtered = raw_text[index1+len(text1):index2]
return filtered
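# Example of the helper above:
#   get_data_between_texts("abcSTARTxyzEND", "START", "END", include_texts=False)
#   returns "xyz"; with include_texts=True it returns "STARTxyzEND".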
def get_weather_table(url):
full_html = read_text(url)
table_header = '<table cellspacing="0" cellpadding="0" id="obsTable" class="obs-table responsive">'
table_footer = '<div class="obs-table-footer">'
table = get_data_between_texts(full_html, table_header, table_footer)
return pd.read_html(table, header=0)[0]
def generate_weather_information(airport_code, year, month, day):
LINK = 'https://www.wunderground.com/history/airport/%s/%s/%s/%s/DailyHistory.html' % (airport_code, year, month, day)
try:
df = get_weather_table(LINK)
except:
return pd.DataFrame()
    #Drop columns that do not appear for all airports.
if unicode('Heat Index',"utf-8") in df.columns:
df = df.drop('Heat Index', 1)
if unicode('Windchill',"utf-8") in df.columns:
df = df.drop('Windchill', 1)
df = df.drop('Gust Speed', 1)
df = df.drop('Events', 1)
df = df.replace(np.nan, 'NA', regex=True)
df = df[df['Precip'] == 'NA']
df = df[df['Humidity'] != 'N/A%']
df = df.drop('Precip', 1)
df['Temp.'] = df['Temp.'].map(lambda x: x.rstrip(unicode(' °C',"utf-8")).replace('- ','0.0').replace(u'\xa0', u''))
df['Dew Point'] = df['Dew Point'].map(lambda x: x.rstrip(unicode(' °C',"utf-8")).replace('- ','0.0').replace(u'\xa0', u''))
df['Wind Speed'] = df['Wind Speed'].map(lambda x: x.replace('Calm','0.0 km/h / 0.0 m/s').replace('-','0.0 km/h / 0.0 m/s').replace(u'\xa0', u''))
df['Wind-Speed-kmh'] = df['Wind Speed'].map(lambda x: x.split(' / ')[0].rstrip(unicode(' km/h',"utf-8")).replace(u'\xa0', u''))
df['Wind-Speed-ms'] = df['Wind Speed'].map(lambda x: x.split(' / ')[1].rstrip(unicode(' m/s',"utf-8")).replace(u'\xa0', u''))
df = df.drop('Wind Speed', 1)
df['Pressure'] = df['Pressure'].map(lambda x: x.rstrip(unicode(' hPa',"utf-8")).replace('-','0.0').replace(u'\xa0', u''))
df['Visibility'] = df['Visibility'].map(lambda x: x.rstrip(unicode(' km',"utf-8")).replace('-','0.0').replace(u'\xa0', u''))
#Adjust the first column name to just Time
df.rename(columns={ df.columns[0]: 'Time' }, inplace=True)
measure_date = '%s-%s-%s ' % (year, month, day)
    df['Time'] = pd.to_datetime(measure_date + df['Time'])
    return df
import numpy as np
import pandas as pd
from sklearn import preprocessing
import types
def csv_loader(path, csv_header, manual_encoder, sep=',', col_names=None, drop_first_line=False, drop_last_line=False,
missing_handle=dict()):
# Arguments:
# path: path to csv file;
# used in function pandas.read_csv
    # csv_header: 0 if the first row contains the column names; None if no header
# used in function pandas.read_csv
    # manual_encoder: a dictionary of dictionaries (can be empty);
# column_name->(original_value->encoded_value);
# labels must be encoded manually
# sep: separators of csv
# used in function pandas.read_csv
# col_names: optional, names of columns in data files
# used in function pandas.read_csv
# drop_first_line: optional, binary, if ==True, drop the first row of the data file
# drop_last_line: optional, if ==True, drop the last row of the data file
# missing_handle: dictionary, specify the scheme to handle a CONTINUOUS missing value;
# applied BEFORE the manual encoder;
# column_name -> (missing symbol, handle scheme)
# handle_scheme could be 'mean' to take the mean values
#
# Functionality:
# load a given csv file and encode its values;
# set the column names accordingly;
    #   handle the missing values;
    #   drop the last line of the file (which might contain NaNs) if drop_last_line=True.
#
# Returns:
# df: the loaded dataframe.
engine = 'c'
if len(sep) > 1:
engine = 'python'
df = pd.read_csv(path, sep=sep, header=csv_header, names=col_names, engine=engine)
if drop_first_line:
df.drop(df.head(1).index, inplace=True)
if drop_last_line:
df.drop(df.tail(1).index, inplace=True)
# handle missing values:
for col in missing_handle.keys():
symbol, scheme = missing_handle[col]
if scheme == 'mean':
df[col][df[col] == symbol] = df[col][df[col] != symbol].astype(float).mean()
# Manual encoding
for col in manual_encoder.keys():
if col not in df.columns:
print(col, "cannot be found in the data file:", path)
raise NameError("header is not in the data file: " + path)
if isinstance(manual_encoder[col], types.FunctionType):
df[col] = df[col].apply(manual_encoder[col])
elif isinstance(manual_encoder[col], dict):
df[col] = df[col].map(manual_encoder[col])
else:
raise TypeError
return df
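# Hedged usage sketch (not part of the original module): the file name, column names and
# encodings below are hypothetical; they only illustrate how the manual_encoder and
# missing_handle arguments plug into csv_loader. The function is defined but never called.
def _example_csv_loader_usage():
    encoder = {'sex': {'M': 0, 'F': 1},           # dict: map categories to integers
               'income': lambda v: float(v)}      # function: applied element-wise
    missing = {'age': ('?', 'mean')}              # replace '?' in 'age' with the column mean
    return csv_loader('example.csv', csv_header=0, manual_encoder=encoder,
                      sep=',', missing_handle=missing)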
def feature_label_split(df, label_col, label_map=None):
# Arguments:
# df: DataFrame, containing features and labels.
# label_col: the column to be used as label; can be index if no name
# label_map: original_value->encoded_value; could either be a function or a dictionary
#
# Functionality:
# split the input DataFrame of the form (X, y) into X, y;
# then have y encoded if needed.
#
# Returns:
# X: features.
# y: labels.
if label_col not in df.columns:
raise NameError("label cannot be found in the dataframe")
y = df[label_col]
X = df.drop(label_col, axis=1)
if label_map is not None:
if isinstance(label_map, types.FunctionType):
y = y.apply(label_map)
elif isinstance(label_map, dict):
y = y.map(label_map)
return X, y
def train_test_split(X, y, train_size, test_size=None, shuffle=True, seed=None):
# Arguments:
# X: pandas DataFrame, features matrix
# y: labels
# train_size: size of training data
# test_size: optional; if None, then calculate based on train_size.
# shuffle: binary; if shuffle == True, then shuffle the X and y before slicing
# seed: random seed for shuffling; has no effect if shuffle is disabled
#
# Functionality:
# split a dataset into feature matrix and labels
#
# Returns:
# train_X
# train_y
# test_X
# test_y
if shuffle:
if seed is not None:
np.random.seed(seed)
random_perm = np.random.permutation(X.index)
X = X.reindex(random_perm)
y = y.reindex(random_perm)
if test_size is None:
return (X.iloc[:train_size], y.iloc[:train_size], X.iloc[train_size:], y.iloc[train_size:])
assert (train_size + test_size <= X.shape[0])
return (X.iloc[:train_size], y.iloc[:train_size], X.iloc[-test_size:], y.iloc[-test_size:])
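# Hedged usage sketch (synthetic data, not from the original project): splits 100 rows into
# an 80/20 train/test partition with a fixed seed so the shuffle is reproducible.
def _example_train_test_split():
    X = pd.DataFrame({'a': range(100), 'b': range(100)})
    y = pd.Series(range(100))
    train_X, train_y, test_X, test_y = train_test_split(X, y, train_size=80,
                                                         test_size=20, seed=0)
    return train_X.shape, test_X.shape   # expected: ((80, 2), (20, 2))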
def feature_engineering_cat(train_X, test_X, cat_col):
# Arguments:
# train_X: training feature matrix
# test_X: testing feature matrix
# cat_col: the list of columns with categorical values
#
# Functionality:
# one hot encode all categorical values;
#
# Returns:
# train_X: the engineered training feature matrix
# test_X: the engineered testing feature matrix
train_test = train_X.append(test_X, ignore_index=True)
train_size = train_X.shape[0]
# one-hot encode categorical values
for i in np.intersect1d(cat_col, train_test.columns.values):
temp =
|
pd.get_dummies(train_test[i])
|
pandas.get_dummies
|
# Gist example of IB wrapper from here: https://gist.github.com/robcarver17/f50aeebc2ecd084f818706d9f05c1eb4
#
# Download API from http://interactivebrokers.github.io/#
# (must be at least version 9.73)
#
# Install python API code /IBJts/source/pythonclient $ python3 setup.py install
#
# Note: The test cases, and the documentation refer to a python package called IBApi,
# but the actual package is called ibapi. Go figure.
#
# Get the latest version of the gateway:
# https://www.interactivebrokers.com/en/?f=%2Fen%2Fcontrol%2Fsystemstandalone-ibGateway.php%3Fos%3Dunix
# (for unix: windows and mac users please find your own version)
#
# Run the gateway
#
# user: edemo
# pwd: <PASSWORD>
#
# duration units and bar sizes:
# https://interactivebrokers.github.io/tws-api/historical_bars.html#hd_duration
# limitations:
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
import os
import time
import pprint
import queue
import datetime
import traceback
from pytz import timezone
from pathlib import Path
import pandas as pd
import numpy as np
from tqdm import tqdm
import seaborn as sns
import matplotlib.pyplot as plt
from ibapi.wrapper import EWrapper
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from threading import Thread
DEFAULT_HISTORIC_DATA_ID = 50
DEFAULT_GET_CONTRACT_ID = 43
DEFAULT_GET_NP_ID = 42
DEFAULT_GET_EARLIEST_ID = 1
DEFAULT_HISTORIC_NEWS_ID = 1001
## marker for when queue is finished
FINISHED = object()
STARTED = object()
TIME_OUT = object()
class finishableQueue(object):
def __init__(self, queue_to_finish):
self._queue = queue_to_finish
self.status = STARTED
def get(self, timeout):
"""
Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue
:param timeout: how long to wait before giving up
:return: list of queue elements
"""
contents_of_queue = []
finished = False
while not finished:
try:
current_element = self._queue.get(timeout=timeout)
if current_element is FINISHED:
finished = True
self.status = FINISHED
else:
contents_of_queue.append(current_element)
## keep going and try and get more data
except queue.Empty:
## If we hit a time out it's most probable we're not getting a finished element any time soon
## give up and return what we have
finished = True
self.status = TIME_OUT
return contents_of_queue
def timed_out(self):
return self.status is TIME_OUT
class TestWrapper(EWrapper):
"""
The wrapper deals with the action coming back from the IB gateway or TWS instance
We override methods in EWrapper that will get called when this action happens, like currentTime
Extra methods are added as we need to store the results in this object
"""
def __init__(self):
self._my_contract_details = {}
self._my_historic_data_dict = {}
self._my_earliest_timestamp_dict = {}
self._my_np_dict = {}
self._my_hn_dict = {}
self._my_na_dict = {}
self._my_errors = queue.Queue()
## error handling code
def init_error(self):
error_queue = queue.Queue()
self._my_errors = error_queue
def get_error(self, timeout=5):
if self.is_error():
try:
return self._my_errors.get(timeout=timeout)
except queue.Empty:
return None
return None
def is_error(self):
an_error_if=not self._my_errors.empty()
return an_error_if
def error(self, id, errorCode, errorString):
        ## Overridden method
errormsg = "IB error id %d errorcode %d string %s" % (id, errorCode, errorString)
self._my_errors.put(errormsg)
## get contract details code
def init_contractdetails(self, reqId):
self._my_contract_details[reqId] = queue.Queue()
return self._my_contract_details[reqId]
def contractDetails(self, reqId, contractDetails):
## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(contractDetails)
def contractDetailsEnd(self, reqId):
        ## overridden method
if reqId not in self._my_contract_details.keys():
self.init_contractdetails(reqId)
self._my_contract_details[reqId].put(FINISHED)
def init_historicprices(self, tickerid):
self._my_historic_data_dict[tickerid] = queue.Queue()
return self._my_historic_data_dict[tickerid]
def init_earliest_timestamp(self, tickerid):
self._my_earliest_timestamp_dict[tickerid] = queue.Queue()
return self._my_earliest_timestamp_dict[tickerid]
def init_np(self, tickerid):
self._my_np_dict[tickerid] = queue.Queue()
return self._my_np_dict[tickerid]
def init_hn(self, requestId):
self._my_hn_dict[requestId] = queue.Queue()
return self._my_hn_dict[requestId]
def init_na(self, requestId):
self._my_na_dict[requestId] = queue.Queue()
return self._my_na_dict[requestId]
def historicalData(self, tickerid, bar):
        ## Overridden method
## Note I'm choosing to ignore barCount, WAP and hasGaps but you could use them if you like
# pprint.pprint(bar.__dict__)
bardata = (bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
historic_data_dict = self._my_historic_data_dict
## Add on to the current data
if tickerid not in historic_data_dict.keys():
self.init_historicprices(tickerid)
historic_data_dict[tickerid].put(bardata)
def headTimestamp(self, tickerid, headTimestamp:str):
## overridden method
if tickerid not in self._my_earliest_timestamp_dict.keys():
self.init_earliest_timestamp(tickerid)
self._my_earliest_timestamp_dict[tickerid].put(headTimestamp)
self._my_earliest_timestamp_dict[tickerid].put(FINISHED)
def newsProviders(self, newsProviders):
## overridden method
tickerid = DEFAULT_GET_NP_ID
if tickerid not in self._my_np_dict.keys():
self.init_np(tickerid)
self._my_np_dict[tickerid].put(newsProviders)
self._my_np_dict[tickerid].put(FINISHED)
def historicalDataEnd(self, tickerid, start:str, end:str):
        ## overridden method
if tickerid not in self._my_historic_data_dict.keys():
self.init_historicprices(tickerid)
self._my_historic_data_dict[tickerid].put(FINISHED)
def historicalNews(self, requestId, time, providerCode, articleId, headline):
newsdata = (time, providerCode, articleId, headline)
newsdict = self._my_hn_dict
if requestId not in newsdict.keys():
self.init_hn(requestId)
newsdict[requestId].put(newsdata)
def historicalNewsEnd(self, requestId, hasMore):
if requestId not in self._my_hn_dict.keys():
self.init_hn(requestId)
if hasMore:
print('more results available')
self._my_hn_dict[requestId].put(FINISHED)
def newsArticle(self, requestId, articleType, articleText):
if requestId not in self._my_na_dict.keys():
self.init_na(requestId)
self._my_na_dict[requestId].put((articleType, articleText))
self._my_na_dict[requestId].put(FINISHED)
class TestClient(EClient):
"""
The client method
We don't override native methods, but instead call them from our own wrappers
"""
def __init__(self, wrapper):
## Set up with a wrapper inside
EClient.__init__(self, wrapper)
def resolve_ib_contract(self, ibcontract, reqId=DEFAULT_GET_CONTRACT_ID):
"""
From a partially formed contract, returns a fully fledged version
:returns fully resolved IB contract
"""
## Make a place to store the data we're going to return
contract_details_queue = finishableQueue(self.init_contractdetails(reqId))
print("Getting full contract details from the server... ")
self.reqContractDetails(reqId, ibcontract)
## Run until we get a valid contract(s) or get bored waiting
MAX_WAIT_SECONDS = 3
new_contract_details = contract_details_queue.get(timeout = MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if contract_details_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
if len(new_contract_details)==0:
print("Failed to get additional contract details: returning unresolved contract")
return ibcontract, new_contract_details
if len(new_contract_details)>1:
print("got multiple contracts; using first one")
new_contract_details = new_contract_details[0]
resolved_ibcontract = new_contract_details.contract
return resolved_ibcontract, new_contract_details
def get_IB_historical_data(self,
ibcontract,
whatToShow="ADJUSTED_LAST",
durationStr="1 Y",
barSizeSetting="1 day",
tickerid=DEFAULT_HISTORIC_DATA_ID,
latest_date=None):
"""
Returns historical prices for a contract, up to latest_date
        if latest_date is None, uses today's date
latest_date should be of form %Y%m%d %H:%M:%S %Z
ibcontract is a Contract
:returns list of prices in 4 tuples: Open high low close volume
"""
# set latest_date to today and now if it is None
if latest_date is None:
latest_date = get_latest_date_local()
## Make a place to store the data we're going to return
historic_data_queue = finishableQueue(self.init_historicprices(tickerid))
# Request some historical data. Native method in EClient
self.reqHistoricalData(
tickerid, # tickerId,
ibcontract, # contract,
latest_date, # endDateTime,
durationStr, # durationStr,
barSizeSetting, # barSizeSetting,
whatToShow=whatToShow,
useRTH=1,
formatDate=1,
keepUpToDate=False, # <<==== added for api 9.73.2
chartOptions=[] ## chartOptions not used
)
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
while True:
print("Getting historical data from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
historic_data = historic_data_queue.get(timeout=MAX_WAIT_SECONDS)
er = ''
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'Not connected' in er:
print('sleeping 30s to wait for reconnection; suggest restarting TWS')
time.sleep(30)
if "HMDS query returned no data" in er:
print(historic_data)
print(historic_data is None)
if historic_data_queue.timed_out() and er is None:
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
# only keep trying if not connected
if not 'Not connected' in er:
break
# TODO: this is cancelling query early maybe?
self.cancelHistoricalData(tickerid)
# convert to pandas dataframe
# date, open, high, low, close, vol
# already adjusted for splits
if len(historic_data) != 0:
df = pd.DataFrame.from_records(data=historic_data, index='datetime', columns=['datetime', 'open', 'high', 'low', 'close', 'volume'])
df.index = pd.to_datetime(df.index)
if whatToShow not in ['TRADES', 'ADJUSTED_LAST']:
# volume only available for trades
df.drop('volume', axis=1, inplace=True)
return df
else:
return historic_data
def getEarliestTimestamp(self, contract, whatToShow='ADJUSTED_LAST', useRTH=1, formatDate=1, tickerid=DEFAULT_GET_EARLIEST_ID):
# parameters: https://interactivebrokers.github.io/tws-api/classIBApi_1_1EClient.html#a059b5072d1e8e8e96394e53366eb81f3
## Make a place to store the data we're going to return
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
tries = 0
while True:
tries += 1
earliest_timestamp_queue = finishableQueue(self.init_earliest_timestamp(tickerid))
self.reqHeadTimeStamp(tickerid, contract, whatToShow, useRTH, formatDate)
print("Getting earliest timestamp from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
earliest = earliest_timestamp_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
er = self.get_error()
print(er)
if 'No head time stamp' in er:
return None
break
if earliest_timestamp_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
self.cancelHeadTimeStamp(tickerid)
            if len(earliest) != 0 or tries == 20:
                break
        # return the first element received, or None if nothing came back after 20 tries
        return earliest[0] if len(earliest) != 0 else None
def getNewsProviders(self):
"""
available news providers by default are
[140007057343600: BRFG, Briefing.com General Market Columns,
140007057342704: BRFUPDN, Briefing.com Analyst Actions,
140007057343544: DJNL, Dow Jones Newsletters]
"""
## Make a place to store the data we're going to return
tickerid = DEFAULT_GET_NP_ID
np_queue = finishableQueue(self.init_np(tickerid))
# Request news providers. Native method in EClient
self.reqNewsProviders()
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 2
print("Getting list of news providers from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
nps = np_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if np_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return nps[0] # list within a list
def getHistoricalNews(self, reqId, conId, providerCodes, startDateTime, endDateTime, totalResults):
hn_queue = finishableQueue(self.init_hn(reqId))
self.reqHistoricalNews(reqId, conId, providerCodes, startDateTime, endDateTime, totalResults, historicalNewsOptions=[])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 15
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
hn = hn_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if hn_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return hn
def getNewsArticle(self, reqId, providerCode, articleId):
na_queue = finishableQueue(self.init_na(reqId))
self.reqNewsArticle(reqId, providerCode, articleId, [])
## Wait until we get a completed data, an error, or get bored waiting
MAX_WAIT_SECONDS = 5
print("Getting historical news from the server... could take %d seconds to complete " % MAX_WAIT_SECONDS)
na = na_queue.get(timeout=MAX_WAIT_SECONDS)
while self.wrapper.is_error():
print(self.get_error())
if na_queue.timed_out():
print("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
return na
class TestApp(TestWrapper, TestClient):
def __init__(self, ipaddress, portid, clientid):
TestWrapper.__init__(self)
TestClient.__init__(self, wrapper=self)
self.connect(ipaddress, portid, clientid)
thread = Thread(target = self.run)
thread.start()
setattr(self, "_thread", thread)
self.init_error()
def get_hist_data_date_range(self,
ibcontract,
whatToShow='TRADES',
barSizeSetting='3 mins',
start_date=None,
end_date=None,
tickerid=DEFAULT_HISTORIC_DATA_ID):
"""
gets historic data for date range
if start_date is None, then first finds earliest date available,
and gets all data to there
if end_date is None, will get data to latest possible time
start_date and end_date should be strings in format YYYYMMDD
useful options for whatToShow for stocks can be:
ADJUSTED_LAST (adj for splits and dividends)
TRADES (only adjusted for splits)
BID
ASK
OPTION_IMPLIED_VOLATILITY
HISTORICAL_VOLATILITY
"""
# convert start_date string to datetime date object for comparisons
start_date_datetime_date = pd.to_datetime('1800-01-01').date() # early date so it doesn't match df.index.date below (if not updating data)
if start_date is not None:
# go one day past start date just to make sure we have all data
start_date_datetime_date = (pd.to_datetime(start_date) - pd.Timedelta('1D')).date()
smallbars = ['1 secs', '5 secs', '10 secs', '15 secs', '30 secs', '1 min']
max_step_sizes = {'1 secs': '1800 S', # 30 mins
'5 secs': '3600 S', # 1 hour
'10 secs': '14400 S', # 4 hours
'15 secs': '14400 S', # 4 hours
'30 secs': '28800 S', # 8 hours
'1 min': '1 D',
'2 mins': '2 D',
'3 mins': '1 W',
'5 mins': '1 W',
'10 mins': '1 W',
'15 mins': '1 W',
'20 mins': '1 W',
'30 mins': '1 M',
'1 hour': '1 M',
'2 hours': '1 M',
'3 hours': '1 M',
'4 hours': '1 M',
'8 hours': '1 M',
'1 day': '1 Y',
'1 week': '1 Y',
'1 month': '1 Y'}
# TODO: check if earliest timestamp is nothing or before/after end_date
earliest_timestamp = self.getEarliestTimestamp(ibcontract, whatToShow=whatToShow, tickerid=tickerid)
if earliest_timestamp is not None:
earliest_datestamp = earliest_timestamp[:8]
# if timeout, will return empty list
df = []
if end_date is None:
latest_date = None
else:
            # TODO: need to adapt this to time zones other than mountain time
latest_date = end_date + ' ' + get_close_hour_local() + ':00:00'
# list is returned if there is an error or something?
tries = 0
while type(df) is list:
tries += 1
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=latest_date)
if tries == 10:
                print('tried to get historic data 10x and failed, returning None')
return None
earliest_date = df.index[0]
full_df = df
self.df = full_df
df_dates = df.index.date
# keep going until the same result is returned twice...not perfectly efficient but oh well
previous_earliest_date = None
i = 0
start_time = time.time()
is_list = 0
while previous_earliest_date != earliest_date:
i += 1
print(i)
print(previous_earliest_date)
print(earliest_date)
# TODO: if "HMDS query returned no data" in error lots of times, maybe finish it
df = self.get_IB_historical_data(ibcontract,
whatToShow=whatToShow,
durationStr=max_step_sizes[barSizeSetting],
barSizeSetting=barSizeSetting,
tickerid=tickerid,
latest_date=earliest_date.strftime('%Y%m%d %H:%M:%S'))
if type(df) is list:
is_list += 1
# we've probably hit the earliest time we can get
if earliest_timestamp is not None:
if is_list >= 3 and earliest_date.date().strftime('%Y%m%d') == earliest_datestamp:
print("hit earliest timestamp")
break
if is_list >= 10:
print('hit 10 lists in a row')
break
df_dates = None
continue
else:
is_list = 0
previous_earliest_date = earliest_date
earliest_date = df.index[0]
full_df = pd.concat([df, full_df])
self.df = full_df
df_dates = df.index.date
if df_dates.min() <= start_date_datetime_date:
print('start_date_datetime in dates, ending')
break
# no more than 6 requests every 2s for bars under 30s
# https://interactivebrokers.github.io/tws-api/historical_limitations.html
# TODO: take care of 60 requests per 10 mins
if barSizeSetting in smallbars and i >= 6:
                time_left = max(0, 2 - (time.time() - start_time))
                i = 0
                time.sleep(time_left)
return full_df
def get_stock_contract(self, ticker='SNAP', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
gets resolved IB contract for stocks
assumes ISLAND exchange for now (NASDAQ and maybe others?)
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.secType = 'STK'
# get todays date, format as YYYYMMDD -- need to check this is correct
# today = datetime.datetime.today().strftime('%Y%m%d')
# ibcontract.lastTradeDateOrContractMonth = '20180711'#today
ibcontract.symbol = ticker
ibcontract.exchange = 'ISLAND'
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def get_otc_contract(self, ticker='SNAP', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
        gets resolved IB contract for OTC / pink sheet stocks
        assumes ARCAEDGE exchange
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.secType = 'STK'
# get todays date, format as YYYYMMDD -- need to check this is correct
# today = datetime.datetime.today().strftime('%Y%m%d')
# ibcontract.lastTradeDateOrContractMonth = '20180711'#today
ibcontract.symbol = ticker
ibcontract.exchange = 'ARCAEDGE'
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def get_forex_contract(self, main_currency='USD', second_currency='JPY', reqId=DEFAULT_HISTORIC_DATA_ID):
"""
        gets resolved IB contract for a forex pair
        assumes IDEALPRO exchange
"""
# available sec types: https://interactivebrokers.github.io/tws-api/classIBApi_1_1Contract.html#a4f83111c0ea37a19fe1dae98e3b67456
ibcontract = IBcontract()
ibcontract.symbol = 'EUR'#second_currency
ibcontract.secType = "CASH"
ibcontract.currency = 'GBP'#main_currency
ibcontract.exchange = "IDEALPRO"
resolved_ibcontract, contract_details = self.resolve_ib_contract(ibcontract=ibcontract, reqId=reqId)
return resolved_ibcontract, contract_details
def download_all_history_stock(self, ticker='SNAP', barSizeSetting='3 mins', reqId=DEFAULT_HISTORIC_DATA_ID, what='TRADES', exchange='ISLAND'):
"""
downloads all historical data for a stock including
TRADES or ADJUSTED_LAST
BID
ASK
OPTION_IMPLIED_VOLATILITY
if data already exists, updates and appends to it
'what' parameter can be 'ADJUSTED_LAST' or 'TRADES'.
ADJUSTED_LAST is the dividend-adjusted prices; trades is only split-adjusted
"""
if exchange == 'ISLAND': # NASDAQ / regular stocks
contract, contract_details = self.get_stock_contract(ticker=ticker, reqId=reqId)
elif exchange == 'ARCAEDGE': # OTC / PINK
contract, contract_details = self.get_otc_contract(ticker=ticker, reqId=reqId)
if what == 'TRADES':
folder = '/home/nate/Dropbox/data/ib_full_adj/data/'
elif what == 'ADJUSTED_LAST':
folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/'
trades_start_date = None
bids_start_date = None
asks_start_date = None
opt_vol_start_date = None
tr_mode = 'w'
bid_mode = 'w'
ask_mode = 'w'
opt_vol_mode = 'w'
bss = barSizeSetting.replace(' ', '_')
trades_filename = folder + ticker + '_trades_' + bss + '.h5'
bid_filename = folder + ticker + '_bid_' + bss + '.h5'
ask_filename = folder + ticker + '_ask_' + bss + '.h5'
opt_vol_filename = folder + ticker + '_opt_vol_' + bss + '.h5'
# TODO: provide option for which files to download;
# check each file individually and update individually
if os.path.exists(trades_filename):
print('trades file exists, going to append...')
cur_trades = pd.read_hdf(trades_filename)
latest_trades_datetime = cur_trades.index[-1]
trades_start_date = latest_trades_datetime.strftime('%Y%m%d')
tr_mode = 'r+'
print('latest trades date is', trades_start_date)
if os.path.exists(bid_filename):
print('bids file exists, going to append')
cur_bids = pd.read_hdf(bid_filename)
latest_bids_datetime = cur_bids.index[-1]
bids_start_date = latest_bids_datetime.strftime('%Y%m%d')
bid_mode='r+'
if os.path.exists(ask_filename):
print('asks filename exists, going to append')
cur_asks = pd.read_hdf(ask_filename)
latest_asks_datetime = cur_asks.index[-1]
asks_start_date = latest_asks_datetime.strftime('%Y%m%d')
ask_mode='r+'
if os.path.exists(opt_vol_filename):
            print('opt_vol file exists, going to append')
cur_opt_vol = pd.read_hdf(opt_vol_filename)
latest_opt_vol_datetime = cur_opt_vol.index[-1]
opt_vol_start_date = latest_opt_vol_datetime.strftime('%Y%m%d')
opt_vol_mode = 'r+' # append to existing files, should throw error if they don't exist
end_date = None#'20170401' # smaller amount of data for prototyping/testing
print('\n\n\ngetting trades...\n\n\n')
trades = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow=what, end_date=end_date, start_date=trades_start_date, tickerid=reqId)
if trades is not None:
# write or append data
# TODO: function for cleaning up data and remove duplicates, sort data
# TODO: only append things after the latest datetime, and do it for trades, bid, etc separately
# if appending, get next index after latest existing datetime
tr_append = False # need to set option in to_hdf
bid_append = False
ask_append = False
opt_vol_append = False
if tr_mode == 'r+':
next_trades_idx = trades.loc[latest_trades_datetime:]
if next_trades_idx.shape[0] <= 1 or cur_trades.iloc[-1].equals(trades.iloc[-1]):
print('already have all the data I think for trades')
# return
else:
next_trades_idx = next_trades_idx.index[1]
trades = trades.loc[next_trades_idx:]
tr_append=True
trades.to_hdf(trades_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=tr_mode, append=tr_append)
else:
trades.to_hdf(trades_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=tr_mode, append=tr_append)
print('\n\n\ngetting bids...\n\n\n')
bid = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='BID', end_date=end_date, start_date=bids_start_date, tickerid=reqId)
if bid is not None:
if bid_mode == 'r+':
next_bids_idx = bid.loc[latest_bids_datetime:]
if next_bids_idx.shape[0] <= 1 or cur_bids.iloc[-1].equals(bid.iloc[-1]):
print('already have all bids data I think')
else:
next_bids_idx = next_bids_idx.index[1]
bid = bid.loc[next_bids_idx:]
bid_append=True
bid.to_hdf(bid_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=bid_mode, append=bid_append)
else:
bid.to_hdf(bid_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=bid_mode, append=bid_append)
print('\n\n\ngetting asks...\n\n\n')
ask = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='ASK', end_date=end_date, start_date=asks_start_date, tickerid=reqId)
if ask is not None:
if ask_mode == 'r+':
next_asks_idx = ask.loc[latest_asks_datetime:]
if next_asks_idx.shape[0] <= 1 or cur_asks.iloc[-1].equals(ask.iloc[-1]):
print('already have all asks data I think')
else:
next_asks_idx = next_asks_idx.index[1]
ask = ask.loc[next_asks_idx:]
ask_append = True
ask.to_hdf(ask_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=ask_mode, append=ask_append)
else:
ask.to_hdf(ask_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=ask_mode, append=ask_append)
print('\n\n\ngetting opt_vol...\n\n\n')
opt_vol = self.get_hist_data_date_range(contract, barSizeSetting=barSizeSetting, whatToShow='OPTION_IMPLIED_VOLATILITY', end_date=end_date, start_date=opt_vol_start_date, tickerid=reqId)
if opt_vol is not None:
if opt_vol_mode == 'r+':
# TODO: doesn't seem to be working properly for opt_vol, seems to append every time
next_opt_vol_idx = opt_vol.loc[latest_opt_vol_datetime:]
if next_opt_vol_idx.shape[0] <= 1 or cur_opt_vol.iloc[-1].equals(opt_vol.iloc[-1]):
print('already have all opt_vol data I think')
else:
next_opt_vol_idx = next_opt_vol_idx.index[1]
opt_vol = opt_vol.loc[next_opt_vol_idx:]
opt_vol_append = True
opt_vol.to_hdf(opt_vol_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=opt_vol_mode, append=opt_vol_append)
else:
opt_vol.to_hdf(opt_vol_filename, key='data', format='table', complevel=9, complib='blosc:lz4', mode=opt_vol_mode, append=opt_vol_append)
def get_earliest_dates(self, ticker):
contract, contract_details = self.get_stock_contract(ticker=ticker)
for t in ['ADJUSTED_LAST', 'BID', 'ASK', 'OPTION_IMPLIED_VOLATILITY']:
earliest = self.getEarliestTimestamp(contract, tickerid=200)
print(t)
print(earliest)
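# Hedged usage sketch (not part of the original gist): host, port and client id below are
# assumptions -- they depend on how your local IB Gateway / TWS instance is configured
# (the paper-trading gateway commonly listens on 4002). Defined here but never called.
def _example_ib_session():
    app = TestApp('127.0.0.1', 4001, 1)
    app.download_all_history_stock(ticker='SNAP', barSizeSetting='3 mins', what='TRADES')
    app.disconnect()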
def get_datetime_from_date(date='2018-06-30'):
"""
not sure if I need this anymore...
converts a date to a datetime (end-of-day) for historical data gathering
date should be a string in format YYYYMMDD
uses eastern timezone (EDT or EST) by default
TODO: convert eastern to local timezone from machine
"""
tz='US/Eastern'
tz_obj = timezone(tz)
date = datetime.datetime.strptime(date, '%Y-%m-%d')
date = date.replace(hour = 16, minute = 0, second = 0)
date = tz_obj.localize(date)
return date.strftime('%Y%m%d %H:%M:%S %Z')
def get_latest_date_local():
"""
gets the latest date with the machine's local timezone
endDateTime and startDateTime "Uses TWS timezone specified at login."
at least for tick-by-tick data
"""
machines_tz = datetime.datetime.now(datetime.timezone.utc).astimezone().tzname()
latest_date = datetime.datetime.today()
# doesn't work with machines tz in there
latest_date = latest_date.strftime('%Y%m%d %H:%M:%S')# + machines_tz
return latest_date
def get_close_hour_local():
"""
gets closing hour in local machine time (4 pm Eastern)
"""
eastern_tz = timezone('US/Eastern')
eastern_close = datetime.datetime(year=2018, month=6, day=29, hour=16)
eastern_close = eastern_tz.localize(eastern_close)
return str(eastern_close.astimezone().hour)
def get_home_dir(repo_name='scrape_ib'):
cwd = str(Path(__file__).resolve())
cwd_list = cwd.split('/')
repo_position = [i for i, s in enumerate(cwd_list) if s == repo_name]
if len(repo_position) > 1:
print("error! more than one intance of repo name in path")
return None
home_dir = '/'.join(cwd_list[:repo_position[0] + 1]) + '/'
return home_dir
def load_data(ticker='SNAP', barSizeSetting='3 mins', what='TRADES'):
"""
loads historical tick data
"""
if what == 'TRADES':
folder = '/home/nate/Dropbox/data/ib_full_adj/data/'
elif what == 'ADJUSTED_LAST':
folder = '/home/nate/Dropbox/data/ib_split_adj_only/data/'
bss = barSizeSetting.replace(' ', '_')
trades = pd.read_hdf(folder + ticker + '_trades_' + bss + '.h5')
# fill 0 volume with 1
trades.at[trades['volume'] == 0, 'volume'] = 1
bid = pd.read_hdf(folder + ticker + '_bid_' + bss + '.h5')
ask = pd.read_hdf(folder + ticker + '_ask_' + bss + '.h5')
opt_vol = pd.read_hdf(folder + ticker + '_opt_vol_' + bss + '.h5')
# drop duplicates just in case...dupes throw off concat
trades.drop_duplicates(inplace=True)
bid.drop_duplicates(inplace=True)
ask.drop_duplicates(inplace=True)
opt_vol.drop_duplicates(inplace=True)
# sometimes with dupes, index is no longer sorted
trades.sort_index(inplace=True)
bid.sort_index(inplace=True)
ask.sort_index(inplace=True)
opt_vol.sort_index(inplace=True)
# TODO: find opt_vol and other files with problems
# e.g. found BOX opt_vol file had some price data in it
# look for outliers or matches within other DFs, then delete messed up DFs
# rename columns so can join to one big dataframe
bid.columns = ['bid_' + c for c in bid.columns]
ask.columns = ['ask_' + c for c in ask.columns]
opt_vol.columns = ['opt_vol_' + c for c in opt_vol.columns]
# inner join should drop na's but just to be safe
# opt_vol has missing values at the end of each day for some reason...
# so cant do inner join or dropna
full_df = pd.concat([trades, bid, ask, opt_vol], axis=1)#, join='inner').dropna()
full_df.index = full_df.index.tz_localize('America/New_York')
return full_df
def make_feats_targs_individ_df(df, future_gap_idx_steps, future_span_idx_steps, feature_span, feature_span_idx_steps):
"""
"""
targets = df['close'].pct_change(future_span_idx_steps).shift(-future_gap_idx_steps - future_span_idx_steps)
pct_change_features = df.copy().pct_change(feature_span_idx_steps, axis=0)
pct_change_features.columns = [c + '_' + str(feature_span) + '_min_pct_chg' for c in pct_change_features.columns]
df['targets'] = targets
# inner join should drop na's but just to be safe
feats_targs = pd.concat([df, pct_change_features], axis=1, join='inner').dropna()
feat_cols = [c for c in feats_targs.columns if c != 'targets']
return feats_targs[feat_cols], feats_targs['targets']
def make_features_targets(full_df, future_gap=0, future_span=15, feature_span=15, intraday=True):
"""
uses close price to make targets -- percent change over certain time in future
features are percent change of other columns as well as raw values
future_gap is number of minutes between current time and start of future pct_change
future_span is number of minutes to calculate price percent change
feature_span is number of minutes to calculate pct change of everything in df
intraday is boolean; if True, will only get features/targs within each day
and not extending over close/open times
"""
# copy full_df so we don't modify it
full_df_copy = full_df.copy()
# get number of minutes between timesteps -- won't work if not integer minutes
minute_gap = (full_df_copy.index[1] - full_df_copy.index[0]).seconds // 60
future_gap_idx_steps = future_gap // minute_gap
future_span_idx_steps = future_span // minute_gap
feature_span_idx_steps = feature_span // minute_gap
# TODO: use dask or multicore/multithread
if intraday:
# get dataframes for each day and make feats targs, then join
days = [idx.date() for idx in full_df_copy.index]
unique_days = np.unique(days)
all_feats, all_targs = [], []
for d in tqdm(unique_days):
df = full_df_copy[full_df_copy.index.date == d].copy()
d_feats, d_targs = make_feats_targs_individ_df(df,
future_gap_idx_steps=future_gap_idx_steps,
future_span_idx_steps=future_span_idx_steps,
feature_span=feature_span,
feature_span_idx_steps=feature_span_idx_steps)
all_feats.append(d_feats)
all_targs.append(d_targs)
return
|
pd.concat(all_feats)
|
pandas.concat
|
'''
Tools for simple baseline/benchmark forecasts
These methods might serve as the forecast themselves, but are more likely
to be used as a baseline to evaluate if more complex models offer a sufficient
increase in accuracy to justify their use.
Naive1:
Carry last value forward across forecast horizon (random walk)
SNaive:
Carry forward value from last seasonal period
Average:
Carry forward average of observations
Drift:
Carry forward last time period, but allow for upwards/downwards drift.
EnsembleNaive:
An unweighted average of all of the Naive forecasting methods.
'''
import numpy as np
import pandas as pd
from scipy.stats import norm, t
from abc import ABC, abstractmethod
# Boolean, unsigned integer, signed integer, float, complex.
_NUMERIC_KINDS = set('buifc')
def is_numeric(array):
"""Determine whether the argument has a numeric datatype, when
converted to a NumPy array.
Booleans, unsigned integers, signed integers, floats and complex
numbers are the kinds of numeric datatype.
source:
https://codereview.stackexchange.com/questions/128032/check-if-a-numpy-array-contains-numerical-data
Parameters
----------
array : array-like
The array to check.
Returns
-------
is_numeric : `bool`
True if the array has a numeric datatype, False if not.
"""
return np.asarray(array).dtype.kind in _NUMERIC_KINDS
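# Illustrative examples (added, not in the original docstring):
# is_numeric(np.array([1, 2, 3]))   -> True  (integer kind 'i')
# is_numeric(np.array(['a', 'b']))  -> False (unicode kind 'U')
# is_numeric([0.5, np.nan])         -> True  (floats, NaN included)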
class Forecast(ABC):
'''
Abstract base class for all baseline forecast
methods
'''
def __init__(self):
self._fitted = None
self._t = None
def _get_fitted(self):
return self._fitted['pred']
def _get_resid(self):
return self._fitted['resid']
@abstractmethod
def fit(self, train):
pass
def fit_predict(self, train, horizon, return_predict_int=False,
alpha=None):
'''
Convenience method. Fit model and predict with one call.
Parameters:
---------
train: array-like,
vector, series, or dataframe of the time series used for training.
Values should be floats and not contain any np.nan or np.inf
horizon: int,
forecast horizon.
return_predict_int: bool, optional (default=False)
If True function will return a Tuple
0: point forecasts (mean)
1: matrix of intervals.
alpha: None, or list of floats, optional (default=None)
List of floats between 0 and 1. If return_predict_int == True this
specifies the 100(1-alpha) prediction intervals to return.
Returns:
------
np.array, vector of predictions. length=horizon
'''
self.fit(train)
return self.predict(horizon, return_predict_int=return_predict_int,
alpha=alpha)
def validate_training_data(self, train, min_length=1):
'''
Checks the validity of training data for forecasting
and raises exceptions if required.
1. check is instance of pd.Series, pd.DataFrame or np.ndarray
2. check len is > min_length
Parameters:
---------
min_length: int optional (default=0)
minimum length of the time series.
'''
if not isinstance(train, (pd.Series, pd.DataFrame, np.ndarray)):
raise TypeError(
'Training data must be pd.Series, pd.DataFrame or np.ndarray')
elif len(train) < min_length:
raise ValueError('Training data is empty')
elif not is_numeric(train):
raise TypeError('Training data must be numeric')
elif np.isnan(np.asarray(train)).any():
raise TypeError(
'Training data contains at least one NaN. '
                + 'Data must all be floats')
elif np.isinf(np.asarray(train)).any():
raise TypeError(
'Training data contains at least one Infinite '
                + 'value (np.Inf). Data must all be floats')
@abstractmethod
def predict(self, horizon, return_predict_int=False, alpha=None):
pass
def _prediction_interval(self, horizon, alpha=None):
'''
Prediction intervals for naive forecast 1 (NF1)
lower = pred - z * std_h
upper = pred + z * std_h
where
std_h = resid_std * sqrt(h)
resid_std = standard deviation of in-sample residuals
h = horizon
See and credit: https://otexts.com/fpp2/prediction-intervals.html
        Pre-requisite: Must have called fit()
Parameters:
---------
horizon - int,
forecast horizon
levels - list,
list of floats representing prediction limits
            e.g. [0.80, 0.90, 0.95] will calculate three sets of prediction
intervals giving limits for which will include the actual future
value with probability 80, 90 and 95 percent,
respectively (default = [0.8, 0.95]).
Returns:
--------
list
np.array matricies that contain the lower and upper prediction
limits for each prediction interval specified.
'''
if alpha is None:
alpha = [0.20, 0.05]
zs = [self.interval_multiplier(1-a, self._t - 1) for a in alpha]
pis = []
std_h = self._std_h(horizon)
for z in zs:
hw = z * std_h
pis.append(np.array([self.predict(horizon) - hw,
self.predict(horizon) + hw]).T)
return pis
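    # Worked example (illustrative numbers, not taken from the source): for Naive1 with
    # in-sample resid_std = 2.0 and alpha = 0.05, interval_multiplier gives z ~= 1.96, so
    # at horizon h the half-width is 1.96 * 2.0 * sqrt(h), e.g. pred +/- 7.84 at h = 4.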
def interval_multiplier(self, level, dof):
'''
inverse of normal distribution
can be overridden if needed.
'''
x = norm.ppf((1 - level) / 2)
return np.abs(x)
@abstractmethod
def _std_h(self, horizon):
'''
Calculate the standard error of the residuals over
a forecast horizon. This is method specific.
'''
pass
# breaks PEP8 to align with statsmodels naming
fittedvalues = property(_get_fitted)
resid = property(_get_resid)
class Naive1(Forecast):
'''
    Naive forecast 1 or NF1: Carry the last value forward across a
forecast horizon
For details and theory see [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.SNaive
forecast_tools.baseline.Drift
forecast_tools.baseline.Average
forecast_tools.baseline.EnsembleNaive
References:
----------
[1]. https://otexts.com/fpp2/simple-methods.html
Examples:
--------
Basic fitting and prediction
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit(y_train)
>>> model.predict(horizon=7)
    array([9., 9., 9., 9., 9., 9., 9.])
fit_predict() convenience method
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit_predict(y_train, horizon=7)
    array([9., 9., 9., 9., 9., 9., 9.])
80 and 95% prediction intervals
>>> y_train = np.arange(10)
>>> model = Naive1()
>>> model.fit(y_train)
>>> y_pred, y_intervals = model.predict(horizon=2,
                                            return_predict_int=True,
alpha=[0.1, 0.05])
>>> y_pred
    array([9., 9.])
>>> y_intervals[0]
array([[ 7.71844843, 10.28155157],
[ 7.1876124 , 10.8123876 ]])
>>> y_intervals[1]
array([[ 7.35514637, 10.64485363],
[ 6.67382569, 11.32617431]])
Fitted values (one step in-sample predictions)
    .fittedvalues returns a pandas.Series called pred
>>> y_train = np.arange(5)
>>> model = Naive1()
>>> model.fit(y_train)
>>> model.fittedvalues
0 NaN
1 0.0
2 1.0
3 2.0
4 3.0
Name: pred, dtype: float64
'''
def __init__(self):
'''
Constructor method
Parameters:
-------
level - list,
confidence levels for prediction intervals (e.g. [90, 95])
'''
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'Naive1()'
def __str__(self):
'''
Print/str representation of object
'''
return f'Naive1()'
def fit(self, train):
'''
Train the naive model
Parameters:
--------
train - array-like,
vector, series, or dataframe of the time series used for training.
Values should be floats and not contain any np.nan or np.inf
'''
self.validate_training_data(train)
_train = np.asarray(train)
self._pred = _train[-1]
self._fitted = pd.DataFrame(_train)
if isinstance(train, (pd.DataFrame, pd.Series)):
self._fitted.index = train.index
self._t = len(_train)
self._fitted.columns = ['actual']
self._fitted['pred'] = self._fitted['actual'].shift(periods=1)
self._fitted['resid'] = self._fitted['actual'] - self._fitted['pred']
self._resid_std = np.sqrt(np.nanmean(np.square(self._fitted['resid'])))
def predict(self, horizon, return_predict_int=False, alpha=None):
'''
Forecast and optionally produce 100(1-alpha) prediction intervals.
Prediction intervals for naive forecast 1 (NF1)
lower = pred - z * std_h
upper = pred + z * std_h
where
std_h = resid_std * sqrt(h)
resid_std = standard deviation of in-sample residuals
h = horizon
See and credit: https://otexts.com/fpp2/prediction-intervals.html
        Pre-requisite: Must have called fit()
Parameters:
--------
horizon - int,
forecast horizon.
return_predict_int: bool, optional
if True calculate 100(1-alpha) prediction
intervals for the forecast. (default=False)
alpha: list of floats, optional (default=None)
controls set of prediction intervals returned and the width of
each.
Intervals are 100(1-alpha) in width. e.g. [0.2, 0.1]
would return the 80% and 90% prediction intervals of the forecast
distribution. default=None. When return_predict_int = True the
default behaviour is to return 80 and 90% intervals.
Returns:
-------
if return_predict_int = False
np.array, vector of predictions. length=horizon
if return_predict_int = True then returns a tuple.
0. np.array, vector of predictions. length=horizon
1. list of numpy.array[lower_pi, upper_pi].
One for each prediction interval.
'''
if self._fitted is None:
raise UnboundLocalError('Must call fit() prior to predict()')
if alpha is None:
alpha = [0.2, 0.1]
preds = np.full(shape=horizon, fill_value=self._pred, dtype=float)
if return_predict_int:
return preds, self._prediction_interval(horizon, alpha)
else:
return preds
def _std_h(self, horizon):
'''
Calculate the sample standard deviation.
'''
indexes = np.sqrt(np.arange(1, horizon+1))
std = np.full(shape=horizon,
fill_value=self._resid_std,
                      dtype=float)
std_h = std * indexes
return std_h
class SNaive(Forecast):
'''
Seasonal Naive Forecast SNF
Each forecast to be equal to the last observed value from the
same season of the year (e.g., the same month of the previous year).
SNF is useful for highly seasonal data. See [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.Naive1
forecast_tools.baseline.Drift
forecast_tools.baseline.Average
forecast_tools.baseline.EnsembleNaive
References:
-----------
[1]. https://otexts.com/fpp2/simple-methods.html
'''
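    # Illustrative example (added; numbers are synthetic): with period=7 (weekly data),
    # SNaive(7).fit_predict(np.arange(21), horizon=9) repeats the last 7 observations:
    # array([14., 15., 16., 17., 18., 19., 20., 14., 15.])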
def __init__(self, period):
'''
Parameters:
--------
        period - int, the seasonal period of the data
e.g. weekly = 7, monthly = 12, daily = 24
'''
self._period = period
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
        return f'SNaive(period={self._period})'
def __str__(self):
'''
Print/str representation of object
'''
        return f'SNaive(period={self._period})'
def fit(self, train):
'''
Seasonal naive forecast - train the model
Parameters:
--------
train: array-like.
vector, pd.DataFrame or pd.Series containing the time series used
for training. Values should be floats and not contain any np.nan
or np.inf
'''
self.validate_training_data(train, min_length=self._period)
# could refactor this to be more like Naive1's simpler implementation.
if isinstance(train, (pd.Series)):
self._f = np.asarray(train)[-self._period:]
_train = train.to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
elif isinstance(train, (pd.DataFrame)):
self._f = train.to_numpy().T[0][-self._period:]
_train = train.copy()[train.columns[0]].to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
else:
self._f = train[-self._period:]
_train = train.copy()
self._fitted = pd.DataFrame(_train)
self._t = len(_train)
self._fitted.columns = ['actual']
self._fitted['pred'] = self._fitted['actual'].shift(self._period)
self._fitted['resid'] = self._fitted['actual'] - self._fitted['pred']
self._resid_std = np.sqrt(np.nanmean(np.square(self._fitted['resid'])))
def predict(self, horizon, return_predict_int=False, alpha=None):
'''
Predict time series over a horizon
Parameters:
--------
horizon - int,
forecast horizon.
return_predict_int: bool, optional
if True calculate 100(1-alpha) prediction
intervals for the forecast. (default=False)
alpha: list of floats, optional (default=None)
controls set of prediction intervals returned and the width of
each.
Intervals are 100(1-alpha) in width. e.g. [0.2, 0.1]
would return the 80% and 90% prediction intervals of the forecast
distribution. default=None. When return_predict_int = True the
default behaviour is to return 80 and 90% intervals.
Returns:
--------
if return_predict_int = False
np.array, vector of predictions. length=horizon
if return_predict_int = True then returns a tuple.
0. np.array, vector of predictions. length=horizon
1. list of numpy.array[lower_pi, upper_pi].
One for each prediction interval.
'''
if self._fitted is None:
raise UnboundLocalError('Must call fit() prior to predict()')
if alpha is None:
alpha = [0.2, 0.1]
preds = np.array([], dtype=float)
for _ in range(0, int(horizon/self._period)):
preds = np.concatenate([preds, self._f.copy()], axis=0)
preds = np.concatenate([preds,
self._f.copy()[:horizon % self._period]],
axis=0)
if return_predict_int:
return preds, self._prediction_interval(horizon, alpha)
else:
return preds
def _std_h(self, horizon):
h = np.arange(1, horizon+1)
# need to query if should be +1 or not.
return self._resid_std * \
            np.sqrt(((h - 1) / self._period).astype(int)+1)
class Average(Forecast):
'''
Average forecast. Forecast is set to the average
of the historical data.
    See for discussion of the average as a forecast measure [1]
Attributes
----------
fittedvalues: pd.Series
In-sample predictions of training data
resid: pd.Series
In-sample residuals
Methods
-------
fit(train)
fit the model to training data
predict(horizon, return_predict_int=False, alpha=None)
Predict h-steps ahead
fit_predict(train, horizons, return_predict_int=False, alpha=None)
convenience method. combine fit() and predict()
See Also
--------
forecast_tools.baseline.Naive1
forecast_tools.baseline.SNaive
forecast_tools.baseline.Drift
forecast_tools.baseline.EnsembleNaive
References:
-----------
[1.] Makridakis, Wheelwright and Hyndman. Forecasting (1998)
'''
def __init__(self):
self._pred = None
self._fitted = None
def __repr__(self):
'''
String representation of object
'''
return f'Average()'
def __str__(self):
'''
Print/str representation of object
'''
return f'Average()'
def _get_fitted(self):
return self._fitted['pred']
def _get_resid(self):
return self._fitted['resid']
def fit(self, train):
'''
Train the model
Parameters:
--------
train: arraylike
vector, pd.series, pd.DataFrame,
Time series used for training. Values should be floats
and not contain any np.nan or np.inf
'''
self.validate_training_data(train)
if isinstance(train, (pd.DataFrame)):
_train = train.copy()[train.columns[0]].to_numpy()
self._fitted = pd.DataFrame(_train, index=train.index)
elif isinstance(train, (pd.Series)):
_train = train.to_numpy()
self._fitted =
|
pd.DataFrame(_train, index=train.index)
|
pandas.DataFrame
|
"""
Plot the geospatial results by country.
Written by <NAME>.
February 2022
"""
import os
import sys
import configparser
import numpy as np
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import seaborn as sns
import contextily as cx
import geopy as gp
CONFIG = configparser.ConfigParser()
CONFIG.read(os.path.join(os.path.dirname(__file__), '..', 'scripts', 'script_config.ini'))
BASE_PATH = CONFIG['file_locations']['base_path']
DATA_RAW = os.path.join(BASE_PATH, 'raw')
DATA_PROCESSED = os.path.join(BASE_PATH, 'processed')
# assumed path: DATA_INTERMEDIATE is referenced below but was not defined in this script
DATA_INTERMEDIATE = os.path.join(BASE_PATH, 'intermediate')
RESULTS = os.path.join(BASE_PATH, '..', 'results')
VIS = os.path.join(BASE_PATH, '..', 'vis', 'figures')
REPORTS = os.path.join(BASE_PATH, '..', 'reports', 'images')
def get_regional_shapes():
"""
Load regional shapes.
"""
output = []
for item in os.listdir(DATA_INTERMEDIATE):#[:15]:
if len(item) == 3: # we only want iso3 code named folders
filename_gid2 = 'regions_2_{}.shp'.format(item)
path_gid2 = os.path.join(DATA_INTERMEDIATE, item, 'regions', filename_gid2)
filename_gid1 = 'regions_1_{}.shp'.format(item)
path_gid1 = os.path.join(DATA_INTERMEDIATE, item, 'regions', filename_gid1)
if os.path.exists(path_gid2):
data = gpd.read_file(path_gid2)
data['GID_id'] = data['GID_2']
data = data.to_dict('records')
elif os.path.exists(path_gid1):
data = gpd.read_file(path_gid1)
data['GID_id'] = data['GID_1']
data = data.to_dict('records')
else:
print('No shapefiles for {}'.format(item))
continue
for datum in data:
output.append({
'geometry': datum['geometry'],
'properties': {
'GID_id': datum['GID_id'],
},
})
output = gpd.GeoDataFrame.from_features(output, crs='epsg:4326')
return output
def plot_regions_by_geotype(country, regions, path):
"""
Plot regions by geotype.
"""
    filename = 'regional_data.csv'
    iso3 = country['iso3']
    path_data = os.path.join(DATA_PROCESSED, iso3, filename)
data = pd.read_csv(path_data)
n = len(regions)
data['population_km2'] = round(data['population_total'] / data['area_km2'], 2)
data = data[['GID_id', 'population_km2']]
GID_level = 'GID_{}'.format(country['lowest'])
regions = regions[[GID_level, 'geometry']]#[:1000]
regions = regions.copy()
regions = regions.merge(data, left_on=GID_level, right_on='GID_id')
regions.reset_index(drop=True, inplace=True)
metric = 'population_km2'
bins = [-1, 20, 43, 69, 109, 171, 257, 367, 541, 1104, 1e8]
labels = [
'<20 $\mathregular{km^2}$',
'20-43 $\mathregular{km^2}$',
'43-69 $\mathregular{km^2}$',
'69-109 $\mathregular{km^2}$',
'109-171 $\mathregular{km^2}$',
'171-257 $\mathregular{km^2}$',
'257-367 $\mathregular{km^2}$',
'367-541 $\mathregular{km^2}$',
'541-1104 $\mathregular{km^2}$',
'>1104 $\mathregular{km^2}$'
]
regions['bin'] = pd.cut(
regions[metric],
bins=bins,
labels=labels
)
sns.set(font_scale=1, font="Times New Roman")
fig, ax = plt.subplots(1, 1, figsize=country['figsize'])
minx, miny, maxx, maxy = regions.total_bounds
ax.set_xlim(minx-.5, maxx+.5)
ax.set_ylim(miny-0.1, maxy+.1)
regions.plot(column='bin', ax=ax, cmap='viridis_r', linewidth=0.2, alpha=0.8,
legend=True, edgecolor='grey')
handles, labels = ax.get_legend_handles_labels()
fig.legend(handles[::-1], labels[::-1])
cx.add_basemap(ax, crs=regions.crs, source=cx.providers.CartoDB.Voyager)
cx.add_basemap(ax, crs='epsg:4326')
name = 'Population Density Deciles for Sub-National Regions (n={})'.format(n)
fig.suptitle(name)
fig.tight_layout()
fig.savefig(path, dpi=600)
plt.close(fig)
def plot_cells_per_region(country, regions, path):
"""
Plot regions by geotype.
"""
iso3 = country['iso3']
name = country['country']
filename = '{}.csv'.format(iso3)
folder = os.path.join(DATA_PROCESSED, iso3, 'sites')
path_sites = os.path.join(folder, filename)
sites = pd.read_csv(path_sites, encoding='latin-1')
sites = gpd.GeoDataFrame(
sites,
geometry=gpd.points_from_xy(
sites.lon,
sites.lat
), crs='epsg:4326'
)
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(12,12))
fig.subplots_adjust(hspace=.2, wspace=.2)
minx, miny, maxx, maxy = regions.total_bounds
buffer = 2
for ax in [ax1, ax2]:
for dim in [0,1]:
ax[dim].set_xlim(minx-buffer, maxx+buffer)
ax[dim].set_ylim(miny-0.1, maxy+.1)
fig.set_facecolor('gainsboro')
gsm = sites.loc[sites['radio'] == 'GSM']
umts = sites.loc[sites['radio'] == 'UMTS']
lte = sites.loc[sites['radio'] == 'LTE']
nr = sites.loc[sites['radio'] == 'NR']
regions.plot(facecolor="none", edgecolor="grey", ax=ax1[0])
regions.plot(facecolor="none", edgecolor="grey", ax=ax1[1])
regions.plot(facecolor="none", edgecolor="grey", ax=ax2[0])
regions.plot(facecolor="none", edgecolor="grey", ax=ax2[1])
gsm.plot(color='red', markersize=1.5, ax=ax1[0])
umts.plot(color='blue', markersize=1.5, ax=ax1[1])
lte.plot(color='yellow', markersize=1.5, ax=ax2[0])
nr.plot(color='black', markersize=1.5, ax=ax2[1])
ax1[0].set_title('2G GSM Cells')
ax1[1].set_title('3G UMTS Cells')
ax2[0].set_title('4G LTE Cells')
ax2[1].set_title('5G NR Cells')
filename = 'core_edges_existing.shp'
folder = os.path.join(DATA_PROCESSED, iso3, 'network_existing')
path_fiber = os.path.join(folder, filename)
if os.path.exists(path_fiber):
fiber = gpd.read_file(path_fiber, crs='epsg:4326')
fiber.plot(color='orange', lw=0.8, ax=ax1[0])
fiber.plot(color='orange', lw=0.8, ax=ax1[1])
fiber.plot(color='orange', lw=0.8, ax=ax2[0])
fiber.plot(color='orange', lw=0.8, ax=ax2[1])
fig.tight_layout()
main_title = 'Mobile Cellular Infrastructure: {}'.format(name)
plt.suptitle(main_title, fontsize=20, y=1.03)
crs = 'epsg:4326'
cx.add_basemap(ax1[0], crs=crs)
cx.add_basemap(ax1[1], crs=crs)
cx.add_basemap(ax2[0], crs=crs)
cx.add_basemap(ax2[1], crs=crs)
plt.savefig(path,
pad_inches=0.4,
bbox_inches='tight'
)
plt.close()
def plot_coverage_by_region(country, regions, path):
"""
Plot regions by geotype.
"""
iso3 = country['iso3']
name = country['country']
filename = 'baseline_coverage.csv'
folder = os.path.join(DATA_PROCESSED, iso3)
path_data = os.path.join(folder, filename)
data = pd.read_csv(path_data, encoding='latin-1')
data = data[['GID_id', 'technology', 'covered_pop_perc', 'uncovered_pop_perc']]
data = format_data(data, regions)
data = pd.DataFrame(data)
regions = regions.merge(data, left_on='GID_2', right_on='GID_id')
regions.reset_index(drop=True, inplace=True)
bins = [-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 1e8]
labels = [
'<10%',
'10-20%',
'20-30%',
'30-40%',
'40-50%',
'50-60%',
'60-70%',
'70-80%',
'80-90%',
'90-100%'
]
regions['bin'] = pd.cut(
regions['covered_pop_perc'],
bins=bins,
labels=labels
)#.add_categories('missing')
    regions['bin'] = regions['bin'].fillna('<10%')
regions.to_csv(os.path.join(VIS, 'test.csv'))
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(12,12))
fig.subplots_adjust(hspace=.2, wspace=.2)
minx, miny, maxx, maxy = regions.total_bounds
buffer = 2
for ax in [ax1, ax2]:
for dim in [0,1]:
ax[dim].set_xlim(minx-buffer, maxx+buffer)
ax[dim].set_ylim(miny-0.1, maxy+.1)
fig.set_facecolor('gainsboro')
gsm = regions.loc[regions['technology'] == 'GSM']
umts = regions.loc[regions['technology'] == 'UMTS']
lte = regions.loc[regions['technology'] == 'LTE']
nr = regions.loc[regions['technology'] == 'NR']
if len(nr) == 0:
nr = lte.copy()
nr['technology'] = 'NR'
nr['bin'] = '<10%'
gsm.plot(column='bin', cmap='viridis_r', linewidth=0.2, alpha=0.8,
legend=True, edgecolor='grey', ax=ax1[0])
umts.plot(column='bin', cmap='viridis_r', linewidth=0.2, alpha=0.8,
legend=True, edgecolor='grey', ax=ax1[1])
lte.plot(column='bin', cmap='viridis_r', linewidth=0.2, alpha=0.8,
legend=True, edgecolor='grey', ax=ax2[0])
nr.plot(column='bin', cmap='viridis_r', linewidth=0.2, alpha=0.8,
legend=True, edgecolor='grey', ax=ax2[1])
ax1[0].set_title('Covered by 2G GSM (%)')
ax1[1].set_title('Covered by 3G UMTS (%)')
ax2[0].set_title('Covered by 4G LTE (%)')
ax2[1].set_title('Covered by 5G NR (%)')
fig.tight_layout()
main_title = 'Covered Population by Region: {}'.format(name)
plt.suptitle(main_title, fontsize=20, y=1.01)
crs = 'epsg:4326'
cx.add_basemap(ax1[0], crs=crs)
cx.add_basemap(ax1[1], crs=crs)
cx.add_basemap(ax2[0], crs=crs)
cx.add_basemap(ax2[1], crs=crs)
plt.savefig(path,
# pad_inches=0.4,
bbox_inches='tight'
)
plt.close()
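# A minimal illustrative sketch of the pd.cut binning used in
# plot_coverage_by_region above; the coverage values below are hypothetical.
def _coverage_binning_sketch():
    import pandas as pd
    toy = pd.DataFrame({'covered_pop_perc': [5.0, 37.5, 90.0, 100.0]})
    bins = [-1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 1e8]
    labels = ['<10%', '10-20%', '20-30%', '30-40%', '40-50%',
              '50-60%', '60-70%', '70-80%', '80-90%', '90-100%']
    toy['bin'] = pd.cut(toy['covered_pop_perc'], bins=bins, labels=labels)
    return toy  # bins come out as: '<10%', '30-40%', '80-90%', '90-100%'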
def plot_uncovered_pop_by_region(country, outline, path):
"""
Plot uncovered population by region.
"""
iso3 = country['iso3']
name = country['country']
fig, (ax1, ax2) = plt.subplots(2, 2, figsize=(12,12))
fig.subplots_adjust(hspace=.2, wspace=.2)
minx, miny, maxx, maxy = outline.total_bounds
buffer = 2
for ax in [ax1, ax2]:
for dim in [0,1]:
ax[dim].set_xlim(minx-buffer, maxx+buffer)
ax[dim].set_ylim(miny-0.1, maxy+.1)
fig.set_facecolor('gainsboro')
outline.plot(facecolor="none", edgecolor="grey", ax=ax1[0])
outline.plot(facecolor="none", edgecolor="grey", ax=ax1[1])
outline.plot(facecolor="none", edgecolor="grey", ax=ax2[0])
outline.plot(facecolor="none", edgecolor="grey", ax=ax2[1])
folder = os.path.join(DATA_PROCESSED, iso3, 'coverage')
path1 = os.path.join(folder, 'baseline_uncovered_GSM.shp')
if os.path.exists(path1):
gsm = gpd.read_file(path1, crs='epsg:3857')
gsm = gsm.to_crs(4326)
gsm.plot(color='red', linewidth=0.2, alpha=0.4,
legend=True, edgecolor='grey', ax=ax1[0])
path2 = os.path.join(folder, 'baseline_uncovered_UMTS.shp')
if os.path.exists(path2):
umts = gpd.read_file(path2, crs='epsg:3857')
umts = umts.to_crs(4326)
umts.plot(color='blue', linewidth=0.2, alpha=0.4,
legend=True, edgecolor='grey', ax=ax1[1])
path3 = os.path.join(folder, 'baseline_uncovered_LTE.shp')
if os.path.exists(path3):
lte = gpd.read_file(path3, crs='epsg:3857')
lte = lte.to_crs(4326)
lte.plot(color='yellow', linewidth=0.2, alpha=0.4,
legend=True, edgecolor='grey', ax=ax2[0])
path4 = os.path.join(folder, 'baseline_uncovered_NR.shp')
if os.path.exists(path4):
nr = gpd.read_file(path4, crs='epsg:3857')
nr = nr.to_crs(4326)
nr.plot(color='black', linewidth=0.2, alpha=0.4,
legend=True, edgecolor='grey', ax=ax2[1])
else:
nr = gpd.read_file(os.path.join(folder, '..', 'national_outline.shp'), crs='epsg:4326')
nr.plot(color='black', linewidth=0.2, alpha=0.4,
legend=True, edgecolor='grey', ax=ax2[1])
ax1[0].set_title('2G GSM Uncovered')
ax1[1].set_title('3G UMTS Uncovered')
ax2[0].set_title('4G LTE Uncovered')
ax2[1].set_title('5G NR Uncovered')
fig.tight_layout()
main_title = 'Uncovered Population: {}'.format(name)
plt.suptitle(main_title, fontsize=20, y=1.01)
crs = 'epsg:4326'
cx.add_basemap(ax1[0], crs=crs)
cx.add_basemap(ax1[1], crs=crs)
cx.add_basemap(ax2[0], crs=crs)
cx.add_basemap(ax2[1], crs=crs)
plt.savefig(path,
# pad_inches=0.4,
bbox_inches='tight'
)
plt.close()
def format_data(data, regions):
"""
"""
output = []
technologies = data['technology'].unique()
regions = regions['GID_2'].unique()
for region in regions:
for technology in technologies:
if ((data['GID_id'] == region) & (data['technology'] == technology)).any():
subset = data.loc[(data['GID_id'] == region) & (data['technology'] == technology)]
covered_pop_perc = subset['covered_pop_perc'].values[0]
uncovered_pop_perc = subset['uncovered_pop_perc'].values[0]
else:
covered_pop_perc = 0
uncovered_pop_perc = 100
output.append({
'GID_id': region,
'technology': technology,
'covered_pop_perc': covered_pop_perc,
'uncovered_pop_perc': uncovered_pop_perc,
})
return output
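# A minimal illustrative sketch of the gap-filling behaviour of format_data():
# any (region, technology) pair missing from the input comes back with
# 0% covered / 100% uncovered. Region IDs and percentages are hypothetical.
def _format_data_sketch():
    import pandas as pd
    coverage = pd.DataFrame({
        'GID_id': ['AAA.1.1_1', 'AAA.1.1_1'],
        'technology': ['GSM', 'LTE'],
        'covered_pop_perc': [80.0, 45.0],
        'uncovered_pop_perc': [20.0, 55.0],
    })
    regions = pd.DataFrame({'GID_2': ['AAA.1.1_1', 'AAA.1.2_1']})
    filled = format_data(coverage, regions)
    return filled  # 4 dicts: 2 regions x 2 technologies, gaps filled with 0/100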
def single_extreme_plot(country, regions, outline, path):
"""
Plot regions by geotype as points.
"""
iso3 = country['iso3']
name = country['country']
filename = 'sites_{}.csv'.format(iso3)
path_data = os.path.join(RESULTS, filename)
data =
|
pd.read_csv(path_data)
|
pandas.read_csv
|
import pandas as pd
import re
import requests
df_states_timeline = pd.read_csv(r'/home/amritaparna/PycharmProjects/covid19/venv/states_timeline_data.csv')
state_tup = tuple(df_states_timeline['State UT'])
cols = ['date']
cols.extend([x.replace(" ", "") for x in list(df_states_timeline['State UT'])])
#print(cols)
date_v = list(df_states_timeline.columns)[:-1]
# print(date_v)
df_timeline =
|
pd.DataFrame(None, None, cols)
|
pandas.DataFrame
|
from decimal import Decimal
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
class TestDataFrameUnaryOperators:
# __pos__, __neg__, __inv__
@pytest.mark.parametrize(
"df,expected",
[
(pd.DataFrame({"a": [-1, 1]}), pd.DataFrame({"a": [1, -1]})),
(pd.DataFrame({"a": [False, True]}), pd.DataFrame({"a": [True, False]})),
(
pd.DataFrame({"a": pd.Series(pd.to_timedelta([-1, 1]))}),
pd.DataFrame({"a": pd.Series(pd.to_timedelta([1, -1]))}),
),
],
)
def test_neg_numeric(self, df, expected):
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df, expected",
[
(np.array([1, 2], dtype=object), np.array([-1, -2], dtype=object)),
([Decimal("1.0"), Decimal("2.0")], [Decimal("-1.0"), Decimal("-2.0")]),
],
)
def test_neg_object(self, df, expected):
# GH#21380
df = pd.DataFrame({"a": df})
expected = pd.DataFrame({"a": expected})
tm.assert_frame_equal(-df, expected)
tm.assert_series_equal(-df["a"], expected["a"])
@pytest.mark.parametrize(
"df",
[
pd.DataFrame({"a": ["a", "b"]}),
pd.DataFrame({"a":
|
pd.to_datetime(["2017-01-22", "1970-01-01"])
|
pandas.to_datetime
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 19 15:27:23 2020
@author: saksh
Main execution file for the market_networks paper; it is recommended to use market_networks(phase_3).ipynb for a more thorough analysis
Adjust the file path in import_csv according to position of file
"""
#init
import pandas as pd
import numpy as np
np.random.seed(1337) #random state used throughout the notebook for reproducibility
from math import log
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.cm as cm
import seaborn as sns
from datetime import datetime
import networkx as nx
import community as louvain
from collections import Counter
import random
from preprocess_funcs import louvain_community, variation_of_information, pd_fill_diagonal
plt.style.use('classic')
#dataset import
sp500 = pd.read_csv('/content/drive/My Drive/collab_files/^GSPC.csv', header = 0, index_col = 'Date')
sp500.index = pd.to_datetime(sp500.index, format = '%d-%m-%y')
sp500 = sp500[1:]
#sp500 = sp500.resample('W').mean()
#sp500.head()
print(len(sp500))
#import nifty50 data
nifty = pd.read_csv('/content/drive/My Drive/collab_files/^NSEI.csv', header = 0, index_col = 'Date')
nifty.index = pd.to_datetime(nifty.index, format = '%d-%m-%y')
nifty = nifty.reindex(index = sp500.index, method = 'bfill')
nifty.fillna(method = 'bfill', inplace=True)
#nifty = nifty.resample('W').mean()
#nifty.head()
print(len(nifty))
sing_sti =
|
pd.read_csv('/content/drive/My Drive/collab_files/^sti_d.csv', header = 0, index_col = 'Date')
|
pandas.read_csv
|
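# A minimal illustrative sketch of the alignment step above: reindex one price
# series onto another's DatetimeIndex and back-fill gaps (e.g. differing
# market holidays). Dates and prices are hypothetical.
def _reindex_alignment_sketch():
    import pandas as pd
    ref = pd.Series([1.0, 2.0, 3.0],
                    index=pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-03']))
    other = pd.Series([10.0, 30.0],
                      index=pd.to_datetime(['2020-01-01', '2020-01-03']))
    aligned = other.reindex(index=ref.index, method='bfill')
    return aligned  # 2020-01-02 is filled with the next available value, 30.0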
"""
Datasets
--------
A few popular time series datasets
"""
from pathlib import Path
from typing import List
import numpy as np
import pandas as pd
from darts import TimeSeries
from darts.logging import get_logger, raise_if_not
from .dataset_loaders import DatasetLoaderCSV, DatasetLoaderMetadata
"""
Overall usage of this package:
from darts.datasets import AirPassengersDataset
ts: TimeSeries = AirPassengersDataset.load()
"""
logger = get_logger(__name__)
_DEFAULT_PATH = "https://raw.githubusercontent.com/unit8co/darts/master/datasets"
class AirPassengersDataset(DatasetLoaderCSV):
"""
Monthly Air Passengers Dataset, from 1949 to 1960.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"air_passengers.csv",
uri=_DEFAULT_PATH + "/AirPassengers.csv",
hash="167ffa96204a2b47339c21eea25baf32",
header_time="Month",
)
)
class AusBeerDataset(DatasetLoaderCSV):
"""
Total quarterly beer production in Australia (in megalitres) from 1956:Q1 to 2008:Q3 [1]_.
References
----------
.. [1] https://rdrr.io/cran/fpp/man/ausbeer.html
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ausbeer.csv",
uri=_DEFAULT_PATH + "/ausbeer.csv",
hash="1f4028a570a20939411cc04de7364bbd",
header_time="date",
format_time="%Y-%m-%d",
)
)
class EnergyDataset(DatasetLoaderCSV):
"""
Hourly energy dataset coming from [1]_.
Contains a time series with 28 hourly components between 2014-12-31 23:00:00 and 2018-12-31 22:00:00
References
----------
.. [1] https://www.kaggle.com/nicholasjhana/energy-consumption-generation-prices-and-weather
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"energy.csv",
uri=_DEFAULT_PATH + "/energy_dataset.csv",
hash="f564ef18e01574734a0fa20806d1c7ee",
header_time="time",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class GasRateCO2Dataset(DatasetLoaderCSV):
"""
Gas Rate CO2 dataset
Two components, length 296 (integer time index)
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"gasrate_co2.csv",
uri=_DEFAULT_PATH + "/gasrate_co2.csv",
hash="77bf383715a9cf81459f81fe17baf3b0",
header_time=None,
format_time=None,
)
)
class HeartRateDataset(DatasetLoaderCSV):
"""
The series contains 1800 evenly-spaced measurements of instantaneous heart rate from a single subject.
The measurements (in units of beats per minute) occur at 0.5 second intervals, so that the length of
each series is exactly 15 minutes.
This is the series1 in [1]_.
It uses an integer time index.
References
----------
.. [1] http://ecg.mit.edu/time-series/
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"heart_rate.csv",
uri=_DEFAULT_PATH + "/heart_rate.csv",
hash="3c4a108e1116867cf056dc5be2c95386",
header_time=None,
format_time=None,
)
)
class IceCreamHeaterDataset(DatasetLoaderCSV):
"""
Monthly sales of heaters and ice cream between January 2004 and June 2020.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ice_cream_heater.csv",
uri=_DEFAULT_PATH + "/ice_cream_heater.csv",
hash="62031c7b5cdc9339fe7cf389173ef1c3",
header_time="Month",
format_time="%Y-%m",
)
)
class MonthlyMilkDataset(DatasetLoaderCSV):
"""
Monthly production of milk (in pounds per cow) between January 1962 and December 1975
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"monthly_milk.csv",
uri=_DEFAULT_PATH + "/monthly-milk.csv",
hash="4784443e696da45d7082e76a67687b93",
header_time="Month",
format_time="%Y-%m",
)
)
class MonthlyMilkIncompleteDataset(DatasetLoaderCSV):
"""
Monthly production of milk (in pounds per cow) between January 1962 and December 1975.
Has some missing values.
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"monthly_milk-incomplete.csv",
uri=_DEFAULT_PATH + "/monthly-milk-incomplete.csv",
hash="49b275c7e2f8f28a6a05224be1a049a4",
header_time="Month",
format_time="%Y-%m",
freq="MS",
)
)
class SunspotsDataset(DatasetLoaderCSV):
"""
Monthly Sunspot Numbers, 1749 - 1983
Monthly mean relative sunspot numbers from 1749 to 1983.
Collected at Swiss Federal Observatory, Zurich until 1960, then Tokyo Astronomical Observatory.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/datasets/versions/3.6.1/topics/sunspots
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"sunspots.csv",
uri=_DEFAULT_PATH + "/monthly-sunspots.csv",
hash="4d27019c43d9c256d528f1bd6c5f40e0",
header_time="Month",
format_time="%Y-%m",
)
)
class TaylorDataset(DatasetLoaderCSV):
"""
Half-hourly electricity demand in England and Wales from Monday 5 June 2000 to Sunday 27 August 2000.
Discussed in Taylor (2003) [1]_, and kindly provided by <NAME> [2]_. Units: Megawatts
(Uses an integer time index).
References
----------
.. [1] <NAME>. (2003) Short-term electricity demand forecasting using double seasonal exponential smoothing.
Journal of the Operational Research Society, 54, 799-805.
.. [2] https://www.rdocumentation.org/packages/forecast/versions/8.13/topics/taylor
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"taylor.csv",
uri=_DEFAULT_PATH + "/taylor.csv",
hash="1ea355c90e8214cb177788a674801a22",
header_time=None,
format_time=None,
)
)
class TemperatureDataset(DatasetLoaderCSV):
"""
Daily temperature in Melbourne between 1981 and 1990
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"temperatures.csv",
uri=_DEFAULT_PATH + "/temps.csv",
hash="ce5b5e4929793ec8b6a54711110acebf",
header_time="Date",
format_time="%m/%d/%Y",
freq="D",
)
)
class USGasolineDataset(DatasetLoaderCSV):
"""
Weekly U.S. Product Supplied of Finished Motor Gasoline between 1991-02-08 and 2021-04-30
Obtained from [1]_.
References
----------
.. [1] https://www.eia.gov/dnav/pet/hist/LeafHandler.ashx?n=PET&s=wgfupus2&f=W
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"us_gasoline.csv",
uri=_DEFAULT_PATH + "/us_gasoline.csv",
hash="25d440337a06cbf83423e81d0337a1ce",
header_time="Week",
format_time="%m/%d/%Y",
)
)
class WineDataset(DatasetLoaderCSV):
"""
Australian total wine sales by wine makers in bottles <= 1 litre. Monthly between Jan 1980 and Aug 1994.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/wineind
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"wine.csv",
uri=_DEFAULT_PATH + "/wineind.csv",
hash="b68971d7e709ad0b7e6300cab977e3cd",
header_time="date",
format_time="%Y-%m-%d",
)
)
class WoolyDataset(DatasetLoaderCSV):
"""
Quarterly production of woollen yarn in Australia: tonnes. Mar 1965 -- Sep 1994.
Source: [1]_
References
----------
.. [1] https://www.rdocumentation.org/packages/forecast/versions/8.1/topics/woolyrnq
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"wooly.csv",
uri=_DEFAULT_PATH + "/woolyrnq.csv",
hash="4be8b12314db94c8fd76f5c674454bf0",
header_time="date",
format_time="%Y-%m-%d",
)
)
class ETTh1Dataset(DatasetLoaderCSV):
"""
The data of 1 Electricity Transformers at 1 stations, including load, oil temperature.
The dataset ranges from 2016/07 to 2018/07 taken hourly.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTh1.csv",
uri=_DEFAULT_PATH + "/ETTh1.csv",
hash="8381763947c85f4be6ac456c508460d6",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTh2Dataset(DatasetLoaderCSV):
"""
The data of 1 Electricity Transformers at 1 stations, including load, oil temperature.
The dataset ranges from 2016/07 to 2018/07 taken hourly.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTh2.csv",
uri=_DEFAULT_PATH + "/ETTh2.csv",
hash="51a229a3fc13579dd939364fefe9c7ab",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTm1Dataset(DatasetLoaderCSV):
"""
The data of 1 Electricity Transformers at 1 stations, including load, oil temperature.
The dataset ranges from 2016/07 to 2018/07 recorded every 15 minutes.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTm1.csv",
uri=_DEFAULT_PATH + "/ETTm1.csv",
hash="82d6bd89109c63d075d99c1077b33f38",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ETTm2Dataset(DatasetLoaderCSV):
"""
The data of 1 Electricity Transformers at 1 stations, including load, oil temperature.
The dataset ranges from 2016/07 to 2018/07 recorded every 15 minutes.
Source: [1][2]_
Field Descriptions:
date: The recorded date
HUFL: High UseFul Load
HULL: High UseLess Load
MUFL: Medium UseFul Load
MULL: Medium UseLess Load
LUFL: Low UseFul Load
LULL: Low UseLess Load
OT: Oil Temperature (Target)
References
----------
.. [1] https://github.com/zhouhaoyi/ETDataset
.. [2] https://arxiv.org/abs/2012.07436
"""
def __init__(self):
super().__init__(
metadata=DatasetLoaderMetadata(
"ETTm2.csv",
uri=_DEFAULT_PATH + "/ETTm2.csv",
hash="7687e47825335860bf58bccb31be0c56",
header_time="date",
format_time="%Y-%m-%d %H:%M:%S",
)
)
class ElectricityDataset(DatasetLoaderCSV):
"""
Measurements of electric power consumption in one household with 15 minute sampling rate.
370 client's consumption are recorded in kW.
Source: [1]_
Loading this dataset will provide a multivariate timeseries with 370 columns for each household.
The following code can be used to convert the dataset to a list of univariate timeseries,
one for each household.
References
----------
.. [1] https://archive.ics.uci.edu/ml/datasets/ElectricityLoadDiagrams20112014
"""
def __init__(self, multivariate: bool = True):
"""
Parameters
----------
multivariate: bool
Whether to return a single multivariate timeseries - if False returns a list of univariate TimeSeries. Default is True.
"""
def pre_proces_fn(extracted_dir, dataset_path):
with open(Path(extracted_dir, "LD2011_2014.txt")) as fin:
with open(dataset_path, "wt", newline="\n") as fout:
for line in fin:
fout.write(line.replace(",", ".").replace(";", ","))
super().__init__(
metadata=DatasetLoaderMetadata(
"Electricity.csv",
uri="https://archive.ics.uci.edu/ml/machine-learning-databases/00321/LD2011_2014.txt.zip",
hash="acfe6783eea43905e510f537add940fd",
header_time="Unnamed: 0",
format_time="%Y-%m-%d %H:%M:%S",
pre_process_zipped_csv_fn=pre_proces_fn,
multivariate=multivariate,
)
)
def _to_multi_series(self, series: pd.DataFrame) -> List[TimeSeries]:
"""
Load the electricity dataset as a list of univariate series, one for each household.
"""
ts_list = [] # list of timeseries
for label in series:
srs = series[label]
# filter column down to the period of recording
srs = srs.replace(0.0, np.nan)
start_date = min(srs.fillna(method="ffill").dropna().index)
end_date = max(srs.fillna(method="bfill").dropna().index)
active_range = (srs.index >= start_date) & (srs.index <= end_date)
srs = srs[active_range].fillna(0.0)
# convert to timeseries
tmp = pd.DataFrame({"power_usage": srs})
tmp["date"] = tmp.index
ts = TimeSeries.from_dataframe(tmp, "date", ["power_usage"])
ts_list.append(ts)
return ts_list
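# A minimal usage sketch for the ElectricityDataset above. Per the constructor
# docstring, multivariate=False is assumed to make load() return a list of
# univariate TimeSeries (one per household) via _to_multi_series.
def _electricity_univariate_sketch():
    ts_wide = ElectricityDataset(multivariate=True).load()    # one 370-column TimeSeries
    ts_list = ElectricityDataset(multivariate=False).load()   # list of univariate TimeSeries
    return ts_wide, ts_list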
class UberTLCDataset(DatasetLoaderCSV):
"""
14.3 million Uber pickups from January to June 2015. The data is resampled to hourly or daily frequency based on ``sample_freq``, using the locationID as the target.
Source: [1]_
Loading this dataset will provide a multivariate timeseries with 262 columns for each locationID.
The following code can be used to convert the dataset to a list of univariate timeseries,
one for each locationID.
References
----------
.. [1] https://github.com/fivethirtyeight/uber-tlc-foil-response
"""
def __init__(self, sample_freq: str = "hourly", multivariate: bool = True):
"""
Parameters
----------
sample_freq: str
The sampling frequency of the data. Can be "hourly" or "daily". Default is "hourly".
multivariate: bool
Whether to return a single multivariate timeseries - if False returns a list of univariate TimeSeries. Default is True.
"""
valid_sample_freq = ["daily", "hourly"]
raise_if_not(
sample_freq in valid_sample_freq,
f"sample_freq must be one of {valid_sample_freq}",
logger,
)
def pre_proces_fn(extracted_dir, dataset_path):
df = pd.read_csv(
Path(extracted_dir, "uber-raw-data-janjune-15.csv"),
header=0,
usecols=["Pickup_date", "locationID"],
index_col=0,
)
output_dict = {}
freq_setting = "1H" if "hourly" in str(dataset_path) else "1D"
time_series_of_locations = list(df.groupby(by="locationID"))
for locationID, df in time_series_of_locations:
df = df.sort_index()
df.index =
|
pd.to_datetime(df.index)
|
pandas.to_datetime
|
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.base import BaseEstimator, TransformerMixin
import pandas as pd
# This class is used to binarize a multi-label column
# It can be understood as a multi-label one-hot encoder
class MyBinarizer(BaseEstimator, TransformerMixin):
def __init__(self):
"""
Set up the class
"""
print(">>>> Binarizer Initialized")
self.mlb_list = []
def fit(self, X, y=None):
"""
Fit the binarizer on all the features in the dataframe
"""
print(">>>> Fit called")
for column in list(X.columns):
mlb = MultiLabelBinarizer()
self.mlb_list.append((column, mlb.fit(X[column]), list(mlb.classes_)))
# print(self.mlb_list)
return self
def transform(self, X, y=None):
"""
Return the transformed dataframe
"""
print(">>>> Transform called")
X_ =
|
pd.DataFrame()
|
pandas.DataFrame
|
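# A minimal standalone sketch of the per-column multi-label one-hot encoding
# that MyBinarizer applies above; the column name and labels are hypothetical.
def _multilabel_binarize_sketch():
    import pandas as pd
    from sklearn.preprocessing import MultiLabelBinarizer
    df = pd.DataFrame({'genres': [['action', 'comedy'], ['comedy'], ['drama']]})
    mlb = MultiLabelBinarizer()
    encoded = pd.DataFrame(mlb.fit_transform(df['genres']),
                           columns=mlb.classes_, index=df.index)
    return encoded  # one 0/1 column per label: action, comedy, drama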
from pandas.util import hash_pandas_object
import hashlib
import pandas as pd
import random
random.seed(42)
import numpy as np
import psutil
import time
ROWS = 20000000
DATA = [random.random() for _ in range(ROWS)]
def mem_use():
mem_profile = psutil.virtual_memory()
print("Memory Usage = {} | percent = {}".format(mem_profile.used,
mem_profile.percent))
def apply_assertion(df):
assert hashlib.sha256(pd.util.hash_pandas_object(df, index=True).values).hexdigest() == '867567dc7d46f77af2bca9804ac366a5165d27612de100461b699bd23094ab90'
## CREATE AND LOAD ARRAYS IN MEMORY
def using_numpy_arrays():
larray = []
for i in range(0,10):
mem_use()
larray.append(np.array(DATA)) #// 2.19 GB
del larray
time.sleep(5)
### USING HDF Storage
def using_hdf_storage():
store = pd.HDFStore('store.h5')
for i in range(0,10):
mem_use() ## constant memory usage (start with
store[f'v{i}'] = pd.DataFrame({f'v{i}':DATA})
df = pd.DataFrame()
generat = ( store[f'v{i}'] for i in range(0,10))
df = pd.concat(generat,axis=1)
mem_use()
print(df.head())
time.sleep(5)
using_hdf_storage()
exit()
import gc
# USING GENERATOR
def create_df_using_generator():
genera = (pd.DataFrame([x]) for x in range(1, 20000000))  # assumed intent: a generator yielding one single-row frame per value
mem_use()
myDf =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
Module to encompass various tools for the manipulation of feature-extracted MS data sets.
"""
import os
import inspect
import numpy
import pandas
import re
import warnings
import numbers
from datetime import datetime, timedelta
import logging
import copy
import networkx
from .._toolboxPath import toolboxPath
from ._dataset import Dataset
from ..utilities import rsd
from ..utilities._internal import _vcorrcoef
from ..utilities.extractParams import extractParams
from ..enumerations import VariableType, DatasetLevel, AssayRole, SampleType
from ..utilities import removeTrailingColumnNumbering
from ..utilities._filters import blankFilter
from ..utilities.normalisation._normaliserABC import Normaliser
from ..utilities.normalisation._nullNormaliser import NullNormaliser
class MSDataset(Dataset):
"""
MSDataset(datapath, fileType='QI', sop='GenericMS', **kwargs)
:py:class:`MSDataset` extends :py:class:`Dataset` to represent both peak-picked LC- or DI-MS datasets (discrete variables), and Continuum mode (spectral) DI-MS datasets.
Objects can be initialised from a variety of common data formats, currently peak-picked data from Progenesis QI or XCMS, and targeted Biocrates datasets.
* Progenesis QI
QI import operates on csv files exported *via* the 'Export Compound Measurements' menu option in QI. Import requires the presence of both normalised and raw datasets, but will only import the raw measurements.
* XCMS
XCMS import operates on the csv files generated by XCMS with the peakTable() method. By default, the csv is expected to have 14 columns of feature parameters, with the intensity values for the first sample starting in the 15th column. However, the number of columns to skip is dataset dependent and can be set with the ``noFeatureParams=`` keyword argument.
* Biocrates
Operates on spreadsheets exported from Biocrates MetIDQ. By default loads data from the sheet named 'Data Export'; this may be overridden with the ``sheetName=`` argument. If the number of sample metadata columns differs from the default, this can be overridden with the ``noSampleParams=`` argument.
"""
def __init__(self, datapath, fileType='xcms', sop='GenericMS', **kwargs):
"""
Basic initialisation.
"""
super().__init__(sop=sop, **kwargs)
self.corrExclusions = None
self._correlationToDilution = numpy.array(None)
try:
self.Attributes['artifactualFilter'] = (self.Attributes['artifactualFilter'] == 'True')
except:
pass
self._tempArtifactualLinkageMatrix = pandas.DataFrame(None)
self._artifactualLinkageMatrix = pandas.DataFrame(None)
self.Attributes['Raw Data Path'] = None
self.Attributes['Feature Names'] = 'Feature Name'
self.filePath, fileName = os.path.split(datapath)
self.fileName, fileExtension = os.path.splitext(fileName)
self.name = self.fileName
# Attributes to record which feature filtering procedures and parameters were used
self.Attributes['featureFilters'] = {'rsdFilter': False, 'varianceRatioFilter': False,
'correlationToDilutionFilter': False,
'artifactualFilter': False, 'blankFilter': False}
self.Attributes['filterParameters'] = {'rsdThreshold': None, 'corrMethod': None, 'corrThreshold': None,
'varianceRatio': None, 'blankThreshold': None,
'overlapThresholdArtifactual': None, 'corrThresholdArtifactual': None,
'deltaMzArtifactual': None}
# Load the QI output file
fileType = fileType.lower()
if fileType == 'qi':
self._loadQIDataset(datapath)
self.Attributes['FeatureExtractionSoftware'] = 'Progenesis QI'
self.VariableType = VariableType.Discrete
elif fileType == 'csv':
self._loadCSVImport(datapath, **kwargs)
self.Attributes['FeatureExtractionSoftware'] = 'Unknown'
if 'variableType' not in kwargs:
self.VariableType = VariableType.Discrete
else:
self.VariableType = kwargs['variableType']
elif fileType == 'xcms':
self._loadXCMSDataset(datapath, **kwargs)
self.Attributes['FeatureExtractionSoftware'] = 'XCMS'
self.VariableType = VariableType.Discrete
elif fileType == 'biocrates':
self._loadBiocratesDataset(datapath, **kwargs)
self.Attributes['FeatureExtractionSoftware'] = 'Biocrates'
self.VariableType = VariableType.Discrete
elif fileType == 'metaboscape':
self._loadMetaboscapeDataset(datapath, **kwargs)
self.Attributes['FeatureExtractionSoftware'] = 'Metaboscape'
self.VariableType = VariableType.Discrete
elif fileType == 'csv export':
(self.name, self.intensityData, self.featureMetadata, self.sampleMetadata) = self._initialiseFromCSV(
datapath)
if 'm/z' in self.featureMetadata.columns:
self.featureMetadata['m/z'] = self.featureMetadata['m/z'].apply(pandas.to_numeric, errors='ignore')
if 'Retention Time' in self.featureMetadata.columns:
self.featureMetadata['Retention Time'] = self.featureMetadata['Retention Time'].apply(pandas.to_numeric,
errors='ignore')
self.VariableType = VariableType.Discrete
elif fileType == 'empty':
# Lets us build an empty object for testing &c
pass
else:
raise NotImplementedError
self.featureMetadata['Exclusion Details'] = None
self.featureMetadata['User Excluded'] = False
self.featureMetadata[['rsdFilter', 'varianceRatioFilter', 'correlationToDilutionFilter', 'blankFilter',
'artifactualFilter']] = pandas.DataFrame([[True, True, True, True, True]],
index=self.featureMetadata.index)
self.featureMetadata[['rsdSP', 'rsdSS/rsdSP', 'correlationToDilution', 'blankValue']] \
= pandas.DataFrame([[numpy.nan, numpy.nan, numpy.nan, numpy.nan]], index=self.featureMetadata.index)
self.initialiseMasks()
self.Attributes['Log'].append([datetime.now(),
'%s instance inited, with %d samples, %d features, from \'%s\'' % (
self.__class__.__name__, self.noSamples, self.noFeatures, datapath)])
# When making a deepcopy, all artifactual linkages are reset
def __deepcopy__(self, memo):
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
# Check for pandas dataframe, and use the
if isinstance(v, pandas.DataFrame):
setattr(result, k, v.copy())
else:
setattr(result, k, copy.deepcopy(v, memo))
result._tempArtifactualLinkageMatrix =
|
pandas.DataFrame(None)
|
pandas.DataFrame
|
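# A minimal usage sketch for the MSDataset loader described in the class
# docstring above; the path argument is a placeholder and the keyword names
# (fileType, noFeatureParams, sheetName) are those the docstring documents.
def _msdataset_usage_sketch(datapath):
    qi_data = MSDataset(datapath, fileType='QI')
    xcms_data = MSDataset(datapath, fileType='XCMS', noFeatureParams=14)
    biocrates_data = MSDataset(datapath, fileType='Biocrates', sheetName='Data Export')
    return qi_data, xcms_data, biocrates_data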
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
# csv.DictReader uses first line in file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.loc[:, 'Scientific Name']
self.com_name = dr.loc[:, 'Common Name']
self.taxa = dr.loc[:, 'Taxa']
self.order = dr.loc[:, 'Order']
self.usfws_id = dr.loc[:, 'USFWS Species ID (ENTITY_ID)']
self.body_wgt = dr.loc[:, 'BW (g)']
self.diet_item = dr.loc[:, 'Food item']
self.h2o_cont = dr.loc[:, 'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 =
|
pd.Series([], dtype="float", name="dbt_bird_hc95")
|
pandas.Series
|
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour,
|
Index(expected)
|
pandas.Index
|
"""
Class Features
Name: driver_data_io_destination
Author(s): <NAME> (<EMAIL>)
Date: '20200515'
Version: '1.0.0'
"""
######################################################################################
# Library
import logging
import os
import numpy as np
from copy import deepcopy
import pandas as pd
from lib_utils_hazard import read_file_hazard
from lib_utils_io import read_obj, write_obj, save_file_json
from lib_utils_system import fill_tags2string, make_folder
from lib_utils_generic import get_dict_value, reduce_dict_2_lists
from lib_utils_plot import save_file_tiff, save_file_png, read_file_tiff, save_file_info
from lib_utils_ts import prepare_file_ts, save_file_ts
from lib_utils_tr import cmp_tr_exp, cmp_tr_linear
from lib_info_args import logger_name, time_format_algorithm
# Logging
log_stream = logging.getLogger(logger_name)
# Debug
import matplotlib.pylab as plt
######################################################################################
# -------------------------------------------------------------------------------------
# Class DriverScenario
class DriverScenario:
# -------------------------------------------------------------------------------------
# Initialize class
def __init__(self, time_now, time_run, discharge_data_collection, geo_data_collection,
src_dict, anc_dict, dst_dict,
alg_ancillary=None, alg_template_tags=None,
flag_telemac_data='telemac_data', flag_hazard_data='hazard_data',
flag_scenario_data_info='scenario_data_info',
flag_scenario_data_file='scenario_data_file',
flag_scenario_data_map='scenario_data_map',
flag_scenario_plot_info='scenario_info',
flag_scenario_plot_tiff='scenario_tiff',
flag_scenario_plot_png='scenario_png',
flag_scenario_data_ts='scenario_time_series',
flag_cleaning_anc_scenario_info=True, flag_cleaning_anc_scenario_file=True,
flag_cleaning_anc_scenario_map=True,
flag_cleaning_plot_scenario=True, flag_cleaning_data_scenario=True):
self.time_now = time_now
self.time_run = time_run
self.discharge_data_collection = discharge_data_collection
self.geo_data_collection = geo_data_collection
self.flag_telemac_data = flag_telemac_data
self.flag_hazard_data = flag_hazard_data
self.flag_scenario_data_info = flag_scenario_data_info
self.flag_scenario_data_file = flag_scenario_data_file
self.flag_scenario_data_map = flag_scenario_data_map
self.flag_scenario_plot_info = flag_scenario_plot_info
self.flag_scenario_plot_tiff = flag_scenario_plot_tiff
self.flag_scenario_plot_png = flag_scenario_plot_png
self.flag_scenario_data_ts = flag_scenario_data_ts
self.alg_ancillary = alg_ancillary
self.tr_min = alg_ancillary['tr_min']
self.tr_max = alg_ancillary['tr_max']
self.tr_freq = alg_ancillary['tr_freq']
self.scenario_analysis = alg_ancillary['scenario_analysis']
self.scenario_type = alg_ancillary['scenario_type']
if 'scenario_tiling' in list(alg_ancillary.keys()):
self.scenario_tiling = alg_ancillary['scenario_tiling']
else:
self.scenario_tiling = 'rounded'
self.alg_template_tags = alg_template_tags
self.file_name_tag = 'file_name'
self.folder_name_tag = 'folder_name'
self.save_status_tag = 'save_status'
self.group_dst_plot = 'plot'
self.group_dst_data = 'data'
self.domain_name_list = self.alg_ancillary['domain_name']
self.folder_name_telemac = src_dict[self.flag_telemac_data][self.folder_name_tag]
self.file_name_telemac = src_dict[self.flag_telemac_data][self.file_name_tag]
self.folder_name_hazard = src_dict[self.flag_hazard_data][self.folder_name_tag]
self.file_name_hazard = src_dict[self.flag_hazard_data][self.file_name_tag]
self.folder_name_scenario_anc_info = anc_dict[self.flag_scenario_data_info][self.folder_name_tag]
self.file_name_scenario_anc_info = anc_dict[self.flag_scenario_data_info][self.file_name_tag]
self.folder_name_scenario_anc_file = anc_dict[self.flag_scenario_data_file][self.folder_name_tag]
self.file_name_scenario_anc_file = anc_dict[self.flag_scenario_data_file][self.file_name_tag]
self.folder_name_scenario_anc_map = anc_dict[self.flag_scenario_data_map][self.folder_name_tag]
self.file_name_scenario_anc_map = anc_dict[self.flag_scenario_data_map][self.file_name_tag]
self.file_path_scenario_anc_info = self.define_file_scenario(
self.time_now, self.folder_name_scenario_anc_info, self.file_name_scenario_anc_info,
file_type='dictionary')
self.file_path_scenario_anc_file = self.define_file_scenario(
self.time_now, self.folder_name_scenario_anc_file, self.file_name_scenario_anc_file,
file_type='dictionary')
self.format_tr = '{:03d}'
self.scenario_tr = self.define_tr_scenario(self.tr_min, self.tr_max, self.tr_freq)
group_dst_plot = dst_dict[self.group_dst_plot]
self.folder_name_scenario_plot_info = group_dst_plot[self.flag_scenario_plot_info][self.folder_name_tag]
self.file_name_scenario_plot_info = group_dst_plot[self.flag_scenario_plot_info][self.file_name_tag]
self.save_status_scenario_plot_info = group_dst_plot[self.flag_scenario_plot_info][self.save_status_tag]
self.folder_name_scenario_plot_tiff = group_dst_plot[self.flag_scenario_plot_tiff][self.folder_name_tag]
self.file_name_scenario_plot_tiff = group_dst_plot[self.flag_scenario_plot_tiff][self.file_name_tag]
self.save_status_scenario_plot_tiff = group_dst_plot[self.flag_scenario_plot_tiff][self.save_status_tag]
self.folder_name_scenario_plot_png = group_dst_plot[self.flag_scenario_plot_png][self.folder_name_tag]
self.file_name_scenario_plot_png = group_dst_plot[self.flag_scenario_plot_png][self.file_name_tag]
self.save_status_scenario_plot_png = group_dst_plot[self.flag_scenario_plot_png][self.save_status_tag]
group_dst_data = dst_dict[self.group_dst_data]
self.folder_name_scenario_data_ts = group_dst_data[self.flag_scenario_data_ts][self.folder_name_tag]
self.file_name_scenario_data_ts = group_dst_data[self.flag_scenario_data_ts][self.file_name_tag]
self.save_status_scenario_data_ts = group_dst_data[self.flag_scenario_data_ts][self.save_status_tag]
self.flag_cleaning_anc_scenario_info = flag_cleaning_anc_scenario_info
self.flag_cleaning_anc_scenario_file = flag_cleaning_anc_scenario_file
self.flag_cleaning_anc_scenario_map = flag_cleaning_anc_scenario_map
self.flag_cleaning_plot_scenario = flag_cleaning_plot_scenario
self.flag_cleaning_data_scenario = flag_cleaning_data_scenario
scenario_description_collection = {}
for domain_name_step in self.domain_name_list:
domain_description_list = get_dict_value(geo_data_collection[domain_name_step], 'description', [])
scenario_description_collection[domain_name_step] = domain_description_list
self.scenario_description_collection = scenario_description_collection
self.domain_discharge_index_tag = 'discharge_idx'
self.domain_grid_x_tag = 'grid_x_grid'
self.domain_grid_y_tag = 'grid_y_grid'
self.domain_sections_db_tag = 'domain_sections_db'
self.domain_scenario_index_tag = 'scenario_idx'
self.domain_scenario_index_right_tag = 'scenario_idx_right'
self.domain_scenario_index_left_tag = 'scenario_idx_left'
self.domain_scenario_weight_right_tag = 'scenario_weight_right'
self.domain_scenario_weight_left_tag = 'scenario_weight_left'
self.domain_scenario_discharge_tag = 'discharge_value'
self.domain_scenario_type_tag = 'type_value'
self.domain_scenario_time_tag = 'time'
self.domain_scenario_n_tag = 'scenario_n'
self.domain_scenario_attrs_tag = 'scenario_attrs'
self.domain_scenario_index_cmp_tag = 'scenario_idx_cmp'
self.domain_scenario_discharge_cmp_tag = 'discharge_value_cmp'
self.domain_scenario_h_cmp_tag = 'h_value_cmp'
self.domain_scenario_area_tag = "mappa_aree_new"
self.domain_scenario_grid_x_tag = "new_x"
self.domain_scenario_grid_y_tag = "new_y"
self.domain_scenario_hazard_name = 'mappa_h'
self.domain_scenario_hazard_format = np.float32
self.domain_scenario_hazard_scale_factor = 1
self.domain_scenario_hazard_units = 'm'
self.domain_name_tag = 'domain_name'
self.var_name_time = 'time'
self.var_name_discharge = 'discharge'
self.var_name_water_level = 'water_level'
self.var_name_type = 'type'
self.dframe_columns = [self.domain_scenario_index_cmp_tag, self.domain_scenario_discharge_cmp_tag,
self.domain_scenario_h_cmp_tag]
self.correction_discharge_factor = 1.16
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define hazard file
def define_file_hazard(self, folder_name_raw, file_name_raw, domain_name, section_tr):
template_tags = self.alg_template_tags
template_values_step = {'domain_name': domain_name, 'tr': self.format_tr.format(section_tr)}
folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values_step)
file_name_def = fill_tags2string(file_name_raw, template_tags, template_values_step)
path_name_def = os.path.join(folder_name_def, file_name_def)
return path_name_def
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define scenarios tr
def define_tr_scenario(self, tr_min, tr_max, tr_freq=1):
scenario_tr_raw = np.arange(tr_min, tr_max + 1, tr_freq).tolist()
scenario_tr_def = []
for scenario_step in scenario_tr_raw:
scenario_tmp = self.format_tr.format(scenario_step)
scenario_tr_def.append(scenario_tmp)
return scenario_tr_def
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute tr for evaluating scenario
def compute_scenario_tr(self, section_discharge_idx, section_discharge_times, section_discharge_values,
section_discharge_name, section_scenario_tr_min=1, section_scenario_tr_max=500):
if not isinstance(section_discharge_values, list):
section_discharge_values = [section_discharge_values]
if not isinstance(section_discharge_times, list):
section_discharge_times = [section_discharge_times]
if section_discharge_idx > 0.0:
section_scenario_trs = []
section_scenario_trs_right, section_scenario_trs_left = [], []
section_scenario_weights_right, section_scenario_weights_left = [], []
section_scenario_tr_check, section_scenario_tr_right_check, section_scenario_tr_left_check = [], [], []
for section_discharge_id, (section_discharge_time, section_discharge_value) in enumerate(
zip(section_discharge_times, section_discharge_values)):
if section_discharge_value >= 0.0:
if section_discharge_value >= section_discharge_idx * self.correction_discharge_factor:
section_scenario_tr_rounded, \
section_scenario_tr_right, section_scenario_tr_left, \
section_scenario_weight_right, section_scenario_weight_left = cmp_tr_exp(
section_discharge_idx, section_discharge_value)
else:
section_scenario_tr_rounded, \
section_scenario_tr_right, section_scenario_tr_left, \
section_scenario_weight_right, section_scenario_weight_left = cmp_tr_linear(
section_discharge_idx, section_discharge_value,
section_discharge_factor=self.correction_discharge_factor)
section_scenario_tr = int(section_scenario_tr_rounded)
section_scenario_tr_right = int(section_scenario_tr_right)
section_scenario_tr_left = int(section_scenario_tr_left)
if section_scenario_tr < section_scenario_tr_min:
section_scenario_tr = section_scenario_tr_min
section_scenario_tr_check.append(section_discharge_time.strftime(time_format_algorithm))
elif section_scenario_tr > section_scenario_tr_max:
log_stream.error(' ===> At time "' + section_discharge_time.strftime(time_format_algorithm) +
'" find the "tr ' + str(section_scenario_tr) +
'" greater then "tr_max ' + str(section_scenario_tr_max) + '"')
raise NotImplementedError('Case not implemented yet')
if section_scenario_tr_right < section_scenario_tr_min:
section_scenario_tr_right_check.append(section_discharge_time.strftime(time_format_algorithm))
section_scenario_tr_right = section_scenario_tr_min
section_scenario_tr_left = section_scenario_tr_min
section_scenario_weight_right = 0.5
section_scenario_weight_left = 0.5
elif section_scenario_tr_left > section_scenario_tr_max:
log_stream.error(' ===> At time "' + section_discharge_time.strftime(time_format_algorithm) +
'" find the "tr_left ' + str(section_scenario_tr_left) +
'" greater then "tr_max ' + str(section_scenario_tr_max) + '"')
raise NotImplementedError('Case not implemented yet')
else:
section_scenario_tr = np.nan
section_scenario_tr_right, section_scenario_tr_left = np.nan, np.nan
section_scenario_weight_right, section_scenario_weight_left = np.nan, np.nan
section_scenario_trs.append(section_scenario_tr)
section_scenario_trs_right.append(section_scenario_tr_right)
section_scenario_trs_left.append(section_scenario_tr_left)
section_scenario_weights_right.append(section_scenario_weight_right)
section_scenario_weights_left.append(section_scenario_weight_left)
if section_scenario_tr_check:
section_scenario_tr_str = ', '.join(section_scenario_tr_check)
log_stream.warning(' ===> At times "' + section_scenario_tr_str +
'" found the "tr" less then "tr_min ' + str(section_scenario_tr_min) + '"')
log_stream.warning(' ===> Set the "tr" equal to "tr_min"')
if section_scenario_tr_right_check:
section_scenario_tr_str = ', '.join(section_scenario_tr_right_check)
log_stream.warning(' ===> At times "' + section_scenario_tr_str +
'" found the "tr_right" less then "tr_min ' + str(section_scenario_tr_min) + '"')
log_stream.warning(' ===> Set the "tr_right" equal to "tr_min"')
else:
section_scenario_trs = [np.nan] * section_discharge_values.__len__()
section_scenario_trs_right = [np.nan] * section_discharge_values.__len__()
section_scenario_trs_left = [np.nan] * section_discharge_values.__len__()
section_scenario_weights_right = [np.nan] * section_discharge_values.__len__()
section_scenario_weights_left = [np.nan] * section_discharge_values.__len__()
return section_scenario_trs, section_scenario_trs_right, section_scenario_trs_left, \
section_scenario_weights_right, section_scenario_weights_left
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute discharge for evaluating scenario
@staticmethod
def compute_scenario_discharge(dframe_discharge, dframe_type, analysis_freq=None):
if analysis_freq == 'max_period':
reference_value, time_value, occurrence_value, discharge_value, type_value = [], [], [], [], []
time_max_value = dframe_discharge.idxmax()
time_max_idx = dframe_discharge.index.get_loc(time_max_value)
reference_value.append(time_max_idx)
time_value.append(time_max_value)
discharge_value.append(dframe_discharge[time_max_idx])
type_value.append(dframe_type[time_max_idx])
occurrence_value.append(1)
elif analysis_freq == 'all_period':
reference_value, time_value, occurrence_value, discharge_value, type_value = [], [], [], [], []
for id_step, (time_step, discharge_step, type_step) in enumerate(zip(
dframe_discharge.index, dframe_discharge.values, dframe_type.values)):
if not np.isnan(discharge_step):
reference_value.append(id_step)
time_value.append(time_step)
discharge_value.append(discharge_step)
type_value.append(type_step)
occurrence_value.append(1)
else:
log_stream.error(' ===> Analysis frequency to compute discharge for evaluating scenario is not defined')
raise NotImplementedError('Method not implemented yet')
return reference_value, time_value, discharge_value, type_value, occurrence_value
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to define hazard file
def define_file_scenario(self, time_run, folder_name_raw, file_name_raw, domain_list=None,
file_type='string', time_step=None):
template_tags = self.alg_template_tags
if time_step is None:
time_step = time_run
if domain_list is None:
domain_list = self.domain_name_list
if not isinstance(domain_list, list):
domain_list = [domain_list]
file_path_dict = {}
for domain_name in domain_list:
template_values_step = {'domain_name': domain_name,
'destination_sub_path_time_scenario_plot': time_run,
'destination_sub_path_time_scenario_data': time_run,
'ancillary_sub_path_time_scenario': time_run,
'destination_datetime_scenario_plot': time_step,
'destination_datetime_scenario_data': time_step,
'ancillary_datetime_scenario': time_step
}
folder_name_def = fill_tags2string(folder_name_raw, template_tags, template_values_step)
file_name_def = fill_tags2string(file_name_raw, template_tags, template_values_step)
path_name_def = os.path.join(folder_name_def, file_name_def)
file_path_dict[domain_name] = path_name_def
if file_type == 'string':
_, file_path_list = reduce_dict_2_lists(file_path_dict)
if len(file_path_list) == 1:
file_path_obj = file_path_list[0]
else:
log_stream.error(' ===> File format is not supported')
raise NotImplementedError('File multi-band not implemented yet')
elif file_type == 'dictionary':
file_path_obj = deepcopy(file_path_dict)
else:
log_stream.error(' ===> File type mode is not supported')
raise NotImplementedError('File multi-band not implemented yet')
return file_path_obj
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to dump scenario map
def dump_scenario_map(self, scenario_map_collection, scenario_info_collection, scenario_dframe_collection):
time_run = self.time_run
time_now = self.time_now
time_now_string = time_now.strftime(time_format_algorithm)
geo_data_collection = self.geo_data_collection
scenario_description_collection = self.scenario_description_collection
log_stream.info(' ---> Dump scenario time-series, maps and info [' + time_run.strftime(time_format_algorithm) +
'] ... ')
for domain_name_step in self.domain_name_list:
log_stream.info(' ----> Domain "' + domain_name_step + '" ... ')
domain_geo_collection = geo_data_collection[domain_name_step]
domain_info_collection = scenario_info_collection[domain_name_step]
domain_map_collection = scenario_map_collection[domain_name_step]
domain_dframe_collection = scenario_dframe_collection[domain_name_step]
domain_description_collection = scenario_description_collection[domain_name_step]
if not domain_map_collection:
domain_map_collection = None
if domain_map_collection is not None:
log_stream.info(' -----> Time-Series section ... ')
log_stream.info(' ------> Prepare file data ... ')
file_path_scenario_data_ts = self.define_file_scenario(
time_now, self.folder_name_scenario_data_ts, self.file_name_scenario_data_ts,
domain_name_step, file_type='string', time_step=time_now)
if self.flag_cleaning_data_scenario:
if os.path.exists(file_path_scenario_data_ts):
os.remove(file_path_scenario_data_ts)
domain_ts_data = prepare_file_ts(domain_description_collection, domain_dframe_collection,
domain_info_collection)
log_stream.info(' ------> Prepare file data ... DONE')
# Save information in ts file
folder_name_scenario_data_ts, file_name_scenario_data_ts = os.path.split(
file_path_scenario_data_ts)
make_folder(folder_name_scenario_data_ts)
log_stream.info(' ------> Save file ts ' + file_name_scenario_data_ts + ' ... ')
if self.save_status_scenario_data_ts:
if not os.path.exists(file_path_scenario_data_ts):
save_file_ts(file_path_scenario_data_ts, domain_ts_data)
log_stream.info(' -------> Save file ts ' + file_name_scenario_data_ts + ' ... DONE')
else:
log_stream.info(' ------> Save file ts ' + file_name_scenario_data_ts + ' ... PREVIOUSLY SAVED')
else:
log_stream.info(' ------> Save file ts ' + file_name_scenario_data_ts +
' ... SKIPPED. Save method is deactivated')
log_stream.info(' ------> Save file ts ' + file_name_scenario_data_ts + ' ... DONE')
log_stream.info(' -----> Time-Series section ... DONE')
log_stream.info(' -----> Maps section ... ')
for domain_map_time, domain_map_file_ancillary in sorted(domain_map_collection.items()):
log_stream.info(' ------> Time step "' + domain_map_time.strftime(time_format_algorithm) + '" ... ')
log_stream.info(' -------> Prepare file data ... ')
if domain_map_file_ancillary.endswith('tiff') or \
domain_map_file_ancillary.endswith('tif'):
domain_map_data = read_file_tiff(domain_map_file_ancillary)
# DEBUG START
# plt.figure()
# plt.imshow(domain_map_data)
# plt.colorbar()
# plt.clim(0, 8)
# plt.show()
# DEBUG END
elif domain_map_file_ancillary.endswith('workspace'):
domain_map_data = read_obj(domain_map_file_ancillary)
else:
log_stream.error(' ===> Read selected method is not supported.')
raise NotImplementedError('Case not implemented yet')
file_path_scenario_plot_info = self.define_file_scenario(
time_now, self.folder_name_scenario_plot_info, self.file_name_scenario_plot_info,
domain_name_step, file_type='string', time_step=domain_map_time)
file_path_scenario_plot_tiff = self.define_file_scenario(
time_now, self.folder_name_scenario_plot_tiff, self.file_name_scenario_plot_tiff,
domain_name_step, file_type='string', time_step=domain_map_time)
file_path_scenario_plot_png = self.define_file_scenario(
time_now, self.folder_name_scenario_plot_png, self.file_name_scenario_plot_png,
domain_name_step, file_type='string', time_step=domain_map_time)
if self.flag_cleaning_plot_scenario:
if os.path.exists(file_path_scenario_plot_info):
os.remove(file_path_scenario_plot_info)
if os.path.exists(file_path_scenario_plot_tiff):
os.remove(file_path_scenario_plot_tiff)
if os.path.exists(file_path_scenario_plot_png):
os.remove(file_path_scenario_plot_png)
domain_geo_data = domain_geo_collection[self.domain_scenario_area_tag]
domain_geo_x = domain_geo_collection[self.domain_scenario_grid_x_tag]
domain_geo_y = domain_geo_collection[self.domain_scenario_grid_y_tag]
time_step_string = domain_map_time.strftime(time_format_algorithm)
section_info_collection = {}
for domain_info_key, domain_info_fields in domain_info_collection.items():
if domain_info_fields is not None:
if domain_info_key in domain_description_collection:
if self.domain_scenario_attrs_tag in list(domain_info_fields.keys()):
section_info_attrs = domain_info_fields[self.domain_scenario_attrs_tag]
else:
log_stream.warning(' ===> Section attributes for "' + domain_info_key +
'" are undefined due to time-series discharge datasets.')
section_info_attrs = {}
domain_info_dframe = pd.DataFrame(domain_info_fields, index=domain_info_fields['time'])
if not domain_info_dframe[domain_info_dframe.index.isin([domain_map_time])].empty:
section_info_fields = domain_info_dframe[domain_info_dframe.index.isin([domain_map_time])].to_dict('records')[0]
else:
section_info_fields = {}
log_stream.warning(' ===> Section information for "' + domain_info_key +
'" are undefined due to time-series discharge datasets.')
if isinstance(section_info_fields, dict) and isinstance(section_info_attrs, dict):
section_info_fields = {**section_info_fields, **section_info_attrs}
else:
log_stream.warning(' ===> Section information and attributes for "' + domain_info_key +
'" are undefined due to time-series discharge datasets.')
section_info_fields = {}
if section_info_fields is not None:
for section_info_key, section_info_value in section_info_fields.items():
if isinstance(section_info_value, pd.Timestamp):
section_tmp_value = section_info_value.strftime(time_format_algorithm)
section_info_fields[section_info_key] = section_tmp_value
elif isinstance(section_info_value, list):
section_tmp_value = ','.join(str(elem) for elem in section_info_value)
section_info_fields[section_info_key] = section_tmp_value
elif isinstance(section_info_value, bool):
section_tmp_value = str(section_info_value)
section_info_fields[section_info_key] = section_tmp_value
section_info_collection[domain_info_key] = section_info_fields
section_info_collection['scenario_name'] = domain_name_step
section_info_collection['scenario_time_now'] = time_now_string
section_info_collection['scenario_time_step'] = time_step_string
log_stream.info(' -------> Prepare file data ... DONE')
# Save information in json file
folder_name_scenario_plot_info, file_name_scenario_plot_info = os.path.split(
file_path_scenario_plot_info)
make_folder(folder_name_scenario_plot_info)
log_stream.info(' -------> Save file json ' + file_name_scenario_plot_info + ' ... ')
if self.save_status_scenario_plot_info:
if not os.path.exists(file_path_scenario_plot_info):
save_file_info(file_path_scenario_plot_info, section_info_collection)
log_stream.info(' -------> Save file json ' + file_name_scenario_plot_info +
' ... DONE')
else:
log_stream.info(' -------> Save file json ' + file_name_scenario_plot_info +
' ... PREVIOUSLY SAVED')
else:
log_stream.info(' -------> Save file json ' + file_name_scenario_plot_info +
' ... SKIPPED. Save method is deactivated')
# Save information in png file
folder_name_scenario_plot_png, file_name_scenario_plot_png = os.path.split(
file_path_scenario_plot_png)
make_folder(folder_name_scenario_plot_png)
log_stream.info(' -------> Save file png ' + file_name_scenario_plot_png + ' ... ')
if self.save_status_scenario_plot_png:
if not os.path.exists(file_path_scenario_plot_png):
save_file_png(file_path_scenario_plot_png,
domain_map_data, domain_geo_x, domain_geo_y,
scenario_name=domain_name_step,
scenario_time_now_string=time_now_string,
scenario_time_step_string=time_step_string,
fig_color_map_type=None, fig_dpi=150)
log_stream.info(' -------> Save file png ' + file_name_scenario_plot_png +
' ... DONE')
else:
log_stream.info(' -------> Save file png ' + file_name_scenario_plot_png +
' ... PREVIOUSLY SAVED')
else:
log_stream.info(' -------> Save file png ' + file_name_scenario_plot_png +
' ... SKIPPED. Save method is deactivated')
# Save information in tiff file
folder_name_scenario_plot_tiff, file_name_scenario_plot_tiff = os.path.split(
file_path_scenario_plot_tiff)
make_folder(folder_name_scenario_plot_tiff)
log_stream.info(' -------> Save file tiff ' + file_name_scenario_plot_tiff + ' ... ')
if self.save_status_scenario_plot_tiff:
if not os.path.exists(file_path_scenario_plot_tiff):
save_file_tiff(file_path_scenario_plot_tiff,
domain_map_data, domain_geo_x, domain_geo_y,
file_epsg_code='EPSG:32632')
log_stream.info(' -------> Save file tiff ' + file_name_scenario_plot_tiff +
' ... DONE')
else:
log_stream.info(' -------> Save file tiff ' + file_name_scenario_plot_tiff +
' ... PREVIOUSLY SAVED')
else:
log_stream.info(' -------> Save file tiff ' + file_name_scenario_plot_tiff +
' ... SKIPPED. Save method is deactivated')
log_stream.info(' ------> Time step "' + domain_map_time.strftime(time_format_algorithm) +
'" ... DONE')
log_stream.info(' -----> Maps section ... DONE')
log_stream.info(' ----> Domain "' + domain_name_step + '" ... DONE')
else:
log_stream.info(' ----> Domain "' + domain_name_step + '" ... SKIPPED. Datasets are empty')
log_stream.info(' ---> Dump scenario time-series, maps and info [' + time_run.strftime(time_format_algorithm) +
'] ... DONE')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to compute scenario map
def compute_scenario_map(self, scenario_data_collection):
time = self.time_run
geo_data_collection = self.geo_data_collection
file_path_scenario_anc_collections_file = self.file_path_scenario_anc_file
log_stream.info(' ---> Compute scenario maps [' + time.strftime(time_format_algorithm) + '] ... ')
scenario_map_collection = {}
scenario_dframe_collection = {}
for domain_name_step in self.domain_name_list:
log_stream.info(' ----> Domain "' + domain_name_step + '" ... ')
domain_geo_collection = geo_data_collection[domain_name_step]
domain_scenario_data = scenario_data_collection[domain_name_step]
domain_section_db = geo_data_collection[domain_name_step][self.domain_sections_db_tag]
file_path_scenario_anc_domain_file = file_path_scenario_anc_collections_file[domain_name_step]
if domain_scenario_data is not None:
domain_scenario_merged_default = np.zeros(
[domain_geo_collection[self.domain_scenario_area_tag].shape[0],
domain_geo_collection[self.domain_scenario_area_tag].shape[1]])
domain_scenario_merged_default[:, :] = np.nan
domain_geo_data = domain_geo_collection[self.domain_scenario_area_tag]
domain_geo_x = domain_geo_collection[self.domain_scenario_grid_x_tag]
domain_geo_y = domain_geo_collection[self.domain_scenario_grid_y_tag]
if self.flag_cleaning_anc_scenario_info or self.flag_cleaning_anc_scenario_file or \
self.flag_cleaning_anc_scenario_map:
file_path_scenario_tmp = []
if os.path.exists(file_path_scenario_anc_domain_file):
file_path_scenario_obj = read_obj(file_path_scenario_anc_domain_file)
file_path_scenario_tmp = list(file_path_scenario_obj['file_path'].values())
os.remove(file_path_scenario_anc_domain_file)
for file_path_step in file_path_scenario_tmp:
if os.path.exists(file_path_step):
os.remove(file_path_step)
if not os.path.exists(file_path_scenario_anc_domain_file):
section_dframe_collections = {}
file_path_scenarios_collections = {}
for section_scenario_id, \
(section_scenario_key, section_scenario_data) in enumerate(domain_scenario_data.items()):
# DEBUG
# section_scenario_key = 'Entella_Carasco'
# section_scenario_data = domain_scenario_data[section_scenario_key]
log_stream.info(' -----> Section "' + section_scenario_key + '" ... ')
section_db_data = None
for domain_section_key, domain_section_fields in domain_section_db.items():
if domain_section_fields['description'] == section_scenario_key:
section_db_data = domain_section_fields.copy()
break
if section_db_data is not None:
section_db_n = section_db_data['n']
section_db_description = section_db_data['description']
section_db_name_outlet = section_db_data['name_point_outlet']
section_db_name_downstream = section_db_data['name_point_downstream']
section_db_name_upstream = section_db_data['name_point_upstream']
section_db_name_obs = section_db_data['name_point_obs']
section_db_idx_terrain = section_db_data['idx_data_terrain']
if 'idx_data_hydraulic' in list(section_db_data.keys()):
section_db_idx_hydraulic = section_db_data['idx_data_hydraulic']
else:
section_db_idx_hydraulic = None
assert section_db_description == section_scenario_key
if section_scenario_data is not None:
section_scenario_discharges = section_scenario_data[self.domain_scenario_discharge_tag]
section_scenario_trs_cmp = section_scenario_data[self.domain_scenario_index_tag]
section_scenario_trs_right = section_scenario_data[self.domain_scenario_index_right_tag]
section_scenario_trs_left = section_scenario_data[self.domain_scenario_index_left_tag]
section_scenario_wgs_right = section_scenario_data[self.domain_scenario_weight_right_tag]
section_scenario_wgs_left = section_scenario_data[self.domain_scenario_weight_left_tag]
if self.scenario_analysis == 'max_period':
section_scenario_times = [time]
elif self.scenario_analysis == 'all_period':
section_scenario_times = section_scenario_data[self.domain_scenario_time_tag]
else:
log_stream.error(' ===> Scenario frequency value "' + str(self.scenario_analysis) +
'" is not allowed')
log_stream.info(' -----> Section "' + section_scenario_key + '" ... FAILED')
raise NotImplementedError('Case not implemented yet')
section_scenario_time_list, section_scenario_discharge_list = [], []
section_scenario_tr_list, section_scenario_h_list = [], []
section_scenario_tr_list_right, section_scenario_tr_list_left = [], []
for id_scenario_time, section_scenario_time in enumerate(section_scenario_times):
section_scenario_discharge = section_scenario_discharges[id_scenario_time]
section_scenario_tr_cmp = section_scenario_trs_cmp[id_scenario_time]
section_scenario_tr_right = section_scenario_trs_right[id_scenario_time]
section_scenario_tr_left = section_scenario_trs_left[id_scenario_time]
section_scenario_wg_right = section_scenario_wgs_right[id_scenario_time]
section_scenario_wg_left = section_scenario_wgs_left[id_scenario_time]
log_stream.info(' ------> Time step "' + section_scenario_time.strftime(
time_format_algorithm) + '" ... ')
file_path_scenario_anc_map = self.define_file_scenario(
time, self.folder_name_scenario_anc_map,
self.file_name_scenario_anc_map,
domain_list=domain_name_step, file_type='string',
time_step=section_scenario_time)
'''
if self.scenario_analysis is None:
if section_scenario_tr_cmp not in list(file_path_scenarios_collections.keys()):
if self.flag_cleaning_ancillary:
if os.path.exists(flag_cleaning_plot_scenario_maps):
os.remove(file_path_scenario_ancillary)
elif (section_scenario_id == 0) and (self.scenario_analysis == 'ALL'):
if section_scenario_tr_cmp not in list(file_path_scenarios_collections.keys()):
if self.flag_cleaning_ancillary:
if os.path.exists(file_path_scenario_ancillary):
os.remove(file_path_scenario_ancillary)
'''
# Find tr value
if np.isnan(section_scenario_tr_cmp):
section_scenario_tr_other = get_dict_value(
domain_scenario_data, self.domain_scenario_index_tag, [])
section_scenario_tr_check = int(np.nanmax(section_scenario_tr_other))
section_scenario_tr_right_check = deepcopy(section_scenario_tr_check)
section_scenario_tr_left_check = deepcopy(section_scenario_tr_check)
section_scenario_wg_right_check, section_scenario_wg_left_check = 1.0, 1.0
log_stream.warning(' ===> Scenario tr is undefined for section "' +
section_db_description + '". Check the datasets')
else:
section_scenario_tr_check = section_scenario_tr_cmp
section_scenario_tr_right_check = section_scenario_tr_right
section_scenario_tr_left_check = section_scenario_tr_left
section_scenario_wg_right_check = section_scenario_wg_right
section_scenario_wg_left_check = section_scenario_wg_left
# Compare tr value with tr min
if section_scenario_tr_check >= self.tr_min:
section_area_idx = np.argwhere(
domain_geo_collection[self.domain_scenario_area_tag] == section_db_n)
section_scenario_tr_select = max(self.tr_min,
min(self.tr_max, section_scenario_tr_check))
section_scenario_tr_right_select = max(self.tr_min,
min(self.tr_max, section_scenario_tr_right_check))
section_scenario_tr_left_select = max(self.tr_min,
min(self.tr_max, section_scenario_tr_left_check))
if section_scenario_tr_right_select > section_scenario_tr_left_select:
log_stream.error(' ===> Left scenario must be greater than or equal to the right scenario')
raise RuntimeError('Check the evaluation of scenario boundaries')
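# scenario_tiling selects how the hazard map for this section is built:
# 'rounded' loads the single hazard map of the rounded (clamped) return period,
# 'weighted' loads the right/left hazard maps and blends them with the computed weights.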
if self.scenario_tiling == 'rounded':
file_path_hazard = self.define_file_hazard(
self.folder_name_hazard, self.file_name_hazard,
domain_name_step, section_scenario_tr_select)
file_data_hazard = read_file_hazard(
file_path_hazard,
file_vars=[self.domain_scenario_hazard_name],
file_format=[self.domain_scenario_hazard_format],
file_scale_factor=[self.domain_scenario_hazard_scale_factor])
if file_data_hazard is not None:
file_data_h = file_data_hazard[self.domain_scenario_hazard_name]
else:
log_stream.error(' ===> File hazard "' + file_path_hazard +
'" is not available. Check your folder.')
raise FileNotFoundError('File not found.')
elif self.scenario_tiling == 'weighted':
file_path_hazard_right = self.define_file_hazard(
self.folder_name_hazard, self.file_name_hazard,
domain_name_step, section_scenario_tr_right_select)
file_path_hazard_left = self.define_file_hazard(
self.folder_name_hazard, self.file_name_hazard,
domain_name_step, section_scenario_tr_left_select)
file_data_hazard_right = read_file_hazard(
file_path_hazard_right,
file_vars=[self.domain_scenario_hazard_name],
file_format=[self.domain_scenario_hazard_format],
file_scale_factor=[self.domain_scenario_hazard_scale_factor])
file_data_hazard_left = read_file_hazard(
file_path_hazard_left,
file_vars=[self.domain_scenario_hazard_name],
file_format=[self.domain_scenario_hazard_format],
file_scale_factor=[self.domain_scenario_hazard_scale_factor])
if file_data_hazard_right is not None:
file_data_h_right = file_data_hazard_right[self.domain_scenario_hazard_name]
else:
log_stream.error(' ===> File hazard "' + file_data_hazard_right +
'" is not available. Check your folder.')
raise FileNotFoundError('File not found.')
if file_data_hazard_left is not None:
file_data_h_left = file_data_hazard_left[self.domain_scenario_hazard_name]
else:
log_stream.error(' ===> File hazard "' + file_data_hazard_right +
'" is not available. Check your folder.')
raise FileNotFoundError('File not found.')
# DEBUG
# test_left = file_data_h_left[section_db_idx_hydraulic[0], section_db_idx_hydraulic[1]]
# test_right = file_data_h_right[section_db_idx_hydraulic[0], section_db_idx_hydraulic[1]]
file_data_h = (file_data_h_right * section_scenario_wg_right_check +
file_data_h_left * section_scenario_wg_left_check)
file_data_h[file_data_h < 0.0] = 0.0
section_scenario_tr_select = (
section_scenario_tr_right_select * section_scenario_wg_right_check +
section_scenario_tr_left_select * section_scenario_wg_left_check)
else:
log_stream.error(' ===> Scenario tiling method "' +
self.scenario_tiling + '" is not supported')
raise NotImplementedError('Case not implemented yet')
idx_x = section_area_idx[:, 0]
idx_y = section_area_idx[:, 1]
if section_db_idx_hydraulic is not None:
section_scenario_h_select = file_data_h[section_db_idx_hydraulic[0], section_db_idx_hydraulic[1]]
else:
section_scenario_h_select = np.nan
if not os.path.exists(file_path_scenario_anc_map):
domain_scenario_merged_filled = deepcopy(domain_scenario_merged_default)
file_data_h_scenario = file_data_h[idx_x, idx_y]
domain_scenario_merged_filled[idx_x, idx_y] = file_data_h_scenario
domain_scenario_merged_filled[domain_scenario_merged_filled <= 0] = np.nan
folder_name, file_name = os.path.split(file_path_scenario_anc_map)
make_folder(folder_name)
if file_path_scenario_anc_map.endswith('tiff') or \
file_path_scenario_anc_map.endswith('tif'):
save_file_tiff(file_path_scenario_anc_map,
domain_scenario_merged_filled,
domain_geo_x, domain_geo_y,
file_epsg_code='EPSG:32632')
elif file_path_scenario_anc_map.endswith('workspace'):
write_obj(file_path_scenario_anc_map, domain_scenario_merged_filled)
else:
log_stream.error(' ===> Save selected method is not supported.')
raise NotImplementedError('Case not implemented yet')
else:
if file_path_scenario_anc_map.endswith('tiff') or \
file_path_scenario_anc_map.endswith('tif'):
domain_scenario_merged_tmp = read_file_tiff(file_path_scenario_anc_map)
elif file_path_scenario_anc_map.endswith('workspace'):
domain_scenario_merged_tmp = read_obj(file_path_scenario_anc_map)
else:
log_stream.error(' ===> Read selected method is not supported.')
raise NotImplementedError('Case not implemented yet')
file_data_h_scenario = file_data_h[idx_x, idx_y]
domain_scenario_merged_tmp[idx_x, idx_y] = file_data_h_scenario
domain_scenario_merged_tmp[domain_scenario_merged_tmp <= 0] = np.nan
if os.path.exists(file_path_scenario_anc_map):
os.remove(file_path_scenario_anc_map)
if file_path_scenario_anc_map.endswith('tiff') or \
file_path_scenario_anc_map.endswith('tif'):
save_file_tiff(file_path_scenario_anc_map,
domain_scenario_merged_tmp,
domain_geo_x, domain_geo_y,
file_epsg_code='EPSG:32632')
elif file_path_scenario_anc_map.endswith('workspace'):
write_obj(file_path_scenario_anc_map, domain_scenario_merged_tmp)
else:
log_stream.error(' ===> Save selected method is not supported.')
raise NotImplementedError('Case not implemented yet')
if section_scenario_time not in list(file_path_scenarios_collections.keys()):
file_path_scenarios_collections[section_scenario_time] = file_path_scenario_anc_map
log_stream.info(' ------> Time step "' +
section_scenario_time.strftime(time_format_algorithm) +
'" ... DONE')
else:
section_scenario_tr_select = np.nan
section_scenario_h_select = np.nan
section_scenario_tr_right_select = np.nan
section_scenario_tr_left_select = np.nan
log_stream.info(
' ------> Time step "' +
section_scenario_time.strftime(time_format_algorithm) +
'" ... SKIPPED. Scenarios threshold is less then minimum threshold')
section_scenario_time_list.append(section_scenario_time)
section_scenario_discharge_list.append(section_scenario_discharge)
section_scenario_tr_list.append(section_scenario_tr_select)
section_scenario_h_list.append(section_scenario_h_select)
section_scenario_tr_list_right.append(section_scenario_tr_right_select)
section_scenario_tr_list_left.append(section_scenario_tr_left_select)
section_scenario_tmp = list(zip(
section_scenario_time_list, section_scenario_tr_list,
section_scenario_discharge_list, section_scenario_h_list))
# Store time-series of tr, q and h (for a selected point)
dframe_data = {self.dframe_columns[0]: section_scenario_tr_list,
self.dframe_columns[1]: section_scenario_discharge_list,
self.dframe_columns[2]: section_scenario_h_list}
dframe_index = section_scenario_time_list
dframe_summary =
|
pd.DataFrame(index=dframe_index, data=dframe_data)
|
pandas.DataFrame
|
import os
import sys
from numpy.core.numeric import zeros_like
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use('seaborn-poster')
# I hate this too but it allows everything to use the same helper functions.
sys.path.insert(0, 'model')
from helper_functions import read_in_NNDSS
from Reff_constants import *
from params import alpha_start_date, delta_start_date, omicron_start_date, vaccination_start_date
def read_in_posterior(date):
"""
read in samples from posterior from inference
"""
df = pd.read_hdf("results/soc_mob_posterior"+date+".h5", key='samples')
return df
def read_in_google(Aus_only=True, local=False, moving=False):
"""
Read in the Google data set
"""
if local:
if type(local) == str:
df = pd.read_csv(local, parse_dates=['date'])
elif type(local) == bool:
local = 'data/Global_Mobility_Report.csv'
df = pd.read_csv(local, parse_dates=['date'])
else:
# Download straight from the web
df = pd.read_csv('https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv', parse_dates=['date'])
# Make it save automatically.
df.to_csv('data/Global_Mobility_Report.csv', index=False)
if Aus_only:
df = df.loc[df.country_region_code == 'AU']
# Change state column to state initials
df['state'] = df.sub_region_1.map(lambda x: states_initials[x] if not pd.isna(x) else 'AUS')
df = df.loc[df.sub_region_2.isna()]
if moving:
# generate moving average columns in reverse
df = df.sort_values(by='date')
mov_values = []
for val in value_vars:
mov_values.append(val[:-29]+'_7days')
df[mov_values[-1]] = df.groupby(['state'])[val].transform(lambda x: x[::-1].rolling(7, 1).mean()[::-1])  # minimum number of periods is 1
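# the double reversal makes this a forward-looking 7-day mean: the value at day t averages days t..t+6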
# minimum of 7 days for std, forward fill the rest
df[mov_values[-1]+'_std'] = df.groupby(['state'])[val].transform(lambda x: x[::-1].rolling(7, 7).std()[::-1])
# fill final values as std doesn't work with single value
df[mov_values[-1]+'_std'] = df.groupby('state')[mov_values[-1]+'_std'].fillna(method='ffill')
# show latest date
print("Latest date in Google indices " + str(df.date.values[-1]))
return df
def predict_plot(samples, df, third_date_range=None, split=True, gamma=False, moving=True, grocery=True,
delta=1.0, R=2.2, sigma=1, md_arg=None,
ban='2020-03-16', single=False, var=None,
rho=None, R_I=None, winter=False, prop=None, masks_prop=None,
second_phase=False, third_phase=False,
vaccination=None, third_states=None, prop_omicron_to_delta=None):
"""
Produce posterior predictive plots for all states
"""
from scipy.special import expit
from params import third_start_date
os.makedirs("results/third_wave_fit/", exist_ok=True)
value_vars = ['retail_and_recreation_percent_change_from_baseline',
'grocery_and_pharmacy_percent_change_from_baseline',
'parks_percent_change_from_baseline',
'transit_stations_percent_change_from_baseline',
'workplaces_percent_change_from_baseline',
'residential_percent_change_from_baseline']
value_vars.remove('residential_percent_change_from_baseline')
if not grocery:
value_vars.remove('grocery_and_pharmacy_percent_change_from_baseline')
if moving:
value_vars = [val[:-29]+'_7days' for val in value_vars]
if single:
# Single state
fig, ax = plt.subplots(figsize=(12, 9))
df_state = df
post_values = samples[[
'beta['+str(i)+']' for i in range(1, 1+len(value_vars))]].sample(df_state.shape[0]).values.T
if split:
# split model with parameters pre and post policy
df1 = df_state.loc[df_state.date <= ban]
df2 = df_state.loc[df_state.date > ban]
X1 = df1[value_vars]/100 # N by K
X2 = df2[value_vars]/100
# N by K times (Nsamples by K )^T = N by N
logodds = X1 @ post_values
if md_arg is None:
post_alphas = samples[['alpha['+str(i)+']'
for i in range(1, 1+len(value_vars))]].sample(df_state.shape[0]).values.T
logodds = np.append(logodds, X2 @ (post_values + post_alphas), axis=0)
else:
# take right size of md
md = np.random.choice(md_arg, size=df_state.shape[0])
# set initial pre ban values of md to 1
md[:logodds.shape[0]] = np.ones(logodds.shape[0])
# make logodds by appending post ban values
logodds = np.append(logodds, X2 @ post_values, axis=0)
else:
X1 = df_state[value_vars]/100
# N by K times (Nsamples by K )^T = N by N
logodds = X1 @ post_values
if gamma:
if type(md) == np.ndarray:
mu_hat = 2 * expit(logodds) * policy * md
else:
mu_hat = 2 * expit(logodds)
if type(delta) == np.ndarray:
delta = np.random.choice(delta, size=df_state.shape[0])
R = np.random.choice(R, size=df_state.shape[0])
R_eff_hat = np.random.gamma(shape=R * mu_hat*delta, scale=1.0/delta)
else:
# Use normal distribution
mu_hat = R * 2 * expit(logodds)
if type(sigma) == pd.Series:
sigma_i = sigma.sample(df_state.shape[0]).values
else:
sigma_i = sigma
# N by N, where rows = datum, column = sample from posterior
R_eff_hat = np.random.normal(mu_hat, sigma_i)
df_hat = pd.DataFrame(R_eff_hat.T)
# plot actual R_eff
ax.plot(df_state.date, df_state['mean'], label='R_eff from Price et al')
ax.fill_between(df_state.date, df_state['bottom'], df_state['top'], color='C0', alpha=0.3)
ax.plot(df_state.date, df_hat.quantile(0.5, axis=0), label='R_eff_hat', color='C1')
ax.fill_between(df_state.date, df_hat.quantile(0.25, axis=0), df_hat.quantile(0.75, axis=0), color='C1', alpha=0.3)
ax.fill_between(df_state.date, df_hat.quantile(0.05, axis=0), df_hat.quantile(0.95, axis=0), color='C1', alpha=0.3)
# grid line at R_eff =1
ax.set_yticks([1], minor=True,)
ax.yaxis.grid(b=True, which='minor', linestyle='dashed', color='grey')
ax.tick_params(axis='x', rotation=90)
else:
# all states
fig, ax = plt.subplots(figsize=(15, 12), ncols=4, nrows=2, sharex=True, sharey=True)
states = sorted(list(states_initials.keys()))
if not third_phase:
states.remove('Northern Territory')
states.remove('Australian Capital Territory')
# no R_eff modelled for these states, skip
# counter for brho_v
pos = 1
for i, state in enumerate(states):
df_state = df.loc[df.sub_region_1 == state]
if second_phase:
df_state = df_state.loc[df_state.is_sec_wave == 1]
elif third_phase:
df_state = df_state.loc[df_state.is_third_wave == 1]
if third_phase:
masks_prop_sim = masks_prop[states_initials[state]].values[:df_state.shape[0]]
samples_sim = samples.sample(1000)
post_values = samples_sim[['bet['+str(i)+']' for i in range(1, 1+len(value_vars))]].values.T
prop_sim = prop[states_initials[state]].values[:df_state.shape[0]]
if split:
# split model with parameters pre and post policy
df1 = df_state.loc[df_state.date <= ban]
df2 = df_state.loc[df_state.date > ban]
X1 = df1[value_vars]/100 # N by K
X2 = df2[value_vars]/100
# N by K times (Nsamples by K )^T = N by N
logodds = X1 @ post_values
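# microdistancing (md) adjustment: depending on md_arg the post-ban effect is a power-law
# term (1+theta_md)^(-prop), a logistic term 2*expit(-theta_md*prop), or posterior 'md' samples;
# when md_arg is None a separate post-ban alpha effect is used instead and md stays 1.
# Pre-ban time steps are always reset to md = 1.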
if md_arg is None:
post_alphas = samples_sim[['alpha['+str(i)+']' for i in range(1, 1+len(value_vars))]].values.T
logodds = np.append(logodds, X2 @ (post_values + post_alphas), axis=0)
md = 1
elif md_arg == 'power':
theta_md = samples_sim.theta_md.values # 1 by samples shape
# each row is a date, column a new sample
theta_md = np.tile(theta_md, (df_state.shape[0], 1))
md = ((1+theta_md).T**(-1 * prop_sim)).T
# set preban md values to 1
md[:logodds.shape[0]] = 1
if third_phase:
theta_masks = samples_sim.theta_masks.values # 1 by samples shape
# each row is a date, column a new sample
theta_masks = np.tile(theta_masks, (df_state.shape[0], 1))
masks = ((1+theta_masks).T**(-1 * masks_prop_sim)).T
# set preban mask values to 1
masks[:logodds.shape[0]] = 1
# make logodds by appending post ban values
logodds = np.append(logodds, X2 @ post_values, axis=0)
elif md_arg == 'logistic':
theta_md = samples_sim.theta_md.values # 1 by samples shape
# each row is a date, column a new sample
theta_md = np.tile(theta_md, (df_state.shape[0], 1))
md = 2*expit(-1*theta_md * prop_sim)
md[:logodds.shape[0]] = 1
# make logodds by appending post ban values
logodds = np.append(logodds, X2 @ post_values, axis=0)
else:
# take right size of md to be N by N
md = np.tile(samples_sim['md'].values,(df_state.shape[0], 1))
# set initial pre ban values of md to 1
md[:logodds.shape[0], :] = 1
# make logodds by appending post ban values
logodds = np.append(logodds, X2 @ post_values, axis=0)
# grab posterior sampled vaccination effects here and multiply by the daily efficacy
if vaccination is not None and states_initials[state] in third_states:
# transposing the vaccination sampled values so that it can be multiplied by the data
# the str(i+1) is required because the state indexing starts at 0
# print(vaccination.loc[states_initials[state]])
idx = (
(vaccination.columns >= third_date_range[states_initials[state]][0]) &
(vaccination.columns <= third_date_range[states_initials[state]][-1])
)
vacc_ts_data = vaccination.loc[states_initials[state]][idx]
third_states_indices = {state: index+1 for (index, state) in enumerate(third_states)}
third_days = {k: v.shape[0] for (k, v) in third_date_range.items()}
third_days_cumulative = np.append([0], np.cumsum([v for v in third_days.values()]))
vax_idx_ranges = {k: range(third_days_cumulative[i], third_days_cumulative[i+1]) for (i, k) in enumerate(third_days.keys())}
third_days_tot = sum(v for v in third_days.values())
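# the sampled vaccine effects are stacked state-by-state over the fitting period,
# so vax_idx_ranges maps each state to its slice of the stacked vacc_effect samples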
# get the sampled vaccination effect (this will be incomplete as it's only over the fitting period)
sampled_vax_effects_all = samples_sim[["vacc_effect[" + str(j) + "]" for j in range(1, third_days_tot+1)]].T
vacc_tmp = sampled_vax_effects_all.iloc[vax_idx_ranges[states_initials[state]],:]
# now we layer in the posterior vaccine multiplier effect which will be a (T, mob_samples) array
if states_initials[state] in third_states:
eta = samples_sim['eta[' + str(third_states_indices[states_initials[state]]) + ']']
r = samples_sim['r[' + str(third_states_indices[states_initials[state]]) + ']']
else:
eta = samples_sim['eta[1]']
r = samples_sim['r[1]']
# get before and after fitting and tile them
vacc_ts_data_before = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index < third_date_range[states_initials[state]][0]]] * eta.shape[0],
axis=1
)
vacc_ts_data_after = pd.concat(
[vacc_ts_data.loc[vacc_ts_data.index > third_date_range[states_initials[state]][-1]]] * eta.shape[0],
axis=1
)
# rename columns for easy merging
vacc_ts_data_before.columns = vacc_tmp.columns
vacc_ts_data_after.columns = vacc_tmp.columns
# merge in order
vacc_ts = pd.concat(
[vacc_ts_data_before, vacc_tmp, vacc_ts_data_after], axis=0, ignore_index=True
)
# reset the index to be the dates for easier information handling
vacc_ts.set_index(vacc_ts_data.index, inplace=True)
vacc_ts = vacc_tmp
third_states_indices = {state: index+1 for (index, state) in enumerate(third_states)}
# From conversations with James and Nic we think the heterogeneity / assortativity was more prominent before late
# August (hence the fixed date)
# in order for this to be correctly applied in the plot, we need to get the start dates after the beginning of
# the third wave data which we determine based off the third_date_range
heterogeneity_delay_start_day = (pd.to_datetime('2021-08-20') - third_date_range[states_initials[state]][0]).days
omicron_start_day = (pd.to_datetime(omicron_start_date) - third_date_range[states_initials[state]][0]).days
omicron_start_day = 0 if omicron_start_day < 0 else omicron_start_day
# this will hold the posterior VE, with adjustment factors
vacc_post = np.zeros_like(vacc_ts)
# loop over days in third wave and apply the appropriate form (i.e. decay or not)
# note that in here we apply the entire sample to the vaccination data to create a days by samples array
# set the full vaccination data as the mean
# vacc_sig = 0.001
# vacc_mu = vacc_sim
# # calculate shape and scale
# a_vacc = vacc_mu*(vacc_mu*(1-vacc_mu)/vacc_sig - 1)
# b_vacc = (1-vacc_mu)*(vacc_mu*(1-vacc_mu)/vacc_sig - 1)
# # sample a noisier version of the vax effect
# vacc_sim_adj = np.random.beta(a_vacc, b_vacc)
# create zero array to fill in with the full vaccine effect model
vacc_post = np.zeros_like(vacc_ts)
days_into_omicron = np.cumsum(np.append([0], [(v >=
|
pd.to_datetime(omicron_start_date)
|
pandas.to_datetime
|
"""
Author: <NAME>
"""
import numpy as np
import pandas as pd
from datetime import datetime
class TrackerFeeder(object):
"""
Feeder for the trackers of the FinanceHub database.
"""
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, fh_ticker):
"""
grabs trackers from the FH database
:param fh_ticker: str or list with the tickers from the database trackers
:return: pandas DataFrame with tickers on the columns
"""
assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
"'tickers' must be a string, list or dict"
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
if type(fh_ticker) is str:
sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
elif type(fh_ticker) is list:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
elif type(fh_ticker) is dict:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
if type(fh_ticker) is dict:
df = df.rename(fh_ticker, axis=1)
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
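# Illustrative usage sketch (assumes `engine` is a SQLAlchemy engine for the FinanceHub
# database; the ticker names below are placeholders, not real database entries):
#   feeder = TrackerFeeder(engine)
#   df = feeder.fetch(['fh_ticker_a', 'fh_ticker_b'])  # DataFrame indexed by time_stamp, one column per ticker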
def fetch_metadata(self):
"""
Returns the full metadata table of the FH trackers, which is useful to do custom filters and look at what
is in the database.
:return: pandas Dataframe
"""
sql_query = 'SELECT * FROM "trackers_description"'
df =
|
pd.read_sql(sql=sql_query, con=self.conn)
|
pandas.read_sql
|
from nbc_analysis.utils.config_utils import get_config
from nbc_analysis.utils.debug_utils import retval
from nbc_analysis.utils.file_utils import init_dir
from pathlib import Path
import pandas as pd
import re
from toolz import first
from itertools import starmap
from functools import partial
def group_files_by_day(indir):
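# collects all '*.csv.gz' aggregate files under indir, extracts the day token from each
# file name, and returns the resulting frame grouped by day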
def parse_name(file):
return re.search(r"(?P<day>\d+)\.csv.gz", file).group('day'), file
reader = map(str, indir.glob('*.csv.gz'))
reader = map(parse_name, reader)
df = pd.DataFrame.from_records(reader, columns=['day', 'agg_file'], index=['day'])
df.sort_index(inplace=True)
return df.groupby(level='day')
def proc_day(day, df, outdir):
print(f">> starting concat for day {day}")
reader = map(pd.read_csv, df.agg_file)
df =
|
pd.concat(reader)
|
pandas.concat
|
import collections
import os
import pickle
import re
from pathlib import Path
from pprint import pprint
from subprocess import CalledProcessError, check_output
import function_parser
import numpy as np
import pandas as pd
from function_parser.language_data import LANGUAGE_METADATA
from function_parser.process import DataProcessor
from function_parser.utils import get_sha, walk
from git import Git, Repo
from tqdm import tqdm
from tree_sitter import Language
import config
np.random.seed(1997)
LANG = "java"
DataProcessor.PARSER.set_language(
Language(
os.path.join(function_parser.__path__[0], "tree-sitter-languages.so"), LANG
)
)
FUNC_PROCESSOR = DataProcessor(
language=LANG, language_parser=LANGUAGE_METADATA[LANG]["language_parser"]
)
def get_definitions(repo_path, commit):
"""
Get the definitions of all methods in the project.
Args:
repo_path (str): Path to the project's repository.
commit (str): Commit hash of the project's repository.
Returns:
df (pd.DataFrame): DataFrame containing the definitions of all methods in the project.
"""
g = Git(str(repo_path))
g.checkout(commit)
indexes = []
files = repo_path.glob(f"**/*.{LANGUAGE_METADATA[LANG]['ext']}")
sha = None
for f in files:
definitions = FUNC_PROCESSOR.get_function_definitions(str(f))
if definitions is None:
continue
nwo, path, functions = definitions
indexes.extend(
(
FUNC_PROCESSOR.extract_function_data(func, nwo, path, sha)
for func in functions
if len(func["function_tokens"]) > 1
)
)
def format_method_path_name(row):
formatted_path = (
row["file_path"].split("main/java/")[-1].split(".")[0].replace("/", ".")
)
method_name = row["code_tokens"][row["code_tokens"].index("(") - 1]
formatted_path_method_name = ":".join([formatted_path, method_name])
return formatted_path_method_name
df =
|
pd.DataFrame(indexes)
|
pandas.DataFrame
|
from my_answers import NeuralNetwork
import unittest
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# rides.head(5)
# rides[:24*10].plot(x='dteday', y='cnt')
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides =
|
pd.concat([rides, dummies], axis=1)
|
pandas.concat
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 8 08:53:30 2019
@author: rhou
"""
import warnings
warnings.filterwarnings("ignore")
import os, sys
import argparse
import matplotlib
matplotlib.use('agg')
import pandas as pd
import numpy as np
try:
import seaborn as sns
except ImportError:
sys.exit('\n\nError: seaborn module is missing, please install it before proceeding.')
try:
import igraph as ig
except ImportError:
sys.exit('\n\nError: igraph module is missing, please install it before proceeding.')
try:
import networkx as nx
except ImportError:
sys.exit('\n\nError: NetworkX module is missing, please install it before proceeding.')
try:
import pygraphviz as pgv
except ImportError:
sys.exit('\n\nError: PyGraphviz module is missing, please install it before proceeding.')
#filter adjacency matrix
def ChooseTopEdges(adjM, keepTopEdge):
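# keeps only the keepTopEdge strongest edges of the adjacency matrix, ranked by absolute
# weight; the filtered matrix is rebuilt starting from an all-zero frame
# (keepTopEdge == 0 means keep the matrix unchanged)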
if keepTopEdge == 0:
return adjM
edgeDict = {'s':[],'t':[],'v':[]}
for idx in adjM.index:
for col in adjM.columns:
edgeDict['s'].append(idx)
edgeDict['t'].append(col)
if adjM.loc[idx,col] <=0:
edgeDict['v'].append((-1.0) * adjM.loc[idx,col])
else:
edgeDict['v'].append(adjM.loc[idx,col])
edgeD = pd.DataFrame(edgeDict).sort_values(by=['v'], ascending=False)
edgeD = edgeD.head(keepTopEdge)
nadjM =
|
pd.DataFrame(0.0, index=adjM.index,columns=adjM.index)
|
pandas.DataFrame
|