prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, plt2MD, pandasDF2MD
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import validate, greater_than, greater_than_or_equal_to
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import pandas as pd
import numpy as np
from sklearn.feature_selection import VarianceThreshold
def variance_filter(table, group_by=None, **params):
check_required_parameters(_variance_filter, params, ['table'])
if group_by is not None:
return _function_by_group(_variance_filter, table, group_by=group_by, **params)
else:
return _variance_filter(table, **params)
def _variance_filter(table, feature_cols, threshold=0.0):
data = table[feature_cols]
selector = VarianceThreshold(threshold=threshold)
selector.fit(data)
remain_label_index = selector.get_support()
output = selector.transform(data)
out_table = pd.DataFrame(output, columns=
|
pd.Series(feature_cols)
|
pandas.Series
|
import datetime
import numpy as np
import pytest
import pytz
import pandas as pd
from pandas import Timedelta, merge_asof, read_csv, to_datetime
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def read_data(self, datapath, name, dedupe=False):
path = datapath("reshape", "merge", "data", name)
x = read_csv(path)
if dedupe:
x = x.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
x.time = to_datetime(x.time)
return x
@pytest.fixture(autouse=True)
def setup_method(self, datapath):
self.trades = self.read_data(datapath, "trades.csv")
self.quotes = self.read_data(datapath, "quotes.csv", dedupe=True)
self.asof = self.read_data(datapath, "asof.csv")
self.tolerance = self.read_data(datapath, "tolerance.csv")
self.allow_exact_matches = self.read_data(datapath, "allow_exact_matches.csv")
self.allow_exact_matches_and_tolerance = self.read_data(
datapath, "allow_exact_matches_and_tolerance.csv"
)
def test_examples1(self):
""" doc-string examples """
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = pd.merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
def test_examples2(self):
""" doc-string examples """
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.048",
"20160525 13:30:00.049",
"20160525 13:30:00.072",
"20160525 13:30:00.075",
]
),
"ticker": [
"GOOG",
"MSFT",
"MSFT",
"MSFT",
"GOOG",
"AAPL",
"GOOG",
"MSFT",
],
"bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
"ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
},
columns=["time", "ticker", "bid", "ask"],
)
pd.merge_asof(trades, quotes, on="time", by="ticker")
pd.merge_asof(
trades, quotes, on="time", by="ticker", tolerance=pd.Timedelta("2ms")
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.038",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
"20160525 13:30:00.048",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
"ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
},
columns=["time", "ticker", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(
trades,
quotes,
on="time",
by="ticker",
tolerance=pd.Timedelta("10ms"),
allow_exact_matches=False,
)
tm.assert_frame_equal(result, expected)
def test_examples3(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = pd.merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
""" doc-string examples """
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self):
expected = self.asof
trades = self.trades.copy()
trades.ticker = trades.ticker.astype("category")
quotes = self.quotes.copy()
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self):
# GH14253
expected = self.asof
trades = self.trades.set_index("time")
quotes = self.quotes
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right"s index, oddly
expected.index = result.index
# time column appears after left"s columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self):
expected = self.asof
trades = self.trades
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self):
expected = self.asof.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_on(self):
def index_by_time_then_arbitrary_new_level(df):
df = df.set_index("time")
df = pd.concat([df, df], keys=["f1", "f2"], names=["f", "time"])
return df.reorder_levels([1, 0]).sort_index()
trades = index_by_time_then_arbitrary_new_level(self.trades)
quotes = index_by_time_then_arbitrary_new_level(self.quotes)
expected = index_by_time_then_arbitrary_new_level(self.asof)
result = merge_asof(trades, quotes, on="time", by=["ticker"])
tm.assert_frame_equal(result, expected)
def test_on_and_index(self):
# "on" parameter and index together is prohibited
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
with pytest.raises(MergeError):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self):
# GH14253
expected = self.asof
trades = self.trades
quotes = self.quotes
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self):
expected = self.asof
trades = self.trades
quotes = self.quotes
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
def test_multiby(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_heterogeneous_types(self):
# GH13936
trades = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
},
columns=["time", "ticker", "exch", "price", "quantity"],
)
quotes = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.030",
"20160525 13:30:00.041",
"20160525 13:30:00.045",
"20160525 13:30:00.049",
]
),
"ticker": [1, 0, 0, 0, 1, 2],
"exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
"bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
"ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
},
columns=["time", "ticker", "exch", "bid", "ask"],
)
expected = pd.DataFrame(
{
"time": pd.to_datetime(
[
"20160525 13:30:00.023",
"20160525 13:30:00.023",
"20160525 13:30:00.046",
"20160525 13:30:00.048",
"20160525 13:30:00.050",
]
),
"ticker": [0, 0, 1, 1, 2],
"exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
"price": [51.95, 51.95, 720.77, 720.92, 98.00],
"quantity": [75, 155, 100, 100, 100],
"bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
"ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
},
columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
)
result = pd.merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
tm.assert_frame_equal(result, expected)
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a"],
[pd.to_datetime("20160602"), 2, "a"],
[pd.to_datetime("20160603"), 1, "b"],
[pd.to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[pd.to_datetime("20160502"), 1, "a", 1.0],
[pd.to_datetime("20160502"), 2, "a", 2.0],
[pd.to_datetime("20160503"), 1, "b", 3.0],
[pd.to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[pd.to_datetime("20160602"), 1, "a", 1.0],
[pd.to_datetime("20160602"), 2, "a", 2.0],
[pd.to_datetime("20160603"), 1, "b", 3.0],
[pd.to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = pd.merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(MergeError):
pd.merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
def test_basic2(self, datapath):
expected = self.read_data(datapath, "asof2.csv")
trades = self.read_data(datapath, "trades2.csv")
quotes = self.read_data(datapath, "quotes2.csv", dedupe=True)
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_no_by(self):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(self.asof)
trades = f(self.trades)
quotes = f(self.quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath):
q = (
pd.concat([self.quotes, self.quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(self.trades, q, on="time", by="ticker")
expected = self.read_data(datapath, "asof.csv")
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self):
trades = self.trades
quotes = self.quotes
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self):
trades = self.trades
quotes = self.quotes
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
# incompat
with pytest.raises(MergeError):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
# invalid negative
with pytest.raises(MergeError):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self):
trades = self.trades.sort_values("time", ascending=False)
quotes = self.quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
trades = self.trades.sort_values("time")
assert trades.time.is_monotonic
assert not quotes.time.is_monotonic
with pytest.raises(ValueError):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = self.quotes.sort_values("time")
assert trades.time.is_monotonic
assert quotes.time.is_monotonic
# ok, though has dupes
merge_asof(trades, self.quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["pd.Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance):
trades = self.trades
quotes = self.quotes
result = merge_asof(trades, quotes, on="time", by="ticker", tolerance=tolerance)
expected = self.tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value2": list("ABCDE"),
}
)
result = pd.merge_asof(left, right, on="date", tolerance=pd.Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=pd.to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=pytz.timezone("UTC"),
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = pd.merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self):
# GH 15135
expected = self.tolerance.set_index("time")
trades = self.trades.set_index("time")
quotes = self.quotes.set_index("time")
result = pd.merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=pd.Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches(self):
result = merge_asof(
self.trades, self.quotes, on="time", by="ticker", allow_exact_matches=False
)
expected = self.allow_exact_matches
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = pd.merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance(self):
result = merge_asof(
self.trades,
self.quotes,
on="time",
by="ticker",
tolerance=Timedelta("100ms"),
allow_exact_matches=False,
)
expected = self.allow_exact_matches_and_tolerance
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": pd.to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": pd.to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = pd.merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": pd.to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = pd.merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time":
|
pd.to_datetime(["2016-07-15 13:30:00.030"])
|
pandas.to_datetime
|
# plots.py
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
def randomWalk():
"""Creates plot of symmetric one-D random lattice walk"""
N = 1000 #length of random walk
s = np.zeros(N)
s[1:] = np.random.binomial(1, .5, size=(N-1,))*2-1 #coin flips
s = pd.Series(s)
s = s.cumsum() #random walk
s.plot()
plt.ylim([-50,50])
plt.savefig("randomWalk.pdf")
#randomWalk()
def biasedRandomWalk():
"""Create plots of biased random walk of different lengths."""
N = 100 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(211)
s1.plot()
N = 10000 #length of random walk
s1 = np.zeros(N)
s1[1:] = np.random.binomial(1, .51, size=(N-1,))*2-1 #coin flips
s1 = pd.Series(s1)
s1 = s1.cumsum() #random walk
plt.subplot(212)
s1.plot()
plt.savefig("biasedRandomWalk.pdf")
#biasedRandomWalk()
def dfPlot():
"""Plot columns of DataFrame against each other."""
xvals = pd.Series(np.sqrt(np.arange(1000)))
yvals = pd.Series(np.random.randn(1000).cumsum())
df = pd.DataFrame({'xvals':xvals,'yvals':yvals}) #Put in in a dataframe
df.plot(x='xvals',y='yvals') #Plot, specifying which column is to be used to x and y values.
plt.savefig("dfPlot.pdf")
#dfPlot()
def histogram():
"""Creat histogram of columns in DataFrame."""
col1 = pd.Series(np.random.randn(1000)) #normal distribution
col2 = pd.Series(np.random.gamma(5, size=1000)) #gamma distribution
df =
|
pd.DataFrame({'normal':col1, 'gamma':col2})
|
pandas.DataFrame
|
"""
Python module for scripting helper functions
"""
from glob import glob
import configparser
import os
import re
import json
import pandas as pd
def set_parameter(parameters_filepath, section_name, parameter_name, parameter_value):
"""
set the specified parameter to the specified value and write back to the *.ini file
:param parameters_filepath: filename (absolute path)
:param section_name: section name under which parameter is
:param parameter_name: parameter name
:param parameter_value: target value
:return:
"""
conf_parameters = configparser.ConfigParser()
conf_parameters.read(parameters_filepath, encoding="UTF-8")
conf_parameters.set(section_name, parameter_name, parameter_value)
with open(parameters_filepath, 'w') as config_file:
conf_parameters.write(config_file)
def df_2_tex(df, filepath):
"""
writes a df to tex file
:param df: dataframe to be converted into tex table
:param filepath: tex filepath
:return:
"""
tex_prefix = r"""\documentclass{standalone}
\usepackage{booktabs}
\begin{document}"""
tex_suffix = r"""\end{document}"""
with open(filepath, "w") as f:
f.write(tex_prefix)
f.write(df.to_latex(float_format="%.1f"))
f.write(tex_suffix)
def file_rank(filename):
"""
assign a rank to the file can be used for sorting
:param filename:
:return:
"""
order = {'natural': 0, 'rfgsm_k': 2, 'dfgsm_k': 1, 'bga_k': 3, 'bca_k': 4, 'grosse': 5}
training_method = re.search("\[training:.*\|", filename).group(0)[:-1].split(':')[-1]
evasion_method = re.search("\|evasion:.*\]", filename).group(0)[:-1].split(':')[-1]
return order[training_method] * 6 + order[evasion_method]
def create_tex_tables(filespath="../result_files"):
"""
Create TeX tables from the results populated under `result_files`
which is generated from running `framework.py`
The tex file is stored in `result_files`
:param filespath: the path where the results in json are stored and the tex files are created
:return:
"""
# read the bscn files
bscn_files = sorted(glob(os.path.join(filespath, "*.txt")), key=lambda x: file_rank(x))
# read the results file
files = sorted(glob(os.path.join(filespath, "*.json")), key=lambda x: file_rank(x))
# dataframes
bscn_df = pd.DataFrame()
evasion_df = pd.DataFrame()
accuracy_df = pd.DataFrame()
afp_df = pd.DataFrame()
bon_accuracy_df = pd.DataFrame()
mal_accuracy_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
import numpy as np
import numpy.testing as npt
import pandas as pd
import unittest
from scyjava import config, jimport, to_java
config.endpoints.append('org.scijava:scijava-table')
config.add_option('-Djava.awt.headless=true')
def assert_same_table(table, df):
assert len(table.toArray()) == df.shape[1]
assert len(table.toArray()[0].toArray()) == df.shape[0]
for i, column in enumerate(table.toArray()):
npt.assert_array_almost_equal(df.iloc[:, i].values, column.toArray())
assert table.getColumnHeader(i) == df.columns[i]
class TestPandas(unittest.TestCase):
def testPandasToTable(self):
# Float table.
columns = ["header1", "header2", "header3", "header4", "header5"]
array = np.random.random(size=(7, 5))
df = pd.DataFrame(array, columns=columns)
table = to_java(df)
assert_same_table(table, df)
assert type(table) == jimport('org.scijava.table.DefaultFloatTable')
# Int table.
columns = ["header1", "header2", "header3", "header4", "header5"]
array = np.random.random(size=(7, 5)) * 100
array = array.astype('int')
df = pd.DataFrame(array, columns=columns)
table = to_java(df)
assert_same_table(table, df)
assert type(table) == jimport('org.scijava.table.DefaultIntTable')
# Bool table.
columns = ["header1", "header2", "header3", "header4", "header5"]
array = np.random.random(size=(7, 5)) > 0.5
df = pd.DataFrame(array, columns=columns)
table = to_java(df)
assert_same_table(table, df)
assert type(table) == jimport('org.scijava.table.DefaultBoolTable')
# Mixed table.
columns = ["header1", "header2", "header3", "header4", "header5"]
array = np.random.random(size=(7, 5))
df =
|
pd.DataFrame(array, columns=columns)
|
pandas.DataFrame
|
"""
Tests the usecols functionality during parsing
for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.tslib import Timestamp
from pandas import DataFrame, Index
import pandas._testing as tm
_msg_validate_usecols_arg = (
"'usecols' must either be list-like "
"of all strings, all unicode, all "
"integers or a callable."
)
_msg_validate_usecols_names = (
"Usecols do not match columns, columns expected but not found: {0}"
)
def test_raise_on_mixed_dtype_usecols(all_parsers):
# See gh-12678
data = """a,b,c
1000,2000,3000
4000,5000,6000
"""
usecols = [0, "b", 2]
parser = all_parsers
with pytest.raises(ValueError, match=_msg_validate_usecols_arg):
parser.read_csv(StringIO(data), usecols=usecols)
@pytest.mark.parametrize("usecols", [(1, 2), ("b", "c")])
def test_usecols(all_parsers, usecols):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_with_names(all_parsers):
data = """\
a,b,c
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
names = ["foo", "bar"]
result = parser.read_csv(StringIO(data), names=names, usecols=[1, 2], header=0)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=names)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"names,usecols", [(["b", "c"], [1, 2]), (["a", "b", "c"], ["b", "c"])]
)
def test_usecols_relative_to_names(all_parsers, names, usecols):
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(StringIO(data), names=names, header=None, usecols=usecols)
expected = DataFrame([[2, 3], [5, 6], [8, 9], [11, 12]], columns=["b", "c"])
tm.assert_frame_equal(result, expected)
def test_usecols_relative_to_names2(all_parsers):
# see gh-5766
data = """\
1,2,3
4,5,6
7,8,9
10,11,12"""
parser = all_parsers
result = parser.read_csv(
StringIO(data), names=["a", "b"], header=None, usecols=[0, 1]
)
expected =
|
DataFrame([[1, 2], [4, 5], [7, 8], [10, 11]], columns=["a", "b"])
|
pandas.DataFrame
|
from pathlib import Path
import numpy as np
import pandas as pd
import time
import pickle
import json
import h5py
import sys
import traceback
import warnings
import Analyses.spike_functions as spike_funcs
import Analyses.spatial_functions as spatial_funcs
import Analyses.open_field_functions as of_funcs
import Pre_Processing.pre_process_functions as pp_funcs
from Utils.robust_stats import robust_zscore
import Analyses.tree_maze_functions as tmf
import Analyses.plot_functions as pf
import scipy.signal as signal
"""
Classes in this file will have several retrieval processes to acquire the required information for each
subject and session.
:class SubjectInfo
-> class that takes a subject as an input. contains general information about what processes have been performed,
clusters, and importantly all the session paths. The contents of this class are saved as a pickle in the results
folder.
:class SubjectSessionInfo
-> children class of SubjectInfo, takes session as an input. This class contains session specific retrieval methods
Low level things, like reading position (eg. 'get_track_dat') are self contained in the class. Higher level
functions like 'get_spikes', are outsourced to the appropriate submodules in the Analyses folder.
If it is the first time calling a retrieval method, the call will save the contents according the paths variable
Otherwise the contents will be loaded from existing data, as opposed to recalculation. Exception is the get_time
method, as this is easily regenerated on each call.
"""
class SummaryInfo:
subjects = ['Li', 'Ne', 'Cl', 'Al', 'Ca', 'Mi']
min_n_units = 1
min_n_trials = 50 # task criteria
min_pct_coverage = 0.75 # open field criteria
invalid_sessions = ['Li_OF_080718']
figure_names = [f"f{ii}" for ii in range(5)]
_root_paths = dict(GD=Path("/home/alexgonzalez/google-drive/TreeMazeProject/"),
BigPC=Path("/mnt/Data_HD2T/TreeMazeProject/"))
def __init__(self, data_root='BigPC'):
self.main_path = self._root_paths[data_root]
self.paths = self._get_paths()
self.unit_table = self.get_unit_table()
self.analyses_table = self.get_analyses_table()
self.valid_track_table = self.get_track_validity_table()
self.sessions_by_subject = {}
self.tasks_by_subject = {}
for s in self.subjects:
self.sessions_by_subject[s] = self.unit_table[self.unit_table.subject == s].session.unique()
self.tasks_by_subject[s] = self.unit_table[self.unit_table.subject == s].task.unique()
def run_analyses(self, task='all', which='all', verbose=False, overwrite=False):
interrupt_flag = False
for subject in self.subjects:
if not interrupt_flag:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
try:
if task == 'all':
pass
elif task not in session:
continue
else:
pass
if verbose:
t0 = time.time()
print(f'Processing Session {session}')
session_info = SubjectSessionInfo(subject, session)
session_info.run_analyses(overwrite=overwrite, which=which, verbose=verbose)
if verbose:
t1 = time.time()
print(f"Session Processing Completed: {t1 - t0:0.2f}s")
print()
else:
print(".", end='')
except KeyboardInterrupt:
interrupt_flag = True
break
except ValueError:
pass
except FileNotFoundError:
pass
except:
if verbose:
traceback.print_exc(file=sys.stdout)
pass
if verbose:
print(f"Subject {subject} Analyses Completed.")
def get_analyses_table(self, overwrite=False):
if not self.paths['analyses_table'].exists() or overwrite:
analyses_table = pd.DataFrame()
for subject in self.subjects:
analyses_table = analyses_table.append(SubjectInfo(subject).get_sessions_analyses())
analyses_table.to_csv(self.paths['analyses_table'])
else:
analyses_table = pd.read_csv(self.paths['analyses_table'], index_col=0)
self.analyses_table = analyses_table
return analyses_table
def get_track_validity_table(self, overwrite=False):
if not self.paths['valid_track_table'].exists() or overwrite:
valid_track_table = pd.DataFrame()
for subject in self.subjects:
valid_track_table = valid_track_table.append(SubjectInfo(subject).valid_track_table)
valid_track_table.to_csv(self.paths['valid_track_table'])
else:
valid_track_table = pd.read_csv(self.paths['valid_track_table'], index_col=0)
return valid_track_table
def get_behav_perf(self, overwrite=False):
if not self.paths['behavior'].exists() or overwrite:
perf = pd.DataFrame()
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if 'T3' in session:
try:
session_info = SubjectSessionInfo(subject, session)
b = session_info.get_event_behavior()
sp = b.get_session_perf()
sp['session'] = session
sp['task'] = session_info.task
sp['subject'] = subject
sp['n_units'] = session_info.n_units
sp['n_cells'] = session_info.n_cells
sp['n_mua'] = session_info.n_mua
perf = pd.concat((perf, sp), ignore_index=True)
except:
pass
perf.to_csv(self.paths['behavior'])
else:
perf = pd.read_csv(self.paths['behavior'], index_col=0)
return perf
def _get_paths(self, root_path=None):
if root_path is None:
results_path = self.main_path / 'Results_Summary'
figures_path = self.main_path / 'Figures'
else:
results_path = root_path / 'Results_Summary'
figures_path = root_path / 'Figures'
paths = dict(
analyses_table=results_path / 'analyses_table.csv',
valid_track_table=results_path / 'valid_track_table.csv',
behavior=results_path / 'behavior_session_perf.csv',
units=results_path / 'all_units_table.csv',
of_metric_scores=results_path / 'of_metric_scores_summary_table.csv',
of_model_scores=results_path / 'of_model_scores_summary_table_agg.csv',
zone_rates_comps=results_path / 'zone_rates_comps_summary_table.csv',
zone_rates_remap=results_path / 'zone_rates_remap_summary_table.csv',
bal_conds_seg_rates=results_path / 'bal_conds_seg_rates_summary_table.csv',
)
paths['results'] = results_path
paths['figures'] = figures_path
return paths
def update_paths(self):
for subject in self.subjects:
_ = SubjectInfo(subject, overwrite=True)
def get_zone_rates_comps(self, overwrite=False):
"""
Aggregates tables across sessions and adds unit information.
Note, that overwrite only overwrites the aggregate table and does not perform the analysis on each session.
:param overwrite:
:return:
pandas data frame with n_units as index
"""
if not self.paths['zone_rates_comps'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.zone_rates_comps == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_zone_rates_remapping()
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
zone_rates = zone_rates.append(session_table)
zone_rates = zone_rates.reset_index(drop=True)
zone_rates.to_csv(self.paths['zone_rates_comps'])
else:
zone_rates = pd.read_csv(self.paths['zone_rates_comps'], index_col=0)
return zone_rates
def get_bal_conds_seg_rates(self, segment_type='bigseg', overwrite=False):
fn = self.paths['bal_conds_seg_rates']
if segment_type != 'bigseg':
name = fn.name.split('.')
name2 = name[0] + segment_type + name[1]
fn = fn.parent / name2
if not fn.exists() or overwrite:
sessions_validity = self.get_track_validity_table()
seg_rates = pd.DataFrame()
unit_count = 0
valid_sessions = list(self.analyses_table.loc[self.analyses_table.bal_conds_seg_rates == True].index)
for subject in self.subjects:
subject_info = SubjectInfo(subject)
for session in subject_info.sessions:
if session in valid_sessions:
session_info = SubjectSessionInfo(subject, session)
n_session_units = session_info.n_units
if n_session_units > 0:
try:
session_zone_rate_comp_table = session_info.get_bal_conds_seg_rates(segment_type=segment_type)
comp_table_columns = session_zone_rate_comp_table.columns
session_table = pd.DataFrame(index=np.arange(n_session_units),
columns=['unit_id', 'subject', 'session',
'session_pct_cov', 'session_valid',
'session_unit_id', 'unit_type', 'tt', 'tt_cl',
'cl_name'])
session_table['session'] = session
session_table['subject'] = session_info.subject
session_table['session_unit_id'] = np.arange(n_session_units)
session_table['unit_id'] = np.arange(n_session_units) + unit_count
session_table['unit_type'] = [v[0] for k, v in session_info.cluster_ids.items()]
session_table['tt'] = [v[1] for k, v in session_info.cluster_ids.items()]
session_table['tt_cl'] = [v[2] for k, v in session_info.cluster_ids.items()]
if session in sessions_validity.columns:
session_table['session_pct_cov'] = sessions_validity[session]
session_table['session_valid'] = 1
else:
session_table['session_pct_cov'] = 0
session_table['session_valid'] = 0
cl_names = []
for k, v in session_info.cluster_ids.items():
tt = v[1]
cl = v[2]
depth = subject_info.sessions_tt_positions.loc[session, f"tt_{tt}"]
cl_name = f"{session}-tt{tt}_d{depth}_cl{cl}"
cl_names.append(cl_name)
session_table['cl_name'] = cl_names
unit_count += n_session_units
session_table = session_table.join(session_zone_rate_comp_table)
except:
print(f'Error Processing Session {session}')
traceback.print_exc(file=sys.stdout)
continue
seg_rates = seg_rates.append(session_table)
seg_rates = seg_rates.reset_index(drop=True)
seg_rates.to_csv(fn)
else:
seg_rates = pd.read_csv(fn, index_col=0)
return seg_rates
def get_zone_rates_remap(self, overwrite=False):
if not self.paths['zone_rates_remap'].exists() or overwrite:
sessions_validity = self.get_track_validity_table()
zone_rates =
|
pd.DataFrame()
|
pandas.DataFrame
|
import os.path
import datetime as dt
from typing import List
import logging
import pandas as pd
import numpy as np
logging.getLogger().setLevel(logging.INFO)
class Journey:
def __init__(self, start_time, end_time, origin, destination, charge, note):
self.start_time = start_time
self.end_time = end_time
self.origin = origin
self.destination = destination
self.charge = charge
self.note = note
if self.end_time:
self.journey_time = self.end_time - self.start_time
else:
self.journey_time = None
def __repr__(self):
return 'Journey(start_time={!r}, end_time={!r}, origin={!r}, destination={!r}, journey_time={!r}, ' \
'charge={!r}, note={!r})'.format(self.start_time, self.end_time, self.origin, self.destination,
self.journey_time, self.charge, self.note)
def __lt__(self, other):
if not (self.journey_time or other.journey_time):
return False
return self.journey_time < other.journey_time
def __gt__(self, other):
if not (self.journey_time or other.journey_time):
return False
return self.journey_time > other.journey_time
class JourneyHistory:
def __init__(self, history_files: List[str] = None, history_dir: str = None):
self.raw_dfs = {}
if history_dir is not None:
if history_files is not None:
raise ValueError('Only provide either the list of journey history files or the directory containing the'
' history files, but not both.')
assert os.path.exists(history_dir), 'Journey history directory does not exist: {}'.format(history_dir)
self.df = self.load_history_from_dir(history_dir)
else:
assert isinstance(history_files, list), '`history_files` must be a list of filepaths'
self.df = self.load_history_from_file_list(history_files)
def __len__(self):
""" Number of total rows of the dataframe """
if self.df is None:
return 0
return len(self.df)
def __repr__(self):
return 'JourneyHistory(journeys={})'.format(len(self))
def __getitem__(self, item):
if item >= len(self):
raise IndexError('Index out of range of number of DataFrame rows')
return self.df.iloc[item]
def load_history_from_dir(self, history_dir: str) -> pd.DataFrame:
# List of filepaths for all CSVs in `history_dir`
csv_filepaths = [os.path.join(history_dir, f) for f in os.listdir(history_dir) if f.endswith('.csv')]
return self.load_history_from_file_list(csv_filepaths)
def load_history_from_file_list(self, history_files: List[str]) -> pd.DataFrame:
""" For a given list of filename, load the CSVs into one dataframe.
Columns: ['Start Time', 'End Time', 'Duration', 'From', 'To', 'Bus Route', 'Charge', 'Note']
"""
individual_history_dfs = []
# Use to validate CSV file as a journey history file
expected_columns = ['Date', 'Start Time', 'End Time', 'Journey/Action', 'Charge', 'Credit', 'Balance', 'Note']
for csv_file in history_files:
df = pd.read_csv(csv_file)
if df.columns.tolist() == expected_columns: # having the correct headers is the condition for a valid file
self.raw_dfs[csv_file] = df
individual_history_dfs.append(df)
if len(individual_history_dfs) == 0:
logging.info('No valid CSV files')
return pd.DataFrame()
# Join all the individual dfs into one big df
combined_df = pd.concat(individual_history_dfs)
return self._clean_raw_df(combined_df)
def _clean_raw_df(self, combined_df: pd.DataFrame) -> pd.DataFrame:
df = combined_df
# Initialise empty `Bus Journeys` columns that will be filled
df['Bus Route'] = np.nan
df = df.reset_index().drop('index', axis=1)
# Processing of dates and times (mainly combining)
df['Start Time'] = pd.to_datetime(df['Date'] + ' ' + df['Start Time'])
df['End Time'] = pd.to_datetime(df['Date'] + ' ' + df['End Time'])
# Add 1 day to journeys whose end times go into the next day
df.loc[df['End Time'] < df['Start Time'], 'End Time'] += dt.timedelta(days=1)
# Calculate durations
df['Duration'] = df['End Time'] - df['Start Time']
# Get the origin and destination columns
df['From'] = df['Journey/Action'].str.split(' to ').str[0]
df['To'] = df['Journey/Action'].str.split(' to ').str[1]
# Filter out unwanted rows
# todo - find better way of chaining these ORs
df = df[~(df['To'].astype(str).str.contains("No touch-out") |
df['Journey/Action'].str.contains('Oyster helpline refund') |
df['Journey/Action'].str.contains('Auto top-up') |
df['Journey/Action'].str.contains('Topped-up on touch in'))]
# Bus journeys
bus_journeys = df.loc[df['Journey/Action'].str.contains('Bus journey')]
bus_journeys['Bus Route'] = bus_journeys['Journey/Action'].str.extract(r'(\w\d+)')
bus_journeys['Journey/Action'] = np.nan
bus_journeys['From'] = np.nan
# Merging the processed dataframe subset for bus journeys back into the main dataframe
df.loc[bus_journeys.index] = bus_journeys
final_columns = ['Start Time', 'End Time', 'Duration', 'From', 'To', 'Bus Route', 'Charge', 'Note']
df = df[final_columns].sort_values('Start Time').reset_index().drop('index', axis=1)
self.df = df
return self.df
@staticmethod
def _df_row_to_journey(row: pd.Series) -> Journey:
start_time = row['Start Time'].to_pydatetime() if not pd.isnull(row['Start Time']) else None
end_time = row['End Time'].to_pydatetime() if not pd.isnull(row['End Time']) else None
origin = row['From'] if not pd.isnull(row['From']) else None
destination = row['To'] if not pd.isnull(row['To']) else None
charge = row['Charge'] if not pd.isnull(row['Charge']) else None
note = row['Note'] if not
|
pd.isnull(row['Note'])
|
pandas.isnull
|
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PIL import Image
from datetime import datetime, timedelta
import argparse
import bisect
from pandas.plotting import register_matplotlib_converters
time_fmt = mdates.DateFormatter('%m/%d/%y')
tick_spacing = 24
ticker_locator = mdates.HourLocator(interval=tick_spacing)
font_size = 18
def plot_cmp(est_csv_path, gt_csv_path):
est_csv = pd.read_csv(est_csv_path)
gt_csv =
|
pd.read_csv(gt_csv_path)
|
pandas.read_csv
|
#!/usr/bin/env python3
import json
import sys
import pandas as pd
import mysql.connector
import datetime
import matplotlib.pyplot as plt
import re
def mysql_quote(x):
"""Quote the string x using MySQL quoting rules. If x is the empty string,
return "NULL". Probably not safe against maliciously formed strings, but
our input is fixed and from a basically trustable source."""
if not x:
return "NULL"
x = x.replace("\\", "\\\\")
x = x.replace("'", "''")
x = x.replace("\n", "\\n")
return "'{}'".format(x)
if len(sys.argv) < 2:
print("Please specify JSON file as first argument", file=sys.stderr)
quit()
m = re.search(r"(\d\d\d\d-\d\d-\d\d)\.json", sys.argv[1])
if not m:
print("File name must end with YYYY-MM-DD.json", file=sys.stderr)
quit()
date_observed = m.group(1)
with open(sys.argv[1], "r") as f:
j = json.load(f)
df =
|
pd.DataFrame(j["SpotPriceHistory"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
#依据酒店id获取评论数据
import pandas as pd
import urllib.request as req
import json
import sys
import time
import random
import re
from openpyxl import load_workbook
ids = []
wb2 = load_workbook('/code/data/hotelList.xlsx')
ws = wb2['Sheet']
row_max = ws.max_row
con_max = ws.max_column
for j in ws.rows:
n=j[0]
print(n.value)
ids.append(n.value)
print(len(ids))
print(sys.getdefaultencoding())
class MTCommentsCrawler:
def __init__(self, productId=None, limit=10, start=0):
self.productId = productId # 酒店ID
self.limit = limit # 一次获取多少条评论
self.start = start
self.locationLink = 'https://ihotel.meituan.com/api/v2/comments/biz/reviewList'
self.paramValue = {
'referid': self.productId,
'limit': self.limit,
'start': self.start,
}
self.locationUrl = None
# 构造url调用参数
def paramDict2Str(self, params):
str1 = ''
for p, v in params.items():
str1 = str1 + p + '=' + str(v) + '&'
return str1
# 构造调用url
def concatLinkParam(self):
self.locationUrl = self.locationLink + '?' + self.paramDict2Str(
self.paramValue) + 'filterid=800&querytype=1&utm_medium=touch&version_name=999.9'
# print(self.locationUrl)
# 伪装浏览器进行数据请求
def requestMethodPage(self):
# 伪装浏览器 ,打开网站
headers = {
'Connection': 'Keep-Alive',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Mobile Safari/537.36',
'Referer': 'https://i.meituan.com/awp/h5/hotel-v2/feedback/index.html?poiId=%d' % (self.productId),
'Host': 'ihotel.meituan.com'
}
url = self.locationUrl
print('url : ', url)
reqs = req.Request(url, headers=headers)
return reqs
# 读取服务端获取的数据,返回json格式
def showListPage(self):
request_m = self.requestMethodPage()
conn = req.urlopen(request_m)
return_str = conn.read().decode('utf-8')
return json.loads(return_str)
# 将评论数据保存到本地
def save_csv(self, df):
# 保存文件
df.to_csv(path_or_buf='mt_%d.csv' % self.productId, sep=',', header=True, index=True,
encoding='utf_8_sig')
# 移除换行符,#,表情
def remove_emoji(self, text):
text = text.replace('\n', '')
text = text.replace('#', '')
try:
highpoints = re.compile(u'[\U00010000-\U0010ffff]')
except re.error:
highpoints = re.compile(u'[\uD800-\uDBFF][\uDC00-\uDFFF]')
return highpoints.sub(u'', text)
# 抓取数据
def crawler(self):
# 把抓取的数据存入CSV文件,设置时间间隔,以免被屏蔽
json_info = self.showListPage()
tmp_list = []
tmp_text_list = []
# print(json_info)
Data = json_info['Data']
comments = Data['List']
for com in comments:
text = self.remove_emoji(com['Content'])
star = com['Star']
tmp_list.append([star, text])
tmp_text_list.append([text])
df =
|
pd.DataFrame(tmp_list, columns=['star', 'content'])
|
pandas.DataFrame
|
import re
from pathlib import Path
import pandas as pd
from tqdm import tqdm
def walk(path):
for path in Path(path).iterdir():
if path.is_dir():
yield from walk(path)
continue
yield path.resolve()
def parse_cluster_info(entry):
# read data
txt = entry.read_text()
# gather stats
date = re.search(f'Started at (?P<date>.+)', txt).group('date')
successful = 'Successfully completed.' in txt
try:
status = re.search(
re.compile(
r'-+.*-+\n\n(?P<status>.+)\n\nResource usage summary:',
re.MULTILINE | re.DOTALL,
),
txt,
).group('status')
except AttributeError:
status = pd.NA
try:
duration = float(
re.search(r'Run time : +(?P<duration>[\d.]+) sec.', txt).group('duration')
)
avg_memory = float(
re.search(r'Average Memory : +(?P<avg_memory>[\d.]+) MB', txt).group(
'avg_memory'
)
)
max_memory = float(
re.search(r'Max Memory : +(?P<max_memory>[\d.]+) MB', txt).group(
'max_memory'
)
)
except AttributeError:
duration = pd.NA
avg_memory = pd.NA
max_memory = pd.NA
return {
'date': date,
'successful': successful,
'duration': duration,
'avg_memory': avg_memory,
'max_memory': max_memory,
'status': status,
}
def parse_execution_info(entry):
# read data
txt = entry.read_text()
# gather stats
rules = ','.join(sorted(set(re.findall('rule (.*):', txt))))
wildcards = ','.join(sorted(set(re.findall('wildcards: (.*)', txt))))
return {
'rules': rules,
'wildcards': wildcards,
}
def main(root, fname_out):
tmp = []
for child in tqdm(walk(root)):
if child.suffix != '.out':
continue
# extract info
try:
cluster_info = parse_cluster_info(child)
execution_info = parse_execution_info(child.with_suffix('.err'))
except Exception as e:
cluster_info = {}
execution_info = {}
# print(child, e)
# save results
tmp.append(
{
'filename': child.name,
**cluster_info,
**execution_info,
}
)
df =
|
pd.DataFrame(tmp)
|
pandas.DataFrame
|
# coding: UTF-8
import numpy as np
from numpy import nan as npNaN
import pandas as pd
from pandas import Series
import talib
from src import verify_series
def first(l=[]):
return l[0]
def last(l=[]):
return l[-1]
def highest(source, period):
return pd.Series(source).rolling(period).max().values
def lowest(source, period):
return pd.Series(source).rolling(period).min().values
def med_price(high, low):
"""
also found in tradingview as hl2 source
"""
return talib.MEDPRICE(high, low)
def avg_price(open, high, low, close):
"""
also found in tradingview as ohlc4 source
"""
return talib.AVGPRICE(open, high, low, close)
def typ_price(high,low,close):
"""
typical price, also found in tradingview as hlc3 source
"""
return talib.TYPPRICE(high, low, close)
def MAX(close, period):
return talib.MAX(close, period)
def highestbars(source, length):
"""
Highest value offset for a given number of bars back.
Returns offset to the highest bar.
"""
source = source[-length:]
offset = abs(length - 1 - np.argmax(source))
return offset
def lowestbars(source, length):
"""
Lowest value offset for a given number of bars back.
Returns offset to the lowest bar.
"""
source = source[-length:]
offset = abs(length - 1 - np.argmin(source))
return offset
def tr(high, low, close):
"""
true range
"""
return talib.TRANGE(high, low, close)
def atr(high, low, close, period):
"""
average true range
"""
return talib.ATR(high, low, close, period)
def stdev(source, period):
return
|
pd.Series(source)
|
pandas.Series
|
import pandas as pd
from datetime import datetime, timedelta
import xgboost as xgb
import numpy as np
from sklearn.model_selection import KFold # import KFold
from plotly.offline import plot
from plotly.graph_objs import Scatter
import pickle
from xgboost import plot_importance
from matplotlib import pyplot
from pygam import LinearGAM
from sklearn.ensemble import RandomForestRegressor
def rmse(predictions, targets):
return np.sqrt(((predictions - targets) ** 2).mean())
P04_RF=
|
pd.read_csv("c:/temp/P09_modele.csv", sep=",")
|
pandas.read_csv
|
"""
Main dataset entity
"""
from datetime import datetime
import json
import logging
import random
import os
import os.path
import re
from dataclasses import dataclass, field
from typing import List
from urllib.parse import urlparse
from sqlalchemy import Column, Integer, JSON, ForeignKey, func, sql, or_, and_, inspect
from sqlalchemy.orm import relationship
from sqlalchemy.orm.attributes import flag_dirty, flag_modified
from flask import flash
import pandas as pd
from pandas.api.types import is_numeric_dtype
import numpy as np
import app.lib.config as config
from app.lib.database_internals import Base
from app.lib.models.datasetcontent import DatasetContent
from app.lib.models.task import DatasetTask
from app.lib.models.user import User
from app.lib.models.activity import Activity
from app.lib.models.annotation import Annotation
from app.lib.npencoder import NpEncoder
DATASET_CONTENT_CACHE = {}
def pd_expand_json_column(df, json_column):
"""
https://stackoverflow.com/a/25512372
"""
df = pd.concat(
[df, json_column.apply(lambda content: pd.Series(list(content.values()), index=list(content.keys())))],
axis=1
)
return df.drop(columns=['data'])
def calculate_row_state(row, additional_user_columns):
annotations = {}
for annotator_column in additional_user_columns:
if annotator_column not in row:
continue
annotations[annotator_column] = row[annotator_column]
row_state = set()
if len(annotations) == 0:
row_state.add("empty")
elif len(annotations) == 1:
row_state.add("single")
elif len(annotations) > 1:
row_state.add("multiple")
uniques = set(annotations.values())
if len(uniques) == 1:
row_state.add("undisputed")
elif len(uniques) > 1:
row_state.add("disputed")
return row_state
def restore_anno_values(v):
if v is not None and isinstance(v, str):
try:
v = json.loads(v)
except:
return v
return v
class Dataset(Base):
__tablename__ = 'datasets'
dataset_id = Column(Integer, primary_key=True)
owner_id = Column(Integer, ForeignKey("users.uid"), nullable=False)
owner = relationship("User", back_populates="datasets", lazy="joined")
dsannotations = relationship("Annotation", cascade="all, delete-orphan")
dscontent = relationship("DatasetContent", cascade="all, delete-orphan")
dstasks = relationship("DatasetTask", cascade="all, delete-orphan", order_by="DatasetTask.taskorder")
dsmetadata = Column(JSON, nullable=False)
persisted = False
_cached_df = None
valid_option_keys = set(["annotators_can_comment", "allow_restart_annotation", "additional_column"])
def defined_splits(self, dbsession):
split_content_counts = dbsession.query(DatasetContent.split_id, func.count(DatasetContent.sample))
split_content_counts = split_content_counts.filter_by(dataset_id=self.dataset_id)
split_content_counts = split_content_counts.group_by(DatasetContent.split_id).all()
split_info = {}
split_metadata = self.dsmetadata.get("splitdetails", {})
for ds_split, ds_split_count in split_content_counts:
split_info[ds_split] = split_metadata.get(ds_split, {}) or {}
split_info[ds_split]['size'] = ds_split_count
return split_info
def split_annotator_list(self, dbsession, split_id, resolve_users=False):
metadata = self.split_metadata(split_id)
result_list = metadata[split_id]["acl"]
if not resolve_users:
return [str(uid) for uid in result_list]
result_list = [User.by_id(dbsession, int(uid), True) for uid in result_list]
return result_list
def split_annotator_add(self, dbsession, split_id, uid):
if uid is None:
return False
if isinstance(uid, User):
uid = uid.uid
if not isinstance(uid, str):
uid = str(uid)
annotator_list = self.split_annotator_list(dbsession, split_id, False)
logging.debug("split_annotator_add %s to split %s (before: %s)", uid, split_id, annotator_list)
if uid in annotator_list:
return False
annotator_list.append(uid)
split_metadata = self.split_metadata(split_id)
split_metadata[split_id]['acl'] = annotator_list
self.dsmetadata['splitdetails'] = split_metadata
self.dirty(dbsession)
return True
def split_annotator_remove(self, dbsession, split_id, uid):
if uid is None:
return False
if isinstance(uid, User):
uid = uid.uid
if not isinstance(uid, str):
uid = str(uid)
annotator_list = self.split_annotator_list(dbsession, split_id, False)
logging.debug("split_annotator_remove %s to split %s (before: %s)", uid, split_id, annotator_list)
if uid not in annotator_list:
return False
annotator_list.remove(uid)
split_metadata = self.split_metadata(split_id)
split_metadata[split_id]['acl'] = annotator_list
self.dsmetadata['splitdetails'] = split_metadata
self.dirty(dbsession)
return True
def _split_target(self, target_old):
target_criterion = "(dc.split_id = '' OR dc.split_id IS NULL)"
if target_old != "" and target_old is not None:
target_criterion = "dc.split_id = :targetold"
return target_criterion
def split_metadata(self, target_split=None):
metadata = self.dsmetadata.get("splitdetails", {})
if target_split not in metadata:
metadata[target_split] = {}
if "acl" not in metadata[target_split]:
metadata[target_split]["acl"] = []
return metadata
def rename_split(self, dbsession, session_user, target_old, target_new):
target_old = "" if target_old is None else target_old
target_new = "" if target_new is None else target_new
# move split metadata to new name if it did not exist before
split_metadata = self.split_metadata(target_old)
if target_new not in split_metadata:
split_metadata[target_new] = split_metadata[target_old]
del split_metadata[target_old]
self.dsmetadata['splitdetails'] = split_metadata
# update dataset content to new split
statement = sql.text("""
UPDATE datasetcontent AS dc
SET split_id = :targetnew
WHERE
dc.dataset_id = :datasetid
AND
""" + self._split_target(target_old) + """
""")
params = {
"datasetid": self.dataset_id,
"targetnew": target_new
}
if target_old != "" and target_old is not None:
params["targetold"] = target_old
sqlres = dbsession.execute(statement, params=params)
affected = sqlres.rowcount
# create an activity to track this change
Activity.create(dbsession, session_user, self, "split_edit",
"renamed split '%s' to '%s' (affected: %s)" %
(target_old, target_new, affected))
self.dirty(dbsession)
return affected
def get_field_minmax(self, dbsession, fieldid):
"""
Retrieve the minimum and maximum values of a particular additional field in the dataset.
"""
maxval = minval = None
sql_raw = prep_sql("""
SELECT MIN((data->>:targetcolumn)::float) AS minval, MAX((data->>:targetcolumn)::float) AS maxval
FROM datasetcontent AS dc
WHERE
dc.dataset_id = :datasetid
""".strip())
params = {
"datasetid": self.dataset_id,
"targetcolumn": fieldid
}
logging.debug("DB_SQL_LOG %s %s", sql, params)
statement = sql.text(sql_raw)
sqlres = dbsession.execute(statement, params=params)
sqlres = [{column: value for column, value in rowproxy.items()} for rowproxy in sqlres]
if len(sqlres) > 0:
minval = sqlres[0]['minval']
maxval = sqlres[0]['maxval']
return minval, maxval
def split_dataset(self, dbsession, session_user, targetsplit, splitoptions):
splitmethod = splitoptions.get("splitmethod", "")
if splitmethod == "" or splitmethod is None:
raise Exception("splitmethod cannot be empty")
targetsplit = "" if targetsplit is None else targetsplit
target_criterion = self._split_target(targetsplit)
params = {
"datasetid": self.dataset_id,
"targetold": targetsplit
}
affected = 0
sql_raw = None
if splitmethod == "attribute":
splitcolumn = splitoptions.get("splitcolumn", None) or None
if splitcolumn is None or splitcolumn == "":
raise Exception("no column specified for split method %s" % (splitmethod))
params['targetcolumn'] = splitcolumn
params['trimtargetcolumn'] = (splitcolumn or "").strip()
sql_raw = """
UPDATE datasetcontent AS dc
SET split_id = TRIM(BOTH FROM (split_id || ' / ' || :trimtargetcolumn || '=' || (TRIM(both ' "' FROM data->>:targetcolumn))::TEXT))
WHERE
dc.dataset_id = :datasetid
AND
""" + target_criterion + """
"""
elif splitmethod == "value":
splitcolumn = splitoptions.get("splitcolumn", None) or None
if splitcolumn is None or splitcolumn == "":
raise Exception("no column specified for split method %s" % (splitmethod))
splitvalue = splitoptions.get("splitvalue", None) or None
if splitvalue is None or splitvalue == "":
raise Exception("no value specified to split at")
splitvalue = float(splitvalue)
params['targetcolumn'] = splitcolumn
params['splitvalue'] = splitvalue
params['trimtargetcolumn'] = (splitcolumn or "").strip()
sql_raw = """
UPDATE datasetcontent AS dc
SET split_id = CASE WHEN (data->>:targetcolumn)::float < :splitvalue
THEN TRIM(BOTH FROM (split_id || ' / ' || :trimtargetcolumn || '<' || :splitvalue))
ELSE TRIM(BOTH FROM (split_id || ' / ' || :trimtargetcolumn || '>=' || :splitvalue))
END
WHERE
dc.dataset_id = :datasetid
AND
""" + target_criterion + """
"""
elif splitmethod in ["ratio", "evenly"]:
# gather target IDs
id_query = dbsession.query(DatasetContent.sample_index).filter_by(dataset_id=self.dataset_id)
if targetsplit is None or targetsplit == "":
# pylint: disable=singleton-comparison
id_query = id_query.filter(or_(DatasetContent.split_id == "", DatasetContent.split_id == None))
else:
id_query = id_query.filter_by(split_id=targetsplit)
all_affected_ids = [t[0] for t in id_query.all()]
# shuffle affected ID list
random.shuffle(all_affected_ids)
newsplits = []
if splitmethod == "ratio":
new_ratio = splitoptions.get("splitratio", "").strip()
if new_ratio == "" or '-' not in new_ratio:
raise Exception('missing valid ratio argument')
new_ratio_a = int(new_ratio.split("-")[0])
# since the IDs were shuffled, we can just take N elements for the first ratio
# and treat the rest as the second one
new_ratio_a_size = max(1, int(len(all_affected_ids) * (new_ratio_a/100.0)))
newsplits = [
all_affected_ids[:new_ratio_a_size],
all_affected_ids[new_ratio_a_size:]
]
elif splitmethod == "evenly":
num_chunks = int(splitoptions.get("splitcount", "2"))
newsplits = [[] for _ in range(num_chunks)]
for idx, sample_id in enumerate(all_affected_ids):
target_bucket = newsplits[idx % num_chunks]
target_bucket.append(sample_id)
if len(newsplits) > 0:
newsplit_identifiers = [("%s / %s" % (targetsplit, chr(ord('A') + idx))).strip(" /") for idx in range(len(newsplits))]
for newsplit_index, newsplit_ids in enumerate(newsplits):
newsplit_label = newsplit_identifiers[newsplit_index]
update_query = dbsession.query(DatasetContent).filter_by(dataset_id=self.dataset_id)
if targetsplit is None or targetsplit == "":
update_query = update_query.filter(or_(DatasetContent.split_id == "", DatasetContent.split_id == None))
else:
update_query = update_query.filter_by(split_id=targetsplit)
update_query = update_query.filter(DatasetContent.sample_index.in_(newsplit_ids))
affected += update_query.update({"split_id": newsplit_label}, synchronize_session='fetch')
Activity.create(dbsession, session_user, self, "split_edit",
"forked split '%s' method:'%s' (affected: %s, new splits: %s)" %
(targetsplit, splitmethod, affected, len(newsplits)))
else:
raise Exception("no implementation found for split method %s" % (splitmethod))
if sql_raw is not None:
sql_raw = prep_sql(sql_raw)
logging.debug("DB_SQL_LOG %s %s", sql, params)
statement = sql.text(sql_raw)
sqlres = dbsession.execute(statement, params=params)
affected = sqlres.rowcount
Activity.create(dbsession, session_user, self, "split_edit",
"forked split '%s' method:'%s' (affected: %s)" %
(targetsplit, splitmethod, affected))
self.dirty(dbsession)
return affected
@staticmethod
def by_id(dbsession, dataset_id, user_id=None, no_error=False):
qry = None
if user_id is None:
qry = dbsession.query(Dataset).filter_by(dataset_id=dataset_id)
else:
qry = dbsession.query(Dataset).filter_by(owner_id=user_id, dataset_id=dataset_id)
if no_error:
return qry.one_or_none()
return qry.one()
def get_option(self, key, default_value=False):
return bool(self.dsmetadata.get(key, default_value))
def get_option_list(self, key, default_value=[]):
optval = self.dsmetadata.get(key, default_value)
if optval is None:
return default_value
if isinstance(optval, str):
optval = [optval]
return optval
def empty(self):
return not self.has_content()
def has_content(self):
if self.dscontent is None:
return False
return len(self.dscontent) > 0
def size(self, dbsession):
return self.content_query(dbsession).count()
def content_query(self, dbsession):
return dbsession.query(DatasetContent).filter_by(dataset_id=self.dataset_id)
def __repr__(self):
return "<Dataset (%s)>" % self.get_name()
@staticmethod
def activity_prefix():
return "DATASET:"
def activity_target(self):
return "DATASET:%s" % self.dataset_id
def get_name(self):
if self.dsmetadata is None:
return self.dataset_id
return self.dsmetadata.get("name", self.dataset_id)
def get_description(self):
if self.dsmetadata is None:
return ""
return self.dsmetadata.get("description", "")
def description_is_link(self):
description = self.get_description().strip()
description_l = description.lower()
if not description_l.startswith("http://") and \
not description_l.startswith("https://"):
return False
if " " in description:
return False
try:
res = urlparse(description)
return all([res.scheme, res.netloc])
except: # noqa
return False
return False
def get_text_column(self):
textcol = self.dsmetadata.get("textcol", None)
if textcol is None:
return None
return textcol
def get_size(self):
return self.dsmetadata.get("size", -1) or -1
def get_id_column(self):
idcolumn = self.dsmetadata.get("idcolumn", None)
if idcolumn is None:
return None
return idcolumn
def get_annotator_splits(self, dbsession, uid):
if isinstance(uid, int):
uid = str(uid)
if isinstance(uid, User):
uid = str(uid.uid)
if not uid:
return set()
split_list = set()
for split_id in self.defined_splits(dbsession).keys():
split_annotators = self.split_annotator_list(dbsession, split_id, resolve_users=False)
if uid in split_annotators:
split_list.add(split_id)
return split_list
def get_split_progress(self, dbsession):
sql_raw = prep_sql("""
SELECT
dc.split_id,
COUNT(dc.sample_index) as sample_count,
COUNT(DISTINCT annos.sample_index) AS annotated_sample_count,
COUNT(DISTINCT annos.owner_id) AS annotators_count
FROM datasetcontent dc
LEFT OUTER JOIN annotations annos ON dc.dataset_id = annos.dataset_id AND dc.sample_index = annos.sample_index
LEFT OUTER JOIN users u ON u.uid = annos.owner_id
WHERE u.email <> 'SYSTEM'
AND dc.dataset_id = :datasetid
GROUP BY dc.split_id;
""".strip())
params = {
"datasetid": self.dataset_id,
}
logging.debug("DB_SQL_LOG %s %s", sql, params)
statement = sql.text(sql_raw)
sqlres = dbsession.execute(statement, params=params)
sqlres = [{column: value for column, value in rowproxy.items()} for rowproxy in sqlres]
return sqlres
def get_roles(self, dbsession, user_obj, splitroles=True):
if isinstance(user_obj, str):
user_obj = int(user_obj)
if isinstance(user_obj, int):
user_obj = User.by_id(dbsession, user_obj)
if not user_obj:
return set()
if user_obj.is_system_user():
return set(["annotator", "curator"])
roles = []
if user_obj.uid == self.owner_id:
# add all roles for owned datasets
roles.append("owner")
curacl = self.get_acl()
uid = str(user_obj.uid)
if uid in curacl and not curacl[uid] is None:
if "annotator" in curacl[uid]:
roles.append("annotator")
if "curator" in curacl[uid]:
roles.append("curator")
# check if user is granted annotation for individual splits
if splitroles:
if "annotator" not in roles and len(self.get_annotator_splits(dbsession, user_obj)) > 0:
roles.append("annotator")
return set(roles)
def reorder_tasks(self):
"""
ensures that all tasks for this dataset have unique ordering criteria
"""
if self.dstasks is None:
return
if len(self.dstasks) == 0:
return
for idx, task in enumerate(self.dstasks):
if idx != task.taskorder:
task.taskorder = idx
def task_by_taskorder(self, taskorder: int):
target = None
for task in self.dstasks:
if task.taskorder == taskorder:
target = task
break
return target
def task_rename(self, dbsession, taskorder: int, new_name: str):
target = self.task_by_taskorder(taskorder)
if target is None:
return False
target.taskconfig['title'] = new_name
flag_dirty(target)
flag_modified(target, 'taskconfig')
def task_delete(self, dbsession, taskorder: int):
target = self.task_by_taskorder(taskorder)
if target is None:
return False
dbsession.delete(target)
return True
def taskorder(self, taskorder: int, change: int):
target = self.task_by_taskorder(taskorder)
if target is None:
return
swap_target_taskorder = taskorder + change
if swap_target_taskorder == taskorder:
return
swap_target = self.task_by_taskorder(swap_target_taskorder)
target.taskorder, swap_target.taskorder = swap_target.taskorder, target.taskorder
flag_dirty(target)
flag_dirty(swap_target)
flag_modified(target, "taskorder")
flag_modified(swap_target, "taskorder")
def check_dataset(self):
errorlist = []
dsname = self.dsmetadata.get("name", None) if self.dsmetadata is not None else None
if dsname is None or dsname.strip() == '':
errorlist.append("unnamed dataset")
if not self.persisted and self.dataset_id is None:
errorlist.append("not saved")
if self.dsmetadata.get("hasdata", None) is None:
errorlist.append("no data")
if self.dstasks is None or len(self.dstasks) == 0:
errorlist.append("no tasks defined")
for taskdef in self.dstasks:
taskdef.validate(errorlist)
if self.empty():
errorlist.append("no data")
textcol = self.dsmetadata.get("textcol", None)
if textcol is None:
errorlist.append("no text column defined")
idcolumn = self.dsmetadata.get("idcolumn", None)
if idcolumn is None:
errorlist.append("no ID column defined")
if len(errorlist) == 0:
return None
return errorlist
def validate_owner(self, userobj):
if self.owner is not userobj:
raise Exception("You cannot modify datasets you do not own.")
def ensure_id(self, dbsession):
"""
Ensure that this dataset was written to the database and was assigned an identifier.
"""
if self.dataset_id is not None:
return True
self.dirty(dbsession)
dbsession.commit()
dbsession.flush()
return False
def accessible_by(self, dbsession, for_user):
if self.dataset_id is None:
raise Exception("cannot check accessibility. dataset needs to be committed first.")
if isinstance(for_user, int):
for_user = User.by_id(dbsession, for_user)
if self.owner == for_user:
return True
dsacl = self.get_roles(dbsession, for_user)
if dsacl is None or len(dsacl) == 0:
return False
return True
def get_task(self, dbsession, for_user):
if not self.accessible_by(dbsession, for_user):
return None
if isinstance(for_user, int):
for_user = User.by_id(dbsession, for_user)
if not isinstance(for_user, User):
raise Exception("dataset::get_task - argument for_user needs to be of type User")
check_result = self.check_dataset()
if check_result is not None and len(check_result) > 0:
return None
task_size = self.get_size()
global_roles = self.get_roles(dbsession, for_user, splitroles=False)
annotation_splits = None
if "annotator" not in global_roles:
annotation_splits = self.get_annotator_splits(dbsession, for_user)
if annotation_splits is not None and len(annotation_splits) == 0:
annotation_splits = None
if annotation_splits is not None:
dataset_splits = self.defined_splits(dbsession)
task_size = 0
for split_id in annotation_splits:
if split_id not in dataset_splits:
continue
task_size += dataset_splits[split_id].get("size", 0)
task = AnnotationTask(id=self.dataset_id,
name=self.get_name(),
dataset=self,
progress=0,
user_roles=self.get_roles(dbsession, for_user),
size=task_size,
splits=annotation_splits,
annos=self.annocount(dbsession, for_user, annotation_splits),
annos_today=self.annocount_today(dbsession, for_user, annotation_splits)
)
task.calculate_progress()
task.can_annotate = task.progress < 100.0 or self.dsmetadata.get("allow_restart_annotation", False)
return task
def dirty(self, dbsession):
db_state = inspect(self)
if db_state.deleted:
return False
flag_dirty(self)
flag_modified(self, "dsmetadata")
dbsession.add(self)
return True
def migrate_annotations(self, dbsession, update_taskdef, old_name, new_name):
migrated_annotations = 0
for anno in dbsession.query(Annotation).filter_by(dataset_id=self.dataset_id, task_id=update_taskdef.anno_task_id).all():
if anno.data is None:
continue
anno_tag = anno.data.get("value", None)
if anno_tag is None or not anno_tag == old_name:
continue
anno.data["value"] = new_name
flag_dirty(anno)
flag_modified(anno, "data")
dbsession.add(anno)
migrated_annotations += 1
dbsession.flush()
return migrated_annotations
def taskdef_by_id(self, taskdef_id):
if taskdef_id is None:
raise Exception("taskdef_id cannot be null")
if self.dstasks is None:
return None
for taskdef in self.dstasks:
if str(taskdef.task_id) == str(taskdef_id):
return taskdef
return None
def annotations(self, dbsession, fortask=None, page=1, page_size=50, foruser=None,
user_column=None, restrict_view=None, only_user=False, with_content=True,
query=None, order_by=None, min_sample_index=None,
splits=None,
tags_include=None, tags_exclude=None):
if restrict_view is not None and not isinstance(restrict_view, list):
restrict_view = [restrict_view]
foruser = User.by_id(dbsession, foruser)
user_roles = self.get_roles(dbsession, foruser)
if 'annotator' not in user_roles and \
'curator' not in user_roles:
raise Exception("Unauthorized, user %s does not have role 'curator'. Active roles: %s"
% (foruser, user_roles))
if user_column is None:
user_column = "annotation"
sql_select = ""
sql_where = ""
field_list = ["dc.sample_index AS sample_index", "dc.sample AS sample_id"]
if with_content:
field_list.append("dc.content AS sample_content")
params = {
"dataset_id": self.dataset_id
}
if query is not None:
sql_where += "\nAND dc.content ILIKE %(query_pattern)s"
if not query.startswith("%") and not query.endswith("%"):
query = "%" + query + "%"
params['query_pattern'] = query
join_type = "LEFT" if restrict_view is not None and "curated" in restrict_view else "LEFT OUTER"
id_column = self.get_id_column()
annotation_columns = []
col_renames = {
"sample_id": id_column,
"sample_content": self.get_text_column()
}
if foruser is not None:
sql_select += """
{join_type} JOIN annotations AS usercol ON usercol.dataset_id = dc.dataset_id AND usercol.sample_index = dc.sample_index AND usercol.owner_id = %(foruser_join)s
""".format(join_type=join_type)
col_renames["usercol_value"] = user_column
params['foruser_join'] = foruser.uid
if fortask:
field_list.append("usercol.data->'%s'->'value' #>> '{}' AS usercol_value" % str(fortask.task_id))
else:
field_list.append("usercol.data #>> '{}' AS usercol_value")
annotation_columns.append(user_column)
if tags_include is None:
tags_include = []
if tags_exclude is None:
tags_exclude = []
condition_include = []
condition_exclude = []
target_tasks = [fortask] if fortask else self.dstasks
for curtask in target_tasks:
for tag_idx, tag in enumerate(curtask.get_taglist()):
if tag not in tags_include and tag not in tags_exclude:
continue
params["tag_%s" % tag_idx] = tag
if tag in tags_include:
condition_include.append("tag_%s" % tag_idx)
if tag in tags_exclude:
condition_exclude.append("tag_%s" % tag_idx)
"""
TODO taskdef id inclusion not tested yet
"""
if len(condition_include) > 0:
sql_where += "\nAND usercol.data->'%s'->'value' #>> '{}' IN (%s)" % (str(curtask.task_id), ", ".join(map(lambda p: "%(" + p + ")s", condition_include)))
if len(condition_exclude) > 0:
sql_where += "\nAND NOT usercol.data->'%s'->'value' #>> '{}' IN (%s)" % (str(curtask.task_id), ", ".join(map(lambda p: "%(" + p + ")s", condition_exclude)))
# if user is annotator, only export and show their own annotations
target_users = [foruser]
if 'curator' in user_roles and not only_user:
# curator, also implied by owner role
target_users = list(set(User.userlist(dbsession)) - set([foruser]))
additional_user_columns = []
additional_user_columns_raw = []
if not only_user:
for user_obj in target_users:
if user_obj is foruser:
continue
sql_select += """
{join_type} JOIN annotations AS "anno-{uid}" ON "anno-{uid}".dataset_id = dc.dataset_id AND "anno-{uid}".sample_index = dc.sample_index AND "anno-{uid}".owner_id = %(foruser_{uid})s
""".format(join_type=join_type, uid=user_obj.uid)
params["foruser_{uid}".format(uid=user_obj.uid)] = user_obj.uid
if fortask:
field_list.append("\"anno-{uid}\".data->'{taskid}'->'value' AS \"anno-{uid}\"".format(taskid=fortask.task_id, uid=user_obj.uid))
else:
field_list.append("\"anno-{uid}\".data AS \"anno-{uid}\"".format(uid=user_obj.uid))
annotation_columns.append("anno-{uid}-{uname}".format(uid=user_obj.uid, uname=user_obj.email))
additional_user_columns.append("anno-{uid}-{uname}".format(uid=user_obj.uid, uname=user_obj.email))
additional_user_columns_raw.append("anno-{uid}".format(uid=user_obj.uid))
col_renames["anno-{uid}".format(uid=user_obj.uid)] = "anno-{uid}-{uname}".format(uid=user_obj.uid, uname=user_obj.email)
if splits is not None and len(splits) > 0:
sql_where += "\nAND dc.split_id = ANY(%(splitlist)s)"
params["splitlist"] = list(splits)
sql_where = """
WHERE dc.dataset_id = %(dataset_id)s
""" + sql_where
restrict_clause = ""
if restrict_view is not None:
if "curated" in restrict_view:
sql_where += "\nAND usercol IS NOT NULL"
elif "uncurated" in restrict_view:
sql_where += "\nAND usercol IS NULL"
if "disputed" in restrict_view:
restrict_clause = "WHERE aggr.unique_anno_count > 1"
elif "undisputed" in restrict_view:
restrict_clause = "WHERE aggr.unique_anno_count = 1"
if min_sample_index is not None:
sql_where += "\nAND dc.sample_index >= %(min_sample_index)s"
params["min_sample_index"] = min_sample_index
sql_raw = """
SELECT {field_list} FROM datasetcontent AS dc
{sql_select}
{sql_where}
""".format(field_list=", ".join(field_list),
sql_select="\n" + sql_select.strip(),
sql_where="\n" + sql_where.strip())
# wrap in order to gather disputed/undisputed states
sql_raw = """
SELECT o.*, aggr.* FROM ({original_sql}) AS o
LEFT JOIN LATERAL (
SELECT COUNT(DISTINCT val) AS unique_anno_count FROM (
SELECT UNNEST(ARRAY[{anno_columns}]::text[]) AS val
) AS aggrcnt
WHERE val IS NOT NULL
) AS aggr ON true
{restrict_clause}
""".format(
original_sql=sql_raw,
anno_columns=", ".join(map(lambda col: "o.\"" + col + "\"",
additional_user_columns_raw)),
restrict_clause=restrict_clause,
).strip()
# sql_select += """
# LEFT JOIN (ARRAY[{anno_columns}] AS unique_annotations
# {join_type} JOIN annotations AS "anno-{uid}" ON "anno-{uid}".dataset_id = dc.dataset_id AND "anno-{uid}".sample_index = dc.sample_index AND "anno-{uid}".owner_id = %(foruser_{uid})s
# """.format(anno_columns=)
sql_count = """
SELECT COUNT(o.*) AS cnt FROM ({original_sql}) AS o
""".format(original_sql=sql_raw).strip()
# ordering and constraints
if order_by is not None:
sql_raw += "\nORDER BY %s" % order_by
if page_size > 0:
sql_raw += "\nLIMIT %(page_size)s"
params["page_size"] = page_size
if page > 0 and page_size > 0:
sql_raw += "\nOFFSET %(page_onset)s"
params["page_onset"] = (page - 1) * page_size
sql_raw = prep_sql(sql_raw)
logging.debug("DB_SQL_LOG %s %s", sql_raw, params)
df = pd.read_sql(sql_raw,
dbsession.bind,
params=params)
sql_count = prep_sql(sql_count)
logging.debug("DB_SQL_LOG %s %s", sql_count, params)
# placeholders for sql.text are :placeholder instead of %(placeholder)s
sql_count = re.sub(r"%\((.*?)\)s", r":\1", sql_count)
statement = sql.text(sql_count)
sqlres = dbsession.execute(statement, params=params)
sqlres = [{column: value for column, value in rowproxy.items()} for rowproxy in sqlres][0]
df_count = sqlres['cnt']
df = df.rename(columns=col_renames)
# remove additional user columns that do not have annotations yet
drop_columns = []
for check_column in df.columns.intersection(additional_user_columns):
if df[check_column].dropna().empty:
drop_columns.append(check_column)
if len(drop_columns) > 0:
df = df.drop(columns=drop_columns)
for col in drop_columns:
annotation_columns.remove(col)
# remove pseudo-columns if present
pseudo_columns = set(['unique_anno_count'])
pseudo_columns = pseudo_columns.intersection(set(df.columns))
if len(pseudo_columns) > 0:
df = df.drop(columns=pseudo_columns)
for col in annotation_columns:
df[col] = df[col].apply(restore_anno_values)
return df, annotation_columns, df_count
def task_by_id(self, task_id):
if isinstance(task_id, str):
task_id = int(task_id)
for task in self.dstasks:
if task.task_id == task_id:
return task_id, task
return task_id, None
def get_anno_votes(self, dbsession, task_id, sample_id, exclude_user=None):
anno_votes = {}
if not isinstance(sample_id, str):
sample_id = str(sample_id)
task_id, task = self.task_by_id(task_id)
for tag in task.get_taglist():
anno_votes[tag] = []
for anno in dbsession.query(Annotation).filter_by(
dataset_id=self.dataset_id,
task_id=task_id,
sample=sample_id).all():
if exclude_user is not None and exclude_user is anno.owner:
continue
if anno.data is None or anno.data.get('value', None) is None or \
not anno.data['value'] in task.get_taglist():
continue
anno_votes[anno.data['value']].append(anno.owner)
return anno_votes
def supports_simple_annotation(self):
if len(self.dstasks) > 1:
return False
for task in self.dstasks:
if task.tasktype == "tagging" and task.taskconfig.get("multiselect", False):
return False
if task.tasktype == "text":
return False
return True
def getannos(self, dbsession, uid, task_id, asdict=False):
"""
@deprecated
"""
user_obj = User.by_id(dbsession, uid)
annores = dbsession.query(Annotation).filter_by(
owner_id=user_obj.uid, dataset_id=self.dataset_id, task_id=task_id).all()
if not asdict:
return annores
resdict = {"task_id": task_id, "sample": [], "uid": [], "annotation": []}
for anno in annores:
resdict['uid'].append(anno.owner_id)
resdict['sample'].append(anno.sample)
resdict['annotation'].append(anno.data.get('value', None))
return resdict
def getanno_for_task(self, dbsession, uid, task_id, sample):
user_obj = User.by_id(dbsession, uid)
task_id = int(task_id)
anno_obj = dbsession.query(Annotation).filter_by(owner_id=user_obj.uid,
dataset_id=self.dataset_id,
task_id=task_id,
sample=sample).one_or_none()
anno_obj_data = anno_obj.data if anno_obj is not None else {}
return anno_obj_data
def getanno(self, dbsession, uid, task_id, sample):
sample = str(sample)
anno_obj_data = {}
if task_id != "*":
anno_obj_data = self.getanno_for_task(dbsession, uid, task_id, sample)
return {
"sample": sample,
"task": task_id,
"data": anno_obj_data
}
else:
for query_task in self.dstasks:
task_anno = self.getanno_for_task(dbsession, uid, query_task.task_id, sample)
anno_obj_data[query_task.task_id] = task_anno
return {
"sample": sample,
"data": anno_obj_data
}
def sample_by_index(self, dbsession, sample_index):
qry = self.content_query(dbsession).filter_by(sample_index=int(sample_index))
return qry.one_or_none()
def get_next_sample(self, dbsession, sample_index, user_obj, splits, exclude_annotated=True):
if sample_index is None:
return None, None
sample_index = int(sample_index)
sql_raw = ""
split_where = ""
if splits is not None and len(splits) > 0:
split_where += "\nAND dc.split_id = ANY(%(splitlist)s)"
if exclude_annotated:
sql_raw = """
SELECT
dc.sample_index, dc.sample, COUNT(anno.owner_id) AS annocount
FROM
datasetcontent AS dc
LEFT JOIN annotations AS anno
ON anno.sample_index = dc.sample_index AND anno.dataset_id = dc.dataset_id
WHERE 1=1
AND dc.dataset_id = %(dsid)s
AND (anno.owner_id != %(uid)s
OR anno.owner_id IS NULL)
AND dc.sample_index > %(req_sample_idx)s
{split_where}
GROUP BY dc.sample_index, dc.sample
ORDER BY dc.sample_index ASC
LIMIT 1
""".format(split_where=split_where).strip()
else:
sql_raw = """
SELECT
dc.sample_index, dc.sample, COUNT(anno.owner_id) AS annocount
FROM
datasetcontent AS dc
LEFT JOIN annotations AS anno
ON anno.sample_index = dc.sample_index AND anno.dataset_id = dc.dataset_id
WHERE 1=1
AND dc.dataset_id = %(dsid)s
AND dc.sample_index > %(req_sample_idx)s
{split_where}
GROUP BY dc.sample_index, dc.sample
ORDER BY dc.sample_index ASC
LIMIT 1
""".format(split_where=split_where).strip()
params = {
"uid": user_obj.uid,
"dsid": self.dataset_id,
"req_sample_idx": sample_index
}
if splits is not None and len(splits) > 0:
params["splitlist"] = list(splits)
logging.debug("DF_SQL_LOG %s\n%s\n%s", "get_next_sample(excl=%s)" % exclude_annotated, sql_raw, params)
df = pd.read_sql(sql_raw,
dbsession.bind,
params=params)
if df.shape[0] == 0 and exclude_annotated and self.dsmetadata.get("allow_restart_annotation", False):
return self.get_next_sample(dbsession, sample_index, user_obj, splits, exclude_annotated=False)
if df.shape[0] > 0:
first_row = df.iloc[df.index[0]]
return first_row["sample_index"], first_row["sample"]
# empty dataset
return None, None
def _update_overview_statistics(self, overview, df):
if df is None:
return
# drop split, ID, and text columns
ignore_columns = []
if 'split' in df.columns:
ignore_columns.append("split")
if self.get_id_column() is not None and self.get_id_column() in df.columns:
ignore_columns.append(self.get_id_column())
if self.get_text_column() is not None and self.get_text_column() in df.columns:
ignore_columns.append(self.get_text_column())
df.drop(columns=ignore_columns)
cur_dtypes = dict(df.dtypes)
overview['columns'] = {}
for colname, coldtype in cur_dtypes.items():
overview['columns'][colname] = {}
colinfo = overview['columns'][colname]
colinfo['dtype'] = coldtype
colinfo['numeric'] =
|
is_numeric_dtype(coldtype)
|
pandas.api.types.is_numeric_dtype
|
import numpy as np
import pandas as pd
from fairlens import utils
dfc = pd.read_csv("datasets/compas.csv")
def test_zipped_hist():
arr1 = np.arange(10)
arr2 = np.arange(5, 10)
hist1, hist2 = utils.zipped_hist((pd.Series(arr1), pd.Series(arr2)))
assert (hist1 == np.bincount(arr1) / len(arr1)).all()
assert (hist2 == np.bincount(arr2) / len(arr2)).all()
arr = np.concatenate([np.arange(10)] * 10)
assert (utils.zipped_hist((pd.Series(arr),))[0] == np.bincount(arr) / len(arr)).all()
arr = np.random.rand(1000)
hist, bin_edges = utils.zipped_hist((pd.Series(arr),), ret_bins=True)
_hist, _bin_edges = np.histogram(arr, bins="auto")
assert (hist == _hist / _hist.sum()).all() and (bin_edges == _bin_edges).all()
def test_bin():
columns = ["A", "B", "C"]
df = pd.DataFrame(np.array([np.arange(101) * (i + 1) for i in range(3)]).T, index=range(101), columns=columns)
assert df.loc[:, "A"].nunique() > 4
a_binned = utils.bin(df["A"], 4, duplicates="drop", remove_outliers=0.1)
assert a_binned.nunique() == 4
def test_quantize_dates():
col = pd.Series(pd.date_range(start="1/1/2018", periods=5))
assert utils.quantize_date(col).equals(pd.Series(["Day 1", "Day 2", "Day 3", "Day 4", "Day 5"]))
col = pd.Series(pd.to_datetime(["1/1/1999", "1/1/2020"]))
assert utils.quantize_date(col).equals(pd.Series(["1990-2000", "2020-2030"]))
col = pd.Series(pd.date_range(start="1/1/2018", periods=70))
assert (utils.quantize_date(col).unique() == ["Jan", "Feb", "Mar"]).all()
col = pd.Series(pd.date_range(start="1/1/2018", periods=500))
assert (utils.quantize_date(col).unique() == [2018, 2019]).all()
col = pd.Series(pd.date_range(start="1/1/2018", periods=2000))
assert (utils.quantize_date(col).unique() == [2018, 2019, 2020, 2021, 2022, 2023]).all()
col = pd.Series(pd.date_range(start="1/1/2018", periods=7000))
assert (utils.quantize_date(col).unique() == ["2010-2020", "2020-2030", "2030-2040"]).all()
def test_infer_dtype():
cols = ["A", "B", "C"]
df = pd.DataFrame(np.array([np.arange(11) * (i + 1) for i in range(len(cols))]).T, index=range(11), columns=cols)
assert str(utils.infer_dtype(df["A"]).dtype) == "int64"
df = pd.DataFrame(
np.array([np.linspace(0, 10, 21) * (i + 1) for i in range(len(cols))]).T, index=range(21), columns=cols
)
assert str(utils.infer_dtype(df["A"]).dtype) == "float64"
def test_infer_distr_type():
assert utils.infer_distr_type(pd.Series(np.linspace(-20, 20, 200))).is_continuous()
assert utils.infer_distr_type(pd.Series(np.linspace(-20, 20, 9))).is_continuous()
assert utils.infer_distr_type(pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])).is_continuous()
assert utils.infer_distr_type(pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])).is_categorical()
assert utils.infer_distr_type(pd.Series([1, 0] * 10)).is_binary()
assert utils.infer_distr_type(pd.Series([1, 0, 1, 1])).is_binary()
assert utils.infer_distr_type(pd.Series([True, False, True, True])).is_binary()
assert utils.infer_distr_type(pd.Series([1, 1, 1])).is_categorical()
assert utils.infer_distr_type(
|
pd.Series([0])
|
pandas.Series
|
from flask import Flask, escape, request, send_file, send_from_directory, jsonify
from flask_cors import CORS
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn import svm
import pickle
import io
import gzip
import joblib
import jsonpickle
import json
from sklearn import metrics
app = Flask(__name__)
CORS(app)
@app.route('/train', methods=['POST'])
def train():
file = request.files['file']
form = request.form
targetVariable, splitRatio, selectedDataHeaders, algorithm = form[
'targetVariable'], form['splitRatio'], form['selectedDataHeaders'], form['algorithm']
df = pd.read_csv(file)
inc = [str(i) for i in selectedDataHeaders.split(",")]
df_ = df[inc]
categoricals = []
for col, col_type in df_.dtypes.iteritems():
if col_type == 'O':
categoricals.append(col)
else:
df_[col].fillna(0, inplace=True)
df_ohe = pd.get_dummies(df_, columns=categoricals, dummy_na=True)
x = df_ohe[df_ohe.columns.difference([targetVariable])]
y = df_ohe[targetVariable]
X_train, X_test, y_train, y_test = train_test_split(
x, y, train_size=int(splitRatio)/100, random_state=4)
switcher = {
1: LinearRegression(),
2: LogisticRegression(),
4: svm.SVC(kernel='linear')
}
model = switcher[int(algorithm)]
model.fit(X_train, y_train)
model_columns = list(x.columns)
Y_pred = model.predict(X_test)
score_model = round(model.score(X_test, y_test), 2)
mae_model = round(metrics.mean_absolute_error(y_test, Y_pred), 4)
mse_model = round(metrics.mean_squared_error(y_test, Y_pred), 4)
mc = jsonpickle.encode(model_columns)
jp = jsonpickle.encode(model)
return jsonify({'model': jp, 'columns': mc, 'mae': mae_model, 'mse': mse_model, 'score': score_model})
@app.route('/predict', methods=['POST'])
def predict():
model_file = request.form["model"]
loaded_model = jsonpickle.decode(model_file)
inputs = request.form["inputs"]
load_inputs = json.loads(inputs)
model_columns = request.form["columns"]
load_model_columns = json.loads(model_columns)
df = pd.DataFrame([load_inputs.values()], columns=load_inputs.keys())
df2 =
|
pd.get_dummies(df)
|
pandas.get_dummies
|
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Tuple
import pandas
from qf_lib.plotting.decorators.chart_decorator import ChartDecorator
from qf_lib.plotting.decorators.data_element_decorator import DataElementDecorator
from qf_lib.plotting.decorators.simple_legend_item import SimpleLegendItem
class PointEmphasisDecorator(ChartDecorator, SimpleLegendItem):
"""
Creates a new marker for `series_data_element` for `x=series_index`. For a timeseries, you can specify
the time that you wish to be emphasised.
Parameters
----------
series_data_element: DataElementDecorator
The DataElementDecorator which should be decorated with an emphasised point.
coordinates: Tuple[Any, Any]
The x and y coordinate of the point that should be emphasised. The x and y coordinates should be expressed
in data coordinates (e.g. the x coordinate should be a date if x-axis contains dates).
color: str
color of the marker; by default it will be the same as the decorated line
decimal_points: int
number of decimal points that should be shown in the point's label
label_format: str
A format string specifying how the label should be displayed. Takes two parameters: the index and value.
useful values: ' {:0.1E}', ' {:0.1f}'
key: str
see: ChartDecorator.__init__#key
use_secondary_axes: bool
determines whether this PointEmphasis belongs on the secondary axis.
move_point: bool
font_size: int
size of font
"""
def __init__(self, series_data_element: DataElementDecorator, coordinates: Tuple[Any, Any], color: str = None,
decimal_points: int = 2, label_format: str = ' {:.4g}', key: str = None,
use_secondary_axes: bool = False, move_point: bool = True, font_size: int = 15):
# label_format = ' {:0.1E}'
ChartDecorator.__init__(self, key)
SimpleLegendItem.__init__(self)
assert isinstance(series_data_element.data, pandas.Series)
assert not pandas.isnull(coordinates[0])
assert not
|
pandas.isnull(coordinates[1])
|
pandas.isnull
|
import numpy as np
import sn_plotter_metrics.nsnPlot as nsn_plot
import matplotlib.pylab as plt
import argparse
from optparse import OptionParser
import glob
# from sn_tools.sn_obs import dataInside
import healpy as hp
import numpy.lib.recfunctions as rf
import pandas as pd
import os
import multiprocessing
def processMulti(toproc, Npixels, outFile, nproc=1):
"""
Function to analyze metric output using multiprocesses
The results are stored in outFile (npy file)
Parameters
--------------
toproc: pandas df
data to process
Npixels: numpy array
array of the total number of pixels per OS
outFile: str
output file name
nproc: int, opt
number of cores to use for the processing
"""
nfi = len(toproc)
tabfi = np.linspace(0, nfi, nproc+1, dtype='int')
print(tabfi)
result_queue = multiprocessing.Queue()
# launching the processes
for j in range(len(tabfi)-1):
ida = tabfi[j]
idb = tabfi[j+1]
p = multiprocessing.Process(name='Subprocess-'+str(j), target=processLoop, args=(
toproc[ida:idb], Npixels, j, result_queue))
p.start()
# grabing the results
resultdict = {}
for j in range(len(tabfi)-1):
resultdict.update(result_queue.get())
for p in multiprocessing.active_children():
p.join()
resdf = pd.DataFrame()
for j in range(len(tabfi)-1):
resdf = pd.concat((resdf, resultdict[j]))
print('finally', resdf.columns)
# saving the results in a npy file
np.save(outFile, resdf.to_records(index=False))
def processLoop(toproc, Npixels, j=0, output_q=None):
"""
Function to analyze a set of metric result files
Parameters
--------------
toproc: pandas df
data to process
Npixels: numpy array
array of the total number of pixels per OS
j: int, opt
internal int for the multiprocessing
output_q: multiprocessing.queue
queue for multiprocessing
Returns
-----------
pandas df with the following cols:
zlim, nsn, sig_nsn, nsn_extra, dbName, plotName, color,marker
"""
# this is to get summary values here
resdf = pd.DataFrame()
for index, val in toproc.iterrows():
dbName = val['dbName']
idx = Npixels['dbName'] == dbName
npixels = Npixels[idx]['npixels'].item()
metricdata = nsn_plot.NSNAnalysis(dirFile, val, metricName, fieldType,
nside, npixels=npixels)
# metricdata.plot()
# plt.show()
if metricdata.data_summary is not None:
resdf = pd.concat((resdf, metricdata.data_summary))
if output_q is not None:
output_q.put({j: resdf})
else:
return resdf
def mscatter(x, y, ax=None, m=None, **kw):
import matplotlib.markers as mmarkers
ax = ax or plt.gca()
sc = ax.scatter(x, y, **kw)
if (m is not None) and (len(m) == len(x)):
paths = []
for marker in m:
if isinstance(marker, mmarkers.MarkerStyle):
marker_obj = marker
else:
marker_obj = mmarkers.MarkerStyle(marker)
path = marker_obj.get_path().transformed(
marker_obj.get_transform())
paths.append(path)
sc.set_paths(paths)
return sc
def print_best(resdf, ref_var='nsn', num=10, name='a'):
"""
Method to print the "best" OS maximizing ref_var
Parameters
--------------
resdf: pandas df
data to process
ref_var: str, opt
variable chosen to rank the strategies (default: nsn)
num: int, opt
number of OS to display)
"""
ressort = pd.DataFrame(resdf)
ressort = ressort.sort_values(by=[ref_var], ascending=False)
ressort['rank'] = ressort[ref_var].rank(
ascending=False, method='first').astype('int')
print(ressort[['dbName', ref_var, 'rank']][:num])
ressort['dbName'] = ressort['dbName'].str.split('v1.4_10yrs').str[0]
ressort['dbName'] = ressort['dbName'].str.rstrip('_')
ressort[['dbName', ref_var, 'rank']][:].to_csv(
'OS_best_{}.csv'.format(name), index=False)
def rankCadences(resdf, ref_var='nsn'):
"""
Method to print the "best" OS maximizing ref_var
Parameters
--------------
resdf: pandas df
data to process
ref_var: str, opt
variable chosen to rank the strategies (default: nsn)
Returns
-----------
original pandas df plus rank
"""
ressort = pd.DataFrame(resdf)
ressort = ressort.sort_values(by=[ref_var], ascending=False)
ressort['rank'] = ressort[ref_var].rank(
ascending=False, method='first').astype('int')
return ressort
def plotSummary(resdf, ref=False, ref_var='nsn'):
"""
Method to draw the summary plot nSN vs zlim
Parameters
---------------
resdf: pandas df
dat to plot
ref: bool, opt
if true, results are displayed from a reference cadence (default: False)
ref_var: str, opt
column from which the reference OS is chosen (default: nsn_ref
"""
fig, ax = plt.subplots()
zlim_ref = -1
nsn_ref = -1
if ref:
ido = np.argmax(resdf[ref_var])
zlim_ref = resdf.loc[ido, 'zlim']
nsn_ref = resdf.loc[ido, 'nsn']
print(zlim_ref, nsn_ref)
"""
if zlim_ref > 0:
mscatter(zlim_ref-resdf['zlim'], resdf['nsn']/nsn_ref, ax=ax,
m=resdf['marker'].to_list(), c=resdf['color'].to_list())
else:
mscatter(resdf['zlim'], resdf['nsn'], ax=ax,
m=resdf['marker'].to_list(), c=resdf['color'].to_list())
"""
for ii, row in resdf.iterrows():
if zlim_ref > 0:
ax.text(zlim_ref-row['zlim'], row['nsn']/nsn_ref, row['dbName'])
else:
ax.plot(row['zlim'], row['nsn'], marker=row['marker'],
color=row['color'], ms=10)
ax.text(row['zlim']+0.001, row['nsn'], row['dbName'], size=12)
ax.grid()
ax.set_xlabel('$z_{faint}$')
ax.set_ylabel('$N_{SN}(z\leq z_{faint})$')
def plotCorrel(resdf, x=('', ''), y=('', '')):
"""
Method for 2D plots
Parameters
---------------
resdf: pandas df
data to plot
x: tuple
x-axis variable (first value: colname in resdf; second value: x-axis label)
y: tuple
y-axis variable (first value: colname in resdf; second value: y-axis label)
"""
fig, ax = plt.subplots()
resdf = filter(resdf, ['alt_sched'])
for ik, row in resdf.iterrows():
varx = row[x[0]]
vary = row[y[0]]
ax.plot(varx, vary, marker=row['marker'], color=row['color'])
#ax.text(varx+0.1, vary, row['dbName'], size=10)
ax.set_xlabel(x[1])
ax.set_ylabel(y[1])
ax.grid()
def plotBarh(resdf, varname,leg):
"""
Method to plot varname - barh
Parameters
---------------
resdf: pandas df
data to plot
varname: str
column to plot
"""
fig, ax = plt.subplots(figsize=(10,5))
fig.subplots_adjust(left=0.3)
resdf = resdf.sort_values(by=[varname])
resdf['dbName'] = resdf['dbName'].str.split('_10yrs', expand=True)[0]
ax.barh(resdf['dbName'], resdf[varname], color=resdf['color'])
ax.set_xlabel(r'{}'.format(leg))
ax.tick_params(axis='y', labelsize=15.)
plt.grid(axis='x')
#plt.tight_layout
plt.savefig('Plots_pixels/Summary_{}.png'.format(varname))
def filter(resdf, strfilt=['_noddf']):
"""
Function to remove OS according to their names
Parameters
---------------
resdf: pandas df
data to process
strfilt: list(str),opt
list of strings used to remove OS (default: ['_noddf']
"""
for vv in strfilt:
idx = resdf['dbName'].str.contains(vv)
resdf = pd.DataFrame(resdf[~idx])
return resdf
parser = OptionParser(
description='Display NSN metric results for WFD fields')
parser.add_option("--dirFile", type="str", default='/sps/lsst/users/gris/MetricOutput',
help="file directory [%default]")
parser.add_option("--nside", type="int", default=64,
help="nside for healpixels [%default]")
parser.add_option("--fieldType", type="str", default='WFD',
help="field type - DD, WFD, Fake [%default]")
parser.add_option("--nPixelsFile", type="str", default='ObsPixels_fbs14_nside_64.npy',
help="file with the total number of pixels per obs. strat.[%default]")
parser.add_option("--listdb", type="str", default='plot_scripts/input/WFD_test.csv',
help="list of dbnames to process [%default]")
parser.add_option("--tagbest", type="str", default='snpipe_a',
help="tag for the best OS [%default]")
opts, args = parser.parse_args()
# Load parameters
dirFile = opts.dirFile
nside = opts.nside
fieldType = opts.fieldType
metricName = 'NSN'
nPixelsFile = opts.nPixelsFile
listdb = opts.listdb
tagbest = opts.tagbest
metricTot = None
metricTot_med = None
toproc =
|
pd.read_csv(listdb,comment='#')
|
pandas.read_csv
|
#!/usr/bin/env python
# -*-coding:utf-8 -*-
'''
@File : Stress_detection_script.py
@Time : 2022/03/17 09:45:59
@Author : <NAME>
@Contact : <EMAIL>
'''
import os
import logging
import plotly.express as px
import numpy as np
import pandas as pd
import zipfile
import fnmatch
import flirt.reader.empatica
import matplotlib.pyplot as plt
from tqdm import tqdm
from datetime import datetime, timedelta
import cvxopt as cv
from neurokit2 import eda_phasic
from matplotlib.font_manager import FontProperties
import matplotlib.dates as mdates
# rootPath = r"./"
# pattern = '*.zip'
rootPath = input("Enter Folder Path : ")
pattern = input("Enter File Name : ")
for root, dirs, files in os.walk(rootPath):
for filename in fnmatch.filter(files, pattern):
print(os.path.join(root, filename))
zipfile.ZipFile(os.path.join(root, filename)).extractall(
os.path.join(root, os.path.splitext(filename)[0]))
dir = os.path.splitext(pattern)[0]
# os.listdir(dir)
class process:
def moving_avarage_smoothing(X, k, description_str):
S = np.zeros(X.shape[0])
for t in tqdm(range(X.shape[0]), desc=description_str):
if t < k:
S[t] = np.mean(X[:t+1])
else:
S[t] = np.sum(X[t-k:t])/k
return S
def deviation_above_mean(unit, mean_unit, std_unit):
'''
Function takes 3 arguments
unit : number of Standard deviations above the mean
mean_unit : mean value of each signal
std_unit : standard deviation of each signal
'''
if unit == 0:
return (mean_unit)
else:
return (mean_unit + (unit*std_unit))
def Starting_timeStamp(column, time_frames, deviation_metric):
'''
Function takes signal, its timestamps and threshold for calculating the starting time when the signal crosses the throshold value
'''
starting_time_index = []
for i in range(len(column)-1): #iterating till the end of the array
if column[i] < deviation_metric and column[i+1] > deviation_metric: # checking if the n+1 element is greater than nth element to conclude if the signal is increasing
starting_time_index.append(time_frames[i]) #appending the timestamp's index to the declared empty array
return starting_time_index
def Ending_timeStamp(column, time_frames, deviation_metric):
'''
Function takes signal, its timestamps and threshold for calculating the starting time when the signal crosses the throshold value
'''
time_index = []
for i in range(len(column)-1):
if column[i] > deviation_metric and column[i+1] < deviation_metric: # checking if the n+1 element is lesser than nth element to conclude if the signal is decreasing
time_index.append(time_frames[i])
if column[len(column) - 1] > deviation_metric: # checking for hanging ends, where the signal stops abruptly
time_index.insert(
len(time_index), time_frames[len(time_frames) - 1]) # inserting the timestamp's index to the last index of the array
else:
pass
return time_index
def Extract_HRV_Information():
global hrv_features # declaring global to get access them for combined plot function
global hrv_events_df # declaring global to get access them for combined plot function
ibi = pd.read_csv(rootPath+'/'+dir+'\IBI.csv')
mean_ibi = ibi[' IBI'].mean()
average_heart_rate = 60/mean_ibi
print('mean ibi is :', mean_ibi)
print('mean heart rate :', average_heart_rate.round())
ibis = flirt.reader.empatica.read_ibi_file_into_df(
rootPath+'/'+dir + '\IBI.csv')
hrv_features = flirt.get_hrv_features(
ibis['ibi'], 128, 1, ["td", "fd"], 0.2)
hrv_features = hrv_features.dropna(how='any', axis=0)
hrv_features.reset_index(inplace=True)
hrv_features['datetime'] = hrv_features['datetime'].dt.tz_convert('US/Eastern')
hrv_features['datetime'] = pd.to_datetime(hrv_features['datetime'])
hrv_features['datetime'] = hrv_features['datetime'].apply(lambda x: datetime.replace(x, tzinfo=None))
# smoothing the curve
print('\n', '******************** Smoothing The Curve ********************', '\n')
MAG_K500 = process.moving_avarage_smoothing(
hrv_features['hrv_rmssd'], 500, "Processing HRV Data")
hrv_features['MAG_K500'] = MAG_K500
# hrv_features.to_csv("./Metadata/"+ dir+"_HRV.csv")
# hrv_features.to_csv(os.path.join('./Metadata'+dir+'_HRV.csv'))
mean_rmssd = hrv_features['hrv_rmssd'].mean()
std_rmssd = hrv_features['hrv_rmssd'].std()
# getting the starting and ending time of of the signal
starting_timestamp = process.Starting_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
ending_timestamp = process.Ending_timeStamp(hrv_features['MAG_K500'], hrv_features['datetime'],
process.deviation_above_mean(1, mean_rmssd, std_rmssd))
# in the below if case i am assuming that there was no events that crossed the threshold
if len(starting_timestamp) < 1:
fig, ax1 = plt.subplots(figsize=(30, 10))
ax1.plot(hrv_features['datetime'],
hrv_features['MAG_K500'], color='red')
# fig.savefig('./Plots/HRV_figure.png')
else:
#check if the len of starting timestamps and ending timestamps are equal if not popping the last element of the ending timestamp
if starting_timestamp > ending_timestamp:
ending_timestamp.pop(0)
else:
pass
difference = [] # empty array to see how long the event lasts in seconds
time_delta_minutes = []
desired_time_index = []
zip_object = zip(ending_timestamp, starting_timestamp)
for list1_i, list2_i in zip_object:
# append each difference to list
difference.append(list1_i-list2_i) #subtracting ending timestamp - starting timestamp to get difference in seconds
for i in difference:
time_delta_minutes.append(i.total_seconds()/60) # converting the second's difference to minuted
time_delta_minutes
for i in range(len(time_delta_minutes)):
if time_delta_minutes[i] > 5.00: #checking if the each episode is more then 5 minutes
desired_time_index.append(i)
starting_timestamp_df = pd.DataFrame(starting_timestamp)
ending_timestamp_df =
|
pd.DataFrame(ending_timestamp)
|
pandas.DataFrame
|
from nose_parameterized import parameterized
from unittest import TestCase
from pandas import (
Series,
DataFrame,
DatetimeIndex,
date_range,
Timedelta,
read_csv
)
from pandas.util.testing import (assert_frame_equal)
import os
import gzip
from pyfolio.round_trips import (extract_round_trips,
add_closing_transactions,
_groupby_consecutive,
)
class RoundTripTestCase(TestCase):
dates = date_range(start='2015-01-01', freq='D', periods=20)
dates_intraday = date_range(start='2015-01-01',
freq='2BH', periods=8)
@parameterized.expand([
(DataFrame(data=[[2, 10., 'A'],
[2, 20., 'A'],
[-2, 20., 'A'],
[-2, 10., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[:4]),
DataFrame(data=[[4, 15., 'A'],
[-4, 15., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 2]])
.rename_axis('dt', axis='index')
),
(DataFrame(data=[[2, 10., 'A'],
[2, 20., 'A'],
[2, 20., 'A'],
[2, 10., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 1, 4, 5]]),
DataFrame(data=[[4, 15., 'A'],
[4, 15., 'A'],
],
columns=['amount', 'price', 'symbol'],
index=dates_intraday[[0, 4]])
.rename_axis('dt', axis='index')
),
])
def test_groupby_consecutive(self, transactions, expected):
grouped_txn = _groupby_consecutive(transactions)
assert_frame_equal(grouped_txn.sort_index(axis='columns'),
expected.sort_index(axis='columns'))
@parameterized.expand([
# Simple round-trip
(DataFrame(data=[[2, 10., 'A'],
[-2, 15., 'A']],
columns=['amount', 'price', 'symbol'],
index=dates[:2]),
DataFrame(data=[[dates[0], dates[1],
|
Timedelta(days=1)
|
pandas.Timedelta
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
# flake8: noqa
from datetime import datetime
import csv
import os
import sys
import re
import nose
import platform
from multiprocessing.pool import ThreadPool
from numpy import nan
import numpy as np
from pandas.io.common import DtypeWarning
from pandas import DataFrame, Series, Index, MultiIndex, DatetimeIndex
from pandas.compat import(
StringIO, BytesIO, PY3, range, long, lrange, lmap, u
)
from pandas.io.common import URLError
import pandas.io.parsers as parsers
from pandas.io.parsers import (read_csv, read_table, read_fwf,
TextFileReader, TextParser)
import pandas.util.testing as tm
import pandas as pd
from pandas.compat import parse_date
import pandas.lib as lib
from pandas import compat
from pandas.lib import Timestamp
from pandas.tseries.index import date_range
import pandas.tseries.tools as tools
from numpy.testing.decorators import slow
import pandas.parser
class ParserTests(object):
"""
Want to be able to test either C+Cython or Python+Cython parsers
"""
data1 = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
def read_csv(self, *args, **kwargs):
raise NotImplementedError
def read_table(self, *args, **kwargs):
raise NotImplementedError
def setUp(self):
import warnings
warnings.filterwarnings(action='ignore', category=FutureWarning)
self.dirpath = tm.get_data_path()
self.csv1 = os.path.join(self.dirpath, 'test1.csv')
self.csv2 = os.path.join(self.dirpath, 'test2.csv')
self.xls1 = os.path.join(self.dirpath, 'test.xls')
def construct_dataframe(self, num_rows):
df = DataFrame(np.random.rand(num_rows, 5), columns=list('abcde'))
df['foo'] = 'foo'
df['bar'] = 'bar'
df['baz'] = 'baz'
df['date'] = pd.date_range('20000101 09:00:00',
periods=num_rows,
freq='s')
df['int'] = np.arange(num_rows, dtype='int64')
return df
def generate_multithread_dataframe(self, path, num_rows, num_tasks):
def reader(arg):
start, nrows = arg
if not start:
return pd.read_csv(path, index_col=0, header=0, nrows=nrows,
parse_dates=['date'])
return pd.read_csv(path,
index_col=0,
header=None,
skiprows=int(start) + 1,
nrows=nrows,
parse_dates=[9])
tasks = [
(num_rows * i / num_tasks,
num_rows / num_tasks) for i in range(num_tasks)
]
pool = ThreadPool(processes=num_tasks)
results = pool.map(reader, tasks)
header = results[0].columns
for r in results[1:]:
r.columns = header
final_dataframe = pd.concat(results)
return final_dataframe
def test_converters_type_must_be_dict(self):
with tm.assertRaisesRegexp(TypeError, 'Type converters.+'):
self.read_csv(StringIO(self.data1), converters=0)
def test_empty_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), decimal='')
def test_empty_thousands_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands='')
def test_multi_character_decimal_marker(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
self.assertRaises(ValueError, read_csv, StringIO(data), thousands=',,')
def test_empty_string(self):
data = """\
One,Two,Three
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(StringIO(data))
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []},
keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five',
'', 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(
StringIO(data), na_values=['a'], keep_default_na=False)
xp = DataFrame({'One': [np.nan, 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
df = self.read_csv(StringIO(data), na_values={'One': [], 'Three': []})
xp = DataFrame({'One': ['a', 'b', np.nan, 'd', 'e', np.nan, 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['one', 'two', 'three', np.nan, 'five',
np.nan, 'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
# GH4318, passing na_values=None and keep_default_na=False yields
# 'None' as a na_value
data = """\
One,Two,Three
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
df = self.read_csv(
StringIO(data), keep_default_na=False)
xp = DataFrame({'One': ['a', 'b', '', 'd', 'e', 'nan', 'g'],
'Two': [1, 2, 3, 4, 5, 6, 7],
'Three': ['None', 'two', 'None', 'nan', 'five', '',
'seven']})
tm.assert_frame_equal(xp.reindex(columns=df.columns), df)
def test_read_csv(self):
if not compat.PY3:
if compat.is_platform_windows():
prefix = u("file:///")
else:
prefix = u("file://")
fname = prefix + compat.text_type(self.csv1)
# it works!
read_csv(fname, index_col=0, parse_dates=True)
def test_dialect(self):
data = """\
label1,label2,label3
index1,"a,c,e
index2,b,d,f
"""
dia = csv.excel()
dia.quoting = csv.QUOTE_NONE
df = self.read_csv(StringIO(data), dialect=dia)
data = '''\
label1,label2,label3
index1,a,c,e
index2,b,d,f
'''
exp = self.read_csv(StringIO(data))
exp.replace('a', '"a', inplace=True)
tm.assert_frame_equal(df, exp)
def test_dialect_str(self):
data = """\
fruit:vegetable
apple:brocolli
pear:tomato
"""
exp = DataFrame({
'fruit': ['apple', 'pear'],
'vegetable': ['brocolli', 'tomato']
})
dia = csv.register_dialect('mydialect', delimiter=':') # noqa
df = self.read_csv(StringIO(data), dialect='mydialect')
tm.assert_frame_equal(df, exp)
csv.unregister_dialect('mydialect')
def test_1000_sep(self):
data = """A|B|C
1|2,334|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334, 13],
'C': [5, 10.]
})
df = self.read_csv(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|', thousands=',')
tm.assert_frame_equal(df, expected)
def test_1000_sep_with_decimal(self):
data = """A|B|C
1|2,334.01|5
10|13|10.
"""
expected = DataFrame({
'A': [1, 10],
'B': [2334.01, 13],
'C': [5, 10.]
})
tm.assert_equal(expected.A.dtype, 'int64')
tm.assert_equal(expected.B.dtype, 'float')
tm.assert_equal(expected.C.dtype, 'float')
df = self.read_csv(StringIO(data), sep='|', thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data), sep='|',
thousands=',', decimal='.')
tm.assert_frame_equal(df, expected)
data_with_odd_sep = """A|B|C
1|2.334,01|5
10|13|10,
"""
df = self.read_csv(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
df = self.read_table(StringIO(data_with_odd_sep),
sep='|', thousands='.', decimal=',')
tm.assert_frame_equal(df, expected)
def test_separator_date_conflict(self):
# Regression test for issue #4678: make sure thousands separator and
# date parsing do not conflict.
data = '06-02-2013;13:00;1-000.215'
expected = DataFrame(
[[datetime(2013, 6, 2, 13, 0, 0), 1000.215]],
columns=['Date', 2]
)
df = self.read_csv(StringIO(data), sep=';', thousands='-',
parse_dates={'Date': [0, 1]}, header=None)
tm.assert_frame_equal(df, expected)
def test_squeeze(self):
data = """\
a,1
b,2
c,3
"""
idx = Index(['a', 'b', 'c'], name=0)
expected = Series([1, 2, 3], name=1, index=idx)
result = self.read_table(StringIO(data), sep=',', index_col=0,
header=None, squeeze=True)
tm.assertIsInstance(result, Series)
tm.assert_series_equal(result, expected)
def test_squeeze_no_view(self):
# GH 8217
# series should not be a view
data = """time,data\n0,10\n1,11\n2,12\n4,14\n5,15\n3,13"""
result = self.read_csv(StringIO(data), index_col='time', squeeze=True)
self.assertFalse(result._is_view)
def test_inf_parsing(self):
data = """\
,A
a,inf
b,-inf
c,Inf
d,-Inf
e,INF
f,-INF
g,INf
h,-INf
i,inF
j,-inF"""
inf = float('inf')
expected = Series([inf, -inf] * 5)
df = read_csv(StringIO(data), index_col=0)
tm.assert_almost_equal(df['A'].values, expected.values)
df = read_csv(StringIO(data), index_col=0, na_filter=False)
tm.assert_almost_equal(df['A'].values, expected.values)
def test_multiple_date_col(self):
# Can use multiple date parsers
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def func(*date_cols):
return lib.try_parse_dates(parsers._concat_date_cols(date_cols))
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
prefix='X',
parse_dates={'nominal': [1, 2],
'actual': [1, 3]})
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'nominal'], d)
df = self.read_csv(StringIO(data), header=None,
date_parser=func,
parse_dates={'nominal': [1, 2],
'actual': [1, 3]},
keep_date_col=True)
self.assertIn('nominal', df)
self.assertIn('actual', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = """\
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
df = read_csv(StringIO(data), header=None,
prefix='X',
parse_dates=[[1, 2], [1, 3]])
self.assertIn('X1_X2', df)
self.assertIn('X1_X3', df)
self.assertNotIn('X1', df)
self.assertNotIn('X2', df)
self.assertNotIn('X3', df)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.ix[0, 'X1_X2'], d)
df = read_csv(StringIO(data), header=None,
parse_dates=[[1, 2], [1, 3]], keep_date_col=True)
self.assertIn('1_2', df)
self.assertIn('1_3', df)
self.assertIn(1, df)
self.assertIn(2, df)
self.assertIn(3, df)
data = '''\
KORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
'''
df = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[1], index_col=1)
d = datetime(1999, 1, 27, 19, 0)
self.assertEqual(df.index[0], d)
def test_multiple_date_cols_int_cast(self):
data = ("KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"
"KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"
"KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"
"KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"
"KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"
"KORD,19990127, 23:00:00, 22:56:00, -0.5900")
date_spec = {'nominal': [1, 2], 'actual': [1, 3]}
import pandas.io.date_converters as conv
# it works!
df = self.read_csv(StringIO(data), header=None, parse_dates=date_spec,
date_parser=conv.parse_date_time)
self.assertIn('nominal', df)
def test_multiple_date_col_timestamp_parse(self):
data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25
05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""
result = self.read_csv(StringIO(data), sep=',', header=None,
parse_dates=[[0, 1]], date_parser=Timestamp)
ex_val = Timestamp('05/31/2012 15:30:00.029')
self.assertEqual(result['0_1'][0], ex_val)
def test_single_line(self):
# GH 6607
# Test currently only valid with python engine because sep=None and
# delim_whitespace=False. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
'sep=None with delim_whitespace=False'):
# sniff separator
buf = StringIO()
sys.stdout = buf
# printing warning message when engine == 'c' for now
try:
# it works!
df = self.read_csv(StringIO('1,2'), names=['a', 'b'],
header=None, sep=None)
tm.assert_frame_equal(DataFrame({'a': [1], 'b': [2]}), df)
finally:
sys.stdout = sys.__stdout__
def test_multiple_date_cols_with_header(self):
data = """\
ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
df = self.read_csv(StringIO(data), parse_dates={'nominal': [1, 2]})
self.assertNotIsInstance(df.nominal[0], compat.string_types)
ts_data = """\
ID,date,nominalTime,actualTime,A,B,C,D,E
KORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000
"""
def test_multiple_date_col_name_collision(self):
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
parse_dates={'ID': [1, 2]})
data = """\
date_NominalTime,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000""" # noqa
self.assertRaises(ValueError, self.read_csv, StringIO(data),
parse_dates=[[1, 2]])
def test_index_col_named(self):
no_header = """\
KORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000
KORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000
KORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000
KORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000
KORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000
KORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""
h = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"
data = h + no_header
rs = self.read_csv(StringIO(data), index_col='ID')
xp = self.read_csv(StringIO(data), header=0).set_index('ID')
tm.assert_frame_equal(rs, xp)
self.assertRaises(ValueError, self.read_csv, StringIO(no_header),
index_col='ID')
data = """\
1,2,3,4,hello
5,6,7,8,world
9,10,11,12,foo
"""
names = ['a', 'b', 'c', 'd', 'message']
xp = DataFrame({'a': [1, 5, 9], 'b': [2, 6, 10], 'c': [3, 7, 11],
'd': [4, 8, 12]},
index=Index(['hello', 'world', 'foo'], name='message'))
rs = self.read_csv(StringIO(data), names=names, index_col=['message'])
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
rs = self.read_csv(StringIO(data), names=names, index_col='message')
tm.assert_frame_equal(xp, rs)
self.assertEqual(xp.index.name, rs.index.name)
def test_usecols_index_col_False(self):
# Issue 9082
s = "a,b,c,d\n1,2,3,4\n5,6,7,8"
s_malformed = "a,b,c,d\n1,2,3,4,\n5,6,7,8,"
cols = ['a', 'c', 'd']
expected = DataFrame({'a': [1, 5], 'c': [3, 7], 'd': [4, 8]})
df = self.read_csv(StringIO(s), usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
df = self.read_csv(StringIO(s_malformed),
usecols=cols, index_col=False)
tm.assert_frame_equal(expected, df)
def test_index_col_is_True(self):
# Issue 9798
self.assertRaises(ValueError, self.read_csv, StringIO(self.ts_data),
index_col=True)
def test_converter_index_col_bug(self):
# 1835
data = "A;B\n1;2\n3;4"
rs = self.read_csv(StringIO(data), sep=';', index_col='A',
converters={'A': lambda x: x})
xp = DataFrame({'B': [2, 4]}, index=Index([1, 3], name='A'))
tm.assert_frame_equal(rs, xp)
self.assertEqual(rs.index.name, xp.index.name)
def test_date_parser_int_bug(self):
# #3071
log_file = StringIO(
'posix_timestamp,elapsed,sys,user,queries,query_time,rows,'
'accountid,userid,contactid,level,silo,method\n'
'1343103150,0.062353,0,4,6,0.01690,3,'
'12345,1,-1,3,invoice_InvoiceResource,search\n'
)
def f(posix_string):
return datetime.utcfromtimestamp(int(posix_string))
# it works!
read_csv(log_file, index_col=0, parse_dates=0, date_parser=f)
def test_multiple_skts_example(self):
data = "year, month, a, b\n 2001, 01, 0.0, 10.\n 2001, 02, 1.1, 11."
pass
def test_malformed(self):
# all
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
"""
try:
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#')
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# skip_footer
data = """ignore
A,B,C
1,2,3 # comment
1,2,3,4,5
2,3,4
footer
"""
# GH 6607
# Test currently only valid with python engine because
# skip_footer != 0. Temporarily copied to TestPythonParser.
# Test for ValueError with other engines:
try:
with tm.assertRaisesRegexp(ValueError, 'skip_footer'): # XXX
df = self.read_table(
StringIO(data), sep=',', header=1, comment='#',
skip_footer=1)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 4, saw 5', str(inst))
# first chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1,
skiprows=[2])
df = it.read(5)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# middle chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',', header=1,
comment='#', iterator=True, chunksize=1,
skiprows=[2])
df = it.read(1)
it.read(2)
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
# last chunk
data = """ignore
A,B,C
skip
1,2,3
3,5,10 # comment
1,2,3,4,5
2,3,4
"""
try:
it = self.read_table(StringIO(data), sep=',',
header=1, comment='#',
iterator=True, chunksize=1, skiprows=[2])
df = it.read(1)
it.read()
self.assertTrue(False)
except Exception as inst:
self.assertIn('Expected 3 fields in line 6, saw 5', str(inst))
def test_passing_dtype(self):
# GH 6607
# Passing dtype is currently only supported by the C engine.
# Temporarily copied to TestCParser*.
# Test for ValueError with other engines:
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
df = DataFrame(np.random.rand(5, 2), columns=list(
'AB'), index=['1A', '1B', '1C', '1D', '1E'])
with tm.ensure_clean('__passing_str_as_dtype__.csv') as path:
df.to_csv(path)
# GH 3795
# passing 'str' as the dtype
result = self.read_csv(path, dtype=str, index_col=0)
tm.assert_series_equal(result.dtypes, Series(
{'A': 'object', 'B': 'object'}))
# we expect all object columns, so need to convert to test for
# equivalence
result = result.astype(float)
tm.assert_frame_equal(result, df)
# invalid dtype
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'foo', 'B': 'float64'},
index_col=0)
# valid but we don't support it (date)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0)
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'datetime64', 'B': 'float64'},
index_col=0, parse_dates=['B'])
# valid but we don't support it
self.assertRaises(TypeError, self.read_csv, path,
dtype={'A': 'timedelta64', 'B': 'float64'},
index_col=0)
with tm.assertRaisesRegexp(ValueError,
"The 'dtype' option is not supported"):
# empty frame
# GH12048
self.read_csv(StringIO('A,B'), dtype=str)
def test_quoting(self):
bad_line_small = """printer\tresult\tvariant_name
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jacob
Klosterdruckerei\tKlosterdruckerei <Salem> (1611-1804)\tMuller, Jakob
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\t"Furststiftische Hofdruckerei, <Kempten""
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tGaller, Alois
Klosterdruckerei\tKlosterdruckerei <Kempten> (1609-1805)\tHochfurstliche Buchhandlung <Kempten>"""
self.assertRaises(Exception, self.read_table, StringIO(bad_line_small),
sep='\t')
good_line_small = bad_line_small + '"'
df = self.read_table(StringIO(good_line_small), sep='\t')
self.assertEqual(len(df), 3)
def test_non_string_na_values(self):
# GH3611, na_values that are not a string are an issue
with tm.ensure_clean('__non_string_na_values__.csv') as path:
df = DataFrame({'A': [-999, 2, 3], 'B': [1.2, -999, 4.5]})
df.to_csv(path, sep=' ', index=False)
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, result2)
tm.assert_frame_equal(result2, result3)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, result3)
tm.assert_frame_equal(result5, result3)
tm.assert_frame_equal(result6, result3)
tm.assert_frame_equal(result7, result3)
good_compare = result3
# with an odd float format, so we can't match the string 999.0
# exactly, but need float matching
df.to_csv(path, sep=' ', index=False, float_format='%.3f')
result1 = read_csv(path, sep=' ', header=0,
na_values=['-999.0', '-999'])
result2 = read_csv(path, sep=' ', header=0,
na_values=[-999, -999.0])
result3 = read_csv(path, sep=' ', header=0,
na_values=[-999.0, -999])
tm.assert_frame_equal(result1, good_compare)
tm.assert_frame_equal(result2, good_compare)
tm.assert_frame_equal(result3, good_compare)
result4 = read_csv(path, sep=' ', header=0, na_values=['-999.0'])
result5 = read_csv(path, sep=' ', header=0, na_values=['-999'])
result6 = read_csv(path, sep=' ', header=0, na_values=[-999.0])
result7 = read_csv(path, sep=' ', header=0, na_values=[-999])
tm.assert_frame_equal(result4, good_compare)
tm.assert_frame_equal(result5, good_compare)
tm.assert_frame_equal(result6, good_compare)
tm.assert_frame_equal(result7, good_compare)
def test_default_na_values(self):
_NA_VALUES = set(['-1.#IND', '1.#QNAN', '1.#IND', '-1.#QNAN',
'#N/A', 'N/A', 'NA', '#NA', 'NULL', 'NaN',
'nan', '-NaN', '-nan', '#N/A N/A', ''])
self.assertEqual(_NA_VALUES, parsers._NA_VALUES)
nv = len(_NA_VALUES)
def f(i, v):
if i == 0:
buf = ''
elif i > 0:
buf = ''.join([','] * i)
buf = "{0}{1}".format(buf, v)
if i < nv - 1:
buf = "{0}{1}".format(buf, ''.join([','] * (nv - i - 1)))
return buf
data = StringIO('\n'.join([f(i, v) for i, v in enumerate(_NA_VALUES)]))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
df = self.read_csv(data, header=None)
tm.assert_frame_equal(df, expected)
def test_custom_na_values(self):
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = [[1., nan, 3],
[nan, 5, nan],
[7, 8, nan]]
df = self.read_csv(StringIO(data), na_values=['baz'], skiprows=[1])
tm.assert_almost_equal(df.values, expected)
df2 = self.read_table(StringIO(data), sep=',', na_values=['baz'],
skiprows=[1])
tm.assert_almost_equal(df2.values, expected)
df3 = self.read_table(StringIO(data), sep=',', na_values='baz',
skiprows=[1])
tm.assert_almost_equal(df3.values, expected)
def test_nat_parse(self):
# GH 3062
df = DataFrame(dict({
'A': np.asarray(lrange(10), dtype='float64'),
'B': pd.Timestamp('20010101')}))
df.iloc[3:6, :] = np.nan
with tm.ensure_clean('__nat_parse_.csv') as path:
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
expected = Series(dict(A='float64', B='datetime64[ns]'))
tm.assert_series_equal(expected, result.dtypes)
# test with NaT for the nan_rep
# we don't have a method to specif the Datetime na_rep (it defaults
# to '')
df.to_csv(path)
result = read_csv(path, index_col=0, parse_dates=['B'])
tm.assert_frame_equal(result, df)
def test_skiprows_bug(self):
# GH #505
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=lrange(6), header=None,
index_col=0, parse_dates=True)
data2 = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
tm.assert_frame_equal(data, data2)
def test_deep_skiprows(self):
# GH #4382
text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in range(10)])
condensed_text = "a,b,c\n" + \
"\n".join([",".join([str(i), str(i + 1), str(i + 2)])
for i in [0, 1, 2, 3, 4, 6, 8, 9]])
data = self.read_csv(StringIO(text), skiprows=[6, 8])
condensed_data = self.read_csv(StringIO(condensed_text))
tm.assert_frame_equal(data, condensed_data)
def test_skiprows_blank(self):
# GH 9832
text = """#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
#foo,a,b,c
1/1/2000,1.,2.,3.
1/2/2000,4,5,6
1/3/2000,7,8,9
"""
data = self.read_csv(StringIO(text), skiprows=6, header=None,
index_col=0, parse_dates=True)
expected = DataFrame(np.arange(1., 10.).reshape((3, 3)),
columns=[1, 2, 3],
index=[datetime(2000, 1, 1), datetime(2000, 1, 2),
datetime(2000, 1, 3)])
expected.index.name = 0
tm.assert_frame_equal(data, expected)
def test_detect_string_na(self):
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = [['foo', 'bar'],
[nan, 'baz'],
[nan, nan]]
df = self.read_csv(StringIO(data))
tm.assert_almost_equal(df.values, expected)
def test_unnamed_columns(self):
data = """A,B,C,,
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
df = self.read_table(StringIO(data), sep=',')
tm.assert_almost_equal(df.values, expected)
self.assert_numpy_array_equal(df.columns,
['A', 'B', 'C', 'Unnamed: 3',
'Unnamed: 4'])
def test_string_nas(self):
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = self.read_csv(StringIO(data))
expected = DataFrame([['a', 'b', 'c'],
['d', np.nan, 'f'],
[np.nan, 'g', 'h']],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(result, expected)
def test_duplicate_columns(self):
for engine in ['python', 'c']:
data = """A,A,B,B,B
1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
# check default beahviour
df = self.read_table(StringIO(data), sep=',', engine=engine)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=False)
self.assertEqual(list(df.columns), ['A', 'A', 'B', 'B', 'B'])
df = self.read_table(StringIO(data), sep=',',
engine=engine, mangle_dupe_cols=True)
self.assertEqual(list(df.columns), ['A', 'A.1', 'B', 'B.1', 'B.2'])
def test_csv_mixed_type(self):
data = """A,B,C
a,1,2
b,3,4
c,4,5
"""
df = self.read_csv(StringIO(data))
# TODO
def test_csv_custom_parser(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
f = lambda x: datetime.strptime(x, '%Y%m%d')
df = self.read_csv(StringIO(data), date_parser=f)
expected = self.read_csv(StringIO(data), parse_dates=True)
tm.assert_frame_equal(df, expected)
def test_parse_dates_implicit_first_col(self):
data = """A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
df = self.read_csv(StringIO(data), parse_dates=True)
expected = self.read_csv(StringIO(data), index_col=0, parse_dates=True)
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
tm.assert_frame_equal(df, expected)
def test_parse_dates_string(self):
data = """date,A,B,C
20090101,a,1,2
20090102,b,3,4
20090103,c,4,5
"""
rs = self.read_csv(
StringIO(data), index_col='date', parse_dates='date')
idx = date_range('1/1/2009', periods=3)
idx.name = 'date'
xp = DataFrame({'A': ['a', 'b', 'c'],
'B': [1, 3, 4],
'C': [2, 4, 5]}, idx)
tm.assert_frame_equal(rs, xp)
def test_yy_format(self):
data = """date,time,B,C
090131,0010,1,2
090228,1020,3,4
090331,0830,5,6
"""
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[['date', 'time']])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
rs = self.read_csv(StringIO(data), index_col=0,
parse_dates=[[0, 1]])
idx = DatetimeIndex([datetime(2009, 1, 31, 0, 10, 0),
datetime(2009, 2, 28, 10, 20, 0),
datetime(2009, 3, 31, 8, 30, 0)],
dtype=object, name='date_time')
xp = DataFrame({'B': [1, 3, 5], 'C': [2, 4, 6]}, idx)
tm.assert_frame_equal(rs, xp)
def test_parse_dates_column_list(self):
from pandas.core.datetools import to_datetime
data = '''date;destination;ventilationcode;unitcode;units;aux_date
01/01/2010;P;P;50;1;12/1/2011
01/01/2010;P;R;50;1;13/1/2011
15/01/2010;P;P;50;1;14/1/2011
01/05/2010;P;P;50;1;15/1/2011'''
expected = self.read_csv(StringIO(data), sep=";", index_col=lrange(4))
lev = expected.index.levels[0]
levels = list(expected.index.levels)
levels[0] = lev.to_datetime(dayfirst=True)
# hack to get this to work - remove for final test
levels[0].name = lev.name
expected.index.set_levels(levels, inplace=True)
expected['aux_date'] = to_datetime(expected['aux_date'],
dayfirst=True)
expected['aux_date'] = lmap(Timestamp, expected['aux_date'])
tm.assertIsInstance(expected['aux_date'][0], datetime)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=[0, 5], dayfirst=True)
tm.assert_frame_equal(df, expected)
df = self.read_csv(StringIO(data), sep=";", index_col=lrange(4),
parse_dates=['date', 'aux_date'], dayfirst=True)
tm.assert_frame_equal(df, expected)
def test_no_header(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df = self.read_table(StringIO(data), sep=',', header=None)
df_pref = self.read_table(StringIO(data), sep=',', prefix='X',
header=None)
names = ['foo', 'bar', 'baz', 'quux', 'panda']
df2 = self.read_table(StringIO(data), sep=',', names=names)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df.values, expected)
tm.assert_almost_equal(df.values, df2.values)
self.assert_numpy_array_equal(df_pref.columns,
['X0', 'X1', 'X2', 'X3', 'X4'])
self.assert_numpy_array_equal(df.columns, lrange(5))
self.assert_numpy_array_equal(df2.columns, names)
def test_no_header_prefix(self):
data = """1,2,3,4,5
6,7,8,9,10
11,12,13,14,15
"""
df_pref = self.read_table(StringIO(data), sep=',', prefix='Field',
header=None)
expected = [[1, 2, 3, 4, 5.],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15]]
tm.assert_almost_equal(df_pref.values, expected)
self.assert_numpy_array_equal(df_pref.columns,
['Field0', 'Field1', 'Field2', 'Field3', 'Field4'])
def test_header_with_index_col(self):
data = """foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
names = ['A', 'B', 'C']
df = self.read_csv(StringIO(data), names=names)
self.assertEqual(names, ['A', 'B', 'C'])
values = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
expected = DataFrame(values, index=['foo', 'bar', 'baz'],
columns=['A', 'B', 'C'])
tm.assert_frame_equal(df, expected)
def test_read_csv_dataframe(self):
df = self.read_csv(self.csv1, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv1, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D'])
self.assertEqual(df.index.name, 'index')
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_no_index_name(self):
df = self.read_csv(self.csv2, index_col=0, parse_dates=True)
df2 = self.read_table(self.csv2, sep=',', index_col=0,
parse_dates=True)
self.assert_numpy_array_equal(df.columns, ['A', 'B', 'C', 'D', 'E'])
self.assertIsInstance(
df.index[0], (datetime, np.datetime64, Timestamp))
self.assertEqual(df.ix[:, ['A', 'B', 'C', 'D']
].values.dtype, np.float64)
tm.assert_frame_equal(df, df2)
def test_read_csv_infer_compression(self):
# GH 9770
expected = self.read_csv(self.csv1, index_col=0, parse_dates=True)
inputs = [self.csv1, self.csv1 + '.gz',
self.csv1 + '.bz2', open(self.csv1)]
for f in inputs:
df = self.read_csv(f, index_col=0, parse_dates=True,
compression='infer')
tm.assert_frame_equal(expected, df)
inputs[3].close()
def test_read_table_unicode(self):
fin = BytesIO(u('\u0141aski, Jan;1').encode('utf-8'))
df1 = read_table(fin, sep=";", encoding="utf-8", header=None)
tm.assertIsInstance(df1[0].values[0], compat.text_type)
def test_read_table_wrong_num_columns(self):
# too few!
data = """A,B,C,D,E,F
1,2,3,4,5,6
6,7,8,9,10,11,12
11,12,13,14,15,16
"""
self.assertRaises(Exception, self.read_csv, StringIO(data))
def test_read_table_duplicate_index(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
result = self.read_csv(StringIO(data), index_col=0)
expected = self.read_csv(StringIO(data)).set_index('index',
verify_integrity=False)
tm.assert_frame_equal(result, expected)
def test_read_table_duplicate_index_implicit(self):
data = """A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo,12,13,14,15
bar,12,13,14,15
"""
# it works!
result = self.read_csv(StringIO(data))
def test_parse_bools(self):
data = """A,B
True,1
False,2
True,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
YES,1
no,2
yes,3
No,3
Yes,3
"""
data = self.read_csv(StringIO(data),
true_values=['yes', 'Yes', 'YES'],
false_values=['no', 'NO', 'No'])
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
TRUE,1
FALSE,2
TRUE,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.bool_)
data = """A,B
foo,bar
bar,foo"""
result = self.read_csv(StringIO(data), true_values=['foo'],
false_values=['bar'])
expected = DataFrame({'A': [True, False], 'B': [False, True]})
tm.assert_frame_equal(result, expected)
def test_int_conversion(self):
data = """A,B
1.0,1
2.0,2
3.0,3
"""
data = self.read_csv(StringIO(data))
self.assertEqual(data['A'].dtype, np.float64)
self.assertEqual(data['B'].dtype, np.int64)
def test_infer_index_col(self):
data = """A,B,C
foo,1,2,3
bar,4,5,6
baz,7,8,9
"""
data = self.read_csv(StringIO(data))
self.assertTrue(data.index.equals(Index(['foo', 'bar', 'baz'])))
def test_read_nrows(self):
df = self.read_csv(StringIO(self.data1), nrows=3)
expected = self.read_csv(StringIO(self.data1))[:3]
tm.assert_frame_equal(df, expected)
def test_read_chunksize(self):
reader = self.read_csv(StringIO(self.data1), index_col=0, chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col=0)
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_read_chunksize_named(self):
reader = self.read_csv(
StringIO(self.data1), index_col='index', chunksize=2)
df = self.read_csv(StringIO(self.data1), index_col='index')
chunks = list(reader)
tm.assert_frame_equal(chunks[0], df[:2])
tm.assert_frame_equal(chunks[1], df[2:4])
tm.assert_frame_equal(chunks[2], df[4:])
def test_get_chunk_passed_chunksize(self):
data = """A,B,C
1,2,3
4,5,6
7,8,9
1,2,3"""
result = self.read_csv(
|
StringIO(data)
|
pandas.compat.StringIO
|
'''IO functions for various formats used: trace, sinex etc '''
import glob as _glob
import re as _re
import zlib
from io import BytesIO as _BytesIO
import logging
import numpy as _np
import pandas as _pd
from p_tqdm import p_map as _p_map
from p_tqdm.p_tqdm import tqdm as _tqdm
from ..gn_const import PT_CATEGORY, TYPE_CATEGORY
from ..gn_datetime import yydoysec2datetime as _yydoysec2datetime
from .common import path2bytes
_RE_BLK_HEAD = _re.compile(rb'\+S\w+\/\w+(\s[LU]|)\s*(CORR|COVA|INFO|)[ ]*\n(?:\*[ ].+\n|)(?:\*\w.+\n|)')
def _get_valid_stypes(stypes, verbose=True):
'''Returns only stypes in allowed list
Fastest if stypes size is small'''
allowed_stypes = set()
not_allowed_stypes = set()
for stype in stypes:
if stype in {'APR', 'EST', 'NEQ'}:
allowed_stypes.add(stype)
else:
if verbose:
not_allowed_stypes.add(stype)
if verbose and len(not_allowed_stypes) > 0:
logging.error(f'{not_allowed_stypes} not supported')
return sorted(list(allowed_stypes))
def _snx_extract_blk(snx_bytes, blk_name, remove_header=False):
'''
Extracts a blk content from a sinex databytes using the + and - blk_name bounds
Works for both vector and matrix blks.
Returns blk content (with or without header), count of content lines (ignooring the header),
matrix form [L or U] and matrix content type [INFO, COVA, CORR].
The latter two are empty in case of vector blk'''
blk_begin = snx_bytes.find(f'+{blk_name}'.encode())
blk_end = snx_bytes.find(f'-{blk_name}'.encode(), blk_begin)
if blk_begin == -1:
# _tqdm.write(f'{blk_name} blk missing')
return None #if there is no block begin bound -> None is returned
if blk_end == -1:
# _tqdm.write(f'{blk_name} blk corrupted')
return None
head_search = _RE_BLK_HEAD.search(string=snx_bytes, pos=blk_begin)
ma_form, ma_content = head_search.groups()
blk_content = snx_bytes[head_search.end():blk_end]
# blk content without header (usual request)
lines_count = blk_content.count(b'\n')
#may be skipped for last/first block (TODO)
if not remove_header:
blk_content = snx_bytes[head_search.span(2)[1]:blk_end]
# if header requested (1st request only)
return blk_content, lines_count, ma_form, ma_content
# ma_form, ma_content only for matrix
def _snx_extract(snx_bytes, stypes, obj_type, verbose=True):
# obj_type= matrix or vector
if obj_type == 'MATRIX':
stypes_dict = {
'EST': 'SOLUTION/MATRIX_ESTIMATE',
'APR': 'SOLUTION/MATRIX_APRIORI',
'NEQ': 'SOLUTION/NORMAL_EQUATION_MATRIX'
}
elif obj_type == 'VECTOR':
stypes_dict = {
'EST': 'SOLUTION/ESTIMATE',
'APR': 'SOLUTION/APRIORI',
'NEQ': 'SOLUTION/NORMAL_EQUATION_VECTOR',
'ID' : 'SITE/ID'
}
snx_buffer = b''
stypes_form, stypes_content, stypes_rows = {}, {}, {}
objects_in_buf = 0
for stype in stypes:
if stype in stypes_dict.keys():
remove_header = objects_in_buf != 0
if (objects_in_buf == 0) & (obj_type == 'MATRIX'): # override matrix header as comments may be present
snx_buffer+=b'*PARA1 PARA2 ____PARA2+0__________ ____PARA2+1__________ ____PARA2+2__________\n'
remove_header = True
stype_extr = _snx_extract_blk(snx_bytes=snx_bytes,
blk_name=stypes_dict[stype],
remove_header= remove_header)
# print(objects_in_buf != 0)
if stype_extr is not None:
snx_buffer += stype_extr[0]
stypes_rows[stype] = stype_extr[1]
stypes_form[stype] = stype_extr[2] #dict of forms
stypes_content[stype] = stype_extr[3] #dict of content
objects_in_buf += 1
else:
if verbose:
logging.error(f'{stype} ({stypes_dict[stype]}) blk not found')
return None
else:
if verbose:
logging.error(f'{stype} blk not supported')
stypes = list(stypes_rows.keys())
n_stypes = len(stypes) #existing stypes only
if n_stypes == 0:
if verbose:
logging.error('nothing found')
return None
return _BytesIO(snx_buffer), stypes_rows, stypes_form, stypes_content
def _get_snx_matrix(path_or_bytes,
stypes=('APR', 'EST'),
n_elements=None,
verbose=True):
'''
stypes = "APR","EST","NEQ"
APRIORY, ESTIMATE, NORMAL_EQUATION
Would want ot extract apriori in the very same run with only single parser call
If you use the INFO type this block should contain the normal equation matrix of the
constraints applied to your solution in SOLUTION/ESTIMATE.
n_elements is useful for the igs sinex files when matrix has missing end rows.\
Fetch it from estimates vector
'''
if isinstance(path_or_bytes, str):
snx_bytes = path2bytes(path_or_bytes)
else:
snx_bytes = path_or_bytes
snx_buffer, stypes_rows, stypes_form, stypes_content = _snx_extract(snx_bytes=snx_bytes,
stypes=stypes,
obj_type='MATRIX',
verbose=verbose)
matrix_raw = _pd.read_csv(snx_buffer,
delim_whitespace=True,
dtype={
0: _np.int16,
1: _np.int16,
}) #can be 4 and 5 columns; only 2 first int16
output = []
prev_idx = 0
for i in range(len(stypes_rows)):
idx = stypes_rows[stypes[i]]
# Where to get the n-elements for the apriori matrix? Should be taken from estimates matrix
ma_sq = _matrix_raw2square(
matrix_raw=matrix_raw[prev_idx:prev_idx + idx],
matrix_content_type=stypes_content[stypes[i]],
stypes_form=stypes_form[stypes[i]],
n_elements=n_elements)
output.append(ma_sq)
prev_idx += idx
return output, stypes,stypes_content
def snxdf2xyzdf(snxdf):
types_mask = snxdf.TYPE.isin(['STAX','STAY', 'STAZ', 'VELX', 'VELY', 'VELZ',]).values
snxdf.drop(index = snxdf.index.values[~types_mask],inplace=True)
snxdf['CODE_PT'] = snxdf.CODE.values + '_' + snxdf.PT.values.astype(object)
return snxdf.drop(columns=['CODE','PT','SOLN']).set_index(['CODE_PT', 'REF_EPOCH','TYPE']).unstack(2)
def _get_snx_vector(path_or_bytes, stypes=('APR', 'EST'), snx_format=True,verbose=True):
'''stypes = "APR","EST","NEQ"
APRIORY, ESTIMATE, NORMAL_EQUATION
'''
path = None
if isinstance(path_or_bytes, str):
path = path_or_bytes
snx_bytes = path2bytes(path)
elif isinstance(path_or_bytes, list):
path, stypes, snx_format,verbose = path_or_bytes
snx_bytes = path2bytes(path)
else:
snx_bytes = path_or_bytes
stypes = _get_valid_stypes(stypes, verbose=verbose)
if stypes == ('NEQ'):
stypes = ('APR','NEQ')
#should always return NEQ vector with APR above it
if verbose:
logging.info('Prepending APR')
extracted = _snx_extract(snx_bytes=snx_bytes, stypes=stypes, obj_type='VECTOR', verbose=verbose)
if extracted is None:
return None
snx_buffer, stypes_rows, stypes_form, stypes_content = extracted
try:
vector_raw = _pd.read_csv(
snx_buffer,
delim_whitespace=True,
comment=b'*',
header=None,
usecols=[1, 2, 3, 4, 5, 8, 9],
names=['TYPE', 'CODE', 'PT', 'SOLN', 'REF_EPOCH', 'EST', 'STD'],
dtype={
1: TYPE_CATEGORY,
2: object,
3: PT_CATEGORY,
4: 'category', #can not be int as may be '----'
5: object,
8: _np.float_,
9: _np.float_
})
except ValueError as _e:
if _e.args[0][:33] == 'could not convert string to float':
_tqdm.write(f'{path} data corrupted. Skipping', end=' | ')
return None
else:
raise _e
if path is not None:
del snx_buffer #need to test this better
# removing possible str values (->0) and converting to int8
# vector_raw.loc[~vector_raw['SOLN'].str.isnumeric(),'SOLN'] = 0
# vector_raw['SOLN'] = vector_raw['SOLN'].apply(_np.int8)
output = []
prev_idx = 0
for i in range(len(stypes_rows)):
stype = stypes[i]
idx = stypes_rows[stype]
vec_df = (vector_raw[prev_idx:prev_idx + idx]).copy()
if i == 0:
vec_df.REF_EPOCH = _yydoysec2datetime(vec_df.REF_EPOCH,
recenter=True,
as_j2000=True)
else:
# Assuming the vectors are in the same order so blindly concat them
vec_df = vec_df.iloc[:, 5:].reset_index(drop=True)
if stype in ['APR', 'NEQ']:
vec_df.rename(columns={'EST': stype}, inplace=True)
vec_df.drop(columns='STD', inplace=True)
output.append(vec_df)
prev_idx += idx
output = _pd.concat(output, axis=1)
if snx_format:
return output
return snxdf2xyzdf(output)
def _matrix_raw2square(matrix_raw,matrix_content_type,stypes_form,n_elements=None):
if stypes_form == b'U':
logging.info('U matrix detected. Not tested!')
idx = matrix_raw.iloc[:,:2].values - 1
#last element of first index column. Should be specified for IGS APR matrices (?)
n_elements = idx[-1,0] + 1 if n_elements is None else n_elements
rows = idx[:,0]
cols = idx[:,1]
values = matrix_raw.iloc[:,2:].values.flatten(order='F')
nanmask = ~_np.isnan(values)
rows = _np.concatenate((rows,rows,rows))
cols = _np.concatenate((cols,cols+1,cols+2))
matrix = _np.ndarray((n_elements,n_elements),dtype=values.dtype)
matrix.fill(0)
matrix[rows[nanmask],cols[nanmask]] = values[nanmask]
# shouldn't care if lower or upper triangle
matrix_square = matrix.T + matrix
# CORR diagonal elements are std values. Dropping as it is a copy of EST block std
_np.fill_diagonal(matrix_square,1 if matrix_content_type == b'CORR' else _np.diag(matrix))
return matrix_square
def _unc_snx_neq(path_or_bytes):
vector = _get_snx_vector(path_or_bytes=path_or_bytes,stypes=['APR','EST','NEQ'],verbose=False)
matrix = _get_snx_matrix(path_or_bytes=path_or_bytes,stypes=['NEQ'],
n_elements=vector.shape[0],verbose=False)
neqm = matrix[0][0]
neqv = vector.NEQ.values
aprv = vector.APR.values
vector.drop(columns='NEQ',inplace=True)
vector['UNC'] = aprv + _np.linalg.solve(a=neqm,b=neqv)
return vector
def _unc_snx_cova(path_or_bytes):
vector = _get_snx_vector(path_or_bytes=path_or_bytes,stypes=['APR','EST'],verbose=False)
matrix = _get_snx_matrix(path_or_bytes=path_or_bytes,stypes=['APR','EST'],
n_elements=vector.shape[0],verbose=False)
aprm = matrix[0][0]
estm = matrix[0][1]
aprv = vector.APR.values
estv = vector.EST.values
vector['UNC'] = aprv + (_np.linalg.solve(aprm,aprm-estm) @ (estv - aprv))
return vector
def unc_snx(path,snx_format=True):
'''removes constrains from snx estimates using either COVA or NEQ method'''
snx_bytes = path2bytes(path)
if snx_bytes.find(b'NORMAL_EQUATION_MATRIX') == -1:
output = _unc_snx_cova(snx_bytes)
else:
output = _unc_snx_neq(snx_bytes)
if snx_format:
return output
return snxdf2xyzdf(output)
def _read_snx_solution(path_or_bytes):
'''_get_snx_vector template to get a df with multiIndex columns as:
| APR | EST | STD |
|X|Y|Z|X|Y|Z|X|Y|Z|'''
return _get_snx_vector(path_or_bytes=path_or_bytes,
stypes=('APR', 'EST'),
snx_format=False,
verbose=False)
def gather_sinex(glob_expr, n_threads=4, unconstrain=False):
'''Expects a glob.glob() expression (e.g. '/data/cddis/*/esa*.snx.Z')'''
files = sorted(_glob.glob(glob_expr))
n_files = len(files)
if not unconstrain:
data = _p_map(_get_snx_vector,
files, [('APR', 'EST')] * n_files,
[True] * n_files, [False] * n_files,
num_cpus=n_threads)
else:
data = _p_map(unc_snx, files, [False] * n_files, num_cpus=4)
return data
# return _pd.concat(data, axis=0).pivot(index=['CODE','TYPE'],columns='REF_EPOCH').T
def _get_snx_vector_gzchunks(filename,block_name='SOLUTION/ESTIMATE',size_lookback=100):
'''extract block from a large gzipped sinex file e.g. ITRF2014 sinex'''
block_open = False
block_bytes = b''
stop = False
gzip_file = filename.endswith('.gz')
if gzip_file:
decompressor_zlib = zlib.decompressobj(16+zlib.MAX_WBITS)
with open(file=filename,mode='rb') as compressed_file:
i=0
while not stop: # until EOF
uncompressed = compressed_file.read(8192)
if gzip_file:
uncompressed = decompressor_zlib.decompress(uncompressed)
if i>0:
old_chunk = chunk[-size_lookback:]
chunk = old_chunk + uncompressed
else:
chunk = uncompressed
if chunk.find(f'+{block_name}'.encode()) != -1:
block_open = True
# print('found')
if block_open:
block_bytes += chunk[size_lookback if i>0 else 0:]
if chunk.find(f'-{block_name}'.encode()) != -1:
block_open = False
stop=True
i+=1
return _get_snx_vector(path_or_bytes=block_bytes,stypes=['EST'])
#SINEX ID BLOCK
def degminsec2decdeg(series):
'''Converts degrees/minutes/seconds to decimal degrees'''
_deg = series.str[:-8].values.astype(float)
_min = series.str[-8:-5].values.astype(float)
_sec = series.str[-5:].values.astype(float)
sign = _np.sign(_deg)
return _deg + sign*_min/60 + sign*_sec/3600
def _get_snx_id(path):
snx_bytes = path2bytes(path)
site_id = _snx_extract_blk(snx_bytes=snx_bytes,blk_name='SITE/ID',remove_header=True)[0]
site_id = site_id.decode(encoding='utf8',errors='ignore').encode()
id_df = _pd.read_fwf(_BytesIO(site_id),header=None,
colspecs=[(0,5),(5,8),(8,18),(18,20),(20,44),(44,55),(55,68),(68,76)])
id_df.columns = ['CODE','PT','DOMES','T','LOCATION','LON','LAT','H']
id_df.LON = degminsec2decdeg(id_df.LON)
id_df.LAT = degminsec2decdeg(id_df.LAT)
return id_df
def llh2snxdms(llh):
'''converts llh ndarray to degree-minute-second snx id block format
LAT LON HEI
'''
ll_dd = llh[:,:2]
ll_dd[:,1] %=360
# zero_mask = _np.any(ll_dd == 0.0, axis=1)
# print(llh[zero_mask])
sign = _np.sign(ll_dd)
ll_dd = _np.abs(ll_dd)
hei = llh[:,2]
minutes,seconds = _np.divmod(ll_dd*3600,60)
degrees,minutes = _np.divmod(minutes,60)
degrees *= sign
array = _np.concatenate([degrees,minutes,seconds.round(1),llh[:,[2,]].round(1)],axis=1)
llh_dms_df = _pd.DataFrame(array,dtype=object,
columns=[['LAT','LON','LAT','LON','LAT','LON','HEI'],
['D','D','M','M','S','S','']])
llh_dms_df.iloc[:,:4] = llh_dms_df.iloc[:,:4].astype(int)
llh_dms_df = llh_dms_df.astype(str)
n_rows = llh_dms_df.shape[0]
ll_stack = _pd.concat([llh_dms_df.LON, llh_dms_df.LAT],axis=0)
ll_stack = ( ll_stack.D.str.rjust(4).values
+ ll_stack.M.str.rjust(3).values
+ ll_stack.S.str.rjust(5).values)
buf = ll_stack[:n_rows] + ll_stack[n_rows:] + llh_dms_df.HEI.str.rjust(8).values
buf[(hei>8000) | (hei<-2000) ] = ' 000 00 00.0 00 00 00.0 000.0' #| zero_mask
return buf
def logllh2snxdms(llh):
'''Converts igs logfile-formatted lat-lon-height to the format needed for sinex ID block'''
n_rows = llh.shape[0]
latlon =
|
_pd.concat([llh.LON,llh.LAT],axis=0)
|
pandas.concat
|
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": 5
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).does_not_contain({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "string_var",
"comparator": "string_var"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain({
"target": "var5",
"comparator": "var1"
}).equals(pandas.Series([False, True, True])))
def test_contains_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["POKEMON", "CHARIZARD", "BULBASAUR"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "PIKACHU"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).contains_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([True, False, False])))
def test_does_not_contain_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["pikachu", "charmander", "squirtle"],
"var2": ["PIKACHU", "CHARIZARD", "BULBASAUR"],
"var3": ["pikachu", "charizard", "bulbasaur"],
"var4": [
["pikachu", "charizard", "bulbasaur"],
["chikorita", "cyndaquil", "totodile"],
["chikorita", "cyndaquil", "totodile"]
]
})
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var1",
"comparator": "IVYSAUR"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var3",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).does_not_contain_case_insensitive({
"target": "var4",
"comparator": "var2"
}).equals(pandas.Series([False, True, True])))
def test_is_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [4,5,6]
}).equals(pandas.Series([False, False, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([True, True, False])))
def test_is_not_contained_by(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"var5": [[1,2,3], [1,2], [17]]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": [9, 10, 11]
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by({
"target": "var1",
"comparator": "var5"
}).equals(pandas.Series([False, False, True])))
def test_is_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var1"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
def test_is_not_contained_by_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"],
"var4": [set(["word"]), set(["test"])]
})
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": ["word", "TEST"]
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([True, True])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).is_not_contained_by_case_insensitive({
"target": "var3",
"comparator": "var4"
}).equals(pandas.Series([True, True])))
def test_prefix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).prefix_matches_regex({
"target": "--r2",
"comparator": "w.*",
"prefix": 2
}).equals(pandas.Series([True, False])))
self.assertTrue(DataframeType({"value": df}).prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([False, False])))
def test_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).suffix_matches_regex({
"target": "--r1",
"comparator": "es.*",
"suffix": 3
}).equals(pandas.Series([False, True])))
self.assertTrue(DataframeType({"value": df}).suffix_matches_regex({
"target": "var1",
"comparator": "[0-9].*",
"suffix": 3
}).equals(pandas.Series([False, False])))
def test_not_prefix_matches_suffix(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_prefix_matches_regex({
"target": "--r1",
"comparator": ".*",
"prefix": 2
}).equals(pandas.Series([False, False])))
self.assertTrue(DataframeType({"value": df}).not_prefix_matches_regex({
"target": "var2",
"comparator": "[0-9].*",
"prefix": 2
}).equals(pandas.Series([True, True])))
def test_not_suffix_matches_regex(self):
df = pandas.DataFrame.from_dict({
"var1": ["WORD", "test"],
"var2": ["word", "TEST"],
"var3": ["another", "item"]
})
self.assertTrue(DataframeType({"value": df}).not_suffix_matches_regex({
"target": "var1",
"comparator": ".*",
"suffix": 3
}).equals(
|
pandas.Series([False, False])
|
pandas.Series
|
import json
from datetime import datetime, date
import argparse
import pandas as pd
import numpy as np
import partridge as ptg
from models import metrics, timetable, arrival_history, util, constants, errors
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Get the timetable for stops on a given route")
parser.add_argument('--agency', required=True, help='Agency id')
parser.add_argument("--route", required = True, help = "Route id")
parser.add_argument("--stops", required = True, help = "Comma-separated list of stops on the route (ex '3413,4416'")
parser.add_argument("--date", required = True, help = "Date - YYYY-MM-DD")
parser.add_argument("--comparison", dest = "comparison", action = "store_true", help = "option to compare timetables to actual data - true or false")
parser.add_argument("--thresholds", help = "comma-separated list of thresholds to define late/very late arrivals (ex '5,10')")
# add version param for later
parser.set_defaults(comparison = False)
parser.set_defaults(thresholds = '5,10')
args = parser.parse_args()
route = args.route
stops = [stop for stop in args.stops.split(",") if len(stop) > 0]
d = date.fromisoformat(args.date)
comparison = args.comparison
agency = config.get_agency(args.agency)
ver = "v1"
try:
thresholds = [int(x) for x in args.thresholds.split(',') if len(x) > 0]
if len(thresholds) != 2:
raise errors.InvalidInputError
except (TypeError, errors.InvalidInputError):
print("Invalid thresholds, using the default of 5/10 minutes.")
thresholds = [5, 10]
agency_id = agency.id
start_time = datetime.now()
print(f"Start: {start_time}")
tt = timetable.get_timetable_from_csv(agency_id, route, d, ver)
rc = agency.get_route_config(route)
for stop in stops:
# get direction
nextbus_dir = rc.get_directions_for_stop(stop)
if len(nextbus_dir) == 0:
print(f"Stop {stop} has no directions.")
else:
for direction in nextbus_dir:
tt.pretty_print(stop, direction)
if comparison:
route_metrics = metrics.RouteMetrics(agency_id, route)
df = route_metrics.get_comparison_to_timetable(d, stop, direction)
if len(df) > 0:
df = df.rename({
"arrival_time": "Scheduled Arrival",
"arrival_headway": "Scheduled Headway",
"next_arrival": "Next Arrival",
"next_arrival_delta": "Delta (Next Arrival)",
"next_arrival_headway": "Next Arrival Headway",
"closest_arrival": "Closest Arrival",
"closest_arrival_delta": "Delta (Closest Arrival)",
"closest_arrival_headway": "Closest Arrival Headway"
}, axis = "columns")
times_df = df[["Scheduled Arrival", "Closest Arrival", "Delta (Closest Arrival)", "Next Arrival", "Delta (Next Arrival)"]].copy(deep = True)
times_df[["Scheduled Arrival", "Closest Arrival", "Next Arrival"]] = times_df[["Scheduled Arrival", "Closest Arrival", "Next Arrival"]].applymap(lambda x: datetime.fromtimestamp(x, agency.tz).time() if not pd.isna(x) else np.nan)
times_df[["Delta (Closest Arrival)", "Delta (Next Arrival)"]] = times_df[["Delta (Closest Arrival)", "Delta (Next Arrival)"]].applymap(lambda x: f"{round(x/60, 2)} min")
headways_df = df[["Scheduled Arrival", "Scheduled Headway", "Closest Arrival Headway", "Next Arrival Headway"]].copy(deep = True)
headways_df["Scheduled Arrival"] = headways_df["Scheduled Arrival"].apply(lambda x: datetime.fromtimestamp(x, agency.tz).time() if not pd.isna(x) else np.nan)
headways_df[["Scheduled Headway", "Closest Arrival Headway", "Next Arrival Headway"]] = headways_df[["Scheduled Headway", "Closest Arrival Headway", "Next Arrival Headway"]].applymap(lambda x: f"{round(x, 2) if not pd.isna(x) else np.nan} min")
with
|
pd.option_context("display.max_rows", None, "display.max_columns", None, 'display.expand_frame_repr', False)
|
pandas.option_context
|
import pandas as pd
import numpy as np
from datetime import datetime
from pvlib.bifacial import pvfactors_timeseries, PVFactorsReportBuilder
from conftest import requires_pvfactors
import pytest
@requires_pvfactors
@pytest.mark.parametrize('run_parallel_calculations',
[False, True])
def test_pvfactors_timeseries(run_parallel_calculations):
""" Test that pvfactors is functional, using the TLDR section inputs of the
package github repo README.md file:
https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start"""
# Create some inputs
timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11),
datetime(2017, 8, 31, 12)]
).set_names('timestamps')
solar_zenith = [20., 10.]
solar_azimuth = [110., 140.]
surface_tilt = [10., 0.]
surface_azimuth = [90., 90.]
axis_azimuth = 0.
dni = [1000., 300.]
dhi = [50., 500.]
gcr = 0.4
pvrow_height = 1.75
pvrow_width = 2.44
albedo = 0.2
n_pvrows = 3
index_observed_pvrow = 1
rho_front_pvrow = 0.03
rho_back_pvrow = 0.05
horizon_band_angle = 15.
# Expected values
expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623],
index=timestamps,
name=('total_inc_front'))
expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215],
index=timestamps,
name=('total_inc_back'))
# Run calculation
ipoa_front, ipoa_back = pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
axis_azimuth,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow,
rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow,
horizon_band_angle=horizon_band_angle,
run_parallel_calculations=run_parallel_calculations,
n_workers_for_parallel_calcs=-1)
pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front)
pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back)
@requires_pvfactors
@pytest.mark.parametrize('run_parallel_calculations',
[False, True])
def test_pvfactors_timeseries_pandas_inputs(run_parallel_calculations):
""" Test that pvfactors is functional, using the TLDR section inputs of the
package github repo README.md file, but converted to pandas Series:
https://github.com/SunPower/pvfactors/blob/master/README.md#tldr---quick-start"""
# Create some inputs
timestamps = pd.DatetimeIndex([datetime(2017, 8, 31, 11),
datetime(2017, 8, 31, 12)]
).set_names('timestamps')
solar_zenith = pd.Series([20., 10.])
solar_azimuth = pd.Series([110., 140.])
surface_tilt = pd.Series([10., 0.])
surface_azimuth = pd.Series([90., 90.])
axis_azimuth = 0.
dni = pd.Series([1000., 300.])
dhi = pd.Series([50., 500.])
gcr = 0.4
pvrow_height = 1.75
pvrow_width = 2.44
albedo = 0.2
n_pvrows = 3
index_observed_pvrow = 1
rho_front_pvrow = 0.03
rho_back_pvrow = 0.05
horizon_band_angle = 15.
# Expected values
expected_ipoa_front = pd.Series([1034.95474708997, 795.4423259036623],
index=timestamps,
name=('total_inc_front'))
expected_ipoa_back = pd.Series([91.88707460262768, 78.05831585685215],
index=timestamps,
name=('total_inc_back'))
# Run calculation
ipoa_front, ipoa_back = pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
axis_azimuth,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=n_pvrows, index_observed_pvrow=index_observed_pvrow,
rho_front_pvrow=rho_front_pvrow, rho_back_pvrow=rho_back_pvrow,
horizon_band_angle=horizon_band_angle,
run_parallel_calculations=run_parallel_calculations,
n_workers_for_parallel_calcs=-1)
pd.testing.assert_series_equal(ipoa_front, expected_ipoa_front)
|
pd.testing.assert_series_equal(ipoa_back, expected_ipoa_back)
|
pandas.testing.assert_series_equal
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pymysql
import pandas as pd
import datetime
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import matplotlib.ticker as tck
import matplotlib.font_manager as fm
import math as m
import matplotlib.dates as mdates
import matplotlib.colors as colors
import netCDF4 as nc
from netCDF4 import Dataset
#------------------------------------------------------------------------------
# Motivación codigo sección 1--------------------------------------------------
"Código para el dibujo y cálculo de los histogramas de frecuencias horarios de la lluvia en determinados puntos de medición. Se lee como"
"un pandas los datos a dibujar de cada punto de medición para luego calcular el histograma de los acumulados y de las horas de acumulados."
"Inicialmente, se crea con el propósito de estimar la distribucion de los acumulados en los puntos de medición de los paneles experimentales "
"Se hace con los datos del 2018."
Pluvio = 'si' ##--> Para que promedie la lluvia de los dos pluviometros debe ser 'si'
#-----------------------------------------------------------------------------
# Rutas para las fuentes -----------------------------------------------------
prop = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Heavy.otf' )
prop_1 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Book.otf')
prop_2 = fm.FontProperties(fname='/home/nacorreasa/SIATA/Cod_Califi/AvenirLTStd-Black.otf')
##########################################################################
## ----------------LECTURA DE LOS ARCHIVOS DE ACUMULADOS----------------##
##########################################################################
df_Acum_JV = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH211.csv', sep=',', index_col =0)
df_Acum_CI = pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH206.csv', sep=',', index_col =0)
df_Acum_TS =
|
pd.read_csv('/home/nacorreasa/Maestria/Datos_Tesis/Pluvio/AcumH201.csv', sep=',', index_col =0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on 2018-09-13
@author: <NAME>
"""
import numpy as np
import pandas as pd
CURRENT_ROUND = 38
# Load data from all 2018 rounds
# Data from https://github.com/henriquepgomide/caRtola
rounds = []
rounds.append(pd.read_csv('data/rodada-1.csv'))
rounds.append(pd.read_csv('2018/data/rodada-2.csv'))
rounds.append(pd.read_csv('2018/data/rodada-3.csv'))
rounds.append(pd.read_csv('2018/data/rodada-4.csv'))
rounds.append(pd.read_csv('2018/data/rodada-5.csv'))
rounds.append(pd.read_csv('2018/data/rodada-6.csv'))
rounds.append(pd.read_csv('2018/data/rodada-7.csv'))
rounds.append(pd.read_csv('2018/data/rodada-8.csv'))
rounds.append(pd.read_csv('2018/data/rodada-9.csv'))
rounds.append(pd.read_csv('2018/data/rodada-10.csv'))
rounds.append(pd.read_csv('2018/data/rodada-11.csv'))
rounds.append(pd.read_csv('2018/data/rodada-12.csv'))
rounds.append(pd.read_csv('2018/data/rodada-13.csv'))
rounds.append(pd.read_csv('2018/data/rodada-14.csv'))
rounds.append(pd.read_csv('2018/data/rodada-15.csv'))
rounds.append(pd.read_csv('2018/data/rodada-16.csv'))
rounds.append(pd.read_csv('2018/data/rodada-17.csv'))
rounds.append(pd.read_csv('2018/data/rodada-18.csv'))
rounds.append(pd.read_csv('2018/data/rodada-19.csv'))
rounds.append(
|
pd.read_csv('2018/data/rodada-20.csv')
|
pandas.read_csv
|
# =============================================================================
# Standard imports
# =============================================================================
import os
import logging
#import datetime
#import gc
#import zipfile
# =============================================================================
# External imports - reimported for code completion!
# =============================================================================
print_imports()
# Import again for code completion!
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn as sk
import sklearn
import sklearn.linear_model
#from sklearn_pandas import DataFrameMapper
#from sklearn_features.transformers import DataFrameSelector
#from pandas.tseries.holiday import USFederalHolidayCalendar
#from sklearn.cross_validation import KFold, cross_val_score
#from sklearn.ensemble import RandomForestClassifier
#from sklearn.linear_model import SGDClassifier
#from sklearn.grid_search import GridSearchCV
#from sklearn.kernel_approximation import RBFSampler
#from sklearn.pipeline import make_pipeline
#from sklearn.preprocessing import StandardScaler, LabelEncoder, LabelBinarizer
#from sklearn_pandas import DataFrameMapper
# to make this notebook's output stable across runs
np.random.seed(42)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
days_off = USFederalHolidayCalendar().holidays(start='2003-01-01', end='2015-05-31').to_pydatetime()
#%% Analaysis of fit
if 0:
importances = clf.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest_reg.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(train_df_numeric.shape[1]):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
print(train_df_numeric.columns[indices[f]])
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(X.shape[1]), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(X.shape[1]), indices)
plt.xlim([-1, X.shape[1]])
plt.show()
#%%**************************************************************************************
# Gradient Boosting Regression
#****************************************************************************************
if 0:
params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
'learning_rate': 0.01, 'loss': 'ls'}
clf = sk.ensemble.GradientBoostingRegressor(**params)
clf.fit(train_df_numeric, np.log1p(y_train))
#%% Predict
if 0:
y_train_predicted = clf.predict(train_df_numeric)
y_test_predicted = clf.predict(test_df_numeric)
res = pd.DataFrame(y_train_predicted)
res.describe()
res.hist(bins=1000)
#%% Evaluate
# Calculate exp(x) - 1 for all elements in the array.
#y_train_predicted_cut[y_train_predicted > 100] = 100
if 0:
y_train_theor = np.expm1(y_train_predicted)
y_test_theor = np.expm1(y_test_predicted)
print()
print("Training set")
print("RMSLE: ", rmsle(y_train_predicted, y_train_theor))
sk.metrics.mean_squared_error(y_train,y_train_predicted)
#%%**************************************************************************************
# Random Forest
#****************************************************************************************
if 0:
from sklearn import ensemble
forest_reg = sk.ensemble.RandomForestRegressor(n_jobs=-1)
forest_reg.fit(train_df_numeric, np.log1p(y_train))
#%% Predict
if 0:
y_train_predicted = forest_reg.predict(train_df_numeric)
y_test_predicted = forest_reg.predict(test_df_numeric)
res = pd.DataFrame(y_train_predicted)
res.describe()
res.hist(bins=1000)
#%% Evaluate
if 0:
y_train_theor = np.expm1(y_train_predicted)
y_test_theor = np.expm1(y_test_predicted)
print()
print("Training set")
print("RMSLE: ", rmsle(y_train_predicted, y_train_theor))
sk.metrics.mean_squared_error(y_train,y_train_predicted)
#%%**************************************************************************************
# Stochastic Gradient Descent
#****************************************************************************************
if 0:
from sklearn import linear_model
#params = {'n_estimators': 500, 'max_depth': 4, 'min_samples_split': 2,
# 'learning_rate': 0.01, 'loss': 'ls'}
#clf = sk.ensemble.GradientBoostingRegressor(**params)
clf = sk.linear_model.SGDRegressor()
print(clf)
clf.fit(train_df_numeric, np.log1p(y_train))
#%% Predict
if 0:
y_train_predicted = clf.predict(train_df_numeric)
y_test_predicted = clf.predict(test_df_numeric)
res =
|
pd.DataFrame(y_train_predicted)
|
pandas.DataFrame
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.express as px
import plotly.graph_objects as go
import os
import pandas as pd
from datetime import datetime
from indicators import *
external_stylesheets = []
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
colors = {
'background': '#ffffff',
'text': '#333333'
}
def select_range(df,start,stop):
return df[(df['time'] >= start) & (df['time'] <= stop)]
rootdir = '/Users/tran/Documents'
period_name = 'H1'
period_in_secs = 60*60
price_file = os.path.join(rootdir, f'import_EURUSD_{period_name}.csv')
news_file = os.path.join(rootdir, 'reuters_news.tsv')
price_key = 'closeBid'
# Read candlesticks
df = pd.read_csv(price_file)
df['time_aligned'] =
|
pd.to_datetime(df['ts'] - df['ts']%period_in_secs, unit='s')
|
pandas.to_datetime
|
import pandas as pd
import numpy as np
from drain import util
import sys
def read_acs(table, columns, engine, offsets={0:{}}, years=range(2009, 2016)):
select = """
select geoid, {fields} from acs{year}_5yr.{table}
where geoid ~ 'US1703'
"""
column_names = ['geoid']
column_names.extend(columns.keys())
dfs = []
for year in years:
for i, attrs in offsets.iteritems():
offset = [c + i for c in columns.values()]
cols = map( (lambda x: "{0}{1:03d}".format(table, x)), offset)
s = select.format(fields=str.join(',', cols), year=year, table=table)
df =
|
pd.read_sql(s, engine)
|
pandas.read_sql
|
import locale
import numpy as np
import pytest
from pandas.compat import (
is_platform_windows,
np_version_under1p19,
)
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import FloatingArray
from pandas.core.arrays.floating import (
Float32Dtype,
Float64Dtype,
)
def test_uses_pandas_na():
a = pd.array([1, None], dtype=Float64Dtype())
assert a[1] is pd.NA
def test_floating_array_constructor():
values = np.array([1, 2, 3, 4], dtype="float64")
mask = np.array([False, False, False, True], dtype="bool")
result =
|
FloatingArray(values, mask)
|
pandas.core.arrays.FloatingArray
|
"""
This module contains statistical measures for analyzing target variable distributions across sensitive groups.
"""
import functools
import operator
from typing import Any, List, Mapping, Sequence, Union
import pandas as pd
from scipy.stats import describe, entropy
from .. import utils
def _mean_numerical(x: pd.Series) -> float:
return describe(x).mean
def _variance_numerical(x: pd.Series) -> float:
return describe(x).variance
def _mean_datetime(x: pd.Series) -> pd.Timedelta:
nums = pd.to_datetime(x)
date_min = nums.min()
diffs = [num - date_min for num in nums]
date_mean = date_min + functools.reduce(operator.add, diffs) / len(diffs)
return date_mean
def _variance_datetime(x: pd.Series) -> pd.Timedelta:
nums = pd.to_datetime(x).astype(int)
res = nums.std()
std = pd.to_timedelta(res)
return std
def _mode_categorical(x: pd.Series) -> Any:
return x.value_counts(sort=True).index[0]
def _variance_square_sum(x: pd.Series) -> float:
return (x.value_counts(normalize=True) ** 2).sum()
def _variance_entropy(x: pd.Series) -> float:
counts = x.value_counts()
return entropy(counts)
def _means_multinomial(x: pd.Series) -> pd.Series:
return x.value_counts(normalize=True, sort=False)
def _variances_multinomial(x: pd.Series) -> pd.Series:
probs = x.value_counts(normalize=True, sort=False)
variances =
|
pd.Series([prob * (1 - prob) for prob in probs], index=probs.index)
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
# ###### Abalone Data-set - > Extension of EDA Kernel by [<NAME>](https://www.kaggle.com/ragnisah/eda-abalone-age-prediction)
#
# - Model Insights
# - Different Classification Algorithms used,
# - Work Done by [<NAME>](https://www.kaggle.com/sriram1204), [<NAME>](https://www.kaggle.com/nikhitaagr)
# In[ ]:
''' Library Import'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# In[ ]:
''' SK-Learn Library Import'''
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RandomizedLasso,LassoLarsCV
from sklearn.exceptions import ConvergenceWarning
from sklearn.model_selection import train_test_split
from sklearn.metrics import make_scorer, accuracy_score, confusion_matrix
import sklearn.datasets
# In[ ]:
'''Scipy, Stats Library'''
from scipy.stats import skew
# In[ ]:
''' To Ignore Warning'''
import warnings
warnings.filterwarnings('ignore')
# In[ ]:
''' To Do : Inline Priting of Visualizations '''
sns.set()
print()
# In[ ]:
''' Importing Data : from the Archive Directly'''
df = pd.read_csv(r"../../../input/rodolfomendes_abalone-dataset/abalone.csv")
# In[ ]:
'''Display The head --> To Check if Data is Properly Imported'''
df.head()
# In[ ]:
''' Feature Information of the DataSet'''
df.info()
# ##### According to the Infomation:
#
# - 1)No-Null data
# - 2)1 - Object Type
# - 3)7 - Float Type
# - 4)1 - Int Type
# In[ ]:
'''Feature Distirbution of data for Float and Int Data Type'''
df.describe()
# ###### According to Described Information:
#
# - 1)No Feature has Minimum Value = 0, except *Height*
# - 2)All Features are not Normally Distributed, ( Theortically if feature is normally distributed, Mean = Median = Mode ).
# - 3)But Features are close to Normality
# - 4)All numerical, Except Sex
# - 5)Each Feature has Different Scale
# In[ ]:
'''Numerical Features and Categorical Features'''
nf = df.select_dtypes(include=[np.number]).columns
cf = df.select_dtypes(include=[np.object]).columns
# In[ ]:
'''List of Numerical Features'''
nf
# In[ ]:
''' List of Categorical Features'''
cf
# In[ ]:
'''Histogram : to see the numeric data distribution'''
df.hist(figsize=(20,20), grid = True, layout = (2,4), bins = 30)
# In[ ]:
'''After Seeing Above Graph of Data Distribution, I feel the Data is skewed, So checking for Skewness '''
skew_list = skew(df[nf],nan_policy='omit') #sending all numericalfeatures and omitting nan values
skew_list_df = pd.concat([pd.DataFrame(nf,columns=['Features']),
|
pd.DataFrame(skew_list,columns=['Skewness'])
|
pandas.DataFrame
|
import sklearn
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedShuffleSplit
#set path to necessary data
acid_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/acid_features.csv'
amine_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/amine_features.csv'
catalyst_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Features/catalyst_features.csv'
data_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/Boronic Acid Database.xlsx'
partial_charge_path = r'/Users/matthewholland/OneDrive/Oxford/Amide Bond Formation/Data/boron_partial_charges_xtb.csv'
#Import Data
acid_features = pd.read_csv(acid_path)
amine_features = pd.read_csv(amine_path)
catalyst_features = pd.read_csv(catalyst_path)
partial_charges = pd.read_csv(partial_charge_path, header=None)
database = pd.read_excel(data_path)
#Rename column headers to identify which features belong to which reagent
for column in acid_features:
acid_features = acid_features.rename(columns={column: column+'_acid'} )
for column in amine_features:
amine_features = amine_features.rename(columns={column: column+'_amine'})
for column in catalyst_features:
catalyst_features = catalyst_features.rename(columns={column: column+'_catalyst'})
#Merge partial charges into main database
#database.insert(5, 'Boron_partial_charge', partial_charges)
#Remove non-numerical data from the database dataframe
numeric_database = database.select_dtypes(include='number')
#remove incomplete data - temperature and solvent ratio
numeric_database = numeric_database.drop(columns=['Temperature', 'Ratio (v/v)'])
#combine all numerical features into one enormous database
all_numerical_features =
|
pd.concat([numeric_database, acid_features.iloc[:, 1:], amine_features.iloc[:, 1:], catalyst_features.iloc[:, 1:]], axis=1)
|
pandas.concat
|
import pandas as pd
import matplotlib.pyplot as plt
names = ["Round","nodes alive","sent","avg energy","max energy","min energy"]
minimum = [999999999999999, 0]
columnas_nodes_alive = pd.DataFrame()
columnas_avg_energy = pd.DataFrame()
columnas_max_energy = pd.DataFrame()
columnas_min_energy =
|
pd.DataFrame()
|
pandas.DataFrame
|
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with
|
pd.plot_params.use('x_compat', True)
|
pandas.plot_params.use
|
import matplotlib.image as img
import os
import pandas as pd
from PIL import Image
from io import BytesIO
import numpy as np
import requests
import h5py
from scipy.misc import imresize
import zipfile
import sys
from random import shuffle
from shutil import copyfile,move
from os.path import join
DIR_PATH = os.path.dirname(os.path.realpath(__file__))
#----------------------------GENERAL FUNCTIONS---------------------------
def df_as_images_labels(df):
print('Cleaning data...')
df = clean_data(df)
dataset = df['image'].tolist()
labels = df['label'].tolist()
print('Encoding data...')
dataset = encode_dataset(dataset)
labels,dic = encode_labels(labels)
return dataset, labels, dic
def encode_dataset(my_list):
resized_list = resize_images(my_list)
return np.stack(resized_list, axis=0)
def encode_labels(labels):
dic = dict(enumerate(set(labels)))
size = 12 # len(dic)
inv_dic = {v: k for k, v in dic.items()}
new_labels = []
for label in labels:
new_labels.append(one_hot_vector_encoding(inv_dic.get(label), size))
return np.stack(new_labels), dic
def one_hot_vector_encoding(label, num_class):
res = np.zeros(num_class, dtype='int')
res[label] += 1
return res
def resize_images(list_images):
#TODO : better resizing
new_list = []
for image in list_images:
new_list.append(np.transpose(imresize(image,(227,227,3))))
return new_list
def clean_data(df):
idx_delete = []
for i,row in df.iterrows():
image = row['image']
if len(image.shape)!=3 or image.shape[2]!=3:
idx_delete.append(i)
df = df.drop(df.index[idx_delete])
return df
#----------------------------PANDORA FUNCTIONS---------------------------
def serialize_pandora():
df = load_df_pandora()
images, labels, dic = df_as_images_labels(df)
del df
print('Serializing data')
with h5py.File('../datasets/pandora.h5', 'w') as f:
f.create_dataset('images', data=images)
f.create_dataset('labels', data=labels)
def load_df_pandora():
dataPath = '../../data/pandora/'
styles = os.listdir(dataPath)
dataset = []
labels = []
artists = []
image_names = []
print('Loading data')
for style in styles:
print('Loading style ',style,'...')
style_content = os.listdir(dataPath+style)
for item in style_content:
path = dataPath+style+'/'+item
if os.path.isfile(path):
try:
dataset.append(img.imread(path))
artists.append('unknown')
labels.append(style)
image_names.append(item)
except OSError:
print('Couldn\'t load ' + item)
if os.path.isdir(path):
artist_content = os.listdir(path)
for file in artist_content:
try:
dataset.append(img.imread(path+'/'+file))
artists.append(item)
labels.append(style)
image_names.append(file)
except OSError:
print('Couldn\'t load ' + file)
df = pd.DataFrame()
df['image_name'] = image_names
df['image'] = dataset
df['label'] = labels
df['artist'] = artists
return df
def download_pandora():
dataPath = '../../data/pandora/'
print('Downloading data...')
request = requests.get("http://imag.pub.ro/pandora/Download/Pandora_V1.zip",stream=True)
print('Unziping data...')
zip_ref = zipfile.ZipFile(BytesIO(request.content))
zip_ref.extractall(dataPath)
zip_ref.close()
return
def load_pandora():
print('Loading data...')
file_path = os.path.dirname(os.path.realpath(__file__))
with h5py.File(file_path+'/../datasets/pandora.h5') as f:
x = f['images'][:]
y = f['labels'][:]
return x,y
#----------------------------WIKIPAINTINGS FUNCTIONS---------------------------
def download_wikipaintings():
dataPath = '../../data/wikipaintings/'
data = pd.read_csv('../datasets/wiki_paintings.csv')
main_styles = ['Art Informel', 'Magic Realism', 'Abstract Art', 'Pop Art', 'Ukiyo-e', 'Mannerism (Late Renaissance)', 'Color Field Painting', 'Minimalism', 'High Renaissance', 'Early Renaissance', 'Cubism', 'Rococo', 'Abstract Expressionism', 'Naïve Art (Primitivism)', 'Northern Renaissance', 'Neoclassicism', 'Baroque', 'Symbolism', 'Art Nouveau (Modern)', 'Surrealism', 'Expressionism', 'Post-Impressionism', 'Romanticism', 'Realism', 'Impressionism']
data = data[data['style'].isin(main_styles)]
size = len(data)
print(size)
n_downloaded = 0
for index, row in data.iterrows():
style = row['style']
if not os.path.exists(dataPath+style):
os.makedirs(dataPath+style)
_download_image(dataPath+style+'/'+row['image_id'],row['image_url'])
done = int(50 * n_downloaded / size)
sys.stdout.write("\r[%s%s]" % ('=' * done, ' ' * (50 - done)))
sys.stdout.flush()
n_downloaded +=1
return data
def _download_image(file_name,url):
try:
img = Image.open(requests.get(url,stream=True).raw).convert('RGB')
img.save(file_name+'.jpg','JPEG')
except OSError:
print('Erreur downloading image ',file_name)
def serialize_wikipaintings():
df = load_df_wikipaintings()
images, labels, dic = df_as_images_labels(df)
del df
print('Serializing data')
with h5py.File('../datasets/wikipaintings.h5', 'w') as f:
f.create_dataset('images', data=images)
f.create_dataset('labels', data=labels)
def load_df_wikipaintings():
dataPath = '../../data/wikipaintings/'
styles = os.listdir(dataPath)
dataset = []
labels = []
image_names = []
print('Loading data')
for style in styles:
print('Loading style ',style,'...')
style_content = os.listdir(dataPath+style)
for item in style_content:
path = dataPath+style+'/'+item
try:
dataset.append(img.imread(path))
labels.append(style)
image_names.append(item)
except OSError:
print('Couldn\'t load ' + item)
df =
|
pd.DataFrame()
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 21 14:36:08 2021
@author: Administrator
"""
#%%
# =============================================================================
# =============================================================================
# # 문제 11 유형(DataSet_11.csv 이용)
# 구분자 : comma(“,”), 470 Rows, 4 Columns, UTF-8 인코딩
# 세계 각국의 행복지수를 비롯한 여러 정보를 조사한 DS리서치는
# 취합된 자료의 현황 파악 및 간단한 통계분석을 실시하고자 한다.
# 컬 럼 / 정 의 / Type
# Country / 국가명 / String
# Happiness_Rank / 당해 행복점수 순위 / Double
# Happiness_Score / 행복점수 / Double
# year / 년도 / Double
# =============================================================================
# =============================================================================
#%%
import pandas as pd
data11=pd.read_csv('Dataset_11.csv')
#%%
# =============================================================================
# 1.분석을 위해 3년 연속 행복지수가 기록된 국가의 데이터를 사용하고자 한다.
# 3년 연속 데이터가 기록되지 않은 국가의 개수는?
# - 국가명 표기가 한 글자라도 다른 경우 다른 국가로 처리하시오.
# - 3년 연속 데이터가 기록되지 않은 국가 데이터는 제외하고 이를 향후 분석에서
# 활용하시오.(답안 예시) 1
# =============================================================================
data11.columns
# ['Country', 'Happiness_Rank', 'Happiness_Score', 'year']
q1_agg=data11.groupby('Country').apply(len)
len(q1_agg[q1_agg < 3])
# 3년 연속 기록되지 않은 국가 수 : 20
q1_tab=pd.pivot_table(data=data11,
index='Country',
columns='year',
values='Happiness_Score')
q1_tab2=pd.pivot_table(data=data11,
index='Country',
columns='year',
values='Happiness_Score',
aggfunc='count')
con_list=q1_agg[q1_agg < 3].index
q1=data11[~data11.Country.isin(con_list)]
len(data11) # 470
len(q1) # 438
#%%
# =============================================================================
# 2.(1번 산출물을 활용하여) 2017년 행복지수와 2015년 행복지수를 활용하여 국가별
# 행복지수 증감률을 산출하고 행복지수 증감률이 가장 높은 3개 국가를 행복지수가
# 높은 순서대로 차례대로 기술하시오.
# 증감률 = (2017년행복지수−2015년행복지수)/2
#
# - 연도는 년월(YEAR_MONTH) 변수로부터 추출하며, 연도별 매출금액합계는 1월부터
# 12월까지의 매출 총액을 의미한다. (답안 예시) Korea, Japan, China
# =============================================================================
q1_tab=pd.pivot_table(data=data11,
index='Country',
columns='year',
values='Happiness_Score')
q2=q1_tab.dropna()
q2.loc[:, 'ratio']=(q2.loc[:, 2017]-q2.loc[:,2015])/2
q2['ratio'].nlargest(3).index
# (정답) ['Latvia', 'Romania', 'Togo']
#%%
# =============================================================================
# 3.(1번 산출물을 활용하여) 년도별 행복지수 평균이 유의미하게 차이가 나는지
# 알아보고자 한다.
# 이와 관련하여 적절한 검정을 사용하고 검정통계량을 기술하시오.
# - 해당 검정의 검정통계량은 자유도가 2인 F 분포를 따른다.
# - 검정통계량은 소수점 넷째 자리까지 기술한다. (답안 예시) 0.1234
# =============================================================================
# (참고)
# from statsmodels.formula.api import ols
# from statsmodels.stats.anova import anova_lm
from scipy.stats import f_oneway
from statsmodels.formula.api import ols
from statsmodels.stats.anova import anova_lm
f_oneway(q2[2015].dropna(), q2[2016].dropna(), q2[2017].dropna())
# F_onewayResult(statistic=0.004276725037689305,
# pvalue=0.9957324489944479)
# H0: 모든 집단의 평균은 동일하다(mu1=mu2=mu3)
# H1: 적어도 하나의 그룹의 평균은 동일하지 않다(mu1=mu2, mu1!=m3)
ols1=ols('Happiness_Score~C(year)', data=q1).fit()
anova_lm(ols1)
# df sum_sq mean_sq F PR(>F)
# C(year)그룹간 2.0 0.011198 0.005599 0.004277 0.995732
# Residual그룹내 435.0 569.472307 1.309132 NaN NaN
# (정답) 0.004277 -> 0.0042
from statsmodels.stats.multicomp import pairwise_tukeyhsd
multi_out=pairwise_tukeyhsd(q1['Happiness_Score'], q1['year'])
print(multi_out)
#%%
# =============================================================================
# =============================================================================
# # 문제 12 유형(DataSet_12.csv 이용)
# 구분자 : comma(“,”), 5000 Rows, 7 Columns, UTF-8 인코딩
# 직장인의 독서 실태를 분석하기 위해서 수도권 거주자 5000명을
# 대상으로 간단한 인적 사항과 연간 독서량 정보를 취합하였다.
# 컬 럼 / 정 의 / Type
# Age / 나이 / String
# Gender / 성별(M: 남성) / String
# Dependent_Count / 부양가족 수 / Double
# Education_Level / 교육 수준 / String
# is_Married / 결혼 여부(1: 결혼) / Double
# Read_Book_per_Year / 연간 독서량(권) / Double
# Income_Range / 소득 수준에 따른 구간(A < B < C < D < E)이며 X는
# 정보 누락 / String
# =============================================================================
# =============================================================================
import pandas as pd
data12=
|
pd.read_csv('Dataset_12.csv')
|
pandas.read_csv
|
"""
Data-Bundle for Aplhavantage
Usage: set api-key with 'export ALPHAVANTAGE_API_KEY=yourkey'
Limitations: free-accounts can have 5 calls per minute and 500 calls a day.
tolerance is used to wait some small additional time, otherwise sometimes
we will still hit the limit if we want to squeeze out as much calls as
possible.
In case you have a premium-subscription, you can tune these values by
setting the env-vars AV_FREQ_SEC, AV_CALLS_PER_FREQ, and AV_TOLERANCE_SEC
AV_FREQ_SEC - sets the base-frequency
AV_CALLS_PER_FREQ - sets the amount of calls per base-frequency
AV_TOLERANCE_SEC - the amount of seconds to add to the interval in case
we're getting exceptions because of calling api too often
Adjustments: to enable a bigger precision on our backtests, i decided to
go for unadjusted-prices and implemented an adjustment-writer to account
for dividends and splits. However, only daily-data contains this information
so it's really IMPORTANT that you never only request minute-data alone.
"""
import numpy as np
import pandas as pd
from alpha_vantage.timeseries import TimeSeries
from datetime import date, timedelta
from trading_calendars import TradingCalendar
from ratelimit import limits, sleep_and_retry
import zipline.config
from zipline.data.bundles import core as bundles
from zipline.data.bundles.common import asset_to_sid_map
from zipline.data.bundles.universe import Universe, get_sp500, get_sp100, get_nasdaq100, all_alpaca_assets
from zipline.data import bundles as bundles_module
import trading_calendars
import os
import time
av_config = zipline.config.bundle.AlphaVantage()
AV_FREQ_SEC = av_config.sample_frequency
AV_CALLS_PER_FREQ = av_config.max_calls_per_freq
AV_TOLERANCE_SEC = av_config.breathing_space
os.environ["ALPHAVANTAGE_API_KEY"] = av_config.api_key # make sure it's set in env variable
UNIVERSE = Universe.NASDAQ100
ASSETS = None
def list_assets():
global ASSETS
if not ASSETS:
custom_asset_list = av_config.av.get("custom_asset_list")
if custom_asset_list:
custom_asset_list = custom_asset_list.strip().replace(" ", "").split(",")
ASSETS = list(set(custom_asset_list))
else:
try:
universe = Universe[av_config.av["universe"]]
except:
universe = Universe.ALL
if universe == Universe.ALL:
# alpha vantage doesn't define a universe. we could try using alpaca's universe if the
# user defined credentials. if not, we will raise an exception.
try:
import zipline.data.bundles.alpaca_api as alpaca
alpaca.initialize_client()
ASSETS = all_alpaca_assets(alpaca.CLIENT)
except:
raise Exception("You tried to use Universe.ALL but you didn't define the alpaca credentials.")
elif universe == Universe.SP100:
ASSETS = get_sp100()
elif universe == Universe.SP500:
ASSETS = get_sp500()
elif universe == Universe.NASDAQ100:
ASSETS = get_nasdaq100()
ASSETS = list(set(ASSETS))
return ASSETS
def fill_daily_gaps(df):
"""
filling missing data. logic:
1. get start date and end date from df. (caveat: if the missing dates are at the edges this will not work)
2. use trading calendars to get all session dates between start and end
3. use difference() to get only missing dates.
4. add those dates to the original df with NaN
5. dividends get 0 and split gets 1 (meaning no split happened)
6. all the rest get ffill of the close value.
7. volume get 0
:param df:
:return:
"""
cal: TradingCalendar = trading_calendars.get_calendar('NYSE')
sessions = cal.sessions_in_range(df.index[0], df.index[-1])
if len(df.index) == len(sessions):
return df
to_fill = sessions.difference(df.index)
df = df.append(pd.DataFrame(index=to_fill)).sort_index()
# forward-fill these values regularly
df.close.fillna(method='ffill', inplace=True)
df.dividend.fillna(0, inplace=True)
df.split.fillna(1, inplace=True)
df.volume.fillna(0, inplace=True)
df.open.fillna(df.close, inplace=True)
df.high.fillna(df.close, inplace=True)
df.low.fillna(df.close, inplace=True)
df.adj_close.fillna(df.close, inplace=True)
filled = len(to_fill)
print(f'\nWarning! Filled {filled} empty values!')
return df
# purpose of this function is to encapsulate both minute- and daily-requests in one
# function to be able to properly do rate-limiting.
@sleep_and_retry
@limits(calls=AV_CALLS_PER_FREQ, period=AV_FREQ_SEC + AV_TOLERANCE_SEC)
def av_api_wrapper(symbol, interval, _slice=None):
if interval == '1m':
ts = TimeSeries(output_format='csv')
data_slice, meta_data = ts.get_intraday_extended(symbol, interval='1min', slice=_slice, adjusted='false')
return data_slice
else:
ts = TimeSeries()
data, meta_data = ts.get_daily_adjusted(symbol, outputsize='full')
return data
def av_get_data_for_symbol(symbol, start, end, interval):
if interval == '1m':
data = []
for i in range(1, 3):
for j in range(1, 13):
_slice = 'year' + str(i) + 'month' + str(j)
# print('requesting slice ' + _slice + ' for ' + symbol)
data_slice = av_api_wrapper(symbol, interval=interval, slice=_slice)
# dont know better way to convert _csv.reader to list or DataFrame
table = []
for line in data_slice:
table.append(line)
# strip header-row from csv
table = table[1:]
data = data + table
df = pd.DataFrame(data, columns=['date', 'open', 'high', 'low', 'close', 'volume'])
df.index = pd.to_datetime(df['date'])
df.index = df.index.tz_localize('UTC')
df.drop(columns=['date'], inplace=True)
df.sort_index(inplace=True)
else:
data = av_api_wrapper(symbol, interval)
df = pd.DataFrame.from_dict(data, orient='index')
df.index = pd.to_datetime(df.index).tz_localize('UTC')
df.rename(columns={
'1. open': 'open',
'2. high': 'high',
'3. low': 'low',
'4. close': 'close',
'5. volume': 'volume',
'5. adjusted close': 'adj_close',
'6. volume': 'volume',
'7. dividend amount': 'dividend',
'8. split coefficient': 'split'
}, inplace=True)
df.sort_index(inplace=True)
# fill potential gaps in data
df = fill_daily_gaps(df)
# data comes as strings
df['open'] = pd.to_numeric(df['open'], downcast='float')
df['high'] =
|
pd.to_numeric(df['high'], downcast='float')
|
pandas.to_numeric
|
import libraries.measures_calculation
import pandas as pd
import numpy as np
import json
from libraries.model_var import ModelVar
from collections import Counter
def loadData(path_file):
"""
return a pandasDatafram object with the loaded data in the path_file csv
:param path_file: path of the csv file
:type path_file: string - required
:return: datafram object with all the data in the csv
:rtype: Dataframe
"""
data = pd.read_csv(path_file)
data.head()
return data
def getSenSpeValuesByScores(datafram_scores):
"""
return a a dataframe object with the sensitivity and specificity for all the models in the dataframe scores (tn,fp,fn and tp).
E.G:
+---------+---------+--------+-----+------+------+-----+-------------------+
| param_a | param_b | CV_num | tn | fp | fn | tp | file_name |
+---------+---------+--------+-----+------+------+-----+-------------------+
| 1.0 | 0.0 | 0.0 | 3.0 | 46.0 | 72.0 | 6.0 | conf_A_C ... .ftt |
+---------+---------+--------+-----+------+------+-----+-------------------+
:param datafram_scores: datafram with all the tn,fp,fn and tp of all the models loaded
:type datafram_scores: Dataframe - required
E.G: return
+-------+-------------+-------------+
| index | Sensitivity | Specificity |
+-------+-------------+-------------+
| 0 | 0.076923 | 0.061224 |
+-------+-------------+-------------+
:return: datafram object with the score for the models in the :datafram_scores
:rtype: Dataframe
"""
sen_spe_values_vec = []
for index, row in datafram_scores.iterrows():
sensitivity = libraries.measures_calculation.calculateSensitivity(row['tn'], row['fp'], row['fn'], row['tp'])
specificity = libraries.measures_calculation.calculateSpecificity(row['tn'], row['fp'], row['fn'], row['tp'])
model_file_name = row['file_name']
sen_spe_values_vec.append([index, sensitivity, specificity, model_file_name])
df = pd.DataFrame(sen_spe_values_vec, columns=['index', 'Sensitivity', 'Specificity', 'file_name'])
return df
def filterDataframeBySenSpeLimit(value_sen, value_spe, dataframe_values_models):
"""
return a pandasDatafram filtered by the sensitivity and specificity
:param value_sen: limit of the sensitivity
:param value_spe: limit of the specificity
:param dataframe_values_models: all the models
:type value_sen: float - required
:type value_spe: float - required
:type dataframe_values_models: pandas Dataframe - required
:return: datafram object with the models that are higher than the sensitivity and specificity specified
:rtype: Dataframe
"""
datafram_values_filtered = dataframe_values_models.query('Sensitivity >= {0} and Specificity >= {1}'.format(value_sen, value_spe))
return datafram_values_filtered
def filterDataframeBySenSpeLimitContrary(value_sen, value_spe, dataframe_values_models):
"""
return a pandasDatafram filtered by the sensitivity and specificity. This return the inverse of the limits
:param value_sen: limit of the sensitivity
:param value_spe: limit of the specificity
:param dataframe_values_models: all the models
:type value_sen: float - required
:type value_spe: float - required
:type dataframe_values_models: pandas Dataframe - required
:return: datafram object with the models that are higher than the sensitivity and specificity specified
:rtype: Dataframe
"""
datafram_values_filtered = dataframe_values_models.query('Sensitivity < {0} or Specificity < {1}'.format(value_sen, value_spe))
return datafram_values_filtered
def getModelsByFiltrationSenSpeDataframe(vec_models_filtered_data, vec_all_models_data):
"""
return a list with all the files models filtered by the sensitivity and specificity
:param vec_models_filtered_data: dataframe with the indexs, sen, and spe of the selected models
:param vec_all_models_data: dataframe with all the models
:type vec_models_filtered_data: pandas Dataframe - required
:type vec_all_models_data: pandas Dataframe - required
:return: list of strings of the models path
:rtype: list[string]
"""
index_models = np.array(vec_models_filtered_data['index'].values)
models_delects_ds = vec_all_models_data[vec_all_models_data.index.isin(index_models)]
list_models_path = models_delects_ds['file_name'].values.tolist()
return list_models_path
def getListRulesPerModel(path_model):
"""
return a list of the vector rules given a path model file
:param path_model: path of the model
:type path_model: string - required
:return: list of model_var
:rtype: list[model_var]
"""
with open(path_model) as data_file:
data_json = json.load(data_file)
#pprint(len(data_json['rules']))
list_rules = data_json['rules']
return list_rules
def transformListRulesToModelVar(model_path, list_rules):
"""
return a list of model_var given a vector of rules (according to the structure)
:param model_path: path of the model
:param list_rules: list of the rules (according to the structure) [[[['10', 1], ['25', 2]], [1.0]], [[['20', 2], ['14', 1], ['21', 0]], [1.0]], [[['22', 0]], [1.0]], [[['0', 2], ['17', 2]], [0.0]]]
:type model_path: string - required
:type list_rules: list(list(list())) - required
:return: list of model_var
:rtype: list[model_var]
"""
dict_var_qty = {}
for rule in list_rules:
variables_in_rule = rule[0]
for variable in variables_in_rule:
if variable[0] in dict_var_qty:
dict_var_qty[variable[0]] = dict_var_qty[variable[0]] + 1
else:
dict_var_qty[variable[0]] = 1
model_var = ModelVar(model_path, dict_var_qty)
return model_var
def transformModelsToModelVarObj(list_models_path):
"""
return a list model_var obj (see this class to understand what is it) based on a list of models path
:param list_models_path: dlist of models path selected
:type list_models_path: list[string] - required
:return: list of model_var
:rtype: list[model_var]
"""
list_models_vars = []
for file_name_path in list_models_path:
list_rules = getListRulesPerModel(file_name_path)
model_var = transformListRulesToModelVar(file_name_path,list_rules)
list_models_vars.append(model_var)
return list_models_vars
def countVarFreq(list_models_vars_freq):
"""
return a dictionary with the frequencies of all the variables
If the variable appear two times in a model it only count one
:param list_models_path: list of model_var
:type list_models_path: list[model_var] - required
:return: dictionary with the frequencies for all the variables
:rtype: dict{'var_name: frequence}
"""
list_variables_total = []
for model_var_freq in list_models_vars_freq:
variables_names = list(model_var_freq.dict_freq_var.keys())
list_variables_total.extend(variables_names)
counter_frec_variables = Counter(list_variables_total)
dict_frec_variables = dict(counter_frec_variables)
return dict_frec_variables
def sort_reverse_dictionary_by_values(dicitonary_values):
"""
return a dictionary reverse sorted by its values
:param dicitonary_values: normal dictionary
:type dicitonary_values: dict{string:int} - required
:return: dictionary reverse sorted by values
:rtype: dict{'var_name: frequence}
"""
#Sort the dictionary by values
sorted_dict_values = sorted(dicitonary_values.items(), key=lambda kv: kv[1], reverse=True)
#Transform in a dictionary
dict_sorted_values = dict((k[0],k[1]) for k in sorted_dict_values)
return dict_sorted_values
def reduceQtyVars(nb_min_var:int, dict_values:dict, list_models_var):
"""
return a list of model_var that the quantities of each variable are upper than the np_min_ar
:param nb_min_var: quantity of the minimum variables that you want to save
:param dict_values: dictionary with the frequency variables
:param list_models_var: list of all the model_var objects
:type nb_min_var: integer - required
:type dict_values: dict{string:int} - required
:type list_models_var: list[model_var] - required
:return: list with all the model_Var saved
:rtype: list[model_var]
"""
dict2 = dict_values.copy()
#On garde les variables qui ont une freq inferieur au seuil
dict2 = {k: v for k, v in dict2.items() if v < nb_min_var}
list_var_remove = list(dict2.keys())
list_index_remove = []
index_value = 0
for model_var in list_models_var:
var_in_models = list(model_var.dict_freq_var.keys())
exists_var = any(x in var_in_models for x in list_var_remove)
if exists_var == True:
list_index_remove.append(index_value)
index_value =index_value +1
list_index_remove= reversed(list_index_remove)
for element in list_index_remove:
list_models_var.pop(element)
return list_models_var
def createPlotQtyVarPerModelByMinimumFreq(dict_values, list_models_vars):
#list_models_vars_cpopy = list_models_vars.copy()
nb_min_var = 1
qty_models = -1
qty_variables = -1
vec_qty_models = []
vec_qty_variables = []
while qty_models != 0:
list_models_vars_cpopy = list_models_vars.copy()
list_model_var_resultant = reduceQtyVars(nb_min_var, dict_values, list_models_vars_cpopy)
dict_values_resulant = countVarFreq(list_model_var_resultant)
#indication of the number of models and variables
qty_models = len(list_model_var_resultant)
qty_variables = len(dict_values_resulant)
vec_qty_models.append(qty_models)
vec_qty_variables.append(qty_variables)
nb_min_var += 1
vec_index = np.arange(1,nb_min_var)
matrix_data = np.stack((vec_index, vec_qty_models, vec_qty_variables))
headers = ['min freq var', 'number of models', 'quantity of variables']
dataframe_result = pd.DataFrame(matrix_data.T, columns=headers)
return dataframe_result
def plotSenSpeQtyModelsByThreshold(dataframe_models_sen_spe):
value_sen = 0.0
value_spe = 0.0
array_results = []
while value_sen < 1:
value_spe = 0
while value_spe < 1:
dataframe_result = dataframe_models_sen_spe.loc[(dataframe_models_sen_spe['Sensitivity'] > value_sen) & (dataframe_models_sen_spe['Specificity'] > value_spe)]
number_rows = dataframe_result.shape[0]
array_results.append([value_sen, value_spe, number_rows])
value_spe += 0.1
value_sen += 0.1
df_results =
|
pd.DataFrame(array_results)
|
pandas.DataFrame
|
#!/usr/bin/env python3
"""
Reactome REST API utilities.
https://reactome.org/ContentService/
https://reactome.org/ContentService/data/pathways/top/9606
https://reactome.org/documentation/data-model
"Life on the cellular level is a network of molecular interactions. Molecules are synthesized and degraded, undergo a bewildering array of temporary and permanent modifications, are transported from one location to another, and form complexes with other molecules. Reactome represents all of this complexity as reactions in which input physical entities are converted to output entities."
"PhysicalEntities include individual molecules, multi-molecular complexes, and sets of molecules or complexes grouped together on the basis of shared characteristics. Molecules are further classified as genome encoded (DNA, RNA, and proteins) or not (all others). Attributes of a PhysicalEntity instance capture the chemical structure of an entity, including any covalent modifications in the case of a macromolecule, and its subcellular localization."
"""
import sys,os,re,json,time,logging
import pandas as pd
#
from ..util import rest
#
API_HOST='reactome.org'
API_BASE_PATH='/ContentService'
BASE_URL='https://'+API_HOST+API_BASE_PATH
#
HEADERS={'Content-type':'text/plain', 'Accept':'application/json'}
#
##############################################################################
def DBInfo(base_url=BASE_URL, fout=None):
name = rest.Utils.GetURL(base_url+'/data/database/name')
version = rest.Utils.GetURL(base_url+'/data/database/version')
df = pd.DataFrame({'param':['name', 'version'], 'value':[name, version]});
if fout: df.to_csv(fout, "\t", index=False)
return df
##############################################################################
def ListToplevelPathways(base_url=BASE_URL, fout=None):
tags=None; species="9606"; df=pd.DataFrame();
pathways = rest.Utils.GetURL(base_url+f'/data/pathways/top/{species}', parse_json=True)
for pathway in pathways:
if not tags: tags = list(pathway.keys())
df = pd.concat([df, pd.DataFrame({tags[j]:[pathway[tags[j]]] if tags[j] in pathway else [''] for j in range(len(tags))})])
if fout: df.to_csv(fout, "\t", index=False)
logging.info('Top-level pathways: {}'.format(df.shape[0]))
return df
##############################################################################
def ListDiseases(base_url=BASE_URL, fout=None):
tags=[]; df=
|
pd.DataFrame()
|
pandas.DataFrame
|
import os
import sys
import datetime
import pandas as pd
class tools:
"""
Contains all the utility methods for the application
"""
def __init__(self):
self.indexList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]
self.date = self.getDate()
self.docsDir = ""
self.rabtax = ""
self.temp = ""
self.controlState = ""
self.indCounter = ""
self.fileSearch()
pass
def getDate(self):
cDate = datetime.date.today()
return cDate.strftime("%d/%m/%Y")
def fileSearch(self):
# Move to the Documents directory within the current user account
rPath = self.FrozenFile("")
pList = rPath.split("\\")
rPath = os.path.join(pList[0], "\\" + str(pList[1]))
self.docsDir = os.path.join(rPath, pList[2] + "\\Documents")
# Try to open the .csv file with pandas,
# file is stored in /Documents/RABTAX.
# Date is set for 2017, after this date
# the condition will always fail leaving
# new years to be updated dynamically later
try:
os.chdir(os.path.join(self.docsDir, "RABTAX"))
rabtax =
|
pd.read_csv('RABTAX2017.csv', index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 26 11:57:27 2015
@author: malte
"""
import numpy as np
import pandas as pd
from scipy import sparse
import implicit
import time
class ColdImplicit:
'''
ColdImplicit(n_factors = 100, n_iterations = 10, learning_rate = 0.01, lambda_session = 0.0, lambda_item = 0.0, sigma = 0.05, init_normal = False, session_key = 'SessionId', item_key = 'ItemId')
Parameters
--------
'''
def __init__(self, n_factors = 100, epochs = 10, lr = 0.01, reg=0.01, algo='als', idf_weight=False, session_key = 'playlist_id', item_key = 'track_id'):
self.factors = n_factors
self.epochs = epochs
self.lr = lr
self.reg = reg
self.algo = algo
self.idf_weight = idf_weight
self.session_key = session_key
self.item_key = item_key
self.current_session = None
def train(self, train, test=None):
'''
Trains the predictor.
Parameters
--------
data: pandas.DataFrame
Training data. It contains the transactions of the sessions. It has one column for session IDs, one for item IDs and one for the timestamp of the events (unix timestamps).
It must have a header. Column names are arbitrary, but must correspond to the ones you set during the initialization of the network (session_key, item_key, time_key properties).
'''
data = train['actions']
#datat = test['actions']
#data = pd.concat([data,datat])
itemids = data[self.item_key].unique()
self.n_items = len(itemids)
self.itemidmap = pd.Series(data=np.arange(self.n_items), index=itemids)
self.itemidmap2 = pd.Series(index=np.arange(self.n_items), data=itemids)
sessionids = data[self.session_key].unique()
self.n_sessions = len(sessionids)
self.useridmap = pd.Series(data=np.arange(self.n_sessions), index=sessionids)
tstart = time.time()
data = pd.merge(data, pd.DataFrame({self.item_key:self.itemidmap.index, 'ItemIdx':self.itemidmap[self.itemidmap.index].values}), on=self.item_key, how='inner')
data = pd.merge(data,
|
pd.DataFrame({self.session_key:self.useridmap.index, 'SessionIdx':self.useridmap[self.useridmap.index].values})
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101,E1103,W0232
import os
import sys
from datetime import datetime
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
import pandas.compat as compat
import pandas.core.common as com
import pandas.util.testing as tm
from pandas import (Categorical, Index, Series, DataFrame, PeriodIndex,
Timestamp, CategoricalIndex)
from pandas.compat import range, lrange, u, PY3
from pandas.core.config import option_context
# GH 12066
# flake8: noqa
class TestCategorical(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.factor = Categorical.from_array(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'],
ordered=True)
def test_getitem(self):
self.assertEqual(self.factor[0], 'a')
self.assertEqual(self.factor[-1], 'c')
subf = self.factor[[0, 1, 2]]
tm.assert_almost_equal(subf._codes, [0, 1, 1])
subf = self.factor[np.asarray(self.factor) == 'c']
tm.assert_almost_equal(subf._codes, [2, 2, 2])
def test_getitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(np.int8))
result = c.codes[np.array([100000]).astype(np.int64)]
expected = c[np.array([100000]).astype(np.int64)].codes
self.assert_numpy_array_equal(result, expected)
def test_setitem(self):
# int/positional
c = self.factor.copy()
c[0] = 'b'
self.assertEqual(c[0], 'b')
c[-1] = 'a'
self.assertEqual(c[-1], 'a')
# boolean
c = self.factor.copy()
indexer = np.zeros(len(c), dtype='bool')
indexer[0] = True
indexer[-1] = True
c[indexer] = 'c'
expected = Categorical.from_array(['c', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assert_categorical_equal(c, expected)
def test_setitem_listlike(self):
# GH 9469
# properly coerce the input indexers
np.random.seed(1)
c = Categorical(np.random.randint(0, 5, size=150000).astype(
np.int8)).add_categories([-1000])
indexer = np.array([100000]).astype(np.int64)
c[indexer] = -1000
# we are asserting the code result here
# which maps to the -1000 category
result = c.codes[np.array([100000]).astype(np.int64)]
self.assertEqual(result, np.array([5], dtype='int8'))
def test_constructor_unsortable(self):
# it works!
arr = np.array([1, 2, 3, datetime.now()], dtype='O')
factor = Categorical.from_array(arr, ordered=False)
self.assertFalse(factor.ordered)
if compat.PY3:
self.assertRaises(
TypeError, lambda: Categorical.from_array(arr, ordered=True))
else:
# this however will raise as cannot be sorted (on PY3 or older
# numpies)
if LooseVersion(np.__version__) < "1.10":
self.assertRaises(
TypeError,
lambda: Categorical.from_array(arr, ordered=True))
else:
Categorical.from_array(arr, ordered=True)
def test_is_equal_dtype(self):
# test dtype comparisons between cats
c1 = Categorical(list('aabca'), categories=list('abc'), ordered=False)
c2 = Categorical(list('aabca'), categories=list('cab'), ordered=False)
c3 = Categorical(list('aabca'), categories=list('cab'), ordered=True)
self.assertTrue(c1.is_dtype_equal(c1))
self.assertTrue(c2.is_dtype_equal(c2))
self.assertTrue(c3.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(c2))
self.assertFalse(c1.is_dtype_equal(c3))
self.assertFalse(c1.is_dtype_equal(Index(list('aabca'))))
self.assertFalse(c1.is_dtype_equal(c1.astype(object)))
self.assertTrue(c1.is_dtype_equal(CategoricalIndex(c1)))
self.assertFalse(c1.is_dtype_equal(
CategoricalIndex(c1, categories=list('cab'))))
self.assertFalse(c1.is_dtype_equal(CategoricalIndex(c1, ordered=True)))
def test_constructor(self):
exp_arr = np.array(["a", "b", "c", "a", "b", "c"])
c1 = Categorical(exp_arr)
self.assert_numpy_array_equal(c1.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
c2 = Categorical(exp_arr, categories=["c", "b", "a"])
self.assert_numpy_array_equal(c2.__array__(), exp_arr)
# categories must be unique
def f():
Categorical([1, 2], [1, 2, 2])
self.assertRaises(ValueError, f)
def f():
Categorical(["a", "b"], ["a", "b", "b"])
self.assertRaises(ValueError, f)
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([1, 2], [1, 2, np.nan, np.nan])
self.assertRaises(ValueError, f)
# The default should be unordered
c1 = Categorical(["a", "b", "c", "a"])
self.assertFalse(c1.ordered)
# Categorical as input
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1)
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(c1, categories=["a", "b", "c"])
self.assert_numpy_array_equal(c1.__array__(), c2.__array__())
self.assert_numpy_array_equal(c2.categories, np.array(["a", "b", "c"]))
# Series of dtype category
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"])
c2 = Categorical(Series(c1))
self.assertTrue(c1.equals(c2))
# Series
c1 = Categorical(["a", "b", "c", "a"])
c2 = Categorical(Series(["a", "b", "c", "a"]))
self.assertTrue(c1.equals(c2))
c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])
c2 = Categorical(
Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"])
self.assertTrue(c1.equals(c2))
# This should result in integer categories, not float!
cat = pd.Categorical([1, 2, 3, np.nan], categories=[1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# https://github.com/pydata/pandas/issues/3678
cat = pd.Categorical([np.nan, 1, 2, 3])
self.assertTrue(com.is_integer_dtype(cat.categories))
# this should result in floats
cat = pd.Categorical([np.nan, 1, 2., 3])
self.assertTrue(com.is_float_dtype(cat.categories))
cat = pd.Categorical([np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# Deprecating NaNs in categoires (GH #10748)
# preserve int as far as possible by converting to object if NaN is in
# categories
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1, 2, 3],
categories=[np.nan, 1, 2, 3])
self.assertTrue(com.is_object_dtype(cat.categories))
# This doesn't work -> this would probably need some kind of "remember
# the original type" feature to try to cast the array interface result
# to...
# vals = np.asarray(cat[cat.notnull()])
# self.assertTrue(com.is_integer_dtype(vals))
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, "a", "b", "c"],
categories=[np.nan, "a", "b", "c"])
self.assertTrue(com.is_object_dtype(cat.categories))
# but don't do it for floats
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical([np.nan, 1., 2., 3.],
categories=[np.nan, 1., 2., 3.])
self.assertTrue(com.is_float_dtype(cat.categories))
# corner cases
cat = pd.Categorical([1])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical(["a"])
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == "a")
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Scalars should be converted to lists
cat = pd.Categorical(1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
cat = pd.Categorical([1], categories=1)
self.assertTrue(len(cat.categories) == 1)
self.assertTrue(cat.categories[0] == 1)
self.assertTrue(len(cat.codes) == 1)
self.assertTrue(cat.codes[0] == 0)
# Catch old style constructor useage: two arrays, codes + categories
# We can only catch two cases:
# - when the first is an integer dtype and the second is not
# - when the resulting codes are all -1/NaN
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2],
categories=["a", "b", "c"]) # noqa
with tm.assert_produces_warning(RuntimeWarning):
c_old = Categorical([0, 1, 2, 0, 1, 2], # noqa
categories=[3, 4, 5])
# the next one are from the old docs, but unfortunately these don't
# trigger :-(
with tm.assert_produces_warning(None):
c_old2 = Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) # noqa
cat = Categorical([1, 2], categories=[1, 2, 3])
# this is a legitimate constructor
with tm.assert_produces_warning(None):
c = Categorical(np.array([], dtype='int64'), # noqa
categories=[3, 2, 1], ordered=True)
def test_constructor_with_index(self):
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(ci)))
ci = CategoricalIndex(list('aabbca'), categories=list('cab'))
self.assertTrue(ci.values.equals(Categorical(
ci.astype(object), categories=ci.categories)))
def test_constructor_with_generator(self):
# This was raising an Error in isnull(single_val).any() because isnull
# returned a scalar for a generator
xrange = range
exp = Categorical([0, 1, 2])
cat = Categorical((x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = Categorical(xrange(3))
self.assertTrue(cat.equals(exp))
# This uses xrange internally
from pandas.core.index import MultiIndex
MultiIndex.from_product([range(5), ['a', 'b', 'c']])
# check that categories accept generators and sequences
cat = pd.Categorical([0, 1, 2], categories=(x for x in [0, 1, 2]))
self.assertTrue(cat.equals(exp))
cat = pd.Categorical([0, 1, 2], categories=xrange(3))
self.assertTrue(cat.equals(exp))
def test_from_codes(self):
# too few categories
def f():
Categorical.from_codes([1, 2], [1, 2])
self.assertRaises(ValueError, f)
# no int codes
def f():
Categorical.from_codes(["a"], [1, 2])
self.assertRaises(ValueError, f)
# no unique categories
def f():
Categorical.from_codes([0, 1, 2], ["a", "a", "b"])
self.assertRaises(ValueError, f)
# too negative
def f():
Categorical.from_codes([-2, 1, 2], ["a", "b", "c"])
self.assertRaises(ValueError, f)
exp = Categorical(["a", "b", "c"], ordered=False)
res = Categorical.from_codes([0, 1, 2], ["a", "b", "c"])
self.assertTrue(exp.equals(res))
# Not available in earlier numpy versions
if hasattr(np.random, "choice"):
codes = np.random.choice([0, 1], 5, p=[0.9, 0.1])
pd.Categorical.from_codes(codes, categories=["train", "test"])
def test_comparisons(self):
result = self.factor[self.factor == 'a']
expected = self.factor[np.asarray(self.factor) == 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor != 'a']
expected = self.factor[np.asarray(self.factor) != 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor < 'c']
expected = self.factor[np.asarray(self.factor) < 'c']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor > 'a']
expected = self.factor[np.asarray(self.factor) > 'a']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor >= 'b']
expected = self.factor[np.asarray(self.factor) >= 'b']
self.assertTrue(result.equals(expected))
result = self.factor[self.factor <= 'b']
expected = self.factor[np.asarray(self.factor) <= 'b']
self.assertTrue(result.equals(expected))
n = len(self.factor)
other = self.factor[np.random.permutation(n)]
result = self.factor == other
expected = np.asarray(self.factor) == np.asarray(other)
self.assert_numpy_array_equal(result, expected)
result = self.factor == 'd'
expected = np.repeat(False, len(self.factor))
self.assert_numpy_array_equal(result, expected)
# comparisons with categoricals
cat_rev = pd.Categorical(["a", "b", "c"], categories=["c", "b", "a"],
ordered=True)
cat_rev_base = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a"], ordered=True)
cat = pd.Categorical(["a", "b", "c"], ordered=True)
cat_base = pd.Categorical(["b", "b", "b"], categories=cat.categories,
ordered=True)
# comparisons need to take categories ordering into account
res_rev = cat_rev > cat_rev_base
exp_rev = np.array([True, False, False])
self.assert_numpy_array_equal(res_rev, exp_rev)
res_rev = cat_rev < cat_rev_base
exp_rev = np.array([False, False, True])
self.assert_numpy_array_equal(res_rev, exp_rev)
res = cat > cat_base
exp = np.array([False, False, True])
self.assert_numpy_array_equal(res, exp)
# Only categories with same categories can be compared
def f():
cat > cat_rev
self.assertRaises(TypeError, f)
cat_rev_base2 = pd.Categorical(
["b", "b", "b"], categories=["c", "b", "a", "d"])
def f():
cat_rev > cat_rev_base2
self.assertRaises(TypeError, f)
# Only categories with same ordering information can be compared
cat_unorderd = cat.set_ordered(False)
self.assertFalse((cat > cat).any())
def f():
cat > cat_unorderd
self.assertRaises(TypeError, f)
# comparison (in both directions) with Series will raise
s = Series(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > s)
self.assertRaises(TypeError, lambda: cat_rev > s)
self.assertRaises(TypeError, lambda: s < cat)
self.assertRaises(TypeError, lambda: s < cat_rev)
# comparison with numpy.array will raise in both direction, but only on
# newer numpy versions
a = np.array(["b", "b", "b"])
self.assertRaises(TypeError, lambda: cat > a)
self.assertRaises(TypeError, lambda: cat_rev > a)
# The following work via '__array_priority__ = 1000'
# works only on numpy >= 1.7.1
if LooseVersion(np.__version__) > "1.7.1":
self.assertRaises(TypeError, lambda: a < cat)
self.assertRaises(TypeError, lambda: a < cat_rev)
# Make sure that unequal comparison take the categories order in
# account
cat_rev = pd.Categorical(
list("abc"), categories=list("cba"), ordered=True)
exp = np.array([True, False, False])
res = cat_rev > "b"
self.assert_numpy_array_equal(res, exp)
def test_na_flags_int_categories(self):
# #1457
categories = lrange(10)
labels = np.random.randint(0, 10, 20)
labels[::5] = -1
cat = Categorical(labels, categories, fastpath=True)
repr(cat)
self.assert_numpy_array_equal(com.isnull(cat), labels == -1)
def test_categories_none(self):
factor = Categorical(['a', 'b', 'b', 'a',
'a', 'c', 'c', 'c'], ordered=True)
self.assertTrue(factor.equals(self.factor))
def test_describe(self):
# string type
desc = self.factor.describe()
expected = DataFrame({'counts': [3, 2, 3],
'freqs': [3 / 8., 2 / 8., 3 / 8.]},
index=pd.CategoricalIndex(['a', 'b', 'c'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check unused categories
cat = self.factor.copy()
cat.set_categories(["a", "b", "c", "d"], inplace=True)
desc = cat.describe()
expected = DataFrame({'counts': [3, 2, 3, 0],
'freqs': [3 / 8., 2 / 8., 3 / 8., 0]},
index=pd.CategoricalIndex(['a', 'b', 'c', 'd'],
name='categories'))
tm.assert_frame_equal(desc, expected)
# check an integer one
desc = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]).describe()
expected = DataFrame({'counts': [5, 3, 3],
'freqs': [5 / 11., 3 / 11., 3 / 11.]},
index=pd.CategoricalIndex([1, 2, 3],
name='categories'))
tm.assert_frame_equal(desc, expected)
# https://github.com/pydata/pandas/issues/3678
# describe should work with NaN
cat = pd.Categorical([np.nan, 1, 2, 2])
desc = cat.describe()
expected = DataFrame({'counts': [1, 2, 1],
'freqs': [1 / 4., 2 / 4., 1 / 4.]},
index=pd.CategoricalIndex([1, 2, np.nan],
categories=[1, 2],
name='categories'))
tm.assert_frame_equal(desc, expected)
# NA as a category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c", np.nan],
categories=["b", "a", "c", np.nan])
result = cat.describe()
expected = DataFrame([[0, 0], [1, 0.25], [2, 0.5], [1, 0.25]],
columns=['counts', 'freqs'],
index=pd.CategoricalIndex(['b', 'a', 'c', np.nan],
name='categories'))
tm.assert_frame_equal(result, expected)
# NA as an unused category
with tm.assert_produces_warning(FutureWarning):
cat = pd.Categorical(["a", "c", "c"],
categories=["b", "a", "c", np.nan])
result = cat.describe()
exp_idx = pd.CategoricalIndex(
['b', 'a', 'c', np.nan], name='categories')
expected = DataFrame([[0, 0], [1, 1 / 3.], [2, 2 / 3.], [0, 0]],
columns=['counts', 'freqs'], index=exp_idx)
tm.assert_frame_equal(result, expected)
def test_print(self):
expected = ["[a, b, b, a, a, c, c, c]",
"Categories (3, object): [a < b < c]"]
expected = "\n".join(expected)
actual = repr(self.factor)
self.assertEqual(actual, expected)
def test_big_print(self):
factor = Categorical([0, 1, 2, 0, 1, 2] * 100, ['a', 'b', 'c'],
name='cat', fastpath=True)
expected = ["[a, b, c, a, b, ..., b, c, a, b, c]", "Length: 600",
"Categories (3, object): [a, b, c]"]
expected = "\n".join(expected)
actual = repr(factor)
self.assertEqual(actual, expected)
def test_empty_print(self):
factor = Categorical([], ["a", "b", "c"])
expected = ("[], Categories (3, object): [a, b, c]")
# hack because array_repr changed in numpy > 1.6.x
actual = repr(factor)
self.assertEqual(actual, expected)
self.assertEqual(expected, actual)
factor = Categorical([], ["a", "b", "c"], ordered=True)
expected = ("[], Categories (3, object): [a < b < c]")
actual = repr(factor)
self.assertEqual(expected, actual)
factor = Categorical([], [])
expected = ("[], Categories (0, object): []")
self.assertEqual(expected, repr(factor))
def test_print_none_width(self):
# GH10087
a = pd.Series(pd.Categorical([1, 2, 3, 4]))
exp = u("0 1\n1 2\n2 3\n3 4\n" +
"dtype: category\nCategories (4, int64): [1, 2, 3, 4]")
with option_context("display.width", None):
self.assertEqual(exp, repr(a))
def test_unicode_print(self):
if PY3:
_rep = repr
else:
_rep = unicode # noqa
c = pd.Categorical(['aaaaa', 'bb', 'cccc'] * 20)
expected = u"""\
[aaaaa, bb, cccc, aaaaa, bb, ..., bb, cccc, aaaaa, bb, cccc]
Length: 60
Categories (3, object): [aaaaa, bb, cccc]"""
self.assertEqual(_rep(c), expected)
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""\
[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
# unicode option should not affect to Categorical, as it doesn't care
# the repr width
with option_context('display.unicode.east_asian_width', True):
c = pd.Categorical([u'ああああ', u'いいいいい', u'ううううううう']
* 20)
expected = u"""[ああああ, いいいいい, ううううううう, ああああ, いいいいい, ..., いいいいい, ううううううう, ああああ, いいいいい, ううううううう]
Length: 60
Categories (3, object): [ああああ, いいいいい, ううううううう]""" # noqa
self.assertEqual(_rep(c), expected)
def test_periodindex(self):
idx1 = PeriodIndex(['2014-01', '2014-01', '2014-02', '2014-02',
'2014-03', '2014-03'], freq='M')
cat1 = Categorical.from_array(idx1)
str(cat1)
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype='int64')
exp_idx = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat1._codes, exp_arr)
self.assertTrue(cat1.categories.equals(exp_idx))
idx2 = PeriodIndex(['2014-03', '2014-03', '2014-02', '2014-01',
'2014-03', '2014-01'], freq='M')
cat2 = Categorical.from_array(idx2, ordered=True)
str(cat2)
exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype='int64')
exp_idx2 = PeriodIndex(['2014-01', '2014-02', '2014-03'], freq='M')
self.assert_numpy_array_equal(cat2._codes, exp_arr)
self.assertTrue(cat2.categories.equals(exp_idx2))
idx3 = PeriodIndex(['2013-12', '2013-11', '2013-10', '2013-09',
'2013-08', '2013-07', '2013-05'], freq='M')
cat3 = Categorical.from_array(idx3, ordered=True)
exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype='int64')
exp_idx = PeriodIndex(['2013-05', '2013-07', '2013-08', '2013-09',
'2013-10', '2013-11', '2013-12'], freq='M')
self.assert_numpy_array_equal(cat3._codes, exp_arr)
self.assertTrue(cat3.categories.equals(exp_idx))
def test_categories_assigments(self):
s = pd.Categorical(["a", "b", "c", "a"])
exp = np.array([1, 2, 3, 1])
s.categories = [1, 2, 3]
self.assert_numpy_array_equal(s.__array__(), exp)
self.assert_numpy_array_equal(s.categories, np.array([1, 2, 3]))
# lengthen
def f():
s.categories = [1, 2, 3, 4]
self.assertRaises(ValueError, f)
# shorten
def f():
s.categories = [1, 2]
self.assertRaises(ValueError, f)
def test_construction_with_ordered(self):
# GH 9347, 9190
cat = Categorical([0, 1, 2])
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=False)
self.assertFalse(cat.ordered)
cat = Categorical([0, 1, 2], ordered=True)
self.assertTrue(cat.ordered)
def test_ordered_api(self):
# GH 9347
cat1 = pd.Categorical(["a", "c", "b"], ordered=False)
self.assertTrue(cat1.categories.equals(Index(['a', 'b', 'c'])))
self.assertFalse(cat1.ordered)
cat2 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=False)
self.assertTrue(cat2.categories.equals(Index(['b', 'c', 'a'])))
self.assertFalse(cat2.ordered)
cat3 = pd.Categorical(["a", "c", "b"], ordered=True)
self.assertTrue(cat3.categories.equals(Index(['a', 'b', 'c'])))
self.assertTrue(cat3.ordered)
cat4 = pd.Categorical(["a", "c", "b"], categories=['b', 'c', 'a'],
ordered=True)
self.assertTrue(cat4.categories.equals(Index(['b', 'c', 'a'])))
self.assertTrue(cat4.ordered)
def test_set_ordered(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
cat2 = cat.as_unordered()
self.assertFalse(cat2.ordered)
cat2 = cat.as_ordered()
self.assertTrue(cat2.ordered)
cat2.as_unordered(inplace=True)
self.assertFalse(cat2.ordered)
cat2.as_ordered(inplace=True)
self.assertTrue(cat2.ordered)
self.assertTrue(cat2.set_ordered(True).ordered)
self.assertFalse(cat2.set_ordered(False).ordered)
cat2.set_ordered(True, inplace=True)
self.assertTrue(cat2.ordered)
cat2.set_ordered(False, inplace=True)
self.assertFalse(cat2.ordered)
# deperecated in v0.16.0
with tm.assert_produces_warning(FutureWarning):
cat.ordered = False
self.assertFalse(cat.ordered)
with tm.assert_produces_warning(FutureWarning):
cat.ordered = True
self.assertTrue(cat.ordered)
def test_set_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
exp_categories = np.array(["c", "b", "a"])
exp_values = np.array(["a", "b", "c", "a"])
res = cat.set_categories(["c", "b", "a"], inplace=True)
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
self.assertIsNone(res)
res = cat.set_categories(["a", "b", "c"])
# cat must be the same as before
self.assert_numpy_array_equal(cat.categories, exp_categories)
self.assert_numpy_array_equal(cat.__array__(), exp_values)
# only res is changed
exp_categories_back = np.array(["a", "b", "c"])
self.assert_numpy_array_equal(res.categories, exp_categories_back)
self.assert_numpy_array_equal(res.__array__(), exp_values)
# not all "old" included in "new" -> all not included ones are now
# np.nan
cat = Categorical(["a", "b", "c", "a"], ordered=True)
res = cat.set_categories(["a"])
self.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0]))
# still not all "old" in "new"
res = cat.set_categories(["a", "b", "d"])
self.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0]))
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "d"]))
# all "old" included in "new"
cat = cat.set_categories(["a", "b", "c", "d"])
exp_categories = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(cat.categories, exp_categories)
# internals...
c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True)
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0]))
self.assert_numpy_array_equal(c.categories, np.array([1, 2, 3, 4]))
self.assert_numpy_array_equal(c.get_values(),
np.array([1, 2, 3, 4, 1]))
c = c.set_categories(
[4, 3, 2, 1
]) # all "pointers" to '4' must be changed from 3 to 0,...
self.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3])
) # positions are changed
self.assert_numpy_array_equal(c.categories, np.array([4, 3, 2, 1])
) # categories are now in new order
self.assert_numpy_array_equal(c.get_values(), np.array([1, 2, 3, 4, 1])
) # output is the same
self.assertTrue(c.min(), 4)
self.assertTrue(c.max(), 1)
# set_categories should set the ordering if specified
c2 = c.set_categories([4, 3, 2, 1], ordered=False)
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
# set_categories should pass thru the ordering
c2 = c.set_ordered(False).set_categories([4, 3, 2, 1])
self.assertFalse(c2.ordered)
self.assert_numpy_array_equal(c.get_values(), c2.get_values())
def test_rename_categories(self):
cat = pd.Categorical(["a", "b", "c", "a"])
# inplace=False: the old one must not be changed
res = cat.rename_categories([1, 2, 3])
self.assert_numpy_array_equal(res.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(res.categories, np.array([1, 2, 3]))
self.assert_numpy_array_equal(cat.__array__(),
np.array(["a", "b", "c", "a"]))
self.assert_numpy_array_equal(cat.categories,
np.array(["a", "b", "c"]))
res = cat.rename_categories([1, 2, 3], inplace=True)
# and now inplace
self.assertIsNone(res)
self.assert_numpy_array_equal(cat.__array__(), np.array([1, 2, 3, 1]))
self.assert_numpy_array_equal(cat.categories, np.array([1, 2, 3]))
# lengthen
def f():
cat.rename_categories([1, 2, 3, 4])
self.assertRaises(ValueError, f)
# shorten
def f():
cat.rename_categories([1, 2])
self.assertRaises(ValueError, f)
def test_reorder_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"], categories=["c", "b", "a"],
ordered=True)
# first inplace == False
res = cat.reorder_categories(["c", "b", "a"])
# cat must be the same as before
self.assert_categorical_equal(cat, old)
# only res is changed
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.reorder_categories(["c", "b", "a"], inplace=True)
self.assertIsNone(res)
self.assert_categorical_equal(cat, new)
# not all "old" included in "new"
cat = Categorical(["a", "b", "c", "a"], ordered=True)
def f():
cat.reorder_categories(["a"])
self.assertRaises(ValueError, f)
# still not all "old" in "new"
def f():
cat.reorder_categories(["a", "b", "d"])
self.assertRaises(ValueError, f)
# all "old" included in "new", but too long
def f():
cat.reorder_categories(["a", "b", "c", "d"])
self.assertRaises(ValueError, f)
def test_add_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", "c", "a"],
categories=["a", "b", "c", "d"], ordered=True)
# first inplace == False
res = cat.add_categories("d")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.add_categories(["d"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.add_categories("d", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# new is in old categories
def f():
cat.add_categories(["d"])
self.assertRaises(ValueError, f)
# GH 9927
cat = Categorical(list("abc"), ordered=True)
expected = Categorical(
list("abc"), categories=list("abcde"), ordered=True)
# test with Series, np.array, index, list
res = cat.add_categories(Series(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(np.array(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(Index(["d", "e"]))
self.assert_categorical_equal(res, expected)
res = cat.add_categories(["d", "e"])
self.assert_categorical_equal(res, expected)
def test_remove_categories(self):
cat = Categorical(["a", "b", "c", "a"], ordered=True)
old = cat.copy()
new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"],
ordered=True)
# first inplace == False
res = cat.remove_categories("c")
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
res = cat.remove_categories(["c"])
self.assert_categorical_equal(cat, old)
self.assert_categorical_equal(res, new)
# inplace == True
res = cat.remove_categories("c", inplace=True)
self.assert_categorical_equal(cat, new)
self.assertIsNone(res)
# removal is not in categories
def f():
cat.remove_categories(["c"])
self.assertRaises(ValueError, f)
def test_remove_unused_categories(self):
c = Categorical(["a", "b", "c", "d", "a"],
categories=["a", "b", "c", "d", "e"])
exp_categories_all = np.array(["a", "b", "c", "d", "e"])
exp_categories_dropped = np.array(["a", "b", "c", "d"])
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories, exp_categories_dropped)
self.assert_numpy_array_equal(c.categories, exp_categories_all)
res = c.remove_unused_categories(inplace=True)
self.assert_numpy_array_equal(c.categories, exp_categories_dropped)
self.assertIsNone(res)
# with NaN values (GH11599)
c = Categorical(["a", "b", "c", np.nan],
categories=["a", "b", "c", "d", "e"])
res = c.remove_unused_categories()
self.assert_numpy_array_equal(res.categories,
np.array(["a", "b", "c"]))
self.assert_numpy_array_equal(c.categories, exp_categories_all)
val = ['F', np.nan, 'D', 'B', 'D', 'F', np.nan]
cat = pd.Categorical(values=val, categories=list('ABCDEFG'))
out = cat.remove_unused_categories()
self.assert_numpy_array_equal(out.categories, ['B', 'D', 'F'])
self.assert_numpy_array_equal(out.codes, [2, -1, 1, 0, 1, 2, -1])
self.assertEqual(out.get_values().tolist(), val)
alpha = list('abcdefghijklmnopqrstuvwxyz')
val = np.random.choice(alpha[::2], 10000).astype('object')
val[np.random.choice(len(val), 100)] = np.nan
cat = pd.Categorical(values=val, categories=alpha)
out = cat.remove_unused_categories()
self.assertEqual(out.get_values().tolist(), val.tolist())
def test_nan_handling(self):
# Nans are represented as -1 in codes
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0]))
# If categories have nan included, the code should point to that
# instead
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan, "a"],
categories=["a", "b", np.nan])
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, 2, 0]))
# Changing categories should also make the replaced category np.nan
c = Categorical(["a", "b", "c", "a"])
with tm.assert_produces_warning(FutureWarning):
c.categories = ["a", "b", np.nan] # noqa
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 0]))
# Adding nan to categories should make assigned nan point to the
# category!
c = Categorical(["a", "b", np.nan, "a"])
self.assert_numpy_array_equal(c.categories, np.array(["a", "b"]))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0]))
c[1] = np.nan
self.assert_numpy_array_equal(c.categories,
np.array(["a", "b", np.nan],
dtype=np.object_))
self.assert_numpy_array_equal(c._codes, np.array([0, 2, -1, 0]))
# Remove null categories (GH 10156)
cases = [
([1.0, 2.0, np.nan], [1.0, 2.0]),
(['a', 'b', None], ['a', 'b']),
([pd.Timestamp('2012-05-01'), pd.NaT],
[pd.Timestamp('2012-05-01')])
]
null_values = [np.nan, None, pd.NaT]
for with_null, without in cases:
with tm.assert_produces_warning(FutureWarning):
base = Categorical([], with_null)
expected = Categorical([], without)
for nullval in null_values:
result = base.remove_categories(nullval)
self.assert_categorical_equal(result, expected)
# Different null values are indistinguishable
for i, j in [(0, 1), (0, 2), (1, 2)]:
nulls = [null_values[i], null_values[j]]
def f():
with tm.assert_produces_warning(FutureWarning):
Categorical([], categories=nulls)
self.assertRaises(ValueError, f)
def test_isnull(self):
exp = np.array([False, False, True])
c = Categorical(["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
with tm.assert_produces_warning(FutureWarning):
c = Categorical(["a", "b", np.nan], categories=["a", "b", np.nan])
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
# test both nan in categories and as -1
exp = np.array([True, False, True])
c = Categorical(["a", "b", np.nan])
with tm.assert_produces_warning(FutureWarning):
c.set_categories(["a", "b", np.nan], rename=True, inplace=True)
c[0] = np.nan
res = c.isnull()
self.assert_numpy_array_equal(res, exp)
def test_codes_immutable(self):
# Codes should be read only
c = Categorical(["a", "b", "c", "a", np.nan])
exp = np.array([0, 1, 2, 0, -1], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
# Assignments to codes should raise
def f():
c.codes = np.array([0, 1, 2, 0, 1], dtype='int8')
self.assertRaises(ValueError, f)
# changes in the codes array should raise
# np 1.6.1 raises RuntimeError rather than ValueError
codes = c.codes
def f():
codes[4] = 1
self.assertRaises(ValueError, f)
# But even after getting the codes, the original array should still be
# writeable!
c[4] = "a"
exp = np.array([0, 1, 2, 0, 0], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
c._codes[4] = 2
exp = np.array([0, 1, 2, 0, 2], dtype='int8')
self.assert_numpy_array_equal(c.codes, exp)
def test_min_max(self):
# unordered cats have no min/max
cat = Categorical(["a", "b", "c", "d"], ordered=False)
self.assertRaises(TypeError, lambda: cat.min())
self.assertRaises(TypeError, lambda: cat.max())
cat =
|
Categorical(["a", "b", "c", "d"], ordered=True)
|
pandas.Categorical
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import xarray as xr
import re
import datetime
import os
from pathlib import Path
import hpl2netCDF_client as proc
class hpl_files(object):
name= []
time= []
# The class "constructor" - It's actually an initializer
def __init__(self, name, time):
self.name = name
self.time = time
@staticmethod
def make_file_list(date_chosen, confDict, url):
path = Path(url) / date_chosen.strftime('%Y') / date_chosen.strftime('%Y%m') / date_chosen.strftime('%Y%m%d')
#confDict= config.gen_confDict()
## for halo
if confDict['SYSTEM'] == 'halo':
if (confDict['SCAN_TYPE'] == 'Stare') | (confDict['SCAN_TYPE'] == 'VAD') | (confDict['SCAN_TYPE'] == 'RHI'):
scan_type= confDict['SCAN_TYPE']
else:
scan_type= 'User'
mylist= list(path.glob('**/' + scan_type + '*.hpl'))
if confDict['SCAN_TYPE']=='Stare':
file_time= [ datetime.datetime.strptime(x.stem, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H")
for x in file_list]
elif (confDict['SCAN_TYPE']=='VAD') | (confDict['SCAN_TYPE']=='RHI'):
file_time= [ datetime.datetime.strptime(x.stem, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in file_list]
else:
file_time= [ datetime.datetime.strptime(x.stem
, scan_type
+ x.name[4]
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in mylist]
# sort files according to time stamp
file_list = []
for ii,idx in enumerate(np.argsort(file_time).astype(int)):
file_list.append(mylist[idx])
file_time = [ datetime.datetime.strptime(x.stem
, scan_type
+ x.name[4]
+ "_"
+ confDict['SYSTEM_ID']
+ "_%Y%m%d_%H%M%S")
for x in file_list]
return hpl_files(file_list, file_time)
## for windcube
elif confDict['SYSTEM'] == 'windcube':
if (confDict['SCAN_TYPE'] == 'Stare') | (confDict['SCAN_TYPE'] == 'VAD') | (confDict['SCAN_TYPE'] == 'RHI'):
scan_type= 'fixed'
else:
print('unknown scantype!')
if abs((date_chosen - datetime.datetime(date_chosen.year, date_chosen.month, date_chosen.day)).total_seconds()) > 0:
mylist= list(path.glob('**/' + 'WCS*' + date_chosen.strftime('%Y-%m-%d_%H*') + scan_type + '*.nc'))
else:
mylist= list(path.glob('**/' + 'WCS*' + scan_type + '*.nc'))
file_time = [ datetime.datetime.strptime( x.stem[0:29]
, x.stem[0:9] + '_%Y-%m-%d_%H-%M-%S')
for x in mylist
]
file_list = [mylist[idx] for idx in np.argsort(file_time).astype(int)]
file_time = [ datetime.datetime.strptime( x.stem[0:29]
, x.stem[0:9] + '_%Y-%m-%d_%H-%M-%S')
for x in file_list
]
return hpl_files(file_list, file_time)
# function used for calculation of range bounds
@staticmethod
def range_calc(rg_vec, confDict):
'''Calculate range bounds, also accounting for overlapping gates. If your hpl-files contain overlapping gates please add the "OVERLAPPING_GATES" argument to the configuration file.'''
if 'OVERLAPPING_GATES' in confDict:
r = lambda x,idx: (x + idx) * float(confDict['RANGE_GATE_LENGTH'])/(1,float(confDict['NUMBER_OF_GATE_POINTS']))[int(confDict['OVERLAPPING_GATES'])]
else:
r = lambda x,idx: (x + idx) * float(confDict['RANGE_GATE_LENGTH'])
return r(rg_vec, .5).astype('f4')
@staticmethod
def split_header(string):
return [x.strip() for x in re.split('[:\=\-]', re.sub('[\n\t]','',string),1)]
@staticmethod
def split_data(string):
return re.split('\s+', re.sub('\n','',string).strip())
#switch_str = {True: split_header(line), False: split_data(line)}
@staticmethod
def split_default(string):
return string
@staticmethod
def switch(case,string):
return {
True: hpl_files.split_header(string),
False: hpl_files.split_data(string)}.get(case, hpl_files.split_default)
@staticmethod
def reader_idx(hpl_list,confDict,chunks=False):
print(hpl_list.time[0:10])
time_file = pd.to_datetime(hpl_list.time)
time_vec= np.arange(pd.to_datetime(hpl_list.time[0].date()),(hpl_list.time[0]+datetime.timedelta(days = 1))
,pd.to_timedelta(int(confDict['AVG_MIN']), unit = 'm'))
if chunks == True:
return [np.where((ii <= time_file)*(time_file < iip1))
for ii,iip1 in zip(time_vec[0:-1],time_vec[1::])]
if chunks == False:
return np.arange(0,len(hpl_list.time))
@staticmethod
def combine_lvl1(hpl_list, confDict, date_chosen):
if confDict['SYSTEM'] == 'halo':
ds = xr.concat((hpl_files.read_hpl(iit,confDict) for iit in hpl_list.name)
,dim='time'#, combine='nested'#,compat='identical'
,data_vars='minimal'
,coords='minimal')
elif confDict['SYSTEM'] == 'windcube':
ds = xr.concat((hpl_files.read_wcsradial(iit,confDict) for iit in hpl_list.name)
, dim='time'#, combine='nested'#,compat='identical'
, data_vars='minimal'
, compat='override'
, coords='minimal')
ds['nqv'].values = ((ds.dv.max() - ds.dv.min()).data/2).astype('f4')
ds['nqf'].values = (2*ds.nqv.data/float(confDict['SYSTEM_WAVELENGTH'])).astype('f4')
ds['resv'].values = (2*ds.nqv.data/float(confDict['FFT_POINTS'])).astype('f4')
## delete 'delv' variable, if all entries are NaN.
if (ds.delv == -999.).all():
ds = ds.drop_vars(['delv'])
# print('dropping "delv" / "spectral width", because all are NaN!')
# if os.name == 'nt':
# ds = ds._drop_vars(['delv'])
#else:
# ds = ds.drop_vars(['delv'])
##!!!NOTE!!!##
# There was an issue under windows, possible due to a version problem,
# so in case an Attribute error occurs change line 126 to following
#ds = ds._drop_vars(['delv'])
## choose only timestamp within a daily range
start_dt = (pd.to_datetime(date_chosen.date()) - pd.Timestamp("1970-01-01")) / pd.Timedelta('1s')
end_dt = (pd.to_datetime(date_chosen + datetime.timedelta(days= +1)) - pd.Timestamp("1970-01-01")) / pd.Timedelta('1s')
ds = ds.isel(time=np.where((ds.time >= start_dt) & (ds.time <= end_dt))[0])
ds.attrs['title']= confDict['NC_TITLE']
ds.attrs['institution']= confDict['NC_INSTITUTION']
ds.attrs['site_location']= confDict['NC_SITE_LOCATION']
ds.attrs['source']= confDict['NC_SOURCE']
ds.attrs['instrument_type']= confDict['NC_INSTRUMENT_TYPE']
ds.attrs['instrument_mode']= confDict['NC_INSTRUMENT_MODE']
if 'NC_INSTRUMENT_FIRMWARE_VERSION' in confDict:
ds.attrs['instrument_firmware_version']= confDict['NC_INSTRUMENT_FIRMWARE_VERSION']
else:
ds.attrs['instrument_firmware_version']= 'N/A'
ds.attrs['instrument_contact']= confDict['NC_INSTRUMENT_CONTACT']
if 'NC_INSTRUMENT_ID' in confDict:
ds.attrs['instrument_id']= confDict['NC_INSTRUMENT_ID']
else:
ds.attrs['instrument_id']= 'N/A'
# ds.attrs['Source']= "HALO Photonics Doppler lidar (system_id: " + confDict['SYSTEM_ID']
ds.attrs['conventions']= confDict['NC_CONVENTIONS']
ds.attrs['processing_date']= str(pd.to_datetime(datetime.datetime.now())) + ' UTC'
# ds.attrs['Author']= confDict['NC_AUTHOR']
ds.attrs['instrument_contact']= confDict['NC_INSTRUMENT_CONTACT']
ds.attrs['data_policy']= confDict['NC_DATA_POLICY']
# attributes for operational use of netCDFs, see E-Profile wind profiler netCDF version 1.7
if 'NC_WIGOS_STATION_ID' in confDict:
ds.attrs['wigos_station_id']= confDict['NC_WIGOS_STATION_ID']
else:
ds.attrs['wigos_station_id']= 'N/A'
if 'NC_WMO_ID' in confDict:
ds.attrs['wmo_id']= confDict['NC_WMO_ID']
else:
ds.attrs['wmo_id']= 'N/A'
if 'NC_PI_ID' in confDict:
ds.attrs['principal_investigator']= confDict['NC_PI_ID']
else:
ds.attrs['principal_investigator']= 'N/A'
if 'NC_INSTRUMENT_SERIAL_NUMBER' in confDict:
ds.attrs['instrument_serial_number']= confDict['NC_INSTRUMENT_SERIAL_NUMBER']
else:
ds.attrs['instrument_serial_number']= ' '
ds.attrs['history']= confDict['NC_HISTORY'] + ' version ' + confDict['VERSION'] + ' on ' + str(pd.to_datetime(datetime.datetime.now())) + ' UTC'
ds.attrs['comments']= confDict['NC_COMMENTS']
## add configuration as attribute used to create the file
configuration = """"""
for dd in confDict:
configuration += dd + '=' + confDict[dd]+'\n'
ds.attrs['File_Configuration']= configuration
# adjust time variable to double (aka float64)
ds.time.data.astype(np.float64)
path= Path(confDict['NC_L1_PATH'] + '/'
+ date_chosen.strftime("%Y") + '/'
+ date_chosen.strftime("%Y%m"))
path.mkdir(parents=True, exist_ok=True)
path= path / Path(confDict['NC_L1_BASENAME'] + 'v' + confDict['VERSION'] + '_' + date_chosen.strftime("%Y%m%d")+ '.nc')
if 'UTC_OFFSET' in confDict:
time_offset = np.timedelta64(int(confDict['UTC_OFFSET']), 'h')
time_delta = int(confDict['UTC_OFFSET'])
else:
time_offset = np.timedelta64(0, 'h')
time_delta = 0
# compress variables
comp = dict(zlib=True, complevel=9)
encoding = {var: comp for var in np.hstack([ds.data_vars,ds.coords])}
ds.time.attrs['units'] = ('seconds since 1970-01-01 00:00:00', 'seconds since 1970-01-01 00:00:00 {:+03d}'.format(time_delta))[abs(np.sign(time_delta))]
ds.time.encoding['units'] = ('seconds since 1970-01-01 00:00:00', 'seconds since 1970-01-01 00:00:00 {:+03d}'.format(time_delta))[abs(np.sign(time_delta))]
ds.to_netcdf(path, encoding=encoding)
# ds.to_netcdf(path, unlimited_dims={'time':True}, encoding=encoding)
ds.close()
return path
@staticmethod
def read_hpl(filename, confDict):
if not filename.exists():
print("Oops, file doesn't exist!")
else:
print('reading file: ' + filename.name)
with filename.open() as infile:
header_info = True
mheader = {}
for line in infile:
if line.startswith("****"):
header_info = False
## Adjust header in order to extract data formats more easily
## 1st for 'Data line 1' , i.e. time of beam etc.
tmp = [x.split() for x in mheader['Data line 1'].split(' ')]
if len(tmp) > 3:
tmp.append(" ".join([tmp[2][2],tmp[2][3]]))
tmp.append(" ".join([tmp[2][4],tmp[2][5]]))
tmp[0] = " ".join(tmp[0])
tmp[1] = " ".join(tmp[1])
tmp[2] = " ".join([tmp[2][0],tmp[2][1]])
mheader['Data line 1'] = tmp
tmp = mheader['Data line 1 (format)'].split(',1x,')
tmp.append(tmp[-1])
tmp.append(tmp[-1])
mheader['Data line 1 (format)'] = tmp
## Adjust header in order to extract data formats more easily
## 2st for 'Data line 2' , i.e. actual data
tmp = [x.split() for x in mheader['Data line 2'].split(' ')]
tmp[0] = " ".join(tmp[0])
tmp[1] = " ".join(tmp[1])
tmp[2] = " ".join(tmp[2])
tmp[3] = " ".join(tmp[3])
mheader['Data line 2'] = tmp
tmp = mheader['Data line 2 (format)'].split(',1x,')
mheader['Data line 2 (format)'] = tmp
## start counter for time and range gates
counter_jj = 0
continue # stop the loop and continue with the next line
tmp = hpl_files.switch(header_info,line)
## this temporary variable indicates whether the a given data line includes
# the spectral width or not, so 2d information can be distinguished from
# 1d information.
indicator = len(line[:10].split())
if header_info == True:
try:
if tmp[0][0:1] == 'i':
tmp_tmp = {'Data line 2 (format)': tmp[0]}
else:
tmp_tmp = {tmp[0]: tmp[1]}
except:
if tmp[0][0] == 'f':
tmp_tmp = {'Data line 1 (format)': tmp[0]}
else:
tmp_tmp = {'blank': 'nothing'}
mheader.update(tmp_tmp)
elif (header_info == False):
if (counter_jj == 0):
n_o_rays = (len(filename.open().read().splitlines())-17)//(int(mheader['Number of gates'])+1)
mbeam = np.recarray((n_o_rays,),
dtype=np.dtype([('time', 'f8')
, ('azimuth', 'f4')
,('elevation','f4')
,('pitch','f4')
,('roll','f4')]))
mdata = np.recarray((n_o_rays,int(mheader['Number of gates'])),
dtype=np.dtype([('range gate', 'i2')
,('velocity', 'f4')
,('snrp1','f4')
,('beta','f4')
,('dels', 'f4')]))
mdata[:, :] = np.full(mdata.shape, -999.)
# store tmp in time array
if (indicator==1):
dt=np.dtype([('time', 'f8'), ('azimuth', 'f4'),('elevation','f4'),('pitch','f4'),('roll','f4')])
if len(tmp) < 4:
tmp.extend(['-999']*2)
if counter_jj < n_o_rays:
mbeam[counter_jj] = np.array(tuple(tmp), dtype=dt)
counter_jj = counter_jj+1
# store tmp in range gate array
elif (indicator==2):
dt=np.dtype([('range gate', 'i2')
, ('velocity', 'f4')
,('snrp1','f4')
,('beta','f4')
,('dels', 'f4')])
ii_index = np.array(tmp[0], dtype=dt[0])
if (len(tmp) == 4):
tmp.append('-999')
mdata[counter_jj-1, ii_index] = np.array(tuple(tmp), dtype=dt)
elif (len(tmp) == 5):
mdata[counter_jj-1, ii_index] = np.array(tuple(tmp), dtype=dt)
#set time information
time_tmp= pd.to_numeric(pd.to_timedelta(pd.DataFrame(mbeam)['time'], unit = 'h')
+pd.to_datetime(datetime.datetime.strptime(mheader['Start time'], '%Y%m%d %H:%M:%S.%f').date())
).values / 10**9
time_ds= [ x+(datetime.timedelta(days=1)).total_seconds()
if time_tmp[0]-x>0 else x
for x in time_tmp
]
#calculate range in meters from range gate number, gate length
range_mid = hpl_files.range_calc(mdata['range gate'][0,:], confDict)
dr = (np.float32(confDict['PULS_DURATION']) * 299792458/4).astype('f4')
range_bnds = np.array([range_mid-dr, range_mid+dr]).T
tgint = (2*np.array(confDict['RANGE_GATE_LENGTH'], dtype='f4') / 299792458).astype('f4')
# SNR_tmp= np.copy(np.squeeze(mdata['snrp1']))-1
SNR_tmp= np.copy(mdata['snrp1'])-1
# SNR_tmp[SNR_tmp<=0]= np.nan
SNR_tmp[abs(SNR_tmp)<=np.finfo(np.float32).eps] = np.finfo(np.float32).eps
## calculate SNR in dB
# SNR_dB= 10*np.log10(np.ma.masked_values(SNR_tmp, np.nan)).filled(np.nan)
SNR_dB= 10*np.log10(SNR_tmp.astype(np.complex)).real
## calculate measurement uncertainty, with consensus indices
sigma_tmp= proc.hpl2netCDF_client.calc_sigma_single(SNR_dB
,int(mheader['Gate length (pts)'])
,int(confDict['PULSES_PER_DIRECTION'])
,float(mheader['Gate length (pts)'])/tgint/2*float(confDict['SYSTEM_WAVELENGTH'])
,1.316)
return xr.Dataset({
'dv': (['time', 'range']
# , np.squeeze(mdata['velocity'])
, mdata['velocity']
, {'units': 'm s-1'
,'long_name' : 'radial velocity of scatterers away from instrument'
,'standard_name' : 'doppler_velocity'
,'comments' : 'A velocity is a vector quantity; the component of the velocity of the scatterers along the line of sight of the instrument where positive implies movement away from the instrument'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'errdv': (['time', 'range']
, sigma_tmp.astype('f4')
, {'units': 'm s-1'
,'long_name' : 'error of Doppler velocity'
,'standard_name' : 'doppler_velocity_error'
,'comments' : 'error of radial velocity calculated from Cramer-Rao lower bound (CRLB)'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'intensity': (['time', 'range']
# , np.squeeze(mdata['snrp1'])
, mdata['snrp1']
, {'units': '1'
,'long_name' : 'backscatter intensity: b_int = snr+1, where snr denotes the signal-to-noise-ratio'
,'standard_name' : 'backscatter_intensity'
,'comments' : 'backscatter intensity: b_int = snr+1'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'beta': (['time', 'range']
# , np.squeeze(mdata['beta'])
, mdata['beta']
, {'units': 'm-1 sr-1'
,'long_name' : 'attenuated backscatter coefficient'
,'standard_name' : 'volume_attenuated_backwards_scattering_function_in_air'
,'comments' : 'determined from SNR by means of lidar equation; uncalibrated and uncorrected'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'delv': (['time', 'range']
# , np.squeeze(mdata['beta'])
, mdata['dels']
, {'units': 'm s-1'
,'long_name' : 'spectral width of detected signal'
,'standard_name' : 'spectral_width'
,'comments' : 'currently not part of the standard data product'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'azi': ('time'
# , np.squeeze(mbeam['azimuth'])
, mbeam['azimuth']
, {'units' : 'degree'
,'long_name' : 'sensor azimuth due reference point'
,'standard_name' : 'sensor_azimuth_angle'
,'_CoordinateAxes': 'time'
,'comments' : 'sensor_azimuth_angle is the horizontal angle between the line of sight from the observation point to the sensor and a reference direction at the observation point, which is often due north. The angle is measured clockwise positive, starting from the reference direction. A comment attribute should be added to a data variable with this standard name to specify the reference direction. A standard name also exists for platform_azimuth_angle, where \"platform\" refers to the vehicle from which observations are made e.g. aeroplane, ship, or satellite. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle.'
}
)
# , 'ele': ('time'
# , mbeam['elevation']
# , {'units' : 'degree'
# ,'long_name' : 'beam direction due elevation'
# ,'standard_name' : 'elevation_angle'
# ,'comments' : 'elevation angle of the beam to local horizone; a value of 90 is directly overhead'
# }
# )
, 'zenith': ('time'
# , 90-np.squeeze(mbeam['elevation'])
, 90-mbeam['elevation']
, {'units' : 'degree'
,'long_name' : 'beam direction due zenith'
,'standard_name' : 'zenith_angle'
,'_CoordinateAxes': 'time'
,'comments' : 'zenith angle of the beam to the local vertical; a value of zero is directly overhead'
}
)
, 'lat': ([]
, np.float32(confDict['SYSTEM_LATITUDE'])
, {'units': 'degrees_north'
,'long_name': 'latitude'
,'standard_name': 'latitude'
,'comments': 'latitude of sensor'
,'_FillValue': -999.
}
)
, 'lon': ([]
, np.float32(confDict['SYSTEM_LONGITUDE'])
, {'units': 'degrees_east'
,'long_name': 'longitude'
,'standard_name': 'longitude'
,'comments': 'longitude of sensor'
,'_FillValue': -999.
}
)
, 'zsl': ([]
, np.float32(confDict['SYSTEM_ALTITUDE'])
, {'units': 'm'
,'comments': 'system altitude above mean sea level'
,'standard_name': 'altitude'
,'_FillValue': -999.
}
)
, 'wl': ([]
, np.float32(confDict['SYSTEM_WAVELENGTH'])
, {'units': 'm'
,'long_name': 'laser center wavelength'
,'standard_name': 'radiation_wavelength'
,'_FillValue': -999.
}
)
, 'pd': ([]
, np.float32(confDict['PULS_DURATION'])
, {'units': 'seconds'
,'long_name': 'laser duration'
,'comments': 'duration of the transmitted pulse pd = 2 dr / c'
,'_FillValue': -999.
}
)
, 'nfft': ([]
, np.float32(confDict['FFT_POINTS'])
, {'units': '1'
,'long_name': 'number of fft points'
,'comments': 'according to the manufacturer'
,'_FillValue': -999.
}
)
# , 'id': ([]
# , confDict['SYSTEM_ID']
# , {'long_name': 'system identification number'}
# )
, 'nrg': ([]
, np.float32(mheader['Number of gates'])
, {'long_name': 'total number of range gates per ray'
,'units': '1'
,'_FillValue': -999.
}
)
, 'lrg': ([]
, np.float32(mheader['Range gate length (m)'])
, {'units' : 'm'
,'long_name': 'range gate length'
,'_FillValue': -999.
}
)
, 'nsmpl': ([]
, np.float32(mheader['Gate length (pts)'])
, {'long_name': 'points per range gate'
,'units': '1'
}
)
, 'prf': ([]
, np.float32(confDict['PULS_REPETITION_FREQ'])
, {'units' : 's-1'
,'long_name': 'pulse repetition frequency'
,'_FillValue': -999.
}
)
, 'npls': ([]
, np.float32(confDict['PULSES_PER_DIRECTION'])#[int(mheader['Pulses/ray'])]
, {'long_name': 'number of pulses per ray'
,'units': '1'
,'_FillValue': -999.
}
)
, 'focus': ([]
, np.float32(mheader['Focus range'])
, {'units' : 'm'
,'long_name': 'telescope focus length'
,'_FillValue': -999.
}
)
, 'resv': ([]
, np.float32(mheader['Resolution (m/s)'])
, {'units' : 'm s-1'
,'long_name': 'resolution of Doppler velocity'
,'_FillValue': -999.
}
)
, 'nqf': ([], (np.float32(mheader['Gate length (pts)'])/tgint/2).astype('f4')
, {'long_name': 'nyquist frequency'
, 'comments' : 'half of the detector sampling frequency; detector bandwidth'
}
)
, 'nqv': ([], (np.float32(mheader['Gate length (pts)'])/tgint/2*np.float32(confDict['SYSTEM_WAVELENGTH'])/2).astype('f4')
, {'long_name': 'nyquist velocity'
,'comments' : 'nq_freq*lambda/2; signal bandwidth'
}
)
, 'smplf': ([], np.float32(mheader['Gate length (pts)'])/tgint
, {'long_name': 'sampling frequency'
,'units': 's-1'
,'comments' : 'nsmpl / tgint'
}
)
, 'resf': ([], (np.float32(mheader['Gate length (pts)'])/tgint/float(confDict['FFT_POINTS'])).astype('f4')
, {'long_name': 'frequency resolution'
,'units': 's-1'
,'comments' : 'smplf / nfft'
}
)
, 'tgint': ([], tgint
, {'long_name': 'total observation time per range gate'
,'units': 's'
,'comments' : 'time window used for time gating the time series of the signal received on the detector: tgint = (2 X) / c, with X = range_bnds[range,1] - range_bnds[range,0]'
}
)
, 'range_bnds': (['range','nv']
, range_bnds.astype('f4')
, {'units': 'm'
,'_FillValue' : -999.
}
)
# , 'pitch': ('time', np.squeeze(mbeam['pitch']))
# , 'roll': ('time', np.squeeze(mbeam['roll']))
}
, coords= { 'time': ( ['time']
, time_ds#.astype(np.float64)
,{ #'units': ('seconds since 1970-01-01 00:00:00', 'seconds since 1970-01-01 00:00:00 {:+03d}'.format(time_delta))[abs(np.sign(time_delta))]
'units': 'seconds since 1970-01-01 00:00:00'
,'standard_name': 'time'
,'long_name': 'Time'
,'calendar':'gregorian'
,'_CoordinateAxisType': 'time'
})
,'range': (['range']
, range_mid.astype('f4')
# , ((mdata['range gate'][0,:] + 0.5) * np.float32(mheader['Range gate length (m)'])).astype('f4')
, {'units' : 'm'
,'long_name': 'line of sight distance towards the center of each range gate'
,'_FillValue': -999.
,'_CoordinateAxisType': 'range'
}
)
, 'nv': (['nv'],np.arange(0,2).astype(np.int8))
}
)
@staticmethod
def read_wcsradial(filename, confDict):
while True:
if not filename.exists():
print("Oops, no such file or directory '{}'".format(filename))
break
else:
print("reading file '{}'".format(filename))
## the windcube netCDF l1-files are in cf-radial format
# this requires a workaround to open to access the radial data,
# when relying on xarray package alone
# read root attributes
ds_root = xr.open_dataset(filename)
sweep_list = list(ds_root.sweep_group_name.data)
# print("combining sweeps {}".format(sweep_list))
# read radial data in sweep group
ds_tmp = xr.concat( ( xr.open_dataset( filename
, group = sweep_ii
, decode_times=False
)
for sweep_ii in sweep_list)
, dim='time'
, data_vars='minimal'
, compat='override'
, coords='minimal')
range_mid = hpl_files.range_calc(ds_tmp.gate_index.data.astype(int), confDict)
dr = (np.float32(confDict['PULS_DURATION']) * 299792458/4).astype('f4')
range_bnds = np.array([range_mid-dr, range_mid+dr]).T
tgint = 2*(range_bnds[0,1] - range_bnds[0,0]) / 299792458
zenith = np.array([90 - ds_root.sweep_fixed_angle.data[0]] * ds_tmp.time.size)
## calculate measurement uncertainty
sigma_tmp = proc.hpl2netCDF_client.calc_sigma_single( ds_tmp.cnr.data
, int(confDict['NUMBER_OF_GATE_POINTS'])
, int(confDict['PULSES_PER_DIRECTION'])
, ((ds_tmp.radial_wind_speed.max() - ds_tmp.radial_wind_speed.min()).data/2).astype('f4')
, 1.316 )
return xr.Dataset({
'dv': (['time', 'range']
, ds_tmp.radial_wind_speed.data
, {'units': 'm s-1'
,'long_name' : 'radial velocity of scatterers away from instrument'
,'standard_name' : 'doppler_velocity'
,'comments' : 'A velocity is a vector quantity; the component of the velocity of the scatterers along the line of sight of the instrument where positive implies movement away from the instrument'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'errdv': (['time', 'range']
, sigma_tmp.astype('f4')
, {'units': 'm s-1'
,'long_name' : 'error of Doppler velocity'
,'standard_name' : 'doppler_velocity_error'
,'comments' : 'error of radial velocity calculated from Cramer-Rao lower bound (CRLB)'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'intensity': (['time', 'range']
, (10**(ds_tmp.cnr.data / 10) + 1).astype('f4')
, {'units': '1'
,'long_name' : 'backscatter intensity: b_int = snr+1, where snr denotes the signal-to-noise-ratio'
,'standard_name' : 'backscatter_intensity'
,'comments' : 'backscatter intensity: b_int = snr+1'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'beta': (['time', 'range']
, ds_tmp.relative_beta.data.astype('f4')
, {'units': 'm-1 sr-1'
,'long_name' : 'attenuated backscatter coefficient'
,'standard_name' : 'volume_attenuated_backwards_scattering_function_in_air'
,'comments' : 'determined from SNR by means of lidar equation; uncalibrated and uncorrected'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'delv': (['time', 'range']
, ds_tmp.doppler_spectrum_width.data.astype('f4')
, {'units': 'm s-1'
,'long_name' : 'spectral width of detected signal'
,'standard_name' : 'spectral_width'
,'comments' : 'currently not part of the standard data product'
,'_FillValue': -999.
,'_CoordinateAxes': 'time range'
}
)
, 'azi': ('time'
, ds_tmp.azimuth.data.astype('f4')
, {'units' : 'degree'
,'long_name' : 'sensor azimuth due reference point'
,'standard_name' : 'sensor_azimuth_angle'
,'_CoordinateAxes': 'time'
,'comments' : 'sensor_azimuth_angle is the horizontal angle between the line of sight from the observation point to the sensor and a reference direction at the observation point, which is often due north. The angle is measured clockwise positive, starting from the reference direction. A comment attribute should be added to a data variable with this standard name to specify the reference direction. A standard name also exists for platform_azimuth_angle, where \"platform\" refers to the vehicle from which observations are made e.g. aeroplane, ship, or satellite. For some viewing geometries the sensor and the platform cannot be assumed to be close enough to neglect the difference in calculated azimuth angle.'
}
)
, 'zenith': ('time'
, zenith.astype('f4')
, {'units' : 'degree'
,'long_name' : 'beam direction due zenith'
,'standard_name' : 'zenith_angle'
,'_CoordinateAxes': 'time'
,'comments' : 'zenith angle of the beam to the local vertical; a value of zero is directly overhead'
}
)
, 'lat': ([]
, np.float32(confDict['SYSTEM_LATITUDE'])
, {'units': 'degrees_north'
,'long_name': 'latitude'
,'standard_name': 'latitude'
,'comments': 'latitude of sensor'
,'_FillValue': -999.
}
)
, 'lon': ([]
, np.float32(confDict['SYSTEM_LONGITUDE'])
, {'units': 'degrees_east'
,'long_name': 'longitude'
,'standard_name': 'longitude'
,'comments': 'longitude of sensor'
,'_FillValue': -999.
}
)
, 'zsl': ([]
, np.float32(confDict['SYSTEM_ALTITUDE'])
, {'units': 'm'
,'comments': 'system altitude above mean sea level'
,'standard_name': 'altitude'
,'_FillValue': -999.
}
)
, 'wl': ([]
, np.float32(confDict['SYSTEM_WAVELENGTH'])
, {'units': 'm'
,'long_name': 'laser center wavelength'
,'standard_name': 'radiation_wavelength'
,'_FillValue': -999.
}
)
, 'pd': ([]
, np.float32(confDict['PULS_DURATION'])
, {'units': 'seconds'
,'long_name': 'laser duration'
,'comments': 'duration of the transmitted pulse pd = 2 dr / c'
,'_FillValue': -999.
}
)
, 'nfft': ([]
, np.float32(confDict['FFT_POINTS'])
, {'units': '1'
,'long_name': 'number of fft points'
,'comments': 'according to the manufacturer'
,'_FillValue': -999.
}
)
, 'nrg': ([]
, np.float32(ds_tmp.dims['range'])
, {'long_name': 'total number of range gates per ray'
,'units': '1'
,'_FillValue': -999.
}
)
, 'lrg': ([]
, np.float32(ds_tmp.range_gate_length.data)
, {'units' : 'm'
,'long_name': 'range gate length'
,'_FillValue': -999.
}
)
, 'nsmpl': ([]
, np.float32(confDict['NUMBER_OF_GATE_POINTS'])
, {'long_name': 'points per range gate'
,'units': '1'
}
)
, 'prf': ([]
, np.float32(confDict['PULS_REPETITION_FREQ'])
, {'units' : 's-1'
,'long_name': 'pulse repetition frequency'
,'_FillValue': -999.
}
)
, 'npls': ([]
, np.float32(confDict['PULSES_PER_DIRECTION'])
, {'long_name': 'number of pulses per ray'
,'units': '1'
,'_FillValue': -999.
}
)
, 'focus': ([]
, np.float32(confDict['FOCUS'])
, {'units' : 'm'
,'long_name': 'telescope focus length'
,'_FillValue': -999.
}
)
, 'resv': ([]
, ((ds_tmp.radial_wind_speed.max() - ds_tmp.radial_wind_speed.min()).data/float(confDict['FFT_POINTS'])).astype('f4')
, {'units' : 'm s-1'
,'long_name': 'resolution of Doppler velocity'
,'_FillValue': -999.
}
)
, 'nqf': ([], # (np.float32(mheader['Gate length (pts)'])/tgint/2).astype('f4')
((ds_tmp.radial_wind_speed.max() - ds_tmp.radial_wind_speed.min()).data/float(confDict['SYSTEM_WAVELENGTH'])).astype('f4')
, {'long_name': 'nyquist frequency'
, 'comments' : 'half of the detector sampling frequency; detector bandwidth'
}
)
, 'nqv': ([], # (np.float32(confDict['NUMBER_OF_GATE_POINTS'])/tgint/2*np.float32(confDict['SYSTEM_WAVELENGTH'])/2).astype('f4')
((ds_tmp.radial_wind_speed.max() - ds_tmp.radial_wind_speed.min()).data/2).astype('f4')
, {'long_name': 'nyquist velocity'
,'comments' : 'nq_freq*lambda/2; signal bandwidth'
}
)
, 'smplf': ([], np.float32(confDict['NUMBER_OF_GATE_POINTS'])/tgint
, {'long_name': 'sampling frequency'
,'units': 's-1'
,'comments' : 'nsmpl / tgint'
}
)
, 'resf': ([], (np.float32(confDict['NUMBER_OF_GATE_POINTS'])/tgint/float(confDict['FFT_POINTS'])).astype('f4')
, {'long_name': 'frequency resolution'
,'units': 's-1'
,'comments' : 'smplf / nfft'
}
)
, 'tgint': ([], tgint
, {'long_name': 'total observation time per range gate'
,'units': 's'
,'comments' : 'time window used for time gating the time series of the signal received on the detector: tgint = (2 X) / c, with X = range_bnds[range,1] - range_bnds[range,0]'
}
)
, 'range_bnds': (['range','nv']
, range_bnds.astype('f4')
, {'units': 'm'
,'_FillValue' : -999.
}
)
# , 'pitch': ('time', np.squeeze(mbeam['pitch']))
# , 'roll': ('time', np.squeeze(mbeam['roll']))
}
, coords= { 'time': ( ['time']
, ds_tmp.time.data
,{ 'units': "seconds since {}".format(
|
pd.to_datetime(ds_tmp.time_reference.data)
|
pandas.to_datetime
|
#
# Copyright (c) 2021 salesforce.com, inc.
# All rights reserved.
# SPDX-License-Identifier: BSD-3-Clause
# For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
import logging
import os
import requests
from tqdm import tqdm
import pandas as pd
from ts_datasets.base import BaseDataset
logger = logging.getLogger(__name__)
class M4(BaseDataset):
"""
The M4 Competition data is an extended and diverse set of time series to
identify the most accurate forecasting method(s) for different types
of domains, including Business, financial and economic forecasting,
and different type of granularity, including Yearly (23,000 sequences),
Quarterly (24,000 sequences), Monthly (48,000 sequences),
Weekly(359 sequences), Daily (4,227 sequences) and Hourly (414 sequences)
data.
- source: https://github.com/Mcompetitions/M4-methods/tree/master/Dataset
- timeseries sequences: 100,000
"""
valid_subsets = ["Yearly", "Quarterly", "Monthly", "Weekly", "Daily", "Hourly"]
url = "https://github.com/Mcompetitions/M4-methods/raw/master/Dataset/{}.csv"
def __init__(self, subset="Hourly", rootdir=None):
super().__init__()
self.subset = subset
assert subset in self.valid_subsets, f"subset should be in {self.valid_subsets}, but got {subset}"
if rootdir is None:
fdir = os.path.dirname(os.path.abspath(__file__))
merlion_root = os.path.abspath(os.path.join(fdir, "..", "..", ".."))
rootdir = os.path.join(merlion_root, "data", "M4")
# download dataset if it is not found in root dir
if not os.path.isdir(rootdir):
logger.info(
f"M4 {subset} dataset cannot be found from {rootdir}.\n"
f"M4 {subset} dataset will be downloaded from {self.url}.\n"
)
download(rootdir, self.url, "M4-info")
# extract starting date from meta-information of dataset
info_dataset = pd.read_csv(os.path.join(rootdir, "M4-info.csv"), delimiter=",").set_index("M4id")
if subset == "Yearly":
logger.warning(
"the max length of yearly data is 841 which is too big to convert to "
"timestamps, we fallback to quarterly frequency"
)
freq = "13W"
elif subset == "Quarterly":
freq = "13W"
elif subset == "Monthly":
freq = "30D"
else:
freq = subset[0]
train_csv = os.path.join(rootdir, f"train/{subset}-train.csv")
if not os.path.isfile(train_csv):
download(os.path.join(rootdir, "train"), self.url, f"{subset}-train", "Train")
test_csv = os.path.join(rootdir, f"test/{subset}-test.csv")
if not os.path.isfile(test_csv):
download(os.path.join(rootdir, "test"), self.url, f"{subset}-test", "Test")
train_set = pd.read_csv(train_csv).set_index("V1")
test_set =
|
pd.read_csv(test_csv)
|
pandas.read_csv
|
""" Analytics Toolkit """
import errno
import hashlib
import hmac
import logging
import logging.config
import nltk
import os
import pandas as pd
import re
import yaml
from datetime import datetime, timedelta
from typing import List
class SafeDict(dict):
def __missing__(self, key):
return '{' + key + '}'
def read_sql_file(path_to_file, **kwargs):
""" Loads SQL file as a string.
Arguments:
path_to_file (string): path to sql file from PROJECT_ROOT
*kwargs: can be passed if some parameters are to be passed.
Returns:
a SQL formated with **kwargs if applicable.
Example:
With SQL as:
"select .. {param2} .. {param1} .. {paramN}"
*kwargs as:
param1=value1
param2=value2
paranN=valueN
The functions returns:
"select .. value2 .. value1 .. valueN"
"""
full_path = os.path.join(os.environ.get("PROJECT_ROOT"), path_to_file)
file = open(full_path, 'r')
sql = file.read()
file.close()
if len(kwargs) > 0:
sql = sql.format_map(SafeDict(**kwargs))
return sql
def read_yaml_file(path_to_file):
""" Loads YAML file as a dictionary.
Arguments:
- path_to_file (string): path to yaml file from PROJECT_ROOT
"""
full_path = os.path.join(os.getenv("PROJECT_ROOT"), path_to_file)
with open(full_path, 'r') as file:
config = yaml.safe_load(file)
file.close()
return config
def customer_hash(email):
""" Hash Email address using sha256 algorithm with salt.
Parameters:
email (string): Email address of the customer
Returns:
UID (string)
"""
if "EMAIL_HASH_SALT" in os.environ:
pass
else:
raise KeyError("EMAIL_HASH_SALT does not exist")
if isinstance(email, str):
if email != '':
email = bytes(email.lower(), 'utf-8')
salt = bytes(os.environ.get("EMAIL_HASH_SALT"), 'utf-8')
hash_ = hmac.new(key=salt,
digestmod=hashlib.sha256)
hash_.update(email)
uid = str(hash_.hexdigest())[0:16]
else:
uid = '0000000000000000'
return uid
elif email is None:
uid = '0000000000000000'
return uid
else:
raise KeyError("Email argument should be a string")
def stringify(value):
"""
Returns the string representation of the value.
"""
if value is None:
return 'null'
elif value is True:
return 'True'
elif value is False:
return 'False'
return str(value)
def is_email_address(text):
"""
Return true if it is a valid email address
"""
return re.search(r'[\w\.-]+@[\w\.-]+', text)
def anonymizer(text):
"""
A part-of-speech tagger, or POS-tagger, processes a sequence
of words, and attaches a part of speech tag to each word. See
https://www.nltk.org/index.html for more information.
Cant be used ATM. Need to add the process installation to base image.
$ pip install -U textblob
$ python -m textblob.download_corpora
"""
if text is None or text == "":
new_text = text
else:
new_text = []
# Splits text into sentences
sentence_list = text.replace('\n', ' ').split(". ")
for sentence in sentence_list:
# Splits sentence into list of words and filters empty elts.
# Not using nltk.word_tokenize as it splits an email address
# in several entities.
word_list = list(filter(None, sentence.split(" ")))
# process word_list
pos = nltk.pos_tag(word_list)
new_word_list = []
for word in pos:
if is_email_address(word[0]):
# tags word as EMAIL
new_word_list.append("{EMAIL}")
elif word[1] == 'NNP':
# tags word as NAME (proper noun)
new_word_list.append("{NAME}")
elif word[1] == 'CD':
# tags word as NUMBER
new_word_list.append("{NUMBER}")
else:
# no tranformation
new_word_list.append(word[0])
new_sentence = " ".join(new_word_list)
new_text.append(new_sentence)
new_text = ". ".join(new_text)
return new_text
def get_date(window, date_format="%Y-%m-%d"):
"""
Returns date in string format ('%Y-%m-%d') from today using window in days.
get_date(window=0) returns today, get_date(widnow=1) returns yesterday...
"""
date = datetime.today() - timedelta(days=window)
return date.strftime(date_format)
def get_today_date(date_format="%Y-%m-%d"):
"""
Returns today date in string format specified. Defaults to '%Y-%m-%d'.
"""
return get_date(0, date_format)
def get_yesterday_date(date_format="%Y-%m-%d"):
"""
Returns yesterday date in string format specified. Defaults to '%Y-%m-%d'.
"""
return get_date(1, date_format)
def date_lister(start_date, end_date):
"""
Returns list of dates between start_date and end_date in string format ('%Y-%m-%d')
Arguments:
- start_date (string)
- end_date (string)
"""
if end_date < start_date:
date_list = []
logging.error("End date must be equal or after start_date")
else:
date_list = pd.date_range(start_date, end_date)
date_list = date_list.format()
logging.info(date_list)
return date_list
def validate_date(date, format='%Y-%m-%d', error_msg=None):
try:
datetime.strptime(date, format)
except ValueError:
if error_msg is None:
raise ValueError("Incorrect data format, should be {}".format(format))
else:
raise ValueError(error_msg)
def level_is_valid(verbosity):
'''
Validates key parameter for configure_logging() and raises approriate
exception messages.
Returns the expected level for verbosity.
'''
if not isinstance(verbosity, str):
raise ValueError('verbosity needs to be a string')
verbosity = verbosity.upper()
logging.info("verbosity passed: %s", verbosity)
if verbosity not in ('DEBUG', 'INFO', 'QUIET', 'WARNING', 'CRITICAL'):
raise ValueError('verbosity needs to be one of the following: DEBUG, INFO, QUIET, WARNING, CRITICAL')
return verbosity
def configure_logging(env=None, verbosity=None):
""" Sets logging """
levels = {
"DEBUG": {
'level': "DEBUG",
'format': "%(asctime)s,%(msecs)3d - %(levelname)-8s - %(funcName)5s:%(lineno)-10s :: %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
"INFO": {
'level': "INFO",
'format': "%(asctime)s - %(levelname)-8s — %(name)-10s :: %(message)s",
'datefmt': "%H:%M:%S"
},
"QUIET": {
'level': "ERROR",
'format': "%(asctime)s - %(levelname)-8s — %(name)-10s :: %(message)s",
'datefmt': "%H:%M:%S"
},
"WARNING": {
'level': "WARNING",
'format': "%(asctime)s - %(levelname)s - %(funcName)-10s :: %(message)s",
'datefmt': "%H:%M:%S"
},
"CRITICAL": {
'level': "CRITICAL",
'format': "%(asctime)s - %(levelname)-8s - %(funcName)5s:%(lineno)-10s :: %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
}
}
if verbosity is None:
setlevel = levels["INFO"]['level']
setformat = levels["INFO"]['format']
setdatefmt = levels["INFO"]['datefmt']
else:
verbosity = level_is_valid(verbosity)
setlevel = levels[verbosity]['level']
setformat = levels[verbosity]['format']
setdatefmt = levels[verbosity]['datefmt']
logging.config.dictConfig(
{
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': setlevel,
'handlers': ['default']
},
'formatters': {
'default': {
'format': setformat,
'datefmt': setdatefmt
},
},
'handlers': {
'default': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'default',
'level': logging.DEBUG
}
}
}
)
def convert_list_for_sql(my_list):
""" Convert a python list to a SQL list.
The function is primarly used when trying to format SQL queries by passing an argument.
Arguments:
my_list: list of elements to be used in a SQL query
Example:
1. convert_list_for_sql([1, 2, 3]) returns '1, 2, 3'
2. convert_list_for_sql(['Simone', 'Dan']) returns ''Simone', 'Dan''
"""
final_list = []
for item in my_list:
if isinstance(item, str):
_item = '\'{}\''.format(item)
else:
_item = item
final_list.append(_item)
return ", ".join([str(item) for item in final_list])
def split_full_path(
path: str
) -> List[str]:
"""
Splits a full file path to path and file name.
Args:
path: full file path e.g. 'path/to/file.json'
Returns:
[f_path, f_name]: ['path/to', 'file.json']
"""
split = path.split("/")
f_name = split[-1]
split.remove(f_name)
f_path = "/".join(split)
return f_path, f_name
def create_folder(
path,
print_status: bool = True
):
"""
Checks if a folder exists, if not creates it.
Args:
path: folder path
print_status: print folder status
"""
try:
os.makedirs(path)
if print_status:
print(f"Folder created at '{path}'.")
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def write_text_file(
content: str,
file_path: str
):
"""
Write string to a text file.
Args:
content: string to write
file_path: e.g. 'path/file.txt'
"""
t = open(file_path, "w")
t.write(content)
t.close()
def slice_list_to_chunks(lst, n):
"""
Slice a list into chunks of size n.
Args:
list
int
Returns:
[list]
"""
chunks = [lst[x:x+n] for x in range(0, len(lst), n)]
return chunks
def flatten_df_columns(df: pd.DataFrame) -> pd.DataFrame:
"""
Flatten repeated fields in a pandas dataframe an unnest columns.
(for single layer nesting only)
Checks if column is nested - if not raises AttributeError and appends
original column.
Args:
df
Returns:
df
"""
# explode repeated fields
for col in df.columns:
df = df.explode(col)
df = df.reset_index(drop=True)
# unnest
df_flat = pd.DataFrame()
for col in df.columns:
try:
col_flat = df[col].apply(lambda x: {} if pd.isna(x) else x)
col_flat = pd.json_normalize(col_flat)
if col_flat.shape[1] == 1:
raise AttributeError
else:
col_flat.columns = [f"{col}__{x}" for x in col_flat.columns]
df_flat = pd.concat([df_flat, col_flat], axis=1)
except AttributeError:
df_flat =
|
pd.concat([df_flat, df[col]], axis=1)
|
pandas.concat
|
import numpy as np
import csv
from datetime import date
import random
from sklearn import linear_model
from sklearn.model_selection import train_test_split, validation_curve
from sklearn.preprocessing import StandardScaler
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import pandas as pd
data=pd.read_csv('BOS_CUN_trips1M.csv')
def add_features():
'''
This section is a list of helper functions for further optimizing code
-------------------------------------------------------------------------------------------------------------------------------------------------
'''
def create_dict(id_list):
#creates a dictionary for relevant one-hot categorical vectors
id_dict={}
for i in range(len(id_list)):
id_dict[id_list[i]]=i
return id_dict
def total_days(departure, order):
#calculates the total days between order date and departure date and changes the raw value to categorical data
total_days=departure.sub(order)
total_days.rename(columns={0:'total days'}, axis='columns')
total_days.astype('timedelta64[D]')
total_days=total_days.apply(lambda x: x.days)
total_days=pd.cut(total_days, bins=12)
return pd.get_dummies(total_days)
def one_hot(features, feature_list, prefixes):
#creates one-hot vectors for all the categorical data
for i in range(len(feature_list)):
if type(feature_list[i])==str:
feature_vector=pd.get_dummies(data[feature_list[i]], prefix=prefixes[i])
else:
feature_vector=pd.get_dummies(feature_list[i], prefix=prefixes[i])
features=pd.concat([features,feature_vector], axis=1)
return features
'''
-------------------------------------------------------------------------------------------------------------------------------------------------
This initializes many of the labels for the data frames and certain dates into date time as well as lists to help shorten and optimize code length
------------------------------------------------------------------------------------------------------------------------------------------------------
'''
monthsDepart=['Depart January', 'Depart February', 'Depart March', 'Depart April', 'Depart May', 'Depart June', 'Depart July', 'Depart August', 'Depart September', 'Depart October', 'Depart November', 'Depart December']
monthsReturn=['Return January', 'Return February', 'Return March', 'Return April', 'Return May', 'Return June', 'Return July', 'Return August', 'Return September', 'Return October', 'Return November', 'Return December']
days_of_weekD=['Depart Monday', 'Depart Tuesday', 'Depart Wednesday', 'Depart Thursday', 'Depart Friday', 'Depart Saturday','Depart Sunday']
days_of_weekR=['Return Monday', 'Return Tuesday', 'Return Wednesday', 'Return Thursday', 'Return Friday', 'Return Saturday','Return Sunday']
#creates dictionary of carrier ids
carrier_ids=create_dict(data.majorcarrierid.unique())
#creates dictionary of cabin classes
cabin_ids=create_dict(data.cabinclass.unique())
#creates dictionary of sources
source_ids=create_dict(data.source.unique())
#converting dates to date_time
order_date=pd.to_datetime(data['received_odate'])
departure_date=pd.to_datetime(data['departure_odate'])
return_date=pd.to_datetime(data['return_ddate'])
#getting the month of departure and return
departure_month=
|
pd.DatetimeIndex(departure_date)
|
pandas.DatetimeIndex
|
"""
Testing interaction between the different managers (BlockManager, ArrayManager)
"""
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
import pandas._testing as tm
from pandas.core.internals import (
ArrayManager,
BlockManager,
SingleArrayManager,
SingleBlockManager,
)
def test_dataframe_creation():
with pd.option_context("mode.data_manager", "block"):
df_block = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
assert isinstance(df_block._mgr, BlockManager)
with pd.option_context("mode.data_manager", "array"):
df_array = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
assert isinstance(df_array._mgr, ArrayManager)
# also ensure both are seen as equal
tm.assert_frame_equal(df_block, df_array)
# conversion from one manager to the other
result = df_block._as_manager("block")
assert isinstance(result._mgr, BlockManager)
result = df_block._as_manager("array")
assert isinstance(result._mgr, ArrayManager)
tm.assert_frame_equal(result, df_block)
assert all(
array_equivalent(left, right)
for left, right in zip(result._mgr.arrays, df_array._mgr.arrays)
)
result = df_array._as_manager("array")
assert isinstance(result._mgr, ArrayManager)
result = df_array._as_manager("block")
assert isinstance(result._mgr, BlockManager)
tm.assert_frame_equal(result, df_array)
assert len(result._mgr.blocks) == 2
def test_series_creation():
with
|
pd.option_context("mode.data_manager", "block")
|
pandas.option_context
|
# flask 서버
import sys
import os
import dateutil.relativedelta
from flask import Flask,request,Response
from multiprocessing import Process
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
import json
from functools import wraps
import mpld3
# koapy
from koapy import KiwoomOpenApiPlusEntrypoint, KiwoomOpenApiPlusTrInfo
from pandas import Timestamp
import matplotlib.pyplot as plt
import pandas as pd
from exchange_calendars import get_calendar
# DB
from DataBase.SqliteDB import StockDB
# Custom
from datetime import datetime
import logging
# Telegram
import telepot
if not os.path.exists('log'):
os.mkdir('log')
fh = logging.FileHandler(filename=os.path.join('log', '{:%Y-%m-%d}.log'.format(datetime.now())),
encoding="utf-8")
format = '[%(asctime)s] I %(filename)s | %(name)s-%(funcName)s-%(lineno)04d I %(levelname)-8s > %(message)s'
fh.setLevel(logging.DEBUG)
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
logging.basicConfig(format=format, handlers=[fh, sh], level=logging.DEBUG)
########### init ###########
app = Flask(__name__)
server = Process()
stock_db = StockDB()
# 1. 엔트리포인트 객체 생성
entrypoint = KiwoomOpenApiPlusEntrypoint()
# 2. 로그인
print('Logging in...')
entrypoint.EnsureConnected()
logging.info('Logged in.')
base_account = entrypoint.GetAccountList()[0]
# 3. kospi/kosdaq 종목리스트 저장
# 종목 리스트 확인 (기본 함수 호출 예시)
print('Getting stock codes and names...')
codes = entrypoint.GetKospiCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kospi = dict(zip(names, codes))
names_by_codes_dict_kospi = dict(zip(codes, names))
codes = entrypoint.GetKosdaqCodeList()
names = [entrypoint.GetMasterCodeName(code) for code in codes]
codes_by_names_dict_kosdaq = dict(zip(names, codes))
names_by_codes_dict_kosdaq = dict(zip(codes, names))
logging.info('End stock codes and names...')
# 6.주문처리
krx_calendar = get_calendar('XKRX')
# 7.telegram 등록
def getToken():
f = open("telebot.txt")
token = f.readline().strip()
userid = f.readline().strip()
f.close()
return (token , userid)
(token, chat_id) = getToken()
bot = telepot.Bot(token)
def as_json(f):
@wraps(f)
def decorated_function(*args, **kwargs):
res = f(*args, **kwargs)
res = json.dumps(res, ensure_ascii=False, indent=4).encode('utf8')
return Response(res, content_type='application/json; charset=utf-8')
return decorated_function
@app.route('/')
def home():
# 접속 상태 확인 (기본 함수 호출 예시)
print('Checking connection status...')
status = entrypoint.GetConnectState()
print('Connection status: %s', status)
return 'Kiwoom Bridge Made By Dotz'
@app.route('/disconnect', methods=['GET'])
def disconnect():
# 리소스 해제
entrypoint.close()
shutdown_server()
print('Server shutting down...')
@app.route('/myaccount', methods=['GET'])
def myaccount():
sAccNo = base_account
account = stock_db.load_account_table().to_html()
tname = 'account_detail_{}'.format(sAccNo)
account_detail = stock_db.load_account_detail_table(tname)
result = account + '</br></br>'
result += account_detail.to_html()
return result
@app.route('/stock_list/<kind>')
@as_json
def get_stock_list(kind):
if kind == 'kospi':
return names_by_codes_dict_kospi
elif kind == 'kosdaq':
return names_by_codes_dict_kosdaq
@app.route('/basic_info/<code>')
@as_json
def get_basic_info(code): # 업데이트 예정
print('Getting basic info of %s', code)
info = entrypoint.GetStockBasicInfoAsDict(code)
print('Got basic info data (using GetStockBasicInfoAsDict):')
return info
@app.route('/index_stock_data/<name>')
def get_index_stock_data(name):
# date, open, high, low, close, volume
tname = stock_db.getTableName(name)
result = stock_db.load(tname)
if result is None:
return ('', 204)
html = "<div style=\"position: relative;\"><h1 align=\"center\">"+name+"지수 차트</h1>"
result = result.astype({'date': 'str', 'open': 'int', 'high': 'int', 'low': 'int', 'close': 'int', 'volume': 'int'})
result['open'] = result['open'].apply(lambda _: _ / 100 if _ > 0 else _)
result['high'] = result['high'].apply(lambda _: _ / 100 if _ > 0 else _)
result['low'] = result['low'].apply(lambda _: _ / 100 if _ > 0 else _)
result['close'] = result['close'].apply(lambda _: _ / 100 if _ > 0 else _)
dates =
|
pd.to_datetime(result['date'], format='%Y%m%d')
|
pandas.to_datetime
|
import pandas as pd
from pandas import Period, offsets
from pandas.util import testing as tm
from pandas.tseries.frequencies import _period_code_map
class TestFreqConversion(tm.TestCase):
"Test frequency conversion of date objects"
def test_asfreq_corner(self):
val = Period(freq='A', year=2007)
result1 = val.asfreq('5t')
result2 = val.asfreq('t')
expected = Period('2007-12-31 23:59', freq='t')
self.assertEqual(result1.ordinal, expected.ordinal)
self.assertEqual(result1.freqstr, '5T')
self.assertEqual(result2.ordinal, expected.ordinal)
self.assertEqual(result2.freqstr, 'T')
def test_conv_annual(self):
# frequency conversion tests: from Annual Frequency
ival_A = Period(freq='A', year=2007)
ival_AJAN = Period(freq="A-JAN", year=2007)
ival_AJUN = Period(freq="A-JUN", year=2007)
ival_ANOV = Period(freq="A-NOV", year=2007)
ival_A_to_Q_start = Period(freq='Q', year=2007, quarter=1)
ival_A_to_Q_end = Period(freq='Q', year=2007, quarter=4)
ival_A_to_M_start = Period(freq='M', year=2007, month=1)
ival_A_to_M_end = Period(freq='M', year=2007, month=12)
ival_A_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_A_to_W_end = Period(freq='W', year=2007, month=12, day=31)
ival_A_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_A_to_B_end = Period(freq='B', year=2007, month=12, day=31)
ival_A_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_A_to_D_end = Period(freq='D', year=2007, month=12, day=31)
ival_A_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_A_to_H_end = Period(freq='H', year=2007, month=12, day=31,
hour=23)
ival_A_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_A_to_T_end = Period(freq='Min', year=2007, month=12, day=31,
hour=23, minute=59)
ival_A_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_A_to_S_end = Period(freq='S', year=2007, month=12, day=31,
hour=23, minute=59, second=59)
ival_AJAN_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_AJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_AJUN_to_D_end = Period(freq='D', year=2007, month=6, day=30)
ival_AJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_ANOV_to_D_end = Period(freq='D', year=2007, month=11, day=30)
ival_ANOV_to_D_start = Period(freq='D', year=2006, month=12, day=1)
self.assertEqual(ival_A.asfreq('Q', 'S'), ival_A_to_Q_start)
self.assertEqual(ival_A.asfreq('Q', 'e'), ival_A_to_Q_end)
self.assertEqual(ival_A.asfreq('M', 's'), ival_A_to_M_start)
self.assertEqual(ival_A.asfreq('M', 'E'), ival_A_to_M_end)
self.assertEqual(ival_A.asfreq('W', 'S'), ival_A_to_W_start)
self.assertEqual(ival_A.asfreq('W', 'E'), ival_A_to_W_end)
self.assertEqual(ival_A.asfreq('B', 'S'), ival_A_to_B_start)
self.assertEqual(ival_A.asfreq('B', 'E'), ival_A_to_B_end)
self.assertEqual(ival_A.asfreq('D', 'S'), ival_A_to_D_start)
self.assertEqual(ival_A.asfreq('D', 'E'), ival_A_to_D_end)
self.assertEqual(ival_A.asfreq('H', 'S'), ival_A_to_H_start)
self.assertEqual(ival_A.asfreq('H', 'E'), ival_A_to_H_end)
self.assertEqual(ival_A.asfreq('min', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('min', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('T', 'S'), ival_A_to_T_start)
self.assertEqual(ival_A.asfreq('T', 'E'), ival_A_to_T_end)
self.assertEqual(ival_A.asfreq('S', 'S'), ival_A_to_S_start)
self.assertEqual(ival_A.asfreq('S', 'E'), ival_A_to_S_end)
self.assertEqual(ival_AJAN.asfreq('D', 'S'), ival_AJAN_to_D_start)
self.assertEqual(ival_AJAN.asfreq('D', 'E'), ival_AJAN_to_D_end)
self.assertEqual(ival_AJUN.asfreq('D', 'S'), ival_AJUN_to_D_start)
self.assertEqual(ival_AJUN.asfreq('D', 'E'), ival_AJUN_to_D_end)
self.assertEqual(ival_ANOV.asfreq('D', 'S'), ival_ANOV_to_D_start)
self.assertEqual(ival_ANOV.asfreq('D', 'E'), ival_ANOV_to_D_end)
self.assertEqual(ival_A.asfreq('A'), ival_A)
def test_conv_quarterly(self):
# frequency conversion tests: from Quarterly Frequency
ival_Q = Period(freq='Q', year=2007, quarter=1)
ival_Q_end_of_year = Period(freq='Q', year=2007, quarter=4)
ival_QEJAN = Period(freq="Q-JAN", year=2007, quarter=1)
ival_QEJUN = Period(freq="Q-JUN", year=2007, quarter=1)
ival_Q_to_A = Period(freq='A', year=2007)
ival_Q_to_M_start = Period(freq='M', year=2007, month=1)
ival_Q_to_M_end = Period(freq='M', year=2007, month=3)
ival_Q_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_Q_to_W_end = Period(freq='W', year=2007, month=3, day=31)
ival_Q_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_Q_to_B_end = Period(freq='B', year=2007, month=3, day=30)
ival_Q_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_Q_to_D_end = Period(freq='D', year=2007, month=3, day=31)
ival_Q_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_Q_to_H_end = Period(freq='H', year=2007, month=3, day=31, hour=23)
ival_Q_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_Q_to_T_end = Period(freq='Min', year=2007, month=3, day=31,
hour=23, minute=59)
ival_Q_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_Q_to_S_end = Period(freq='S', year=2007, month=3, day=31, hour=23,
minute=59, second=59)
ival_QEJAN_to_D_start = Period(freq='D', year=2006, month=2, day=1)
ival_QEJAN_to_D_end = Period(freq='D', year=2006, month=4, day=30)
ival_QEJUN_to_D_start = Period(freq='D', year=2006, month=7, day=1)
ival_QEJUN_to_D_end = Period(freq='D', year=2006, month=9, day=30)
self.assertEqual(ival_Q.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q_end_of_year.asfreq('A'), ival_Q_to_A)
self.assertEqual(ival_Q.asfreq('M', 'S'), ival_Q_to_M_start)
self.assertEqual(ival_Q.asfreq('M', 'E'), ival_Q_to_M_end)
self.assertEqual(ival_Q.asfreq('W', 'S'), ival_Q_to_W_start)
self.assertEqual(ival_Q.asfreq('W', 'E'), ival_Q_to_W_end)
self.assertEqual(ival_Q.asfreq('B', 'S'), ival_Q_to_B_start)
self.assertEqual(ival_Q.asfreq('B', 'E'), ival_Q_to_B_end)
self.assertEqual(ival_Q.asfreq('D', 'S'), ival_Q_to_D_start)
self.assertEqual(ival_Q.asfreq('D', 'E'), ival_Q_to_D_end)
self.assertEqual(ival_Q.asfreq('H', 'S'), ival_Q_to_H_start)
self.assertEqual(ival_Q.asfreq('H', 'E'), ival_Q_to_H_end)
self.assertEqual(ival_Q.asfreq('Min', 'S'), ival_Q_to_T_start)
self.assertEqual(ival_Q.asfreq('Min', 'E'), ival_Q_to_T_end)
self.assertEqual(ival_Q.asfreq('S', 'S'), ival_Q_to_S_start)
self.assertEqual(ival_Q.asfreq('S', 'E'), ival_Q_to_S_end)
self.assertEqual(ival_QEJAN.asfreq('D', 'S'), ival_QEJAN_to_D_start)
self.assertEqual(ival_QEJAN.asfreq('D', 'E'), ival_QEJAN_to_D_end)
self.assertEqual(ival_QEJUN.asfreq('D', 'S'), ival_QEJUN_to_D_start)
self.assertEqual(ival_QEJUN.asfreq('D', 'E'), ival_QEJUN_to_D_end)
self.assertEqual(ival_Q.asfreq('Q'), ival_Q)
def test_conv_monthly(self):
# frequency conversion tests: from Monthly Frequency
ival_M = Period(freq='M', year=2007, month=1)
ival_M_end_of_year = Period(freq='M', year=2007, month=12)
ival_M_end_of_quarter = Period(freq='M', year=2007, month=3)
ival_M_to_A = Period(freq='A', year=2007)
ival_M_to_Q = Period(freq='Q', year=2007, quarter=1)
ival_M_to_W_start = Period(freq='W', year=2007, month=1, day=1)
ival_M_to_W_end = Period(freq='W', year=2007, month=1, day=31)
ival_M_to_B_start = Period(freq='B', year=2007, month=1, day=1)
ival_M_to_B_end = Period(freq='B', year=2007, month=1, day=31)
ival_M_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_M_to_D_end = Period(freq='D', year=2007, month=1, day=31)
ival_M_to_H_start = Period(freq='H', year=2007, month=1, day=1, hour=0)
ival_M_to_H_end = Period(freq='H', year=2007, month=1, day=31, hour=23)
ival_M_to_T_start = Period(freq='Min', year=2007, month=1, day=1,
hour=0, minute=0)
ival_M_to_T_end = Period(freq='Min', year=2007, month=1, day=31,
hour=23, minute=59)
ival_M_to_S_start = Period(freq='S', year=2007, month=1, day=1, hour=0,
minute=0, second=0)
ival_M_to_S_end = Period(freq='S', year=2007, month=1, day=31, hour=23,
minute=59, second=59)
self.assertEqual(ival_M.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M_end_of_year.asfreq('A'), ival_M_to_A)
self.assertEqual(ival_M.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M_end_of_quarter.asfreq('Q'), ival_M_to_Q)
self.assertEqual(ival_M.asfreq('W', 'S'), ival_M_to_W_start)
self.assertEqual(ival_M.asfreq('W', 'E'), ival_M_to_W_end)
self.assertEqual(ival_M.asfreq('B', 'S'), ival_M_to_B_start)
self.assertEqual(ival_M.asfreq('B', 'E'), ival_M_to_B_end)
self.assertEqual(ival_M.asfreq('D', 'S'), ival_M_to_D_start)
self.assertEqual(ival_M.asfreq('D', 'E'), ival_M_to_D_end)
self.assertEqual(ival_M.asfreq('H', 'S'), ival_M_to_H_start)
self.assertEqual(ival_M.asfreq('H', 'E'), ival_M_to_H_end)
self.assertEqual(ival_M.asfreq('Min', 'S'), ival_M_to_T_start)
self.assertEqual(ival_M.asfreq('Min', 'E'), ival_M_to_T_end)
self.assertEqual(ival_M.asfreq('S', 'S'), ival_M_to_S_start)
self.assertEqual(ival_M.asfreq('S', 'E'), ival_M_to_S_end)
self.assertEqual(ival_M.asfreq('M'), ival_M)
def test_conv_weekly(self):
# frequency conversion tests: from Weekly Frequency
ival_W = Period(freq='W', year=2007, month=1, day=1)
ival_WSUN = Period(freq='W', year=2007, month=1, day=7)
ival_WSAT = Period(freq='W-SAT', year=2007, month=1, day=6)
ival_WFRI = Period(freq='W-FRI', year=2007, month=1, day=5)
ival_WTHU = Period(freq='W-THU', year=2007, month=1, day=4)
ival_WWED = Period(freq='W-WED', year=2007, month=1, day=3)
ival_WTUE = Period(freq='W-TUE', year=2007, month=1, day=2)
ival_WMON = Period(freq='W-MON', year=2007, month=1, day=1)
ival_WSUN_to_D_start = Period(freq='D', year=2007, month=1, day=1)
ival_WSUN_to_D_end = Period(freq='D', year=2007, month=1, day=7)
ival_WSAT_to_D_start = Period(freq='D', year=2006, month=12, day=31)
ival_WSAT_to_D_end =
|
Period(freq='D', year=2007, month=1, day=6)
|
pandas.Period
|
"""
Testing the ``modelchain`` module.
SPDX-FileCopyrightText: 2019 oemof developer group <<EMAIL>>
SPDX-License-Identifier: MIT
"""
import pandas as pd
import numpy as np
import pytest
from pandas.util.testing import assert_series_equal
import windpowerlib.wind_turbine as wt
import windpowerlib.modelchain as mc
class TestModelChain:
@classmethod
def setup_class(self):
"""Setup default values"""
self.test_turbine = {'hub_height': 100,
'turbine_type': 'E-126/4200',
'power_curve': pd.DataFrame(
data={'value': [0.0, 4200 * 1000],
'wind_speed': [0.0, 25.0]})}
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
wind_speed_8m = np.array([[4.0], [5.0]])
wind_speed_10m = np.array([[5.0], [6.5]])
roughness_length = np.array([[0.15], [0.15]])
self.weather_df = pd.DataFrame(
np.hstack((temperature_2m, temperature_10m, pressure_0m,
wind_speed_8m, wind_speed_10m, roughness_length)),
index=[0, 1],
columns=[np.array(['temperature', 'temperature', 'pressure',
'wind_speed', 'wind_speed',
'roughness_length']),
np.array([2, 10, 0, 8, 10, 0])])
def test_temperature_hub(self):
# Test modelchain with temperature_model='linear_gradient'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with temperature_model='interpolation_extrapolation'
test_mc_2 = mc.ModelChain(
wt.WindTurbine(**self.test_turbine),
temperature_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature']),
np.array([2, 10])])
# temperature_10m is closer to hub height than temperature_2m
temp_exp = pd.Series(data=[266.415, 265.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 243.5])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([10, 200])]
temp_exp = pd.Series(data=[266.415, 267.415], name=10)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
temp_exp = pd.Series(data=[267.0, 267.052632])
assert_series_equal(test_mc_2.temperature_hub(weather_df), temp_exp)
# temperature at hub height
weather_df.columns = [np.array(['temperature', 'temperature']),
np.array([100, 10])]
temp_exp = pd.Series(data=[267, 268], name=100)
assert_series_equal(test_mc.temperature_hub(weather_df), temp_exp)
def test_density_hub(self):
# Test modelchain with density_model='barometric'
test_mc = mc.ModelChain(wt.WindTurbine(**self.test_turbine))
# Test modelchain with density_model='ideal_gas'
test_mc_2 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='ideal_gas')
# Test modelchain with density_model='interpolation_extrapolation'
test_mc_3 = mc.ModelChain(wt.WindTurbine(**self.test_turbine),
density_model='interpolation_extrapolation')
# Parameters for tests
temperature_2m = np.array([[267], [268]])
temperature_10m = np.array([[267], [266]])
pressure_0m = np.array([[101125], [101000]])
weather_df = pd.DataFrame(np.hstack((temperature_2m,
temperature_10m,
pressure_0m)),
index=[0, 1],
columns=[np.array(['temperature',
'temperature',
'pressure']),
np.array([2, 10, 0])])
# temperature_10m is closer to hub height than temperature_2m
rho_exp = pd.Series(data=[1.30591, 1.30919])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
rho_exp = pd.Series(data=[1.30595575725, 1.30923554056])
assert_series_equal(test_mc_2.density_hub(weather_df), rho_exp)
# change heights of temperatures so that old temperature_2m is now used
weather_df.columns = [np.array(['temperature', 'temperature',
'pressure']),
np.array([10, 200, 0])]
rho_exp = pd.Series(data=[1.30591, 1.29940])
assert_series_equal(test_mc.density_hub(weather_df), rho_exp)
rho_exp =
|
pd.Series(data=[1.30595575725, 1.29944375221])
|
pandas.Series
|
#!/usr/bin/env python
# coding: utf-8
import re
import os
import gc
import glob
import keras
import numbers
import tldextract
import numpy as np
import pandas as pd
from tqdm import tqdm
import tensorflow as tf
from itertools import chain
from keras.models import Model
from keras.models import load_model
import matplotlib.pyplot as plt
from collections import Counter
from sklearn import preprocessing
from gensim.models import FastText
from sklearn.decomposition import PCA
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from keras.layers import Input, Embedding, LSTM, Dense, Bidirectional, Dropout
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
import warnings
warnings.filterwarnings("ignore")
# Initializing tqdm for pandas
tqdm.pandas()
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
print([x.name for x in local_device_protos if x.device_type == 'GPU'])
np.random.seed(0)
# Get the kinds of ids associated with each tuple
def update_ids(x):
kinds_of_ids = set()
for item in x:
kinds_of_ids.add(item[0])
return kinds_of_ids
##################################################################################################
# ##################################################################################################
# ## Newspaper data ##
## Get the top 150 sections which we got from training the 2.7 million citations
largest_sections =
|
pd.read_csv('/dlabdata1/harshdee/largest_sections.csv', header=None)
|
pandas.read_csv
|
from rest_framework.views import APIView
from rest_framework.response import Response
from django.core import serializers
from django.contrib.postgres.search import SearchQuery, SearchVector
from django.db.models import Q
from rest_framework import status
from . import models
import pandas as pd
import numpy as np
import json, math, pickle, collections
from sklearn.tree import DecisionTreeClassifier, export_graphviz
from sklearn.cluster import AgglomerativeClustering
from sklearn.model_selection import train_test_split, cross_val_score, cross_validate
from sklearn import preprocessing
from sklearn.inspection import partial_dependence, plot_partial_dependence
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics import accuracy_score
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.preprocessing import normalize
from collections import Counter
from io import StringIO
import time, ast
from static.models import dl
# A tweet as json
'''
{'grp': '0', 'content': 'truck control...', 'screen_name': 'feedthebuzz',
'valence': 0.333333333, 'valence_seq': 'terror attack kills scores in nice',
'valence_seq_rank': 15, 'valence_pred': 0.161331654, 'valence_grp_pred': 0,
'dominance': 0.270833333, 'dominance_seq': 'terror attack kills scores in', 'dominance_seq_rank': 15, 'dominance_pred': 0.299620539, 'dominance_grp_pred': 0,
'care': 2, 'care_seq': 'terror attack kills scores in nice', 'care_seq_rank': 13, 'care_pred': 2, 'care_grp_pred': 0, 'care_prob': 4.848002434,
'fairness': 1, 'fairness_seq': 'terror attack kills', 'fairness_seq_rank': 3, 'fairness_pred': 2, 'fairness_grp_pred': 0, 'fairness_prob': 1.320369363,
'tweet_id': 0
}
'''
def save_model(model, model_id):
file_name = './app/static/models/' + model_id
file_name = file_name + '.pkl'
with open(file_name, 'wb') as f:
pickle.dump(model, f)
f.close()
def load_model(model_id):
file_name = './app/static/models/' + model_id + '.pkl'
model = ''
with open(file_name, 'rb') as f:
unpickler = pickle.Unpickler(f)
model = unpickler.load()
f.close()
return model
def get_rules(dtc, df):
rules_list = []
values_path = []
values = dtc.tree_.value
def RevTraverseTree(tree, node, rules, pathValues):
'''
Traverase an skl decision tree from a node (presumably a leaf node)
up to the top, building the decision rules. The rules should be
input as an empty list, which will be modified in place. The result
is a nested list of tuples: (feature, direction (left=-1), threshold).
The "tree" is a nested list of simplified tree attributes:
[split feature, split threshold, left node, right node]
'''
# now find the node as either a left or right child of something
# first try to find it as a left node
try:
prevnode = tree[2].index(node)
leftright = '<='
pathValues.append(values[prevnode])
except ValueError:
# failed, so find it as a right node - if this also causes an exception, something's really f'd up
prevnode = tree[3].index(node)
leftright = '>'
pathValues.append(values[prevnode])
# now let's get the rule that caused prevnode to -> node
p1 = df.columns[tree[0][prevnode]]
p2 = tree[1][prevnode]
rules.append(str(p1) + ' ' + leftright + ' ' + str(p2))
# if we've not yet reached the top, go up the tree one more step
if prevnode != 0:
RevTraverseTree(tree, prevnode, rules, pathValues)
# get the nodes which are leaves
leaves = dtc.tree_.children_left == -1
leaves = np.arange(0,dtc.tree_.node_count)[leaves]
# build a simpler tree as a nested list: [split feature, split threshold, left node, right node]
thistree = [dtc.tree_.feature.tolist()]
thistree.append(dtc.tree_.threshold.tolist())
thistree.append(dtc.tree_.children_left.tolist())
thistree.append(dtc.tree_.children_right.tolist())
# get the decision rules for each leaf node & apply them
for (ind,nod) in enumerate(leaves):
# get the decision rules
rules = []
pathValues = []
RevTraverseTree(thistree, nod, rules, pathValues)
pathValues.insert(0, values[nod])
pathValues = list(reversed(pathValues))
rules = list(reversed(rules))
rules_list.append(rules)
values_path.append(pathValues)
return (rules_list, values_path, leaves)
# For the initial run
class LoadData(APIView):
def get(self, request, format=None):
tweet_objects = models.Tweet.objects.all()
# serializer return string, so convert it to list with eval()
tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
tweets_json = []
for tweet in tweet_objects_json:
tweet_json = tweet['fields']
tweet_json.update({ 'tweet_id': str(tweet['pk']) })
tweets_json.append(tweet_json)
return Response(tweets_json)
class LoadUsers(APIView):
def get(self, request, format=None):
users_objects = models.User.objects.all()
# serializer return string, so convert it to list with eval()
users_objects_json = eval(serializers.serialize('json', users_objects))
users_json = []
for user in users_objects_json:
user_json = user['fields']
user_json.update({ 'screen_name': user['pk'] })
users_json.append(user_json)
return Response(users_json)
class LoadWords(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
group_objs = request_json['groups']
tweet_objects = models.Tweet.objects.all()
# serializer return string, so convert it to list with eval()
tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
groups = [ group_obj['idx'] for group_obj in group_objs ]
tweets_json = []
word_tokens = [] # All importance word apperances from all second-level features
for tweet_idx, tweet in enumerate(tweet_objects_json):
tweet_json = tweet['fields']
tweet_json.update({ 'tweet_id': tweet['pk'] })
tweets_json.append(tweet_json)
word_tokens.append({ 'word': tweet_json['valence_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['dominance_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['fairness_seq'], 'group': tweet_json['grp'] })
word_tokens.append({ 'word': tweet_json['care_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['loyalty_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['authority_seq'], 'group': tweet_json['grp'] })
# word_tokens.append({ 'word': tweet_json['purity_seq'], 'group': tweet_json['grp'] })
# Orgainze word tokens as unique words and their frequencies
word_count_dict = {}
for word_dict in word_tokens:
if word_dict['word'] in word_count_dict.keys():
word_count_dict[word_dict['word']][word_dict['group']] += 1
word_count_dict[word_dict['word']]['count_total'] += 1
else:
word_count_dict[word_dict['word']] = {}
word_count_dict[word_dict['word']]['count_total'] = 0
for group in groups: # Create keys for all groups
word_count_dict[word_dict['word']][str(group)] = 0
#word_count_dict = dict(Counter(word_tokens)) # { 'dog': 2, 'cat': 1, ... }
df_word_count = pd.DataFrame()
df_word_list = pd.DataFrame(list(word_count_dict.keys()), columns=['word'])
df_word_count_per_group = pd.DataFrame.from_dict(list(word_count_dict.values()))
df_word_count = pd.concat([ df_word_list, df_word_count_per_group ], axis=1)
df_word_count['word'] = df_word_count['word'].map(lambda x: x.encode('unicode-escape').decode('utf-8'))
# Filter out words with threshold
df_filtered_word_count = df_word_count.loc[df_word_count['count_total'] > 10]
return Response(df_filtered_word_count.to_dict(orient='records')) # [{ 'word': 'dog', 'count': 2 }, { ... }, ...]
# For the global interpretability,
class SearchTweets(APIView):
def get(self, request, format=None):
pass
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
keywords = request_json['searchKeyword'].split(' ')
content_q = Q()
for keyword in keywords:
content_q &= Q(content__contains=keyword)
retrieved_tweet_objects = models.Tweet.objects.filter(content_q)
tweet_objects_json = eval(serializers.serialize('json', retrieved_tweet_objects))
tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
return Response(tweets_json)
class RunDecisionTree(APIView):
def get(self, request, format=None):
pass
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
feature_objs = request_json['selectedFeatures']
features = [feature['key'] for feature in feature_objs]
tweets = request_json['tweets']
# tweet_objects = models.Tweet.objects.all()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects)) # serializer return string, so convert it to list with eval()
# tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
df_tweets = pd.DataFrame(tweets)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
y = lb.fit_transform(df_tweets['group'].astype(str)) # con: 0, lib: 1
y = np.ravel(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
if len(feature_objs) == 8: # if all features are selected, just load the saved model
clf = load_model('dt_all')
else:
clf = DecisionTreeClassifier(max_depth=9, random_state=42)
tree = clf.fit(X_train, y_train)
feature_imps = clf.feature_importances_
y_pred_binary = clf.predict(X)
y_pred_prob = clf.predict_proba(X)
y_pred_string = lb.inverse_transform(y_pred_binary)
df_tweets['pred'] = y_pred_string
df_tweets['prob'] = [probs[1] for probs in y_pred_prob] # Extract the prob of tweet being liberal
y_pred_for_test = clf.predict(X_test)
accuracy = accuracy_score(y_test, y_pred_for_test)
scores = cross_validate(clf, X, y, cv=10)['test_score']
save_model(clf, 'dt_all')
return Response({
'modelId': 'dt_all',
'tweets': df_tweets.to_json(orient='records'),
'features': features,
'accuracy': accuracy,
'featureImps': feature_imps
})
# class RunClustering(APIView):
# def get(self, request, format=None):
# selected_features = ['valence', 'dominance', 'care', 'fairness']
# tweet_objects = models.Tweet.objects.all()
# # serializer return string, so convert it to list with eval()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects))
# tweets_json = [tweet['fields'] for tweet in tweet_objects_json]
# df_tweets = pd.DataFrame(tweets_json)
# # Clustering all together
# df_tweets_selected = df_tweets[selected_features]
# fit_cls = AgglomerativeClustering(n_clusters=10).fit(df_tweets_selected)
# cls_labels = fit_cls.labels_
# df_tweets['clusterId'] = cls_labels
# df_tweets_by_cluster = df_tweets.groupby(['clusterId'])
# num_tweets_per_group = df_tweets_by_cluster.size()
# df_group_ratio = df_tweets_by_cluster.agg({
# 'grp': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
# }).rename(columns={'grp': 'group_lib_ratio'})
# # Clustering per each goal's features
# goals_features = [
# { 'goal': 'emotion', 'features': ['valence', 'dominance'] },
# { 'goal': 'moral', 'features': ['care', 'fairness'] }
# ]
# clusters_per_goals = []
# for goal_features in goals_features:
# goal = goal_features['goal']
# df_tweets_per_goal = df_tweets_selected[goal_feature['features']]
# fit_cls = AgglomerativeClustering(n_clusters=4).fit(df_tweets_selected)
# cls_labels = fit_cls.labels_
# df_tweets_per_goal['clusterIdFor' + capitalize(goal)] = cls_labels
# df_clusters_per_goal = df_tweets_per_goal.agg({
# 'grp': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
# }).rename(columns={'grp': 'group_lib_ratio'})
# clusters_per_goal = {
# 'goal': 'emotion',
# 'clusters': df_clusters_per_goal.to_json(orient='records')
# }
# clusters_per_goal.append(clusters_per_goal)
# # Save all results for clustering-all
# df_clusters = pd.DataFrame({
# 'clusterId': list(df_tweets_by_cluster.groups),
# 'numTweets': num_tweets_per_group,
# 'groupRatio': df_group_ratio['group_lib_ratio'],
# 'pdpValue': 0.2
# # 'tweetIds': tweet_ids_per_cluster_list
# })
# cluster_ids = cls_labels
# return Response({
# 'clusterIdsForTweets': cluster_ids,
# 'clusters': df_clusters.to_json(orient='records'),
# 'clustersPerGoal': clusters_per_goal
# })
class CalculatePartialDependence(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
model_id = request_json['currentModelInfo']['id']
tweets = request_json['tweets']
feature_objs = request_json['features']
features = [feature['key'] for feature in feature_objs]
df_tweets = pd.DataFrame(tweets)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
y = lb.fit_transform(df_tweets['group'].astype(str))
y = np.ravel(y)
model = load_model(model_id)
pdp_values_list = {}
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X, [feature_idx], percentiles=(0, 1)) # 0 is the selected feature index
pdp_values_list.append({
'feature': feature,
'values': pd.DataFrame({ 'pdpValue': pdp_values, 'featureValue': feature_values }).to_json(orient='index')
})
# performance
return Response({
'modelId': model,
'pdpValues': pdp_values_list
})
class RunClusteringAndPartialDependenceForClusters(APIView):
def post(self, request, format=None):
request_json = json.loads(request.body.decode(encoding='UTF-8'))
model_id = request_json['modelId']
feature_objs = request_json['features']
features = [feature['key'] for feature in feature_objs]
tweets = request_json['tweets']
groups = request_json['groups']
# tweet_objects = models.Tweet.objects.all()
# tweet_objects_json = eval(serializers.serialize('json', tweet_objects)) # serializer return string, so convert it to list with eval()
# tweets_json = [ tweet['fields'] for tweet in tweet_objects_json ]
df_tweets = pd.DataFrame(tweets)
df_tweets_selected = df_tweets[features]
# Run clustering
fit_cls = AgglomerativeClustering(n_clusters=10).fit(df_tweets_selected)
cls_labels = fit_cls.labels_
df_tweets['clusterId'] = cls_labels
df_tweets_by_cluster = df_tweets.groupby(['clusterId'])
num_tweets_per_group = df_tweets_by_cluster.size()
df_group_ratio = df_tweets_by_cluster.agg({
'group': lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100
}).rename(columns={'group': 'group_lib_ratio'}) # '1': lib
# Clustering per each goal's features
goals_features = [
{ 'goal': 'emotion', 'features': ['valence', 'dominance'] },
{ 'goal': 'moral', 'features': ['care', 'fairness', 'loyalty', 'authority', 'purity'] }
]
clusters_per_goals = []
for goal_features in goals_features:
goal = goal_features['goal']
features_in_goal = goal_features['features']
df_tweets_per_goal = df_tweets[goal_features['features'] + ['group']]
fit_cls = AgglomerativeClustering(n_clusters=4).fit(df_tweets_per_goal)
cls_labels_for_goal = fit_cls.labels_
df_tweets_per_goal['clusterId'] = cls_labels_for_goal
df_tweets_per_goal_by_cluster = df_tweets_per_goal.groupby(['clusterId'])
# Define aggregated functions
agg_dict = {}
agg_dict['group'] = lambda x: math.ceil((x.loc[x == '1'].shape[0] / x.shape[0]) * 100) / 100 # group ratio
agg_dict['clusterId'] = lambda x: x.count() / df_tweets.shape[0] # size of cluster (# of tweets)
for feature in features_in_goal: # mean feature values
agg_dict[feature] = lambda x: x.mean()
df_clusters_per_goal = df_tweets_per_goal_by_cluster.agg(agg_dict).rename(columns={
'group': 'group_lib_ratio',
'clusterId': 'countRatio'
})
clusters_per_goal = {
'goal': goal,
'clusters': df_clusters_per_goal.to_dict(orient='records')
}
clusters_per_goals.append(clusters_per_goal)
# Prepare data for partial dependence (PD)
lb = preprocessing.LabelBinarizer()
X = df_tweets[features]
X_for_groups = []
for group_idx, group in enumerate(groups):
X_group = X.loc[df_tweets['group'] == str(group_idx)]
X_for_groups.append(X_group)
y = lb.fit_transform(df_tweets['group'].astype(str))
y = np.ravel(y)
#model = load_model(model_id)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
model = DecisionTreeClassifier(random_state=20)
tree = model.fit(X_train, y_train)
# Calculate PD-all
pdp_values_for_all = []
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X, [feature_idx], percentiles=(0, 1)) # 0 is the selected feature index
pdp_values_json = pd.DataFrame({ 'pdpValue': pdp_values[0], 'featureValue': feature_values[0] }).to_dict(orient='records')
pdp_values_for_all.append({ 'feature': feature, 'values': pdp_values_json })
# Calculate PD-per-group
pdp_values_for_groups = []
for group_idx, group in enumerate(groups):
pdp_values_for_features = []
for feature_idx, feature in enumerate(features):
pdp_values, feature_values = partial_dependence(model, X_for_groups[group_idx], [feature_idx], percentiles=(0, 1))
pdp_values_for_group = pdp_values[0]
# Do 1 - (probability) if the group is not true class (since probability is possibility of being the group 1 (blue team))
if group_idx == 0:
pdp_values_for_group = [ 1- pdp_value for pdp_value in pdp_values_for_group ]
pdp_values_json = pd.DataFrame({ 'pdpValue': pdp_values_for_group, 'featureValue': feature_values[0] }).to_dict(orient='records')
pdp_values_for_features.append({ 'feature': feature, 'values': pdp_values_json })
pdp_values_for_groups.append({ 'group': group, 'valuesForFeatures': pdp_values_for_features })
# Calculate PD-per-clusters (for all and for groups)
pdp_values_for_cls = []
pdp_values_for_cls_and_groups = []
instances_for_clusters = []
# df_tweets_by_cluster.to_csv('./app/static/df_tweets_by_cluster.csv')
for cl_idx in df_tweets_by_cluster.groups.keys():
# Prepare data for PD per cluster
indexes = df_tweets_by_cluster.groups[cl_idx]
df_tweets_in_cluster = df_tweets.loc[indexes]
X_cl = df_tweets_in_cluster[features]
X_for_groups = []
for group_idx, group in enumerate(groups):
X_group = X_cl.loc[df_tweets['group'] == str(group_idx)]
X_for_groups.append(X_group)
# for all
pdp_values_for_features = []
for feature_idx, feature in enumerate(features):
pdp_values_cl, feature_values_cl = partial_dependence(model, X_cl, [feature_idx], percentiles=(0, 1)) # 0 is the selected feature index
pdp_values_cl_json =
|
pd.DataFrame({ 'pdpValue': pdp_values_cl[0], 'featureValue': feature_values_cl[0] })
|
pandas.DataFrame
|
"""
Module containing functions and classes related to Spectra calculation and
manipulation
Spectra are calculated from the windowed, decimated time data. The inbuilt
Fourier transform implementation is inspired by the implementation of the
scipy stft function.
"""
from loguru import logger
from pathlib import Path
from typing import Union, Tuple, Dict, List, Any, Optional
from pydantic import PositiveInt
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import plotly.express as px
from resistics.common import ResisticsData, ResisticsProcess, History
from resistics.common import ResisticsWriter, Metadata, WriteableMetadata
from resistics.sampling import HighResDateTime
from resistics.time import ChanMetadata
from resistics.decimate import DecimationParameters
from resistics.window import WindowedData, WindowedLevelMetadata
class SpectraLevelMetadata(Metadata):
"""Metadata for spectra of a windowed decimation level"""
fs: float
"""The sampling frequency of the decimation level"""
n_wins: int
"""The number of windows"""
win_size: PositiveInt
"""The window size in samples"""
olap_size: PositiveInt
"""The overlap size in samples"""
index_offset: int
"""The global window offset for local window 0"""
n_freqs: int
"""The number of frequencies in the frequency data"""
freqs: List[float]
"""List of frequencies"""
@property
def nyquist(self) -> float:
"""Get the nyquist frequency"""
return self.fs / 2
class SpectraMetadata(WriteableMetadata):
"""Metadata for spectra data"""
fs: List[float]
chans: List[str]
n_chans: Optional[int] = None
n_levels: int
first_time: HighResDateTime
last_time: HighResDateTime
system: str = ""
serial: str = ""
wgs84_latitude: float = -999.0
wgs84_longitude: float = -999.0
easting: float = -999.0
northing: float = -999.0
elevation: float = -999.0
chans_metadata: Dict[str, ChanMetadata]
levels_metadata: List[SpectraLevelMetadata]
ref_time: HighResDateTime
history: History = History()
class Config:
extra = "ignore"
class SpectraData(ResisticsData):
"""
Class for holding spectra data
The spectra data is stored in the class as a dictionary mapping decimation
level to numpy array. The shape of the array for each decimation level is:
n_wins x n_chans x n_freqs
"""
def __init__(self, metadata: SpectraMetadata, data: Dict[int, np.ndarray]):
"""
Initialise spectra data
Parameters
----------
metadata : SpectraMetadata
Metadata for the spectra data
data : Dict[int, np.ndarray]
Dictionary of data, one entry for each evaluation level
"""
logger.debug(f"Creating SpectraData with data type {data[0].dtype}")
self.metadata = metadata
self.data = data
def get_level(self, level: int) -> np.ndarray:
"""Get the spectra data for a decimation level"""
if level >= self.metadata.n_levels:
raise ValueError(f"Level {level} not <= max {self.metadata.n_levels - 1}")
return self.data[level]
def get_chan(self, level: int, chan: str) -> np.ndarray:
"""Get the channel spectra data for a decimation level"""
from resistics.errors import ChannelNotFoundError
if chan not in self.metadata.chans:
raise ChannelNotFoundError(chan, self.metadata.chans)
idx = self.metadata.chans.index(chan)
return self.data[level][..., idx, :]
def get_chans(self, level: int, chans: List[str]) -> np.ndarray:
"""Get the channels spectra data for a decimation level"""
from resistics.errors import ChannelNotFoundError
for chan in chans:
if chan not in self.metadata.chans:
raise ChannelNotFoundError(chan, self.metadata.chans)
indices = [self.metadata.chans.index(chan) for chan in chans]
return self.data[level][..., indices, :]
def get_freq(self, level: int, idx: int) -> np.ndarray:
"""Get the spectra data at a frequency index for a decimation level"""
n_freqs = self.metadata.levels_metadata[level].n_freqs
if idx < 0 or idx >= n_freqs:
raise ValueError(f"Freq. index {idx} not 0 <= idx < {n_freqs}")
return np.squeeze(self.data[level][..., idx])
def get_mag_phs(
self, level: int, unwrap: bool = False
) -> Tuple[np.ndarray, np.ndarray]:
"""Get magnitude and phase for a decimation level"""
spec = self.data[level]
if unwrap:
return np.absolute(spec), np.unwrap(np.angle(spec))
return np.absolute(spec), np.angle(spec)
def get_timestamps(self, level: int) -> pd.DatetimeIndex:
"""
Get the start time of each window
Note that this does not use high resolution timestamps
Parameters
----------
level : int
The decimation level
Returns
-------
pd.DatetimeIndex
The starts of each window
Raises
------
ValueError
If the level is out of range
"""
from resistics.window import get_win_starts
if level >= self.metadata.n_levels:
raise ValueError(f"Level {level} not <= max {self.metadata.n_levels - 1}")
level_metadata = self.metadata.levels_metadata[level]
return get_win_starts(
self.metadata.ref_time,
level_metadata.win_size,
level_metadata.olap_size,
level_metadata.fs,
level_metadata.n_wins,
level_metadata.index_offset,
)
def plot(self, max_pts: Optional[int] = 10_000) -> go.Figure:
"""
Stack spectra data for all decimation levels
Parameters
----------
max_pts : Optional[int], optional
The maximum number of points in any individual plot before applying
lttbc downsampling, by default 10_000. If set to None, no
downsampling will be applied.
Returns
-------
go.Figure
The plotly figure
"""
from resistics.plot import get_spectra_stack_fig
y_labels = {x: "Magnitude" for x in self.metadata.chans}
fig = get_spectra_stack_fig(self.metadata.chans, y_labels)
colors = iter(px.colors.qualitative.Plotly)
for ilevel in range(self.metadata.n_levels):
level_metadata = self.metadata.levels_metadata[ilevel]
freqs = np.array(level_metadata.freqs)
stack = np.mean(np.absolute(self.data[ilevel]), axis=0)
legend = f"{ilevel} - {level_metadata.fs:.4f} Hz"
fig = self._add_stack_data(
fig, freqs, stack, legend, color=next(colors), max_pts=max_pts
)
return fig
def plot_level_stack(
self,
level: int,
max_pts: int = 10_000,
grouping: Optional[str] = None,
offset: str = "0h",
) -> go.Figure:
"""
Stack the spectra for a decimation level with optional time grouping
Parameters
----------
level : int
The decimation level
max_pts : int, optional
The maximum number of points in any individual plot before applying
lttbc downsampling, by default 10_000
grouping : Optional[str], optional
A grouping interval as a pandas freq string, by default None
offset : str, optional
A time offset to add to the grouping, by default "0h". For instance,
to plot night time and day time spectra, set grouping to "12h" and
offset to "6h"
Returns
-------
go.Figure
The plotly figure
"""
from resistics.plot import get_spectra_stack_fig
if grouping is None:
first_date = pd.Timestamp(self.metadata.first_time.isoformat()).floor("D")
last_date = pd.Timestamp(self.metadata.last_time.isoformat()).ceil("D")
grouping = last_date - first_date
level_metadata = self.metadata.levels_metadata[level]
df = pd.DataFrame(
data=np.arange(level_metadata.n_wins),
index=self.get_timestamps(level),
columns=["local"],
)
# group by the grouping frequency, iterate over the groups and plot
freqs = np.array(level_metadata.freqs)
y_labels = {x: "Magnitude" for x in self.metadata.chans}
fig = get_spectra_stack_fig(self.metadata.chans, y_labels)
colors = iter(px.colors.qualitative.Plotly)
for idx, group in df.groupby(
|
pd.Grouper(freq=grouping, offset=offset)
|
pandas.Grouper
|
# coding: utf-8
import pandas as pd
|
pd.set_option("display.max_columns", 500)
|
pandas.set_option
|
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import logging
df = pd.read_csv('data/Output/plink/2-POP/PCA_results.eigenvec', delim_whitespace=True, header=None)
cols = ['FID', 'IID']
for i in range(1, 21):
cols.append(f'PC{i}')
df.columns = cols
colorcode = pd.read_csv('data/Output/plink/2-POP/popfile.txt', delim_whitespace=True)
df = pd.merge(df, colorcode, on=['FID', 'IID'])
ax = sns.lmplot('PC1', # Horizontal axis
'PC2', # Vertical axis
hue = 'SUPERPOP', # color variable
data=df, # Data source
fit_reg=False, # Don't fix a regression line
height = 10,
aspect =2 ) # height and dimension
plt.title('PCA: Projection on 1000Genomes')
# Set x-axis label
plt.xlabel(f'PC1')
# Set y-axis label
plt.ylabel('PC2')
plt.savefig('data/PCA.png', dpi=240, bbox_inches='tight')
def label_point(x, y, val, ax):
a =
|
pd.concat({'x': x, 'y': y, 'val': val}, axis=1)
|
pandas.concat
|
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import os
from torchvision import transforms
import pickle as pkl
import sklearn.manifold as manifold
import sklearn.metrics as skmetrics
from itertools import cycle
""" read and plot logged data """
def load_log_data(filename):
data = None
with open(filename, 'r') as f:
try:
data =
|
pd.read_csv(f, sep='\t', index_col=False)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: adjust_api
Description :
Author : developer
date: 2020/5/6
-------------------------------------------------
Change Activity:
2020/5/6:
-------------------------------------------------
"""
__author__ = 'developer'
import configparser
import datetime
from aiohttp import ClientSession
import asyncio
import aiohttp
import csv
import xlwt
import pandas as pd
import numpy as np
import os
import shutil
headers = {
'content-type': 'application/json',
'Accept': 'text/csv'
}
base_url = 'https://api.adjust.com/kpis/v1'
csv_dir = './csv_event'
output_dir = './output'
output_file = '{}/output_event.xlsx'.format(output_dir)
def saveData(result: dict):
if os.path.exists(csv_dir):
shutil.rmtree(csv_dir)
os.mkdir(csv_dir)
final_excel_data = {}
for key in result.keys():
res = result.get(key)
data = res.split('\n')
titles = data[0].split(',')
data = [[x for x in y.split(',')] for y in data[1:]]
f = open('{}/{}.csv'.format(csv_dir, key), "w+", newline='', encoding='utf-8')
writer = csv.writer(f)
writer.writerows([titles])
writer.writerows(data)
f.close()
data = pd.DataFrame(pd.read_csv('{}/{}.csv'.format(csv_dir, key)))
final_excel_data[key] = getExcelData(key, data)
writeExcelByPandas(final_excel_data)
def testSave():
final_excel_data = {}
for key in params.keys():
data = pd.DataFrame(pd.read_csv('{}/{}.csv'.format(csv_dir, key)))
final_excel_data[key] = getExcelData(key, data)
writeExcelByPandas(final_excel_data)
def mergeData(data: list):
merged_data = data[0]
i = 0
for ele in data:
if i == 0:
i += 1
continue
merged_data =
|
pd.merge(merged_data, ele, how="outer", on='tracker_name')
|
pandas.merge
|
import re
import pandas as pd
from datetime import date, datetime
import configparser
# Read local file `config.ini`.
config = configparser.ConfigParser()
config.read('config.ini')
def mmyy_make_iterable_from_to(mmyy_from, mmyy_to=None):
"""
Create a collection of mmyy formatted dates lying between a from and to date value
:param mmyy_from: from value (mmyy)
:param mmyy_to: to value (mmyy)
:return: Collection of dates
"""
if not(mmyy_valid_date(mmyy_from)):
return []
if mmyy_to:
if mmyy_to.strip() == "":
mmyy_to = None
if mmyy_to and not(mmyy_valid_date(mmyy_to)):
return []
if mmyy_to and not(mmyy_from == mmyy_to) and not(mmyy_to_older_then_from(mmyy_from, mmyy_to)):
return []
# check if no older date than the current is passed
today = date.today()
mm = f"{today.month}".zfill(2)
yy = f"{today.year}"[2:]
yymm_today = int(f"{yy}{mm}")
if (mmyy_to and int(f"{mmyy_to[2:]}{mmyy_to[:2]}") > yymm_today) or \
(int(f"{mmyy_from[2:]}{mmyy_from[:2]}") > yymm_today):
return []
dt_from = datetime.strptime(mmyy_from, "%m%y")
if not mmyy_to:
dt_to = datetime.today()
dt_to = dt_to +
|
pd.offsets.MonthBegin(-1)
|
pandas.offsets.MonthBegin
|
"""
Reader for Axiom databases.
"""
import hashlib
import logging
import os
import cf_xarray
import fsspec
import intake
import numpy as np
import pandas as pd
import requests
import xarray as xr
import ocean_data_gateway as odg
from ocean_data_gateway import Reader
logger = logging.getLogger(__name__)
# this can be queried with
# search.AxdsReader.reader
reader = "axds"
class AxdsReader(Reader):
"""
This class searches Axiom databases for types `platforms2`, which
are like gliders, and `layer_group`, which are like grids and models.
Attributes
----------
parallel: boolean
If True, run with simple parallelization using `multiprocessing`.
If False, run serially.
catalog_name: string
Input catalog path if you want to use an existing catalog.
axds_type: string
Which Axiom database type to search for.
* "platform2" (default): gliders, drifters; result in pandas DataFrames
* "layer_group": grids, model output; result in xarray Datasets
url_search_base: string
Base string of search url
url_docs_base: string
Base string of url for a known dataset_id
search_headers: dict
Required for reading in the request
url_axds_type: string
Url for the given `axds_type`.
name: string
f'axds_{axds_type}' so 'axds_platform2' or 'axds_layer_group'
reader: string
Reader name: AxdsReader
"""
def __init__(
self, parallel=True, catalog_name=None, axds_type="platform2", filetype="netcdf"
):
"""
Parameters
----------
parallel: boolean, optional
If True, run with simple parallelization using `multiprocessing`.
If False, run serially.
catalog_name: string, optional
Input catalog path if you want to use an existing catalog.
axds_type: string, optional
Which Axiom database type to search for.
* "platform2" (default): gliders, drifters; result in pandas DataFrames
* "layer_group": grids, model output; result in xarray Datasets
"""
self.parallel = parallel
# search Axiom database, version 2
self.url_search_base = "https://search.axds.co/v2/search?portalId=-1&page=1&pageSize=10000&verbose=true"
self.url_docs_base = "https://search.axds.co/v2/docs?verbose=true"
# this is the json being returned from the request
self.search_headers = {"Accept": "application/json"}
self.approach = None
if catalog_name is None:
name = f"{pd.Timestamp.now().isoformat()}"
hash_name = hashlib.sha256(name.encode()).hexdigest()[:7]
self.catalog_name = odg.catalogs_path.joinpath(f"catalog_{hash_name}.yml")
else:
self.catalog_name = catalog_name
# if catalog_name already exists, read it in to save time
self.catalog
# can be 'platform2' or 'layer_group'
assert axds_type in [
"platform2",
"layer_group",
], 'variable `axds_type` must be "platform2" or "layer_group"'
self.axds_type = axds_type
self.url_axds_type = f"{self.url_search_base}&type={self.axds_type}"
self.name = f"axds_{axds_type}"
self.reader = "AxdsReader"
if self.axds_type == "platform2":
self.data_type = "csv"
elif self.axds_type == "layer_group":
self.data_type = "nc"
# name
self.name = f"axds_{axds_type}"
self.reader = "AxdsReader"
self.filetype = filetype
self.store = dict()
def __getitem__(self, key):
"""Redefinition of dict-like behavior.
This enables user to use syntax `reader[dataset_id]` to read in and
save dataset into the object.
Parameters
----------
key: str
dataset_id for a dataset that is available in the search/reader
object.
Returns
-------
xarray Dataset of the data associated with key
"""
returned_data = self.data_by_dataset(key)
# returned_data = self._return_data(key)
self.__setitem__(key, returned_data)
return returned_data
def url_query(self, query):
"""url modification to add query field.
Parameters
----------
query: string
String to query for. Can be multiple words.
Returns
-------
Modification for url to add query field.
"""
return f"&query={query}"
def url_variable(self, variable):
"""url modification to add variable search.
Parameters
----------
variable: string
String to search for.
Returns
-------
Modification for url to add variable search.
Notes
-----
This variable search is specifically by parameter group and
only works for `axds_type='platform2'`.
For `axds_type='layer_group'`, use `url_query` with the variable name.
"""
return f"&tag=Parameter+Group:{variable}"
def url_region(self):
"""url modification to add spatial search box.
Returns
-------
Modification for url to add lon/lat filtering.
Notes
-----
Uses the `kw` dictionary already stored in the class object
to access the spatial limits of the box.
"""
url_add_box = (
f'&geom={{"type":"Polygon","coordinates":[[[{self.kw["min_lon"]},{self.kw["min_lat"]}],'
+ f'[{self.kw["max_lon"]},{self.kw["min_lat"]}],'
+ f'[{self.kw["max_lon"]},{self.kw["max_lat"]}],'
+ f'[{self.kw["min_lon"]},{self.kw["max_lat"]}],'
+ f'[{self.kw["min_lon"]},{self.kw["min_lat"]}]]]}}'
)
return f"{url_add_box}"
def url_time(self):
"""url modification to add time filtering.
Returns
-------
Modification for url to add time filtering.
Notes
-----
Uses the `kw` dictionary already stored in the class object
to access the time limits of the search.
"""
# convert input datetime to seconds since 1970
startDateTime = (
pd.Timestamp(self.kw["min_time"]).tz_localize("UTC")
- pd.Timestamp("1970-01-01 00:00").tz_localize("UTC")
) // pd.Timedelta("1s")
endDateTime = (
pd.Timestamp(self.kw["max_time"]).tz_localize("UTC")
-
|
pd.Timestamp("1970-01-01 00:00")
|
pandas.Timestamp
|
import os
import copy
import pytest
import numpy as np
import pandas as pd
import pyarrow as pa
from pyarrow import feather as pf
from pyarrow import parquet as pq
from time_series_transform.io.base import io_base
from time_series_transform.io.numpy import (
from_numpy,
to_numpy
)
from time_series_transform.io.pandas import (
from_pandas,
to_pandas
)
from time_series_transform.io.arrow import (
from_arrow_record_batch,
from_arrow_table,
to_arrow_record_batch,
to_arrow_table
)
from time_series_transform.transform_core_api.base import (
Time_Series_Data,
Time_Series_Data_Collection
)
from time_series_transform.io.parquet import (
from_parquet,
to_parquet
)
from time_series_transform.io.feather import (
from_feather,
to_feather
)
@pytest.fixture(scope = 'class')
def dictList_single():
return {
'time': [1, 2],
'data': [1, 2]
}
@pytest.fixture(scope = 'class')
def dictList_collection():
return {
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_single_expandTime():
return {
'data_1':[1],
'data_2':[2]
}
@pytest.fixture(scope = 'class')
def expect_single_seperateLabel():
return [{
'time': [1, 2],
'data': [1, 2]
},
{
'data_label': [1, 2]
}]
@pytest.fixture(scope = 'class')
def expect_collection_seperateLabel():
return [{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
{
'data_label':[1,2,1,2]
}
]
@pytest.fixture(scope = 'class')
def expect_collection_expandTime():
return {
'pad': {
'data_1':[1,1],
'data_2':[2,np.nan],
'data_3':[np.nan,2],
'category':[1,2]
},
'remove': {
'data_1':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandCategory():
return {
'pad': {
'time':[1,2,3],
'data_1':[1,2,np.nan],
'data_2':[1,np.nan,2]
},
'remove': {
'time':[1],
'data_1':[1],
'data_2':[1]
}
}
@pytest.fixture(scope = 'class')
def expect_collection_expandFull():
return {
'pad': {
'data_1_1':[1],
'data_2_1':[1],
'data_1_2':[2],
'data_2_2':[np.nan],
'data_1_3':[np.nan],
'data_2_3':[2]
},
'remove': {
'data_1_1':[1],
'data_2_1':[1],
}
}
@pytest.fixture(scope = 'class')
def expect_collection_noExpand():
return {
'ignore':{
'time': [1,2,1,3],
'data':[1,2,1,2],
'category':[1,1,2,2]
},
'pad': {
'time': [1,2,3,1,2,3],
'data':[1,2,np.nan,1,np.nan,2],
'category':[1,1,1,2,2,2]
},
'remove': {
'time': [1,1],
'data':[1,1],
'category':[1,2]
}
}
@pytest.fixture(scope = 'class')
def seq_single():
return {
'time':[1,2,3],
'data':[[1,2,3],[11,12,13],[21,22,23]]
}
@pytest.fixture(scope = 'class')
def seq_collection():
return {
'time':[1,2,1,2],
'data':[[1,2],[1,2],[2,2],[2,2]],
'category':[1,1,2,2]
}
@pytest.fixture(scope = 'class')
def expect_seq_collection():
return {
'data_1_1':[[1,2]],
'data_2_1':[[2,2]],
'data_1_2':[[1,2]],
'data_2_2':[[2,2]]
}
class Test_base_io:
def test_base_io_from_single(self, dictList_single,expect_single_expandTime):
ExpandTimeAns = expect_single_expandTime
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(ts, 'time', None)
timeSeries = io.from_single(False)
for i in timeSeries:
assert timeSeries[i].tolist() == data[i]
timeSeries = io.from_single(True)
for i in timeSeries:
assert timeSeries[i] == ExpandTimeAns[i]
def test_base_io_to_single(self, dictList_single):
data = dictList_single
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
io = io_base(data, 'time', None)
assert io.to_single() == ts
def test_base_io_from_collection_expandTime(self, dictList_collection,expect_collection_expandTime):
noChange = dictList_collection
expand = expect_collection_expandTime
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(False,True,'ignore')
timeSeries = io.from_collection(False,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandCategory(self, dictList_collection,expect_collection_expandCategory):
noChange = dictList_collection
expand = expect_collection_expandCategory
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
with pytest.raises(ValueError):
timeSeries = io.from_collection(True,False,'ignore')
timeSeries = io.from_collection(True,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_from_collection_expandFull(self, dictList_collection,expect_collection_expandFull):
noChange = dictList_collection
expand = expect_collection_expandFull
fullExpand = []
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(True,True,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(True,True,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
def test_base_io_to_collection(self, dictList_collection):
dataList = dictList_collection
io = io_base(dataList, 'time', 'category')
testData = io.to_collection()
tsd = Time_Series_Data(dataList,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
assert testData== tsc
def test_base_io_from_collection_no_expand(self,dictList_collection,expect_collection_noExpand):
noChange = dictList_collection
expand = expect_collection_noExpand
data = dictList_collection
ts = Time_Series_Data()
ts = ts.set_time_index(data['time'], 'time')
ts = ts.set_data(data['data'], 'data')
ts = ts.set_data(data['category'],'category')
tsc = Time_Series_Data_Collection(ts,'time','category')
io = io_base(tsc, 'time', 'category')
timeSeries = io.from_collection(False,False,'ignore')
for i in timeSeries:
np.testing.assert_array_equal(timeSeries[i],expand['ignore'][i])
timeSeries = io.from_collection(False,False,'pad')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['pad'][i])
timeSeries = io.from_collection(False,False,'remove')
for i in timeSeries:
np.testing.assert_equal(timeSeries[i],expand['remove'][i])
class Test_Pandas_IO:
def test_from_pandas_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
tsd = Time_Series_Data(data,'time')
testData = from_pandas(df,'time',None)
assert tsd == testData
def test_from_pandas_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = from_pandas(df,'time','category')
assert tsc == testData
def test_to_pandas_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_pandas(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_pandas_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_pandas_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_pandas_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_pandas_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_pandas(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_pandas_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_pandas(tsd,False,False,'ignore',True)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_pandas(tsc,False,False,'ignore',True)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_pandas_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_pandas(tsd,False,False,'ignore',False)
pd.testing.assert_frame_equal(test,df,False)
def test_to_pandas_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_pandas(tsc,False,False,'ignore')
pd.testing.assert_frame_equal(df,test,False)
test = to_pandas(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
class Test_Numpy_IO:
def test_from_numpy_single(self,dictList_single):
data = dictList_single
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
numpydata = pd.DataFrame(dictList_single).values
testData = from_numpy(numpydata,0,None)
assert tsd == testData
def test_from_numpy_collection(self,dictList_collection):
data = dictList_collection
numpyData = pd.DataFrame(data).values
numpyDataDict = pd.DataFrame(pd.DataFrame(data).values).to_dict('list')
testData = from_numpy(numpyData,0,2)
tsd = Time_Series_Data(numpyDataDict,0)
assert testData == Time_Series_Data_Collection(tsd,0,2)
def test_to_numpy_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
numpyData = pd.DataFrame(data).values
expandTime = pd.DataFrame(expect_single_expandTime).values
tsd = Time_Series_Data()
tsd.set_time_index(data['time'],0)
tsd.set_data(data['data'],1)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
)
np.testing.assert_equal(testData,numpyData)
testData = to_numpy(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
)
np.testing.assert_equal(testData,expandTime)
def test_to_numpy_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
results = expect_collection_expandTime
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
results = expect_collection_expandCategory
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='pad'
)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
with pytest.raises(ValueError):
timeSeries = to_numpy(tsc,False,True,'ignore')
def test_to_numpy_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
results = expect_collection_expandFull
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = True,
expandTime = True,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
def test_to_numpy_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
results = expect_collection_noExpand
numpyData = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
pad_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='pad'
)
np.testing.assert_equal(pad_numpy,pd.DataFrame(results['pad']).values)
remove_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='remove'
)
np.testing.assert_equal(remove_numpy,pd.DataFrame(results['remove']).values)
ignore_numpy = to_numpy(
tsc,
expandCategory = False,
expandTime = False,
preprocessType='ignore'
)
np.testing.assert_equal(ignore_numpy,pd.DataFrame(results['ignore']).values)
def test_to_numpy_seperateLabel_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_numpy(tsd,False,False,'ignore',True)
print(x)
print(y)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_seperateLabel_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX).values
expectedY = pd.DataFrame(expectedY).values
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_numpy(tsc,False,False,'ignore',True)
np.testing.assert_equal(x,expectedX)
np.testing.assert_equal(y,expectedY)
def test_to_numpy_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
test = to_numpy(tsd,False,False,'ignore',False)
np.testing.assert_equal(df,test)
def test_to_numpy_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data).values
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_numpy(tsc,False,False,'ignore')
for i in range(len(test)):
if isinstance(test[i][1],np.ndarray):
test[i][1] = test[i][1].tolist()
np.testing.assert_equal(df,test)
test = to_numpy(tsc,True,True,'ignore')
full = pd.DataFrame(expect_seq_collection).values
for i in range(len(test[0])):
if isinstance(test[0][i],np.ndarray):
test[0][i] = test[0][i].tolist()
np.testing.assert_equal(full,test)
class Test_Arrow_IO:
def test_from_arrow_table_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_table_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.Table.from_pandas(df)
testData = from_arrow_table(table,'time','category')
assert tsc == testData
def test_from_arrow_batch_single(self,dictList_single):
data = dictList_single
df = pd.DataFrame(dictList_single)
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time',None)
tsd = Time_Series_Data(data,'time')
assert tsd == testData
def test_from_arrow_batch_collection(self,dictList_collection):
data = dictList_collection
df = pd.DataFrame(dictList_collection)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
table = pa.RecordBatch.from_pandas(df,preserve_index = False)
testData = from_arrow_record_batch(table,'time','category')
assert tsc == testData
def test_to_arrow_table_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_table(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_table_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
tsc = Time_Series_Data_Collection(tsd,'time','category')
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_table_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_table_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_table_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_table(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore'
).to_pandas()
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_table_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_table(tsd,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd = tsd.set_labels([1,2,1,2],'data_label')
tsc = Time_Series_Data_Collection(tsd,'time','category')
x, y = to_arrow_table(tsc,False,False,'ignore',True)
x = x.to_pandas()
y = y.to_pandas()
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_single_sequence(self,seq_single):
data = seq_single
df= pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
test = to_arrow_table(tsd,False,False,'ignore',False).to_pandas()
pd.testing.assert_frame_equal(test,df,False)
def test_to_arrow_table_collection_sequence(self,seq_collection,expect_seq_collection):
data = seq_collection
df = pd.DataFrame(data)
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
test = to_arrow_table(tsc,False,False,'ignore').to_pandas()
pd.testing.assert_frame_equal(df,test,False)
test = to_arrow_table(tsc,True,True,'ignore').to_pandas()
full = pd.DataFrame(expect_seq_collection)
print(test)
print(full)
test = test.reindex(sorted(df.columns), axis=1)
full = full.reindex(sorted(df.columns), axis=1)
pd.testing.assert_frame_equal(test,full,False)
###
def record_batch_to_pandas(self,batchList):
df = None
for i in batchList:
if df is None:
df = i.to_pandas()
continue
df = df.append(i.to_pandas(),ignore_index = True)
return df
def test_to_arrow_batch_single(self,dictList_single,expect_single_expandTime):
data = dictList_single
df = pd.DataFrame(data)
expandTime = pd.DataFrame(expect_single_expandTime)
tsd = Time_Series_Data(data,'time')
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=False,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,df,check_dtype=False)
testData = to_arrow_record_batch(
tsd,
expandCategory= None,
expandTime=True,
preprocessType= None,
max_chunksize = 1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime,check_dtype=False)
def test_to_arrow_batch_collection_expandTime(self,dictList_collection,expect_collection_expandTime):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandTime['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandTime['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,False,True,'ignore')
def test_to_arrow_batch_collection_expandCategory(self,dictList_collection,expect_collection_expandCategory):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandCategory['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandCategory['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
with pytest.raises(ValueError):
timeSeries = to_pandas(tsc,True,False,'ignore')
def test_to_arrow_batch_collection_expandFull(self,dictList_collection,expect_collection_expandFull):
data = dictList_collection
expandTime_pad = pd.DataFrame(expect_collection_expandFull['pad'])
expandTime_remove = pd.DataFrame(expect_collection_expandFull['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= True,
expandTime=True,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
def test_to_arrow_batch_collection_noExpand(self,dictList_collection,expect_collection_noExpand):
data = dictList_collection
expandTime_ignore = pd.DataFrame(expect_collection_noExpand['ignore'])
expandTime_pad = pd.DataFrame(expect_collection_noExpand['pad'])
expandTime_remove = pd.DataFrame(expect_collection_noExpand['remove'])
tsd = Time_Series_Data(data,'time')
tsc = Time_Series_Data_Collection(tsd,'time','category')
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'pad',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_pad,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'remove',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_remove,check_dtype=False)
testData = to_arrow_record_batch(
tsc,
expandCategory= False,
expandTime=False,
preprocessType= 'ignore',
max_chunksize=1
)
testData = self.record_batch_to_pandas(testData)
pd.testing.assert_frame_equal(testData,expandTime_ignore,check_dtype=False)
def test_to_arrow_batch_seperateLabels_single(self,dictList_single,expect_single_seperateLabel):
data = dictList_single
expectedX, expectedY = expect_single_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY = pd.DataFrame(expectedY)
tsd = Time_Series_Data(data,'time')
tsd.set_labels([1,2],'data_label')
x, y = to_arrow_record_batch(tsd,1,False,False,'ignore',True)
x = self.record_batch_to_pandas(x)
y = self.record_batch_to_pandas(y)
print(x)
print(y)
pd.testing.assert_frame_equal(x,expectedX,check_dtype=False)
pd.testing.assert_frame_equal(y,expectedY,check_dtype=False)
def test_to_arrow_table_seperateLabels_collection(self,dictList_collection,expect_collection_seperateLabel):
data = dictList_collection
expectedX, expectedY = expect_collection_seperateLabel
expectedX = pd.DataFrame(expectedX)
expectedY =
|
pd.DataFrame(expectedY)
|
pandas.DataFrame
|
"""Contains a suite of functions for copy number variation"""
import copy
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from rpy2.robjects.packages import importr
import rpy2.robjects as robjects
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
import probe_summary_generator
import mip_functions as mip
import allel
import subprocess
plt.style.use('ggplot')
dnacopy = importr("DNAcopy")
def filter_samples(barcode_counts, settings, sample_threshold,
probe_threshold):
"""Filter a UMI count table based on per sample and per probe thresholds.
First, samples failing average UMI threshold are removed. Then, probes
failing the average UMI count are removed.
"""
filter_level = settings["copyStableLevel"]
filter_values = settings["copyStableValues"]
col = barcode_counts.columns
all_values = col.get_level_values(filter_level)
if filter_values == "none":
filter_values = all_values
indexer = [c in filter_values for c in all_values]
cns_counts = barcode_counts.loc[:, indexer]
cns_medians = cns_counts.median(axis=1)
sample_mask = cns_medians.loc[
cns_medians >= sample_threshold].index.tolist()
probe_medians = barcode_counts.loc[sample_mask].median()
probe_mask = probe_medians.loc[
probe_medians >= probe_threshold].index.tolist()
masked_counts = barcode_counts.loc[sample_mask, probe_mask]
return masked_counts
def sample_normalize(masked_counts, settings):
"""Normalize a UMI count table sample-wise."""
filter_level = settings["copyStableLevel"]
filter_values = settings["copyStableValues"]
col = masked_counts.columns
all_values = col.get_level_values(filter_level)
if filter_values == "none":
filter_values = all_values
indexer = [c in filter_values for c in all_values]
cns_counts = masked_counts.loc[:, indexer]
cns_totals = cns_counts.sum(axis=1)
sample_normalized = masked_counts.div(cns_totals, axis=0)
return sample_normalized
def probe_normalize(sample_normalized, settings):
"""Probe-wise normalize a (sample normalized) count table.
Probe-wise normalize a (sample normalized) count table assuming
the average (median or other specified value) probe represents a certain
copy number. For human samples, for example, assumes the average sample
will have 2 copies of each target.
"""
average_copy_count = int(settings["averageCopyCount"])
norm_percentiles = list(map(float, settings["normalizationPercentiles"]))
copy_counts = sample_normalized.transform(
lambda a: average_copy_count * a/(a.quantile(norm_percentiles).mean()))
return copy_counts
def call_copy_numbers(copy_counts, settings):
"""Call integer copy numbers based on normalized count tables.
Define breakpoints using DNACopy algorithm.
"""
copy_calls = {}
problem_genes = []
try:
diploid_upper = float(settings["upperNormalPloidy"])
diploid_lower = float(settings["lowerNormalPloidy"])
ploidy = int(settings["ploidy"])
except KeyError:
diploid_upper = 2
diploid_lower = 2
ploidy = 2
# Set analysis_level.
analysis_level = settings["copyNumberCallLevel"]
# Set cluster method.
# Options are kmeans and tsne.
cluster_method = settings["cnvClusterMethod"]
n_clusters = int(settings["cnvClusterNumber"])
# segmentation parameters
# number of probes threshold for large genes
large_gene_threshold = int(settings["largeGroupTreshold"])
min_segment_size_large = int(settings["minSegmentSizeLarge"])
min_segment_size_small = int(settings["minSegmentSizeSmall"])
sdundo_large = float(settings["sdUndoLarge"])
sdundo_small = float(settings["sdUndoSmall"])
# Ecludian distance to re-assign a sample to a larger copy state
merge_distance = float(settings["reassignDistance"])
# get analysis units
if analysis_level != "none":
analysis_units = set(copy_counts.columns.get_level_values(
analysis_level))
else:
analysis_units = [None]
# Copy number analysis can and should be performed on data that is
# separated into logical units.
# Most intuitively the analysis unit is the gene group,
# called gene here.
# For example if we have data on two groups such as
# Glycophorins and Alphahemoglobins, copy numbers
# should be called separately on these groups.
# Sometimes it may be useful to analyze on different units,
# when target groups contain multiple gene names, for example.
# The unit will be referred to as gene whether or not it actually
# is a gene.
for g in analysis_units:
# get copy counts for the unit
try:
# drop samples where all values are NA
# drop probes with any NA values
if g is not None:
N = copy_counts.xs(
g, level=analysis_level, axis=1, drop_level=False).dropna(
axis=0, how="all").dropna(axis=1, how="any")
else:
N = copy_counts.dropna(axis=0, how="all").dropna(
axis=1, how="any")
g = "all"
# Cluster samples based on copy counts accross the analysis unit.
if cluster_method == "tsne":
ts = TSNE(init="pca", perplexity=50, random_state=0)
T = ts.fit_transform(N)
# apply kmeans clustering to copy counts for this gene
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(T)
else:
kmeans = KMeans(n_clusters=n_clusters, random_state=0).fit(N)
T = None
copy_calls[g] = {"copy_counts": N, "tsne": T, "kmeans": kmeans}
clusters = copy_calls[g]["kmeans_clusters"] = {}
labels = kmeans.labels_
# next step is to find break points for each
# cluster found in kmeans the algorithm removes
# break points that create segments which are
# not significantly different, based on SD.
# Lower number of SDs seem to work better for
# larger genes. So there will be two different
# undo_SD vaules depending on the size of the gene,
# in terms of # of probes.
if N.shape[1] > large_gene_threshold:
undo_SD = sdundo_large
min_segment_size = min_segment_size_large
else:
undo_SD = sdundo_small
min_segment_size = min_segment_size_small
# find break points for each kmeans cluster center
for i in range(n_clusters):
# create an array of the median of copy numbers
cluster_mask = labels == i
a_list = np.median(N.loc[cluster_mask], axis=0)
# Add a negligible value to the array to get rid of
# zeros (they will lead to an error when taking log)
a_vec = np.array(a_list) + 1e-20
# get log2 values of copy numbers
b_vec = robjects.FloatVector(np.log2(a_vec))
maploc = N.columns.get_level_values(level="begin")
res = dnacopy.segment(
dnacopy.smooth_CNA(
dnacopy.CNA(
genomdat=b_vec, maploc=robjects.IntVector(maploc),
chrom=robjects.StrVector(
["x" for j in range(len(a_list))])),
smooth_region=2),
undo_splits="sdundo", undo_SD=undo_SD, min_width=2)
clusters[i] = res
# Clean up segmentation
# All break points found for gene will be merged into
# a list to be used in determining whether a break point
# should be used.
break_points = []
for clu in clusters:
# Get segmentation results for the cluster
# from DNACopy output
res = clusters[clu]
seg_sizes = np.array(res[1][4],
dtype=int)
# Convert log2 copy values to real values
segment_cns = 2 ** np.array(res[1][5], dtype=float)
# create a list of segment break points,
# starting from first probe
bp_starts = [0]
for s in seg_sizes:
bp_starts.append(bp_starts[-1] + s)
# extend the cumulative break point list
# with the bps from this cluster
if bp_starts[-1] > min_segment_size:
for i in range(len(bp_starts) - 1):
if seg_sizes[i] < min_segment_size:
# if this is the last segment,
# merge with the previous
if i == len(bp_starts) - 2:
bp_starts[i] = "remove"
# if this is the first segment,
# merge with the next
else:
if i == 0:
bp_starts[i+1] = "remove"
# if this is a middle segment
else:
# if copy numbers of flanking
# segments are the same,
# remove the segment entirely
if (int(segment_cns[i-1])
== int(segment_cns[i+1])):
bp_starts[i] = "remove"
bp_starts[i+1] = "remove"
# if copy numbers of flanking
# segments are the same,
# merge segment with the larger
# flanking segment
if seg_sizes[i-1] > seg_sizes[i+1]:
bp_starts[i] = "remove"
else:
bp_starts[i+1] = "remove"
# recalculate break points and segment sizes
bp_starts = [b for b in bp_starts if b != "remove"]
break_points.extend(bp_starts)
# remove recurrent break points
break_points = sorted(set(break_points))
seg_sizes = [break_points[i+1] - break_points[i]
for i in range(len(break_points) - 1)]
# remove break points leading to small segments
# this will cause smaller segments to be merged with
# the larger of its flanking segments
if break_points[-1] > min_segment_size:
for i in range(len(break_points) - 1):
if seg_sizes[i] < min_segment_size:
if i == len(break_points) - 2:
break_points[i] = "remove"
else:
if i == 0:
break_points[i+1] = "remove"
else:
if seg_sizes[i-1] > seg_sizes[i+1]:
break_points[i] = "remove"
else:
break_points[i+1] = "remove"
# recalculate break points and segment sizes
break_points = [b for b in break_points if b != "remove"]
seg_sizes = [break_points[i+1] - break_points[i]
for i in range(len(break_points) - 1)]
copy_calls[g]["break_points"] = break_points
copy_calls[g]["segment_sizes"] = seg_sizes
segment_calls = copy_calls[g]["segment_calls"] = {}
for i in range(len(break_points) - 1):
seg_start = break_points[i]
seg_end = break_points[i + 1]
# get copy count data for the segment
seg_counts = N.iloc[:, seg_start:seg_end]
# Filter noisy probes.
# If a probe in the segment has much higher
# std compared to the median std in all probes
# in the segment, remove the noisy probe
seg_stds = seg_counts.std()
med_std = seg_stds.median()
std_mask = seg_stds <= 2 * med_std
seg_filtered = seg_counts.loc[:, std_mask]
seg_rounded = seg_filtered.median(
axis=1).round()
delta_diploid_upper = seg_filtered - diploid_upper
delta_diploid_lower = seg_filtered - diploid_lower
delta_rounded = seg_filtered.transform(
lambda a: a - seg_rounded)
distance_to_diploid_lower = delta_diploid_lower.apply(
np.linalg.norm, axis=1)
distance_to_diploid_upper = delta_diploid_upper.apply(
np.linalg.norm, axis=1)
distance_to_diploid_rounded = delta_rounded.apply(
np.linalg.norm, axis=1)
diploid_mask = ((distance_to_diploid_upper
<= distance_to_diploid_rounded)
| (distance_to_diploid_lower
<= distance_to_diploid_rounded))
diploid = seg_filtered.loc[diploid_mask].transform(
lambda a: a - a + ploidy)
other_ploid = seg_filtered.loc[~diploid_mask].transform(
lambda a: a - a + seg_rounded.loc[~diploid_mask])
seg_ploid = pd.concat([diploid, other_ploid])
segment_calls[i] = seg_ploid
copy_calls[g]["copy_calls"] =
|
pd.concat(segment_calls, axis=1)
|
pandas.concat
|
import pandas as pd
import datetime
from dateutil.relativedelta import relativedelta
import scripts.main.importer.importer as importer
import scripts.main.config as config
import scripts.main.models as models
from scripts.main.base_logger import log
def total_money_data(data: dict) -> pd.DataFrame:
"""Get summary data of all assets sorted by categories
Args:
data (dict): dictionary with all financial data
Returns:
pd.DataFrame: grouped financial standings
"""
log.info('Fetching latest total money data')
checking_account = __latest_account_balance(data, 'checking')
savings_account = __latest_account_balance(data, 'savings')
cash = __latest_account_balance(data, 'cash')
ppk = __latest_account_balance(data, 'retirement')
inv = data['investment'].loc[data['investment']['Active'] == True]
inv = inv['Start Amount'].sum()
stock_buy = data['stock'].loc[data['stock']['Operation'] == 'Buy']
stock_buy = stock_buy['Total Value'].sum()
stock_sell = data['stock'].loc[data['stock']['Operation'] == 'Sell']
stock_sell = stock_sell['Total Value'].sum()
stock = stock_buy - stock_sell
# TODO check how much stock units I have Broker-Title pair buy-sell
total = checking_account + savings_account + cash + ppk + inv + stock
return pd.DataFrame([
{'Type': 'Checking Account', 'Total': checking_account, 'Percentage': checking_account / total},
{'Type': 'Savings Account', 'Total': savings_account, 'Percentage': savings_account / total},
{'Type': 'Cash', 'Total': cash, 'Percentage': cash / total},
{'Type': 'PPK', 'Total': ppk, 'Percentage': ppk / total},
{'Type': 'Investments', 'Total': inv, 'Percentage': inv / total},
{'Type': 'Stocks', 'Total': stock, 'Percentage': stock / total}
])
def __latest_account_balance(data: dict, type: str) -> float:
df = data['account'].loc[data['account']['Type'] == type]
if not df.empty:
return accounts_balance_for_day(df, df['Date'].max())
return 0.00
def update_total_money(accounts: pd.DataFrame, updated_dates: pd.Series) -> pd.DataFrame:
"""Calculate and add rows of totals for each day from pd.Series
Args:
accounts (pd.DataFrame): updated accounts file
updated_dates (pd.Series): Series of updated dates for which calculation needs to be done
Returns:
pd.DataFrame: new, updated total assets standing
"""
log.info('Updating and calculating total money history from %s', str(updated_dates.min()))
total = importer.load_data_from_file(models.FileType.TOTAL)
total = __clean_overlapping_days(total, updated_dates.min())
total_new_lines = __calc_totals(accounts, updated_dates)
total = pd.concat([total, total_new_lines]).reset_index(drop=True)
total.to_csv(config.mankkoo_file_path('total'), index=False)
log.info('Total money data was updated successfully')
return total
def __clean_overlapping_days(total: pd.DataFrame, min_date: datetime.date):
return total.drop(total[total['Date'] >= min_date].index)
def __calc_totals(accounts: pd.DataFrame, updated_dates: pd.Series):
accounts_dates = accounts[accounts['Date'] > updated_dates.min()]['Date']
updated_dates = updated_dates.append(accounts_dates, ignore_index=True)
updated_dates = updated_dates.drop_duplicates().sort_values()
investments = importer.load_data_from_file(models.FileType.INVESTMENT)
stock = importer.load_data_from_file(models.FileType.STOCK)
result_list = []
# TODO replace with better approach, e.g.
# https://stackoverflow.com/questions/16476924/how-to-iterate-over-rows-in-a-dataframe-in-pandas
for date_tuple in updated_dates.iteritems():
date = date_tuple[1]
total = accounts_balance_for_day(accounts, date) + investments_for_day(investments, date) + stock_for_day(stock, date)
row_dict = {'Date': date, 'Total': round(total, 2)}
result_list.append(row_dict)
return pd.DataFrame(result_list)
def accounts_balance_for_day(accounts: pd.DataFrame, date: datetime.date):
account_names = accounts['Account'].unique()
result = 0
for account_name in account_names:
result = result + __get_balance_for_day_or_earlier(accounts, account_name, date)
return result
def __get_balance_for_day_or_earlier(accounts: pd.DataFrame, account_name: str, date: datetime.date):
only_single_account = accounts[accounts['Account'] == account_name]
only_specific_dates_accounts = only_single_account.loc[only_single_account['Date'] <= date]
if only_specific_dates_accounts.empty:
return 0
return only_specific_dates_accounts['Balance'].iloc[-1]
def investments_for_day(investments: pd.DataFrame, date: datetime.date) -> float:
"""Sums all investments in particular day
Args:
investments (pd.DataFrame): DataFrame with all operations for investments
date (datetime.date): a day for which sum of investments need to be calculated
Returns:
float: calculated total sum of all investments
"""
after_start = investments['Start Date'] <= date
before_end = investments['End Date'] >= date
is_na =
|
pd.isna(investments['End Date'])
|
pandas.isna
|
# Licensed to Modin Development Team under one or more contributor license agreements.
# See the NOTICE file distributed with this work for additional information regarding
# copyright ownership. The Modin Development Team licenses this file to you under the
# Apache License, Version 2.0 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under
# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import numpy as np
import pandas
from pandas.core.common import is_bool_indexer
from pandas.core.indexing import check_bool_indexer
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
is_scalar,
)
from pandas.core.base import DataError
import warnings
from modin.backends.base.query_compiler import BaseQueryCompiler
from modin.error_message import ErrorMessage
from modin.utils import try_cast_to_pandas, wrap_udf_function
from modin.data_management.functions import (
FoldFunction,
MapFunction,
MapReduceFunction,
ReductionFunction,
BinaryFunction,
GroupbyReduceFunction,
)
def _get_axis(axis):
if axis == 0:
return lambda self: self._modin_frame.index
else:
return lambda self: self._modin_frame.columns
def _set_axis(axis):
if axis == 0:
def set_axis(self, idx):
self._modin_frame.index = idx
else:
def set_axis(self, cols):
self._modin_frame.columns = cols
return set_axis
def _str_map(func_name):
def str_op_builder(df, *args, **kwargs):
str_s = df.squeeze(axis=1).str
return getattr(pandas.Series.str, func_name)(str_s, *args, **kwargs).to_frame()
return str_op_builder
def _dt_prop_map(property_name):
"""
Create a function that call property of property `dt` of the series.
Parameters
----------
property_name
The property of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies non-callable properties of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
prop_val = getattr(df.squeeze(axis=1).dt, property_name)
if isinstance(prop_val, pandas.Series):
return prop_val.to_frame()
elif isinstance(prop_val, pandas.DataFrame):
return prop_val
else:
return pandas.DataFrame([prop_val])
return dt_op_builder
def _dt_func_map(func_name):
"""
Create a function that call method of property `dt` of the series.
Parameters
----------
func_name
The method of `dt`, which will be applied.
Returns
-------
A callable function to be applied in the partitions
Notes
-----
This applies callable methods of `Series.dt`.
"""
def dt_op_builder(df, *args, **kwargs):
dt_s = df.squeeze(axis=1).dt
return pandas.DataFrame(
getattr(pandas.Series.dt, func_name)(dt_s, *args, **kwargs)
)
return dt_op_builder
def copy_df_for_func(func):
"""
Create a function that copies the dataframe, likely because `func` is inplace.
Parameters
----------
func : callable
The function, usually updates a dataframe inplace.
Returns
-------
callable
A callable function to be applied in the partitions
"""
def caller(df, *args, **kwargs):
df = df.copy()
func(df, *args, **kwargs)
return df
return caller
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(self, modin_frame):
self._modin_frame = modin_frame
def default_to_pandas(self, pandas_op, *args, **kwargs):
"""Default to pandas behavior.
Parameters
----------
pandas_op : callable
The operation to apply, must be compatible pandas DataFrame call
args
The arguments for the `pandas_op`
kwargs
The keyword arguments for the `pandas_op`
Returns
-------
PandasQueryCompiler
The result of the `pandas_op`, converted back to PandasQueryCompiler
Note
----
This operation takes a distributed object and converts it directly to pandas.
"""
ErrorMessage.default_to_pandas(str(pandas_op))
args = (a.to_pandas() if isinstance(a, type(self)) else a for a in args)
kwargs = {
k: v.to_pandas if isinstance(v, type(self)) else v
for k, v in kwargs.items()
}
result = pandas_op(self.to_pandas(), *args, **kwargs)
if isinstance(result, pandas.Series):
if result.name is None:
result.name = "__reduced__"
result = result.to_frame()
if isinstance(result, pandas.DataFrame):
return self.from_pandas(result, type(self._modin_frame))
else:
return result
def to_pandas(self):
return self._modin_frame.to_pandas()
@classmethod
def from_pandas(cls, df, data_cls):
return cls(data_cls.from_pandas(df))
@classmethod
def from_arrow(cls, at, data_cls):
return cls(data_cls.from_arrow(at))
index = property(_get_axis(0), _set_axis(0))
columns = property(_get_axis(1), _set_axis(1))
@property
def dtypes(self):
return self._modin_frame.dtypes
# END Index, columns, and dtypes objects
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
return self.__constructor__(self._modin_frame.add_prefix(prefix, axis))
def add_suffix(self, suffix, axis=1):
return self.__constructor__(self._modin_frame.add_suffix(suffix, axis))
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(self._modin_frame.copy())
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
if not isinstance(other, list):
other = [other]
assert all(
isinstance(o, type(self)) for o in other
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
if sort is None:
sort = False
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
other_modin_frame = [o._modin_frame for o in other]
new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
result = self.__constructor__(new_modin_frame)
if ignore_index:
if axis == 0:
return result.reset_index(drop=True)
else:
result.columns = pandas.RangeIndex(len(result.columns))
return result
return result
# END Append/Concat/Join
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object."""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To NumPy
def to_numpy(self, **kwargs):
"""
Converts Modin DataFrame to NumPy array.
Returns
-------
NumPy array of the QueryCompiler.
"""
arr = self._modin_frame.to_numpy(**kwargs)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Binary operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
add = BinaryFunction.register(pandas.DataFrame.add)
combine = BinaryFunction.register(pandas.DataFrame.combine)
combine_first = BinaryFunction.register(pandas.DataFrame.combine_first)
eq = BinaryFunction.register(pandas.DataFrame.eq)
floordiv = BinaryFunction.register(pandas.DataFrame.floordiv)
ge = BinaryFunction.register(pandas.DataFrame.ge)
gt = BinaryFunction.register(pandas.DataFrame.gt)
le = BinaryFunction.register(pandas.DataFrame.le)
lt = BinaryFunction.register(pandas.DataFrame.lt)
mod = BinaryFunction.register(pandas.DataFrame.mod)
mul = BinaryFunction.register(pandas.DataFrame.mul)
ne = BinaryFunction.register(pandas.DataFrame.ne)
pow = BinaryFunction.register(pandas.DataFrame.pow)
rfloordiv = BinaryFunction.register(pandas.DataFrame.rfloordiv)
rmod = BinaryFunction.register(pandas.DataFrame.rmod)
rpow = BinaryFunction.register(pandas.DataFrame.rpow)
rsub = BinaryFunction.register(pandas.DataFrame.rsub)
rtruediv = BinaryFunction.register(pandas.DataFrame.rtruediv)
sub = BinaryFunction.register(pandas.DataFrame.sub)
truediv = BinaryFunction.register(pandas.DataFrame.truediv)
__and__ = BinaryFunction.register(pandas.DataFrame.__and__)
__or__ = BinaryFunction.register(pandas.DataFrame.__or__)
__rand__ = BinaryFunction.register(pandas.DataFrame.__rand__)
__ror__ = BinaryFunction.register(pandas.DataFrame.__ror__)
__rxor__ = BinaryFunction.register(pandas.DataFrame.__rxor__)
__xor__ = BinaryFunction.register(pandas.DataFrame.__xor__)
df_update = BinaryFunction.register(
copy_df_for_func(pandas.DataFrame.update), join_type="left"
)
series_update = BinaryFunction.register(
copy_df_for_func(
lambda x, y: pandas.Series.update(x.squeeze(axis=1), y.squeeze(axis=1))
),
join_type="left",
)
def where(self, cond, other, **kwargs):
"""Gets values from this manager where cond is true else from other.
Args:
cond: Condition on which to evaluate values.
Returns:
New QueryCompiler with updated data and index.
"""
assert isinstance(
cond, type(self)
), "Must have the same QueryCompiler subclass to perform this operation"
if isinstance(other, type(self)):
# Note: Currently we are doing this with two maps across the entire
# data. This can be done with a single map, but it will take a
# modification in the `BlockPartition` class.
# If this were in one pass it would be ~2x faster.
# TODO (devin-petersohn) rewrite this to take one pass.
def where_builder_first_pass(cond, other, **kwargs):
return cond.where(cond, other, **kwargs)
first_pass = cond._modin_frame._binary_op(
where_builder_first_pass, other._modin_frame, join_type="left"
)
def where_builder_second_pass(df, new_other, **kwargs):
return df.where(new_other.eq(True), new_other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_second_pass, first_pass, join_type="left"
)
# This will be a Series of scalars to be applied based on the condition
# dataframe.
else:
def where_builder_series(df, cond):
return df.where(cond, other, **kwargs)
new_modin_frame = self._modin_frame._binary_op(
where_builder_series, cond._modin_frame, join_type="left"
)
return self.__constructor__(new_modin_frame)
def merge(self, right, **kwargs):
"""
Merge DataFrame or named Series objects with a database-style join.
Parameters
----------
right : PandasQueryCompiler
The query compiler of the right DataFrame to merge with.
Returns
-------
PandasQueryCompiler
A new query compiler that contains result of the merge.
Notes
-----
See pd.merge or pd.DataFrame.merge for more info on kwargs.
"""
how = kwargs.get("how", "inner")
on = kwargs.get("on", None)
left_on = kwargs.get("left_on", None)
right_on = kwargs.get("right_on", None)
left_index = kwargs.get("left_index", False)
right_index = kwargs.get("right_index", False)
sort = kwargs.get("sort", False)
if how in ["left", "inner"] and left_index is False and right_index is False:
right = right.to_pandas()
kwargs["sort"] = False
def map_func(left, right=right, kwargs=kwargs):
return pandas.merge(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
is_reset_index = True
if left_on and right_on:
left_on = left_on if is_list_like(left_on) else [left_on]
right_on = right_on if is_list_like(right_on) else [right_on]
is_reset_index = (
False
if any(o in new_self.index.names for o in left_on)
and any(o in right.index.names for o in right_on)
else True
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(left_on.append(right_on))
if is_reset_index
else new_self.sort_index(axis=0, level=left_on.append(right_on))
)
if on:
on = on if is_list_like(on) else [on]
is_reset_index = not any(
o in new_self.index.names and o in right.index.names for o in on
)
if sort:
new_self = (
new_self.sort_rows_by_column_values(on)
if is_reset_index
else new_self.sort_index(axis=0, level=on)
)
return new_self.reset_index(drop=True) if is_reset_index else new_self
else:
return self.default_to_pandas(pandas.DataFrame.merge, right, **kwargs)
def join(self, right, **kwargs):
"""
Join columns of another DataFrame.
Parameters
----------
right : BaseQueryCompiler
The query compiler of the right DataFrame to join with.
Returns
-------
BaseQueryCompiler
A new query compiler that contains result of the join.
Notes
-----
See pd.DataFrame.join for more info on kwargs.
"""
on = kwargs.get("on", None)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
if how in ["left", "inner"]:
right = right.to_pandas()
def map_func(left, right=right, kwargs=kwargs):
return pandas.DataFrame.join(left, right, **kwargs)
new_self = self.__constructor__(
self._modin_frame._apply_full_axis(1, map_func)
)
return new_self.sort_rows_by_column_values(on) if sort else new_self
else:
return self.default_to_pandas(pandas.DataFrame.join, right, **kwargs)
# END Inter-Data operations
# Reindex/reset_index (may shuffle data)
def reindex(self, axis, labels, **kwargs):
"""Fits a new index for this Manager.
Args:
axis: The axis index object to target the reindex on.
labels: New labels to conform 'axis' on to.
Returns:
A new QueryCompiler with updated data and new index.
"""
new_index = self.index if axis else labels
new_columns = labels if axis else self.columns
new_modin_frame = self._modin_frame._apply_full_axis(
axis,
lambda df: df.reindex(labels=labels, axis=axis, **kwargs),
new_index=new_index,
new_columns=new_columns,
)
return self.__constructor__(new_modin_frame)
def reset_index(self, **kwargs):
"""Removes all levels from index and sets a default level_0 index.
Returns:
A new QueryCompiler with updated data and reset index.
"""
drop = kwargs.get("drop", False)
level = kwargs.get("level", None)
# TODO Implement level
if level is not None or self.has_multiindex():
return self.default_to_pandas(pandas.DataFrame.reset_index, **kwargs)
if not drop:
new_column_name = (
self.index.name
if self.index.name is not None
else "index"
if "index" not in self.columns
else "level_0"
)
new_self = self.insert(0, new_column_name, self.index)
else:
new_self = self.copy()
new_self.index = pandas.RangeIndex(len(new_self.index))
return new_self
# END Reindex/reset_index
# Transpose
# For transpose, we aren't going to immediately copy everything. Since the
# actual transpose operation is very fast, we will just do it before any
# operation that gets called on the transposed data. See _prepare_method
# for how the transpose is applied.
#
# Our invariants assume that the blocks are transposed, but not the
# data inside. Sometimes we have to reverse this transposition of blocks
# for simplicity of implementation.
def transpose(self, *args, **kwargs):
"""Transposes this QueryCompiler.
Returns:
Transposed new QueryCompiler.
"""
# Switch the index and columns and transpose the data within the blocks.
return self.__constructor__(self._modin_frame.transpose())
def columnarize(self):
"""
Transposes this QueryCompiler if it has a single row but multiple columns.
This method should be called for QueryCompilers representing a Series object,
i.e. self.is_series_like() should be True.
Returns
-------
PandasQueryCompiler
Transposed new QueryCompiler or self.
"""
if len(self.columns) != 1 or (
len(self.index) == 1 and self.index[0] == "__reduced__"
):
return self.transpose()
return self
def is_series_like(self):
"""Return True if QueryCompiler has a single column or row"""
return len(self.columns) == 1 or len(self.index) == 1
# END Transpose
# MapReduce operations
def _is_monotonic(self, func_type=None):
funcs = {
"increasing": lambda df: df.is_monotonic_increasing,
"decreasing": lambda df: df.is_monotonic_decreasing,
}
monotonic_fn = funcs.get(func_type, funcs["increasing"])
def is_monotonic_map(df):
df = df.squeeze(axis=1)
return [monotonic_fn(df), df.iloc[0], df.iloc[len(df) - 1]]
def is_monotonic_reduce(df):
df = df.squeeze(axis=1)
common_case = df[0].all()
left_edges = df[1]
right_edges = df[2]
edges_list = []
for i in range(len(left_edges)):
edges_list.extend([left_edges.iloc[i], right_edges.iloc[i]])
edge_case = monotonic_fn(pandas.Series(edges_list))
return [common_case and edge_case]
return MapReduceFunction.register(
is_monotonic_map, is_monotonic_reduce, axis=0
)(self)
def is_monotonic_decreasing(self):
return self._is_monotonic(func_type="decreasing")
is_monotonic = _is_monotonic
count = MapReduceFunction.register(pandas.DataFrame.count, pandas.DataFrame.sum)
max = MapReduceFunction.register(pandas.DataFrame.max, pandas.DataFrame.max)
min = MapReduceFunction.register(pandas.DataFrame.min, pandas.DataFrame.min)
sum = MapReduceFunction.register(pandas.DataFrame.sum, pandas.DataFrame.sum)
prod = MapReduceFunction.register(pandas.DataFrame.prod, pandas.DataFrame.prod)
any = MapReduceFunction.register(pandas.DataFrame.any, pandas.DataFrame.any)
all = MapReduceFunction.register(pandas.DataFrame.all, pandas.DataFrame.all)
memory_usage = MapReduceFunction.register(
pandas.DataFrame.memory_usage,
lambda x, *args, **kwargs: pandas.DataFrame.sum(x),
axis=0,
)
mean = MapReduceFunction.register(
lambda df, **kwargs: df.apply(
lambda x: (x.sum(skipna=kwargs.get("skipna", True)), x.count()),
axis=kwargs.get("axis", 0),
result_type="reduce",
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
lambda df, **kwargs: df.apply(
lambda x: x.apply(lambda d: d[0]).sum(skipna=kwargs.get("skipna", True))
/ x.apply(lambda d: d[1]).sum(skipna=kwargs.get("skipna", True)),
axis=kwargs.get("axis", 0),
).set_axis(df.axes[kwargs.get("axis", 0) ^ 1], axis=0),
)
def value_counts(self, **kwargs):
"""
Return a QueryCompiler of Series containing counts of unique values.
Returns
-------
PandasQueryCompiler
"""
if kwargs.get("bins", None) is not None:
new_modin_frame = self._modin_frame._apply_full_axis(
0, lambda df: df.squeeze(axis=1).value_counts(**kwargs)
)
return self.__constructor__(new_modin_frame)
def map_func(df, *args, **kwargs):
return df.squeeze(axis=1).value_counts(**kwargs)
def reduce_func(df, *args, **kwargs):
normalize = kwargs.get("normalize", False)
sort = kwargs.get("sort", True)
ascending = kwargs.get("ascending", False)
dropna = kwargs.get("dropna", True)
try:
result = df.squeeze(axis=1).groupby(df.index, sort=False).sum()
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
except (ValueError):
result = df.copy().squeeze(axis=1).groupby(df.index, sort=False).sum()
if not dropna and np.nan in df.index:
result = result.append(
pandas.Series(
[df.squeeze(axis=1).loc[[np.nan]].sum()], index=[np.nan]
)
)
if normalize:
result = result / df.squeeze(axis=1).sum()
result = result.sort_values(ascending=ascending) if sort else result
# We want to sort both values and indices of the result object.
# This function will sort indices for equal values.
def sort_index_for_equal_values(result, ascending):
"""
Sort indices for equal values of result object.
Parameters
----------
result : pandas.Series or pandas.DataFrame with one column
The object whose indices for equal values is needed to sort.
ascending : boolean
Sort in ascending (if it is True) or descending (if it is False) order.
Returns
-------
pandas.DataFrame
A new DataFrame with sorted indices.
"""
is_range = False
is_end = False
i = 0
new_index = np.empty(len(result), dtype=type(result.index))
while i < len(result):
j = i
if i < len(result) - 1:
while result[result.index[i]] == result[result.index[i + 1]]:
i += 1
if is_range is False:
is_range = True
if i == len(result) - 1:
is_end = True
break
if is_range:
k = j
for val in sorted(
result.index[j : i + 1], reverse=not ascending
):
new_index[k] = val
k += 1
if is_end:
break
is_range = False
else:
new_index[j] = result.index[j]
i += 1
return pandas.DataFrame(result, index=new_index)
return sort_index_for_equal_values(result, ascending)
return MapReduceFunction.register(map_func, reduce_func, preserve_index=False)(
self, **kwargs
)
# END MapReduce operations
# Reduction operations
idxmax = ReductionFunction.register(pandas.DataFrame.idxmax)
idxmin = ReductionFunction.register(pandas.DataFrame.idxmin)
median = ReductionFunction.register(pandas.DataFrame.median)
nunique = ReductionFunction.register(pandas.DataFrame.nunique)
skew = ReductionFunction.register(pandas.DataFrame.skew)
kurt = ReductionFunction.register(pandas.DataFrame.kurt)
sem = ReductionFunction.register(pandas.DataFrame.sem)
std = ReductionFunction.register(pandas.DataFrame.std)
var = ReductionFunction.register(pandas.DataFrame.var)
sum_min_count = ReductionFunction.register(pandas.DataFrame.sum)
prod_min_count = ReductionFunction.register(pandas.DataFrame.prod)
quantile_for_single_value = ReductionFunction.register(pandas.DataFrame.quantile)
mad = ReductionFunction.register(pandas.DataFrame.mad)
to_datetime = ReductionFunction.register(
lambda df, *args, **kwargs: pandas.to_datetime(
df.squeeze(axis=1), *args, **kwargs
),
axis=1,
)
# END Reduction operations
def _resample_func(
self, resample_args, func_name, new_columns=None, df_op=None, *args, **kwargs
):
def map_func(df, resample_args=resample_args):
if df_op is not None:
df = df_op(df)
resampled_val = df.resample(*resample_args)
op = getattr(pandas.core.resample.Resampler, func_name)
if callable(op):
try:
# This will happen with Arrow buffer read-only errors. We don't want to copy
# all the time, so this will try to fast-path the code first.
val = op(resampled_val, *args, **kwargs)
except (ValueError):
resampled_val = df.copy().resample(*resample_args)
val = op(resampled_val, *args, **kwargs)
else:
val = getattr(resampled_val, func_name)
if isinstance(val, pandas.Series):
return val.to_frame()
else:
return val
new_modin_frame = self._modin_frame._apply_full_axis(
axis=0, func=map_func, new_columns=new_columns
)
return self.__constructor__(new_modin_frame)
def resample_get_group(self, resample_args, name, obj):
return self._resample_func(resample_args, "get_group", name=name, obj=obj)
def resample_app_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"apply",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_app_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "apply", func=func, *args, **kwargs)
def resample_agg_ser(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args,
"aggregate",
df_op=lambda df: df.squeeze(axis=1),
func=func,
*args,
**kwargs,
)
def resample_agg_df(self, resample_args, func, *args, **kwargs):
return self._resample_func(
resample_args, "aggregate", func=func, *args, **kwargs
)
def resample_transform(self, resample_args, arg, *args, **kwargs):
return self._resample_func(resample_args, "transform", arg=arg, *args, **kwargs)
def resample_pipe(self, resample_args, func, *args, **kwargs):
return self._resample_func(resample_args, "pipe", func=func, *args, **kwargs)
def resample_ffill(self, resample_args, limit):
return self._resample_func(resample_args, "ffill", limit=limit)
def resample_backfill(self, resample_args, limit):
return self._resample_func(resample_args, "backfill", limit=limit)
def resample_bfill(self, resample_args, limit):
return self._resample_func(resample_args, "bfill", limit=limit)
def resample_pad(self, resample_args, limit):
return self._resample_func(resample_args, "pad", limit=limit)
def resample_nearest(self, resample_args, limit):
return self._resample_func(resample_args, "nearest", limit=limit)
def resample_fillna(self, resample_args, method, limit):
return self._resample_func(resample_args, "fillna", method=method, limit=limit)
def resample_asfreq(self, resample_args, fill_value):
return self._resample_func(resample_args, "asfreq", fill_value=fill_value)
def resample_interpolate(
self,
resample_args,
method,
axis,
limit,
inplace,
limit_direction,
limit_area,
downcast,
**kwargs,
):
return self._resample_func(
resample_args,
"interpolate",
axis=axis,
limit=limit,
inplace=inplace,
limit_direction=limit_direction,
limit_area=limit_area,
downcast=downcast,
**kwargs,
)
def resample_count(self, resample_args):
return self._resample_func(resample_args, "count")
def resample_nunique(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "nunique", _method=_method, *args, **kwargs
)
def resample_first(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "first", _method=_method, *args, **kwargs
)
def resample_last(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "last", _method=_method, *args, **kwargs
)
def resample_max(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "max", _method=_method, *args, **kwargs
)
def resample_mean(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_median(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "median", _method=_method, *args, **kwargs
)
def resample_min(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "min", _method=_method, *args, **kwargs
)
def resample_ohlc_ser(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args,
"ohlc",
df_op=lambda df: df.squeeze(axis=1),
_method=_method,
*args,
**kwargs,
)
def resample_ohlc_df(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "ohlc", _method=_method, *args, **kwargs
)
def resample_prod(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "prod", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_size(self, resample_args):
return self._resample_func(resample_args, "size", new_columns=["__reduced__"])
def resample_sem(self, resample_args, _method, *args, **kwargs):
return self._resample_func(
resample_args, "sem", _method=_method, *args, **kwargs
)
def resample_std(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "std", ddof=ddof, *args, **kwargs)
def resample_sum(self, resample_args, _method, min_count, *args, **kwargs):
return self._resample_func(
resample_args, "sum", _method=_method, min_count=min_count, *args, **kwargs
)
def resample_var(self, resample_args, ddof, *args, **kwargs):
return self._resample_func(resample_args, "var", ddof=ddof, *args, **kwargs)
def resample_quantile(self, resample_args, q, **kwargs):
return self._resample_func(resample_args, "quantile", q=q, **kwargs)
window_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
window_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
window_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
window_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_count = FoldFunction.register(
lambda df, rolling_args: pandas.DataFrame(df.rolling(*rolling_args).count())
)
rolling_sum = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).sum(*args, **kwargs)
)
)
rolling_mean = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).mean(*args, **kwargs)
)
)
rolling_median = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).median(**kwargs)
)
)
rolling_var = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).var(ddof=ddof, *args, **kwargs)
)
)
rolling_std = FoldFunction.register(
lambda df, rolling_args, ddof, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).std(ddof=ddof, *args, **kwargs)
)
)
rolling_min = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).min(*args, **kwargs)
)
)
rolling_max = FoldFunction.register(
lambda df, rolling_args, *args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).max(*args, **kwargs)
)
)
rolling_skew = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).skew(**kwargs)
)
)
rolling_kurt = FoldFunction.register(
lambda df, rolling_args, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).kurt(**kwargs)
)
)
rolling_apply = FoldFunction.register(
lambda df, rolling_args, func, raw, engine, engine_kwargs, args, kwargs: pandas.DataFrame(
df.rolling(*rolling_args).apply(
func=func,
raw=raw,
engine=engine,
engine_kwargs=engine_kwargs,
args=args,
kwargs=kwargs,
)
)
)
rolling_quantile = FoldFunction.register(
lambda df, rolling_args, quantile, interpolation, **kwargs: pandas.DataFrame(
df.rolling(*rolling_args).quantile(
quantile=quantile, interpolation=interpolation, **kwargs
)
)
)
def rolling_corr(self, rolling_args, other, pairwise, *args, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df: pandas.DataFrame.rolling(df, *rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
else:
return FoldFunction.register(
lambda df: pandas.DataFrame(
df.rolling(*rolling_args).corr(
other=other, pairwise=pairwise, *args, **kwargs
)
)
)(self)
def rolling_cov(self, rolling_args, other, pairwise, ddof, **kwargs):
if len(self.columns) > 1:
return self.default_to_pandas(
lambda df:
|
pandas.DataFrame.rolling(df, *rolling_args)
|
pandas.DataFrame.rolling
|
import datetime
import re
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
Series,
_testing as tm,
bdate_range,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
)
from pandas.util import _test_decorators as td
_default_compressor = "blosc"
pytestmark = pytest.mark.single
def test_conv_read_write(setup_path):
with tm.ensure_clean() as path:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame({"A": range(5), "B": range(5)})
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
def test_long_strings(setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(
|
read_hdf(path, "df")
|
pandas.read_hdf
|
import numpy as np
import gdax
import json
import logging
from os.path import expanduser
import pandas as pd
from backfire.bots import bot_db
logger = logging.getLogger(__name__)
def load_gdax_auth(test_bool):
home = expanduser("~")
if test_bool == True:
gdax_auth = json.load(open(f'{home}/auth/gdax_sb'))
if test_bool == False:
gdax_auth = json.load(open(f'{home}/auth/gdax'))
key = gdax_auth['key']
secret = gdax_auth['secret']
passphrase = gdax_auth['passphrase']
return(key, secret, passphrase)
def initialize_gdax(test_bool):
key, secret, passphrase = load_gdax_auth(test_bool)
if test_bool == True:
logger.info("Initialize GDAX Sandbox API")
bot_db.db.my_db = 'gdax_test'
bot_db.db.set_engine()
ac = gdax.AuthenticatedClient(key, secret, passphrase,
api_url="https://api-public.sandbox.gdax.com")
if test_bool == False:
logger.info("Initialize live GDAX API")
bot_db.db.my_db = 'gdax'
bot_db.db.set_engine()
ac = gdax.AuthenticatedClient(key, secret, passphrase)
return(ac)
def gdax_get_orders(ac, uniq_orders):
order_list = []
for o in uniq_orders:
order = ac.get_order(o)
order_list.append(order)
return(order_list)
def update_orders(ac):
gdax_orders = ac.get_orders()
gdax_orders = [item for sublist in gdax_orders for item in sublist]
if len(gdax_orders) > 0:
orders_df = bot_db.prep_gdax_order_df(gdax_orders)
gdax_order_ids = orders_df['order_id'].tolist()
else:
gdax_order_ids = gdax_orders
sql_order_ids = bot_db.get_cur_orders()
new_order_ids = set(gdax_order_ids) - set(sql_order_ids['order_id'])
stale_order_ids = set(sql_order_ids['order_id']) - set(gdax_order_ids)
# Add new
if len(new_order_ids) > 0:
new_orders_df = orders_df[orders_df['order_id'].isin(new_order_ids)]
bot_db.append_if_new('order_id', new_orders_df, 'gdax_order_cur')
# Remove old
if len(stale_order_ids) > 0:
stale_hist = gdax_get_orders(ac, stale_order_ids)
stale_hist = pd.DataFrame(stale_hist)
stale_hist = bot_db.prep_gdax_order_df(stale_hist)
fills_df = get_gdax_fills(ac)
fills_df = add_bot_ids(fills_df)
bot_db.append_if_new('trade_id', fills_df, 'gdax_fill_hist')
bot_db.gdax_delete_open_orders(stale_order_ids, stale_hist)
def update_gdax_transfers_manual(ac):
bot_id = 'manual'
signal_id = 'manual'
my_accounts = ac.get_accounts()
transfer_list = []
for i in my_accounts:
my_id = i['id']
my_cur = i['currency']
gdax_acc_hist = ac.get_account_history(my_id)
gdax_acc_hist = [item for sublist in gdax_acc_hist for item in sublist]
for d in gdax_acc_hist:
if d['type'] == 'transfer':
d['cur'] = my_cur
d = {**d, **d.pop('details', None)}
transfer_list.append(d)
transfer_df =
|
pd.DataFrame(transfer_list)
|
pandas.DataFrame
|
"""ISIC Dataset."""
from __future__ import annotations
from enum import Enum, auto
from itertools import islice
import os
from pathlib import Path
import shutil
from typing import ClassVar, Iterable, Iterator, List, Optional, TypeVar, Union
import zipfile
from PIL import Image
import numpy as np
import pandas as pd
from ranzen import flatten_dict
from ranzen.decorators import enum_name_str, parsable
import requests
import torch
from tqdm import tqdm
from conduit.data.datasets.utils import ImageTform
from conduit.data.datasets.vision.base import CdtVisionDataset
__all__ = ["IsicAttr", "ISIC"]
@enum_name_str
class IsicAttr(Enum):
histo = auto()
malignant = auto()
patch = auto()
T = TypeVar("T")
class ISIC(CdtVisionDataset):
"""PyTorch Dataset for the ISIC 2018 dataset from
'Skin Lesion Analysis Toward Melanoma Detection 2018: A Challenge Hosted by the International
Skin Imaging Collaboration (ISIC)',"""
LABELS_FILENAME: ClassVar[str] = "labels.csv"
METADATA_FILENAME: ClassVar[str] = "metadata.csv"
_PBAR_COL: ClassVar[str] = "#fac000"
_REST_API_URL: ClassVar[str] = "https://isic-archive.com/api/v1"
@parsable
def __init__(
self,
root: Union[str, Path],
*,
download: bool = True,
max_samples: int = 25_000, # default is the number of samples used for the NSLB paper
context_attr: IsicAttr = IsicAttr.histo,
target_attr: IsicAttr = IsicAttr.malignant,
transform: Optional[ImageTform] = None,
) -> None:
self.root = Path(root)
self.download = download
self._base_dir = self.root / self.__class__.__name__
self._processed_dir = self._base_dir / "processed"
self._raw_dir = self._base_dir / "raw"
if max_samples < 1:
raise ValueError("max_samples must be a positive integer.")
self.max_samples = max_samples
if self.download:
self._download_data()
self._preprocess_data()
elif not self._check_downloaded():
raise FileNotFoundError(
f"Data not found at location {self._processed_dir.resolve()}. "
"Have you downloaded it?"
)
self.metadata = pd.read_csv(self._processed_dir / self.LABELS_FILENAME)
# Divide up the dataframe into its constituent arrays because indexing with pandas is
# considerably slower than indexing with numpy/torch
x = self.metadata["path"].to_numpy()
s = torch.as_tensor(self.metadata[str(context_attr)], dtype=torch.int32)
y = torch.as_tensor(self.metadata[str(target_attr)], dtype=torch.int32)
super().__init__(x=x, y=y, s=s, transform=transform, image_dir=self._processed_dir)
def _check_downloaded(self) -> bool:
return (self._raw_dir / "images").exists() and (
self._raw_dir / self.METADATA_FILENAME
).exists()
def _check_processed(self) -> bool:
return (self._processed_dir / "ISIC-images").exists() and (
self._processed_dir / self.LABELS_FILENAME
).exists()
@staticmethod
def chunk(it: Iterable[T], *, size: int) -> Iterator[List[T]]:
"""Divide any iterable into chunks of the given size."""
it = iter(it)
return iter(lambda: list(islice(it, size)), []) # this is magic from stackoverflow
def _download_isic_metadata(self) -> pd.DataFrame:
"""Downloads the metadata CSV from the ISIC website."""
self._raw_dir.mkdir(parents=True, exist_ok=True)
req = requests.get(
f"{self._REST_API_URL}/image?limit={self.max_samples}"
f"&sort=name&sortdir=1&detail=false"
)
image_ids = req.json()
image_ids = [image_id["_id"] for image_id in image_ids]
template_start = "?limit=300&sort=name&sortdir=1&detail=true&imageIds=%5B%22"
template_sep = "%22%2C%22"
template_end = "%22%5D"
entries = []
with tqdm(
total=(len(image_ids) - 1) // 300 + 1,
desc="Downloading metadata",
colour=self._PBAR_COL,
) as pbar:
for block in self.chunk(image_ids, size=300):
pbar.set_postfix(image_id=block[0])
args = ""
args += template_start
args += template_sep.join(block)
args += template_end
req = requests.get(f"{self._REST_API_URL}/image{args}")
image_details = req.json()
for image_detail in image_details:
entry = flatten_dict(image_detail, sep=".")
entries.append(entry)
pbar.update()
metadata_df = pd.DataFrame(entries)
metadata_df = metadata_df.set_index("_id")
metadata_df.to_csv(self._raw_dir / self.METADATA_FILENAME)
return metadata_df
def _download_isic_images(self) -> None:
"""Given the metadata CSV, downloads the ISIC images."""
metadata_path = self._raw_dir / self.METADATA_FILENAME
if not metadata_path.is_file():
raise FileNotFoundError(
f"{self.METADATA_FILENAME} not downloaded. "
f"Run 'download_isic_data` before this function."
)
metadata_df = pd.read_csv(metadata_path)
metadata_df = metadata_df.set_index("_id")
template_start = "?include=images&imageIds=%5B%22"
template_sep = "%22%2C%22"
template_end = "%22%5D"
raw_image_dir = self._raw_dir / "images"
raw_image_dir.mkdir(exist_ok=True)
image_ids = list(metadata_df.index)
with tqdm(
total=(len(image_ids) - 1) // 50 + 1, desc="Downloading images", colour=self._PBAR_COL
) as pbar:
for i, block in enumerate(self.chunk(image_ids, size=50)):
pbar.set_postfix(image_id=block[0])
args = ""
args += template_start
args += template_sep.join(block)
args += template_end
req = requests.get(f"{self._REST_API_URL}/image/download{args}", stream=True)
req.raise_for_status()
image_path = raw_image_dir / f"{i}.zip"
with open(image_path, "wb") as f:
shutil.copyfileobj(req.raw, f)
del req
pbar.update()
def _preprocess_isic_metadata(self) -> None:
"""Preprocesses the raw ISIC metadata."""
self._processed_dir.mkdir(exist_ok=True)
metadata_path = self._raw_dir / self.METADATA_FILENAME
if not metadata_path.is_file():
raise FileNotFoundError(
f"{self.METADATA_FILENAME} not found while preprocessing ISIC dataset. "
"Run `download_isic_metadata` and `download_isic_images` before "
"calling `preprocess_isic_metadata`."
)
metadata_df =
|
pd.read_csv(metadata_path)
|
pandas.read_csv
|
import numpy as np
from scipy.special import expit as sigmoid
import numpyro.handlers as numpyro
import pandas as pd
import pytest
import torch
from jax import random
import pyro.poutine as poutine
from brmp import define_model, brm, makedesc
from brmp.backend import data_from_numpy
from brmp.design import (Categorical, CategoricalCoding, Integral,
NumericCoding, RealValued, code_lengths, code_terms,
coef_names, dummy_df, make_column_lookup, makedata,
metadata_from_cols, metadata_from_df)
from brmp.family import (LKJ, Bernoulli, Binomial, HalfCauchy, HalfNormal,
Normal, StudentT, Poisson)
from brmp.fit import Samples
from brmp.formula import Formula, OrderedSet, Term, _1, allfactors, parse
from brmp.model import parameters, scalar_parameter_map, scalar_parameter_names
from brmp.model_pre import build_model_pre
from brmp.numpyro_backend import backend as numpyro_backend
from brmp.priors import Prior, build_prior_tree
from brmp.pyro_backend import backend as pyro_backend
from pyro.distributions import Independent
def assert_equal(a, b):
assert type(a) == np.ndarray or type(a) == torch.Tensor
assert type(a) == type(b)
if type(a) == np.ndarray:
assert (a == b).all()
else:
assert torch.equal(a, b)
default_params = dict(
Normal=dict(loc=0., scale=1.),
Cauchy=dict(loc=0., scale=1.),
HalfCauchy=dict(scale=3.),
HalfNormal=dict(scale=1.),
LKJ=dict(eta=1.),
Beta=dict(concentration1=1., concentration0=1.),
StudentT=dict(df=3., loc=0., scale=1.),
)
# Makes list of columns metadata that includes an entry for every
# factor in `formula`. Any column not already in `cols` is assumed to
# be `RealValued`.
def expand_columns(formula, cols):
lookup = make_column_lookup(cols)
return [lookup.get(factor, RealValued(factor))
for factor in allfactors(formula)]
codegen_cases = [
# TODO: This (and similar examples below) can't be expressed with
# the current parser. Is it useful to fix this (`y ~ -1`?), or can
# these be dropped?
# (Formula('y', [], []), [], [], ['sigma']),
('y ~ 1 + x', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Integer valued predictor.
('y ~ 1 + x', [Integral('x', min=0, max=10)], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2', [], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
# Groups with fewer than two terms don't sample the (Cholesky
# decomp. of the) correlation matrix.
# (Formula('y', [], [Group([], 'z', True)]), [Categorical('z', list('ab'))], [], ['sigma', 'z_1']),
('y ~ 1 | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Integers as categorical levels.
('y ~ 1 | z', [Categorical('z', [10, 20])], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z', [Categorical('z', list('ab'))], {}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ x | z',
[Categorical('x', list('ab')), Categorical('z', list('ab'))],
{}, Normal, [],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 | z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 || z)', [Categorical('z', list('ab'))], {}, Normal, [],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2 + (1 + x3 + x4 | z1) + (1 + x5 | z2)',
[Categorical('z1', list('ab')), Categorical('z2', list('ab'))],
{},
Normal,
[],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('z_1', 'Normal', {}),
('sd_1_0', 'HalfCauchy', {}),
('L_1', 'LKJ', {})]),
('y ~ 1 | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {})]),
# Custom priors.
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b',), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'intercept'), Normal(0., 100.))],
[('b_0', 'Normal', {'loc': 0., 'scale': 100.}),
('b_1', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1 + x1 + x2',
[], {},
Normal,
[Prior(('b', 'x1'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ 1',
[], {},
Normal,
[Prior(('b',), StudentT(3., 0., 1.))],
[('b_0', 'StudentT', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of a factor.
('y ~ 1 + x',
[Categorical('x', list('ab'))],
{},
Normal,
[Prior(('b', 'x[b]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('sigma', 'HalfCauchy', {})]),
# Prior on coef of an interaction.
('y ~ x1:x2',
[Categorical('x1', list('ab')), Categorical('x2', list('cd'))],
{},
Normal,
[Prior(('b', 'x1[b]:x2[c]'), Normal(0., 100.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {'loc': 0., 'scale': 100.}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
# Prior on group level `sd` choice.
('y ~ 1 + x2 + x3 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfCauchy(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {})]),
('y ~ 1 + x2 + x3 || x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('sd', 'x1', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('z_0', 'Normal', {})]),
('y ~ 1 + x || a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('sd', 'a:b', 'intercept'), HalfNormal(4.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfNormal', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {})]),
# Prior on L.
('y ~ 1 + x2 | x1',
[Categorical('x1', list('ab'))],
{},
Normal,
[Prior(('cor',), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('sd_0_0', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('L_0', 'LKJ', {'eta': 2.})]),
('y ~ 1 + x | a:b',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{},
Normal,
[Prior(('cor', 'a:b'), LKJ(2.))],
[('sigma', 'HalfCauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {}),
('L_0', 'LKJ', {'eta': 2.})]),
# Prior on parameter of response distribution.
('y ~ x',
[],
{},
Normal,
[Prior(('resp', 'sigma'), HalfCauchy(4.))],
[('b_0', 'Cauchy', {}),
('sigma', 'HalfCauchy', {'scale': 4.})]),
# Custom response family.
('y ~ x',
[],
{},
Normal(sigma=0.5),
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Categorical('y', list('AB'))],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=1)],
{},
Bernoulli,
[],
[('b_0', 'Cauchy', {})]),
('y ~ x',
[Integral('y', min=0, max=10)],
{},
Binomial(num_trials=10),
[],
[('b_0', 'Cauchy', {})]),
('y ~ 1 + x',
[Integral('y', min=0, max=10), Integral('x', min=0, max=10)],
{},
Poisson,
[],
[('b_0', 'Cauchy', {})]),
# Contrasts
('y ~ a',
[Categorical('a', ['a1', 'a2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal,
[Prior(('b', 'a[custom.1]'), Normal(0., 1.))],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('sigma', 'HalfCauchy', {})]),
('y ~ a + (a | b)',
[Categorical('a', ['a1', 'a2']), Categorical('b', ['b1', 'b2'])],
{'a': np.array([[-1, -1, -1], [1, 1, 1]])},
Normal, [
Prior(('b', 'a[custom.1]'), Normal(0., 1.)),
Prior(('sd', 'b', 'a[custom.0]'), HalfCauchy(4.))
],
[('b_0', 'Cauchy', {}),
('b_1', 'Normal', {}),
('b_2', 'Cauchy', {}),
('z_0', 'Normal', {}),
('sd_0_0', 'HalfCauchy', {'scale': 4.}),
('sd_0_1', 'HalfCauchy', {}),
('L_0', 'LKJ', {}),
('sigma', 'HalfCauchy', {})]),
]
# Map generic family names to backend specific names.
def pyro_family_name(name):
return dict(LKJ='LKJCorrCholesky').get(name, name)
def numpyro_family_name(name):
return dict(LKJ='LKJCholesky',
Bernoulli='BernoulliProbs',
Binomial='BinomialProbs').get(name, name)
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_pyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
# Generate the model from the column information rather than from
# the metadata extracted from `df`. Since N is small, the metadata
# extracted from `df` might loose information compared to the full
# metadata derived from `cols` (e.g. levels of a categorical
# column) leading to unexpected results. e.g. Missing levels might
# cause correlations not to be modelled, even thought they ought
# to be given the full metadata.
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = pyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(pyro_backend, makedata(formula, df, metadata, contrasts))
trace = poutine.trace(modelfn).get_trace(**data)
# Check that y is correctly observed.
y_node = trace.nodes['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == family.name
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
assert set(trace.stochastic_nodes) - {'obs'} == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = unwrapfn(trace.nodes[site]['fn'])
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == pyro_family_name(family_name)
for (name, expected_val) in params.items():
val = fn.__getattribute__(name)
assert_equal(val, torch.tensor(expected_val).expand(val.shape))
def unwrapfn(fn):
return unwrapfn(fn.base_dist) if type(fn) == Independent else fn
@pytest.mark.parametrize('N', [1, 5])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_numpyro_codegen(N, formula_str, non_real_cols, contrasts, family, priors, expected):
# Make dummy data.
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
# Generate model function and data.
modelfn = numpyro_backend.gen(desc).fn
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(numpyro_backend, makedata(formula, df, metadata, contrasts))
rng = random.PRNGKey(0)
trace = numpyro.trace(numpyro.seed(modelfn, rng)).get_trace(**data)
# Check that y is correctly observed.
y_node = trace['y']
assert y_node['is_observed']
assert type(y_node['fn']).__name__ == numpyro_family_name(family.name)
assert_equal(y_node['value'], data['y_obs'])
# Check sample sites.
expected_sites = [site for (site, _, _) in expected]
sample_sites = [name for name, node in trace.items() if not node['is_observed']]
assert set(sample_sites) == set(expected_sites)
for (site, family_name, maybe_params) in expected:
fn = trace[site]['fn']
params = maybe_params or default_params[family_name]
assert type(fn).__name__ == numpyro_family_name(family_name)
for (name, expected_val) in params.items():
if family_name == 'LKJ':
assert name == 'eta'
name = 'concentration'
val = fn.__getattribute__(name)
assert_equal(val._value, np.broadcast_to(expected_val, val.shape))
@pytest.mark.parametrize('formula_str, cols, expected', [
('y ~ 1 + x',
[],
lambda df, coef: coef('b_intercept') + df['x'] * coef('b_x')),
('y ~ a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: ((df['a'] == 'a0') * coef('b_a[a0]') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ 1 + a',
[Categorical('a', ['a0', 'a1', 'a2'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a1') * coef('b_a[a1]') +
(df['a'] == 'a2') * coef('b_a[a2]'))),
('y ~ x1:x2',
[],
lambda df, coef: df['x1'] * df['x2'] * coef('b_x1:x2')),
('y ~ a:x',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: (((df['a'] == 'a0') * df['x'] * coef('b_a[a0]:x')) +
((df['a'] == 'a1') * df['x'] * coef('b_a[a1]:x')))),
('y ~ 1 + x | a',
[Categorical('a', ['a0', 'a1'])],
lambda df, coef: ((df['a'] == 'a0') * (coef('r_a[a0,intercept]') + df['x'] * coef('r_a[a0,x]')) +
(df['a'] == 'a1') * (coef('r_a[a1,intercept]') + df['x'] * coef('r_a[a1,x]')))),
('y ~ 1 + x | a:b',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (((df['a'] == 'a0') & (df['b'] == 'b0')) *
(coef('r_a:b[a0_b0,intercept]') + df['x'] * coef('r_a:b[a0_b0,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b0')) *
(coef('r_a:b[a1_b0,intercept]') + df['x'] * coef('r_a:b[a1_b0,x]')) +
((df['a'] == 'a0') & (df['b'] == 'b1')) *
(coef('r_a:b[a0_b1,intercept]') + df['x'] * coef('r_a:b[a0_b1,x]')) +
((df['a'] == 'a1') & (df['b'] == 'b1')) *
(coef('r_a:b[a1_b1,intercept]') + df['x'] * coef('r_a:b[a1_b1,x]')))),
('y ~ 1 + (x1 | a) + (x2 | b)',
[Categorical('a', ['a0', 'a1']), Categorical('b', ['b0', 'b1'])],
lambda df, coef: (coef('b_intercept') +
(df['a'] == 'a0') * df['x1'] * coef('r_a[a0,x1]') +
(df['a'] == 'a1') * df['x1'] * coef('r_a[a1,x1]') +
(df['b'] == 'b0') * df['x2'] * coef('r_b[b0,x2]') +
(df['b'] == 'b1') * df['x2'] * coef('r_b[b1,x2]'))),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_mu_correctness(formula_str, cols, backend, expected):
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df).prior(num_samples=1, backend=backend)
# Pick out the one (and only) sample drawn.
actual_mu = fit.fitted(what='linear')[0]
# `expected` is assumed to return a data frame.
expected_mu = expected(df, fit.get_scalar_param).to_numpy(np.float32)
assert np.allclose(actual_mu, expected_mu)
@pytest.mark.parametrize('cols, family, expected', [
([],
Normal,
lambda mu: mu),
([Integral('y', min=0, max=1)],
Bernoulli,
lambda mu: sigmoid(mu)),
([Integral('y', min=0, max=5)],
Binomial(num_trials=5),
lambda mu: sigmoid(mu) * 5),
([Integral('y', min=0, max=5)],
Poisson,
lambda mu: np.exp(mu)),
])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
def test_expectation_correctness(cols, family, expected, backend):
formula_str = 'y ~ 1 + x'
df = dummy_df(expand_columns(parse(formula_str), cols), 10)
fit = brm(formula_str, df, family=family).prior(num_samples=1, backend=backend)
actual_expectation = fit.fitted(what='expectation')[0]
# We assume (since it's tested elsewhere) that `mu` is computed
# correctly by `fitted`. So given that, we check that `fitted`
# computes the correct expectation.
expected_expectation = expected(fit.fitted('linear')[0])
assert np.allclose(actual_expectation, expected_expectation)
@pytest.mark.parametrize('N', [0, 5])
@pytest.mark.parametrize('backend', [pyro_backend, numpyro_backend])
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_sampling_from_prior_smoke(N, backend, formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols) # Use full metadata for same reason given in comment in codegen test.
desc = makedesc(formula, metadata, family, priors, code_lengths(contrasts))
model = backend.gen(desc)
df = dummy_df(cols, N, allow_non_exhaustive=True)
data = data_from_numpy(backend, makedata(formula, df, metadata, contrasts))
samples = backend.prior(data, model, num_samples=10, seed=None)
assert type(samples) == Samples
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
@pytest.mark.parametrize('fitargs', [
dict(backend=pyro_backend, num_samples=1, algo='prior'),
dict(backend=numpyro_backend, num_samples=1, algo='prior'),
])
def test_parameter_shapes(formula_str, non_real_cols, contrasts, family, priors, expected, fitargs):
# Make dummy data.
N = 5
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
df = dummy_df(cols, N, allow_non_exhaustive=True)
# Define model, and generate a single posterior sample.
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts).gen(fitargs['backend'])
data = model.encode(df)
fit = model.run_algo('prior', data, num_samples=1, seed=None)
num_chains = fitargs.get('num_chains', 1)
# Check parameter sizes.
for parameter in parameters(fit.model_desc):
expected_param_shape = parameter.shape
samples = fit.get_param(parameter.name)
# A single sample is collected by each chain for all cases.
assert samples.shape == (num_chains,) + expected_param_shape
samples_with_chain_dim = fit.get_param(parameter.name, True)
assert samples_with_chain_dim.shape == (num_chains, 1) + expected_param_shape
def test_scalar_param_map_consistency():
formula = parse('y ~ 1 + x1 + (1 + x2 + b | a) + (1 + x1 | a:b)')
non_real_cols = [
Categorical('a', ['a1', 'a2', 'a3']),
Categorical('b', ['b1', 'b2', 'b3']),
]
cols = expand_columns(formula, non_real_cols)
desc = makedesc(formula, metadata_from_cols(cols), Normal, [], {})
params = parameters(desc)
spmap = scalar_parameter_map(desc)
# Check that each entry in the map points to a unique parameter
# position.
param_and_indices_set = set(param_and_indices
for (_, param_and_indices) in spmap)
assert len(param_and_indices_set) == len(spmap)
# Ensure that we have enough entries in the map to cover all of
# the scalar parameters. (The L_i parameters have a funny status.
# We consider them to be parameters, but not scalar parameters.
# This is not planned, rather things just evolved this way. It
# does makes some sense though, since we usually look at R_i
# instead.)
num_scalar_params = sum(np.product(shape)
for name, shape in params
if not name.startswith('L_'))
assert num_scalar_params == len(spmap)
# Check that all indices are valid. (i.e. Within the shape of the
# parameter.)
for scalar_param_name, (param_name, indices) in spmap:
ss = [shape for (name, shape) in params if name == param_name]
assert len(ss) == 1
param_shape = ss[0]
assert len(indices) == len(param_shape)
assert all(i < s for (i, s) in zip(indices, param_shape))
@pytest.mark.parametrize('formula_str, non_real_cols, contrasts, family, priors, expected', codegen_cases)
def test_scalar_parameter_names_smoke(formula_str, non_real_cols, contrasts, family, priors, expected):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
model = define_model(formula_str, metadata, family, priors, contrasts)
names = scalar_parameter_names(model.desc)
assert type(names) == list
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors', [
('y ~ x', [], Bernoulli, []),
('y ~ x', [Integral('y', min=0, max=2)], Bernoulli, []),
('y ~ x', [Categorical('y', list('abc'))], Bernoulli, []),
('y ~ x', [Categorical('y', list('ab'))], Normal, []),
('y ~ x', [Integral('y', min=0, max=1)], Normal, []),
('y ~ x', [], Binomial(num_trials=1), []),
('y ~ x', [Integral('y', min=-1, max=1)], Binomial(num_trials=1), []),
('y ~ x',
[Integral('y', min=0, max=3)],
Binomial(num_trials=2),
[]),
('y ~ x', [Categorical('y', list('abc'))], Binomial(num_trials=1), []),
('y ~ x', [], Poisson, []),
])
def test_family_and_response_type_checks(formula_str, non_real_cols, family, priors):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
with pytest.raises(Exception, match='not compatible'):
build_model_pre(formula, metadata, family, {})
@pytest.mark.parametrize('formula_str, non_real_cols, family, priors, expected_error', [
('y ~ x',
[],
Normal,
[Prior(('resp', 'sigma'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('sd', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ 1 + x1 | x2',
[Categorical('x2', list('ab'))],
Normal,
[Prior(('cor', 'x2'), Normal(0., 1.))],
r'(?i)invalid prior'),
('y ~ x',
[],
Normal,
[Prior(('b',), Bernoulli(.5))],
r'(?i)invalid prior'),
# This hasn't passed since I moved the family/response checks in
# to the pre-model. The problem is that the support of the
# Binomial response depends on its parameters which aren't fully
# specified in this case, meaning that the family/reponse check
# can't happen, and the prior test that ought to flag that a prior
# is missing is never reached. It's not clear that a "prior
# missing" error is the most helpful error to raise for this case,
# and it's possible that having the family/response test suggest
# that extra parameters ought to be specified is a better idea.
# It's tricky to say though, since this case is a bit of a one
# off, so figuring out a good general solution is tricky. Since
# it's not clear how best to proceed, so I'll punt for now.
pytest.param(
'y ~ x',
[Integral('y', 0, 1)],
Binomial,
[],
r'(?i)prior missing', marks=pytest.mark.xfail),
])
def test_prior_checks(formula_str, non_real_cols, family, priors, expected_error):
formula = parse(formula_str)
cols = expand_columns(formula, non_real_cols)
metadata = metadata_from_cols(cols)
design_metadata = build_model_pre(formula, metadata, family, {})
with pytest.raises(Exception, match=expected_error):
build_prior_tree(design_metadata, priors)
@pytest.mark.parametrize('formula_str, df, metadata_cols, contrasts, expected', [
# (Formula('y', [], []),
# pd.DataFrame(dict(y=[1, 2, 3])),
# dict(X=torch.tensor([[],
# [],
# []]),
# y_obs=torch.tensor([1., 2., 3.]))),
('y ~ 1',
pd.DataFrame(dict(y=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[4.],
[5.],
[6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x + 1',
pd.DataFrame(dict(y=[1., 2., 3.],
x=[4., 5., 6.])),
None,
{},
dict(X=np.array([[1., 4.],
[1., 5.],
[1., 6.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('AAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[1., 0.],
[1., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[0., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + x',
pd.DataFrame(dict(y=[1., 2., 3.],
x=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1., 0., 0.],
[1., 1., 0.],
[1., 0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
# (Formula('y', [], [Group([], 'x', True)]),
# pd.DataFrame(dict(y=[1, 2, 3],
# x=pd.Categorical(list('ABC')))),
# dict(X=np.array([[],
# [],
# []]),
# y_obs=np.array([1., 2., 3.]),
# J_1=np.array([0, 1, 2]),
# Z_1=np.array([[],
# [],
# []]))),
('y ~ 1 + (1 + x1 | x2)',
pd.DataFrame(dict(y=[1., 2., 3.],
x1=pd.Categorical(list('AAB')),
x2=pd.Categorical(list('ABC')))),
None,
{},
dict(X=np.array([[1.],
[1.],
[1.]]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([0, 1, 2]),
Z_0=np.array([[1., 0.],
[1., 0.],
[1., 1.]]))),
# The matches brms modulo 0 vs. 1 based indexing.
('y ~ 1 | a:b:c',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical([0, 0, 1]),
b=pd.Categorical([2, 1, 0]),
c=pd.Categorical([0, 1, 2]))),
None,
{},
dict(X=np.array([[], [], []]),
y_obs=np.array([1., 2., 3.]),
J_0=np.array([1, 0, 2]),
Z_0=np.array([[1.], [1.], [1.]]))),
# Interactions
# --------------------------------------------------
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# AC BC AD BD
dict(X=np.array([[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[0., 0., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 D BC BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 0., 0.],
[1., 1., 0., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
('y ~ 1 + x1 + x2 + x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=pd.Categorical(list('ABAB')),
x2=pd.Categorical(list('CCDD')))),
None,
{},
# 1 B D BD
dict(X=np.array([[1., 0., 0., 0.],
[1., 1., 0., 0.],
[1., 0., 1., 0.],
[1., 1., 1., 1.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-real
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10., 0., 10., 20.]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-int
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 1., 2.]),
x2=np.array([-10, 0, 10, 20]))),
None,
{},
dict(X=np.array([[-10.],
[0.],
[10.],
[40.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# real-categorical
('y ~ x1:x2',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
x1=np.array([1., 2., 3., 4.]),
x2=pd.Categorical(list('ABAB')))),
None,
{},
dict(X=np.array([[1., 0.],
[0., 2.],
[3., 0.],
[0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# This example is taken from here:
# https://patsy.readthedocs.io/en/latest/R-comparison.html
('y ~ a:x + a:b',
pd.DataFrame(dict(y=[1., 2., 3., 4.],
a=pd.Categorical(list('ABAB')),
b=pd.Categorical(list('CCDD')),
x=np.array([1., 2., 3., 4.]))),
None,
{},
dict(X=np.array([[1., 0., 0., 0., 1., 0.],
[0., 1., 0., 0., 0., 2.],
[0., 0., 1., 0., 3., 0.],
[0., 0., 0., 1., 0., 4.]]),
y_obs=np.array([1., 2., 3., 4.]))),
# Integer-valued Factors
# --------------------------------------------------
('y ~ x1 + x2',
pd.DataFrame(dict(y=[1, 2, 3],
x1=[4, 5, 6],
x2=[7., 8., 9.])),
None,
{},
dict(X=np.array([[4., 7.],
[5., 8.],
[6., 9.]]),
y_obs=np.array([1., 2., 3.]))),
# Categorical Response
# --------------------------------------------------
('y ~ x',
pd.DataFrame(dict(y=pd.Categorical(list('AAB')),
x=[1., 2., 3.])),
None,
{},
dict(X=np.array([[1.],
[2.],
[3.]]),
y_obs=np.array([0., 0., 1.]))),
# Contrasts
# --------------------------------------------------
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
[RealValued('y'), Categorical('a', levels=['a0', 'a1', 'a2'])],
{'a': np.array([[0], [-1], [1]])},
dict(X=np.array([[-1.],
[-1.],
[1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ a',
pd.DataFrame(dict(y=[1., 2., 3.],
a=pd.Categorical(['a1', 'a1', 'a2']))),
None,
{'a': np.array([[-1, -2], [0, 1]])},
dict(X=np.array([[-1., -2.],
[-1., -2.],
[0., 1.]]),
y_obs=np.array([1., 2., 3.]))),
('y ~ 1 + a + b + a:b',
pd.DataFrame(dict(y=[1., 2., 3.],
a=
|
pd.Categorical(['a1', 'a1', 'a2'])
|
pandas.Categorical
|
import numpy as np
import pandas as pd
import pytest
from pandas_profiling import ProfileReport
def test_load(get_data_file, test_output_dir):
file_name = get_data_file(
"meteorites.csv",
"https://data.nasa.gov/api/views/gh4g-9sfh/rows.csv?accessType=DOWNLOAD",
)
# For reproducibility
np.random.seed(7331)
df = pd.read_csv(file_name)
# Note: Pandas does not support dates before 1880, so we ignore these for this analysis
df["year"] =
|
pd.to_datetime(df["year"], errors="coerce")
|
pandas.to_datetime
|
from neuralnetwork.FeedForward import FeedForward
from neuralnetwork.Sigmoid import Sigmoid
from neuralnetwork.Backpropagation import Backpropagation
import pandas as pd
import numpy as np
from numpy import argmax
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from datetime import datetime
mit_test_data =
|
pd.read_csv('mitbih_test.csv', header=None)
|
pandas.read_csv
|
"""Test class Scorer."""
import numpy as np
from operator import add
import pandas as pd
from orphanet_translation import scorer
def test_end_to_end():
"""Test end to end for Scorer."""
columns = ['labelEn', 'altEn', 'goldLabelEn', 'goldAltEn']
values = [['test', 'test1|test2', 'test', 'test2|test1'],
['disease', 'disease', 'disease', 'flu']]
input_df = pd.DataFrame(values, columns=columns)
scorer_tool = scorer.Scorer(['jaccard', 'jaro'])
output_df = scorer_tool.score(input_df)
new_columns = ['scoreJaccardEnLabel', 'scoreJaccardEnBest_label',
'scoreJaccardEnMean_best_label',
'scoreJaccardEnMax_best_label', 'scoreJaroEnLabel',
'scoreJaroEnBest_label', 'scoreJaroEnMean_best_label',
'scoreJaroEnMax_best_label']
new_values = [[1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 0.5, 1, 1, 1, 0.5, 1]]
new_values = list(map(add, values, new_values))
expected_df =
|
pd.DataFrame(new_values, columns=columns+new_columns)
|
pandas.DataFrame
|
import argparse
import pandas as pd
import numpy as np
import sys
p = str(Path(__file__).resolve().parents[2]) # directory two levels up from this file
sys.path.append(p)
from realism.realism_utils import make_orderbook_for_analysis
def create_orderbooks(exchange_path, ob_path):
MID_PRICE_CUTOFF = 10000
processed_orderbook = make_orderbook_for_analysis(exchange_path, ob_path, num_levels=1,
hide_liquidity_collapse=False)
cleaned_orderbook = processed_orderbook[(processed_orderbook['MID_PRICE'] > - MID_PRICE_CUTOFF) &
(processed_orderbook['MID_PRICE'] < MID_PRICE_CUTOFF)]
transacted_orders = cleaned_orderbook.loc[cleaned_orderbook.TYPE == "ORDER_EXECUTED"]
transacted_orders = transacted_orders.reset_index()
transacted_orders = transacted_orders.sort_values(by=['index', 'ORDER_ID']).iloc[1::2]
transacted_orders.set_index('index', inplace=True)
return processed_orderbook, transacted_orders, cleaned_orderbook
def calculate_market_impact(orders_df, ob_df, start_time, end_time, tao):
def create_bins(tao, start_time, end_time, orders_df, is_buy):
bins = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=tao))
binned = pd.cut(orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].index, bins=bins)
binned_volume = orders_df.loc[orders_df.BUY_SELL_FLAG == is_buy].groupby(binned).SIZE.agg(np.sum)
return binned_volume
def calculate_mid_move(row):
try:
t_start = row.name.left
t_end = row.name.right
mid_t_start = mid_resampled.loc[mid_resampled.index == t_start].item()
mid_t_end = mid_resampled.loc[mid_resampled.index == t_end].item()
if row.ti < 0:
row.mi = -1 * ((mid_t_end - mid_t_start) / mid_t_start) * 10000 # bps
else:
row.mi = (mid_t_end - mid_t_start) / mid_t_start * 10000 # bps
return row.mi
except:
pass
ob_df = ob_df.reset_index().drop_duplicates(subset='index', keep='last').set_index('index')
mid = ob_df.MID_PRICE
mid_resampled = mid.resample(f'{tao}s').ffill()
binned_buy_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=True).fillna(0)
binned_sell_volume = create_bins(tao=int(tao), start_time=start_time, end_time=end_time, orders_df=orders_df,
is_buy=False).fillna(0)
midf = pd.DataFrame()
midf['buy_vol'] = binned_buy_volume
midf['sell_vol'] = binned_sell_volume
midf['ti'] = midf['buy_vol'] - midf['sell_vol'] # Trade Imbalance
midf['pov'] = abs(midf['ti']) / (midf['buy_vol'] + midf['sell_vol']) # Participation of Volume in tao
midf['mi'] = None
midf.index = pd.interval_range(start=start_time, end=end_time, freq=pd.DateOffset(seconds=int(tao)))
midf.mi = midf.apply(calculate_mid_move, axis=1)
pov_bins = np.linspace(start=0, stop=1, num=1000, endpoint=False)
pov_binned = pd.cut(x=midf['pov'], bins=pov_bins)
midf['pov_bins'] = pov_binned
midf_gpd = midf.sort_values(by='pov_bins')
midf_gpd.index = midf_gpd.pov_bins
del midf_gpd['pov_bins']
df = pd.DataFrame(index=midf_gpd.index)
df['mi'] = midf_gpd['mi']
df['pov'] = midf_gpd['pov']
df = df.groupby(df.index).mean()
return df
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Market Impact Curve as described in AlmgrenChriss 05 paper')
parser.add_argument('--stock', default=None, required=True, help='stock (ABM)')
parser.add_argument('--date', default=None, required=True, help='date (20200101)')
parser.add_argument('--log', type=str, default=None, required=True, help='log folder')
parser.add_argument('--tao', type=int, required=True, help='Number of seconds in each bin')
args, remaining_args = parser.parse_known_args()
stock = args.stock
date = args.date
start_time =
|
pd.Timestamp(date)
|
pandas.Timestamp
|
# Classification
# SVM
# -*- coding: utf-8 -*-
### 기본 라이브러리 불러오기
import pandas as pd
import seaborn as sns
'''
[Step 1] 데이터 준비/ 기본 설정
'''
# load_dataset 함수를 사용하여 데이터프레임으로 변환
df = sns.load_dataset('titanic')
# IPython 디스플레이 설정 - 출력할 열의 개수 한도 늘리기
pd.set_option('display.max_columns', 15)
'''
[Step 2] 데이터 탐색/ 전처리
'''
# NaN값이 많은 deck 열을 삭제, embarked와 내용이 겹치는 embark_town 열을 삭제
rdf = df.drop(['deck', 'embark_town'], axis=1)
# age 열에 나이 데이터가 없는 모든 행을 삭제 - age 열(891개 중 177개의 NaN 값)
rdf = rdf.dropna(subset=['age'], how='any', axis=0)
# embarked 열의 NaN값을 승선도시 중에서 가장 많이 출현한 값으로 치환하기
most_freq = rdf['embarked'].value_counts(dropna=True).idxmax()
rdf['embarked'].fillna(most_freq, inplace=True)
'''
[Step 3] 분석에 사용할 속성을 선택
'''
# 분석에 활용할 열(속성)을 선택
ndf = rdf[['survived', 'pclass', 'sex', 'age', 'sibsp', 'parch', 'embarked']]
# 원핫인코딩 - 범주형 데이터를 모형이 인식할 수 있도록 숫자형으로 변환
onehot_sex =
|
pd.get_dummies(ndf['sex'])
|
pandas.get_dummies
|
#%%
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#%% Measurementes
ms_path = 'examples/bs2019/measurements.csv'
ms = pd.read_csv(ms_path, index_col=0)
ms.index = pd.to_datetime(ms.index)
t0 = pd.to_datetime('2018-04-05 00:00:00')
t1 = pd.to_datetime('2018-04-08 00:00:00')
ms = ms.loc[t0:t1]
solrad = ms['solrad'].values
Tout = ms['Tout'].values
occ = ms['occ'].values
t = (ms.index - ms.index[0]).total_seconds() / 3600.
fig, ax = plt.subplots(3, 1, figsize=(5, 3), sharex=True)
fig.set_dpi(120)
ax[0].plot(t, Tout, 'b-')
ax[0].set_ylabel("$T_{out}$ [$^\circ$C]")
ax[1].plot(t, solrad, 'b-')
ax[1].set_ylabel("$q_{sol}$ [W/m$^2$]")
ax[2].plot(t, occ, 'b-')
ax[2].set_ylabel("$n_{occ}$ [-]")
ax[2].set_xticks(np.arange(0, 73, 24))
ax[2].set_xlabel("$t$ [h]")
plt.subplots_adjust(0.13, 0.15, 0.98, 0.98)
fig.savefig('examples/bs2019/figs/inputs_mpc.pdf')
### Case 1 ####################################################################
#%% Compare estimates between FMUs
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
cols = pd.read_csv(est_dir + 'r1c1_dymola_' + tols[0] +
'/parameters_rel.csv').columns
parameters = pd.DataFrame(index=pd.Index(tols, name='tol'),
columns=cols)
for t in tols:
for p in cols:
parameters.loc[t, p] = pd.read_csv(est_dir + 'r1c1_dymola_'
+ t + '/parameters_rel.csv')[p].iloc[0]
parameters.T.plot(kind='bar')
#%% Parameter estimation: validation
est_dir = 'examples/bs2019/case1/results/est/'
tols = [
'1e-4',
'1e-6',
'1e-7',
'1e-9',
'1e-11'
]
idl = pd.read_csv(est_dir + 'ideal.csv', index_col=0)
idl
vld = pd.DataFrame()
for t in tols:
res = pd.read_csv(est_dir + 'r1c1_dymola_' + t +
'/vld_res.csv', index_col=0)
vld[t] = res['T']
idl = idl.loc[:vld.index[-1]]
idl.index /= 3600.
vld.index /= 3600.
# Plot
fig, ax = plt.subplots(1, 1, figsize=(5, 3))
fig.set_dpi(130)
ax.plot(idl['T'], ls='-', label='Measurement')
ax.plot(vld['1e-11'], ls='-.', label='R1C1')
ax.legend(loc='lower right')
ax.set_xticks(np.arange(0, vld.index[-1] + 1, 24))
ax.set_ylabel('$T$ [$^\circ$C]')
ax.set_xlabel('$t$ [h]')
ax.vlines(5*24., ymin=19.5, ymax=26.9, linestyles='--', lw=0.75, color='k')
ax.set_ylim(19.5, 26.9)
ax.text(80, 26.4, "Training")
ax.text(128, 26.4, "Validation")
plt.subplots_adjust(0.1, 0.18, 0.99, 0.99)
fig.savefig('examples/bs2019/figs/validation_T.pdf')
#%% Result overview
fmu = 'r1c1_dymola_1e-11'
hrz = 4
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
# Optimized inputs
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(130)
# ax[0]
ax[0].plot(u['vpos'], 'k-', lw=2)
ax[0].set_ylim(-100, 100)
ax[0].set_ylabel('$q$ [%]')
# ax[1]
# ax[1].plot(xctr['x0'], label='Control')
ax[1].plot(xemu['cair.T'], 'r-', label=fmu)
ax[1].legend(loc='upper right')
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T_i$ [$^\circ$C]')
# ax[0] - subinterval solutions
files = os.listdir(outdir)
ufiles = list()
for f in files:
fname = f.split('.')[0]
if fname[0] == 'u' and len(fname) > 1:
ufiles.append(f)
udfs = list()
for i in range(len(ufiles)):
df = pd.read_csv(outdir + 'u{}.csv'.format(i), index_col=0)
df.index /= 3600.
ax[0].plot(df['vpos'], ls='--', lw=1.)
# plt.show()
#%% Compare horizons
fmu = 'r1c1_dymola_1e-11'
horizons = [2, 4, 6, 8, 10]
fig, ax = plt.subplots(2, 1, sharex=True, sharey=False,
figsize=(5, 4))
fig.set_dpi(120)
Qrc = dict()
i = 0
for hrz in horizons:
outdir = 'examples/bs2019/case1/results/mpc/{}/h{}/'.format(fmu, hrz)
# Constraints
constr = pd.read_csv(outdir + 'constr.csv', index_col=0)
constr.index /= 3600.
constr -= 273.15
# Emulation states
xemu = pd.read_csv(outdir + '/xemu.csv')\
.set_index('time')
xemu.index /= 3600.
xemu -= 273.15
# Control states
xctr = pd.read_csv(outdir + 'xctr.csv')\
.set_index('time')
xctr.index /= 3600.
xctr -= 273.15
# Control inputs
u = pd.read_csv(outdir + 'u.csv')\
.set_index('time')
u.index /= 3600.
u['vpos'] *= 20 # [%] -> [W]
Qrc[hrz] = u['vpos'].abs().sum() / 1000.
# Actual horizon string, e.g. "6h"
ahrz = "{}h".format(hrz)
# Color map
lspace = np.linspace(0, 1, len(horizons))
colors = [plt.cm.winter(x) for x in lspace]
# ax[0]
ax[0].plot(u['vpos'], c=colors[i], label=ahrz)
# ax[1]
ax[1].plot(xemu['cair.T'], c=colors[i], label=ahrz)
i += 1
ax[1].legend(loc='center', bbox_to_anchor=(0.5,-0.5), ncol=5)
ax[1].plot(constr['Tmin'], 'k--', lw=0.5)
ax[1].plot(constr['Tmax'], 'k--', lw=0.5)
ax[0].set_ylim(-2200, 2200)
ax[1].set_xticks(np.arange(0, u.index.values[-1] + 1, 24))
plt.minorticks_off()
ax[1].set_yticks(np.arange(19, 25, 1))
ax[0].set_ylabel('$q$ [W]')
ax[1].set_xlabel('$t$ [h]')
ax[1].set_ylabel('$T$ [$^\circ$C]')
ax[0].set_title('(a)')
ax[1].set_title('(b)')
plt.subplots_adjust(left=0.16, right=0.99, top=0.93, bottom=0.24)
fig.tight_layout()
fig.savefig('examples/bs2019/figs/case1_horizon_tol_1e-11.pdf')
#%% Computational time
# FMU 1e-11
wd1 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-11/'
# FMU 1e-9
wd2 = 'examples/bs2019/case1/results/mpc/r1c1_dymola_1e-9/'
# SVM
wd3 = 'examples/bs2019/case2/results/mpc-lin/'
hdirs1 = [x[0].split('/')[-1] for x in os.walk(wd1)][1:]
hdirs2 = [x[0].split('/')[-1] for x in os.walk(wd2)][1:]
hdirs3 = [x[0].split('/')[-1] for x in os.walk(wd3)][1:]
hix = [int(x[1:]) for x in hdirs1]
hix = sorted(hix)
ct1 = list()
ct2 = list()
ct3 = list()
# Number of optimization variables
nv = [x * 2 for x in hix]
# Optimization horizon [h]
oh = [x for x in hix]
for h in hix:
with open(wd1 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct1.append(x / 60.)
with open(wd2 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct2.append(x / 60.)
with open(wd3 + "h" + str(h) + '/cputime.txt') as f:
s = f.read().split(' ')
x = int(s[-2])
ct3.append(x / 60.)
fig, ax = plt.subplots(1, 1, figsize=(5,3))
fig.set_dpi(120)
plt.plot(oh, ct1, marker='s', c='k', ls=':', lw=1., label='R1C1 FMU (tol=1e-11)')
plt.plot(oh, ct2, marker='o', c='b', ls=':', lw=1., label='R1C1 FMU (tol=1e-9)')
plt.plot(oh, ct3, marker='v', c='r', ls=':', lw=1., label='SVR')
ax.set_xlabel('Optimization horizon [h]')
ax.set_ylabel('Total CPU time [min]')
ax2 = ax.twiny()
ax2.set_xticks(ax.get_xticks())
ax2.set_xlim(ax.get_xlim())
ax2.set_xticklabels([int(x * 2) for x in ax.get_xticks()])
ax2.set_xlabel('Number of optimization variables')
ax.legend()
ax.grid()
plt.subplots_adjust(0.1, 0.18, 0.99, 0.85)
fig.savefig('examples/bs2019/figs/cputime.pdf')
plt.show()
#%% Solution quality - omit CVode FMUs, they seem not working correctly
# Read all inputs and states
wd = 'examples/bs2019/case1/results/mpc/'
fmus = os.listdir(wd)
hz = '/h10/'
new_names = [y[5:].replace('_', ' ') for y in fmus]
for i in range(len(new_names)):
new_names[i] = new_names[i].replace('dymola ', 'tol=')
cdirs = [wd + x + hz for x in fmus]
cmap = {x:y for x, y in zip(cdirs, new_names)}
uall = pd.DataFrame()
xall = pd.DataFrame()
for c, f in zip(cdirs, fmus):
u = pd.read_csv(c + 'u.csv', index_col=0)
x = pd.read_csv(c + 'xemu.csv', index_col=0)
uall[c] = u['vpos']
xall[c] = x['cair.T']
uall = uall.rename(columns=cmap) # Inputs
xall = xall.rename(columns=cmap) # States
# Energy consumption
q = uall * 20.
Q = q.abs().sum() / 1000. # [kWh]
# Constraint violation
cstr = pd.read_csv(wd + 'r1c1_dymola_1e-9/h2/constr.csv')
cstr['time'] = cstr['time'].astype(int)
cstr = cstr.set_index('time')
vup = xall.copy()
vlo = xall.copy()
for c in xall:
vup[c] = xall[c] - cstr['Tmax']
vup[c].loc[vup[c] < 0] = 0
vlo[c] = cstr['Tmin'] - xall[c]
vlo[c].loc[vlo[c] < 0] = 0
vtot = vup + vlo
vtot = vtot.sum()
# Case order for plots
cord = ['tol=1e-4', 'tol=1e-6', 'tol=1e-7', 'tol=1e-9', 'tol=1e-11']
# Ordered results
Qord = [Q.loc[x] for x in cord]
vord = [vtot.loc[x] for x in cord]
# Show both on scatter plot
n_horizons = 5
lspace = np.linspace(0, 1, n_horizons)
colors = [plt.cm.jet(x) for x in lspace]
markers = ['o', 's', 'D', 'v', '^']
fig, ax = plt.subplots(figsize=(5, 3))
fig.set_dpi(120)
for q, v, l, c, m in zip(Qord, vord, cord, colors, markers):
plt.scatter(q, v, label=l, c=c, s=100, marker=m)
ax.set_xlabel('Total energy consumption $Q$ [kWh]')
ax.set_ylabel('Temperature violation $v_T$ [Kh]')
ax.legend(loc='center', ncol=3, bbox_to_anchor=(0.45,-0.4))
ax.grid()
plt.subplots_adjust(0.18, 0.35, 0.97, 0.95)
fig.savefig('examples/bs2019/figs/solution_quality.pdf')
# Case 2 ######################################################################
#%% Model validation
svr_x = pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xctr.csv', index_col=0)
svr_x = svr_x.rename(columns={'cair.T':'T'})
svr_x.index /= 3600.
svr_x['T'] -= 273.15
rc_x =
|
pd.read_csv('examples/bs2019/case2/results/mpc-lin/vld_xemu.csv', index_col=0)
|
pandas.read_csv
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable-msg=import-error
"""
run_preprocess.py is writen for preprocessing tweets
"""
__author__ = "<NAME>"
__project__ = "Persian Emoji Prediction"
__credits__ = ["<NAME>"]
__license__ = "Public Domain"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
__date__ = "10/17/2020"
import time
import random
from itertools import chain
import hazm
import pandas as pd
from emoji_prediction.pre_process.normalizer import Normalizer
from emoji_prediction.tools.log_helper import process_time
from emoji_prediction.config.bilstm_config import RAW_NO_MENTION_DATA_PATH,\
TRAIN_NORMAL_NO_MENTION_DATA_PATH, TEST_NORMAL_NO_MENTION_DATA_PATH,\
RAW_DATA_PATH, TRAIN_NORMAL_DATA_PATH, TEST_NORMAL_DATA_PATH
class Cleaning:
"""
Cleaning class use for normalizing tweets
"""
def __init__(self, input_path, train_output_path, test_output_path):
self.normalizer = Normalizer()
self.input_path = input_path
self.train_output_path = train_output_path
self.test_output_path = test_output_path
def read_tweets(self):
"""
read_tweets method is writen for read tweets from .csv file
and tell some information from raw data
:return:
raw_tweets: all unique tweets
raw_labels: the emojis of tweets
"""
print("Start Reading tweets")
# load csv file
data_frame = pd.read_csv(self.input_path)
# drop duplicates tweets
data_frame = data_frame.drop_duplicates(subset=["tweets"], keep="first")
print(f"We have {len(data_frame)} tweets.")
print(f"We have {len(set(data_frame.labels))} unique emoji type.")
raw_tweets = data_frame.tweets
raw_labels = data_frame.labels
return raw_tweets, raw_labels
def normalizing(self):
"""
normalizing method is writen for normalizing tweets
:return:
normal_tweets: normal tweets
emojis: emojis of normal tweets
"""
# load tweets and emojis
raw_tweets, raw_emojis = self.read_tweets()
print("Start normalizing tweets ...")
start_time = time.time()
# normalizing tweets
normal_tweets = [self.normalizer.normalizer_text(tweet) for tweet in raw_tweets]
end_time = time.time()
# calculate normalizing time
elapsed_mins, elapsed_secs = process_time(start_time, end_time)
print(f"{elapsed_mins} min and {elapsed_secs} sec for normalizing tweets.")
print("End normalizing tweets")
return normal_tweets, raw_emojis
def test_split(self, normal_tweets, normal_emojis):
"""
test_split method is written for split data into train and test set
:param normal_tweets: list of all tweets
:param normal_emojis: list of all emojis
"""
# shuffle tweets
tweets_list = list(zip(normal_tweets, normal_emojis))
random.shuffle(tweets_list)
random.shuffle(tweets_list)
normal_tweets, normal_emojis = zip(*tweets_list)
test_tweet_list = [] # list for test tweets
test_emoji_list = [] # list for test emojis
train_tweet_list = [] # list for train tweets
train_emoji_list = [] # list for train emojis
# split test tweets
start_time = time.time()
for tweet, emoji in zip(normal_tweets, normal_emojis):
# filter tweets that have no character
if tweet != "":
if test_emoji_list.count(emoji) < 2000:
test_tweet_list.append(tweet)
test_emoji_list.append(emoji)
else:
train_tweet_list.append(tweet)
train_emoji_list.append(emoji)
end_time = time.time()
# calculate test split time
elapsed_mins, elapsed_secs = process_time(start_time, end_time)
print(f"{elapsed_mins} min and {elapsed_secs} sec for test split tweets.")
# save data
self.save_normal_tweets(train_tweet_list, train_emoji_list,
output_path=self.train_output_path)
self.save_normal_tweets(test_tweet_list, test_emoji_list,
output_path=self.test_output_path)
@staticmethod
def save_normal_tweets(normal_tweets, normal_emojis, output_path):
"""
save_normal_tweets method is writen for save normalized tweets
:param normal_tweets: all normalized tweets
:param normal_emojis: all emojis
:param output_path: output path for save data
"""
# create dataFrame
data_frame = pd.DataFrame({"tweets": normal_tweets, "emojis": normal_emojis})
# save dataFrame
data_frame.to_csv(output_path, index=False)
print("Tweets saved.")
@staticmethod
def count_chars(list_of_tweets):
"""
count_chars method is writen for count tweets characters
:param list_of_tweets: list that contain all tweets
"""
# get unique characters from tweets
chars = list(sorted(set(chain(*list_of_tweets))))
print(f"We have {len(chars)} character")
print(chars)
class AddPos:
"""
In this class we add pos to our dataset
"""
def __init__(self, train_input_path, test_input_path):
self.pos_tag = hazm.POSTagger(model="../data/Hazm_resources/resources-0.5/postagger.model")
self.train_input_path = train_input_path
self.test_input_path = test_input_path
@staticmethod
def read_input_file(input_path):
"""
read_input_file method is written for read input dataFrame
:param input_path: address of input dataFrame
:return:
data_frame: input dataFrame
"""
# read dataFrame
data_frame =
|
pd.read_csv(input_path)
|
pandas.read_csv
|
#!/l_mnt/python/envs/teaching/bin/python3
import gc
import sys
sys.path.append('/Geometry/')
sys.path.append('')
import pandas as pd
import numpy as np
from matplotlib import cm
from scipy.stats import gaussian_kde
import matplotlib.pyplot as plt
import seaborn as sns
import io
import base64
import math
import Geometry.GeoReport as geor
kde = 0.1
class GeoPlot:
def __init__(self,data,geoX,geoY='',title='',hue='bfactor',splitKey='',palette='viridis_r',
centre=False,vmin=0,vmax=0,operation='',newData=False,plot='scatter',categorical=False,
restrictions={},exclusions={},report=None,count=False,sort='ASC',gridsize=50,bins=100,Contour=True,YellowDots=np.array([])):
self.parent=report
self.plot = plot
self.data = data
self.geoX = geoX
self.geoY = geoY
self.title=title
self.hue = hue
self.splitKey=splitKey
self.gridsize=gridsize
self.bins = bins
self.palette=palette
self.centre = centre
self.vmin=vmin
self.vmax=vmax
self.operation=operation
self.newData = newData
self.categorical = categorical
self.numpy = []
self.logged = False
self.hasMatrix = False
self.axX = 0,0
self.axY = 0, 0
self.differ=0
self.sort = sort
self.restrictions=restrictions
self.exclusions = exclusions
self.Contour=Contour
self.range = []
self.YellowDots = YellowDots
if self.geoY == '' and plot not in 'surfaces' and plot != 'compare' and plot != 'csv' and plot!= 'comment':
self.plot = 'histogram'
self.count=count # only for histograms, probability or count
#if self.hue=='bfactor':gp.gridsize = 50
# self.hue = 'pdbCode'
if self.hue in 'aa,dssp,element,pdbCode':
self.categorical=True
self.data2 = None
#def getPlot(self,fig, ax):
# if self.plot == 'histogram':
# return self.plotHistogram(True,fig, ax)
# elif self.plot == 'scatter':
# return self.plotScatter(True,fig, ax)
# elif self.plot == 'probability':
# return self.plotProbability(True,fig, ax)
def plotToAxes(self,fig, ax):
if self.plot == 'csv':
return self.plotCsv()
elif self.plot == 'histogram':
return self.plotHistogram(fig, ax)
elif self.plot == 'scatter' or self.plot=='contact':
return self.plotScatter(fig, ax)
elif self.plot == 'hexbin':
return self.plotHexbin(fig, ax)
elif self.plot == 'probability':
#try:
return self.plotProbability(fig, ax)
#except:
# return 'Error in probability'
elif self.plot == 'surface':
return self.plotSurface(fig, ax)
elif self.plot == 'surfaces':
return self.plotSurfaces(fig, ax)
elif self.plot == 'compare':
return self.plotCompare()
elif self.plot == 'summary':
return self.plotSummary()
def plotNoAxes(self):
if self.plot == 'csv':
return self.plotCsv()
elif self.plot == 'compare':
return self.plotCompare()
elif self.plot == 'summary':
return self.plotSummary()
elif self.plot == 'comment':
#print('comment=',self.title)
return self.title
def plotSurface(self, fig, ax):
afa = 1
lw = 0.7
if self.logged:
afa = 1
self.plotOneSurface(fig,ax,self.surface,afa,self.centre,self.palette,self.logged,lw)
return ''
def plotOneSurface(self, fig, ax,surface,afa,zero,palette,logged,lw):
col='darkgrey'
lvls=20
if logged:
col='SlateGray'
x,y = surface.shape
mind = 1000
for i in range(0, x):
for j in range(0, y):
mind = min(mind, surface[i,j])
for i in range(0, x):
for j in range(0, y):
val = (surface[i,j]-mind)+1
surface[i,j] = math.log(val)
if zero:
x, y = surface.shape
mind = 1000
maxd = -1000
for i in range(0, x):
for j in range(0, y):
mind = min(mind, surface[i, j])
maxd = max(maxd, surface[i, j])
maxs = max(maxd,abs(mind))
mins=-1*maxs
image = plt.imshow(surface, cmap=palette, interpolation='nearest', origin='lower', aspect='equal',vmin=mins,vmax=maxs,alpha=afa)
else:
image = plt.imshow(surface, cmap=palette, interpolation='nearest', origin='lower', aspect='equal',alpha=afa)
if self.Contour:
afa=0.55
lw=0.3
image = plt.contour(surface, colors=col, alpha=afa, linewidths=lw, levels=lvls)
if self.YellowDots != np.array([]):
#my_cmap = plt.cm.get_cmap('plasma')
#my_cmap.set_under(('w'))
#print(my_cmap)
#https://matplotlib.org/3.1.0/tutorials/colors/colormap-manipulation.html
my_own_cmap = np.zeros((2,4))
my_own_cmap[1,0] = 1
my_own_cmap[1, 1] = 1
my_own_cmap[1, 2] = 0
my_own_cmap[1, 3] = 1
from matplotlib.colors import ListedColormap
newcmp = ListedColormap(my_own_cmap)
#print("We have yellow dots!")
image = plt.imshow(self.YellowDots, cmap=newcmp, interpolation='nearest', origin='lower', aspect='equal', alpha=1)
#ax.grid(False)
#cbar = fig.colorbar(image, ax=ax)
plt.axis('off')
plt.title(self.title)
return ''
def plotSurfaces(self, fig, ax):
afa = 0.95
lw = 0.2
for surface,palette,centre,logged in self.surface:
self.plotOneSurface(fig,ax,surface,afa,centre,palette,logged,lw)
afa = 0.55
lw = 0.2
return ''
def plotHistogram(self,fig, ax):
data = self.data.sort_values(by=self.geoX, ascending=True)
#data = self.data
title = self.title
#Create outlier tag
outliers = data.iloc[[0, -1]]
#print(outliers)
try:
pdbsA = outliers['pdbCode'].values
chainsA = outliers['chain'].values
ridsA = outliers['rid'].values
geoA = outliers[self.geoX].values
outMin = 0
outMax = 0
if len(pdbsA) > 1:
if type(geoA[0]) == float:
outMin = pdbsA[0] + ' ' + chainsA[0] + str(ridsA[0]) + ' ' + str(round(geoA[0], 3))
outMax = pdbsA[1] + ' ' + chainsA[1] + str(ridsA[1]) + ' ' + str(round(geoA[1], 3))
else:
outMin = pdbsA[0] + ' ' + chainsA[0] + str(ridsA[0]) + ' ' + str(geoA[0])[:6]
outMax = pdbsA[1] + ' ' + chainsA[1] + str(ridsA[1]) + ' ' + str(geoA[1])[:6]
except:
outMin = ''
outMax = ''
if self.operation == 'ABS':
data = data[data[self.geoX] == abs(data[self.geoX])]
elif self.operation == 'SQUARE':
data = data[data[self.geoX] == data[self.geoX] ** 2]
if self.hue != 'DEFAULT':
firstVal = data.head(1)[self.geoX].values[0]
lastVal = data.tail(1)[self.geoX].values[0]
firstHue = data.head(1)[self.hue].values[0]
lastHue = data.tail(1)[self.hue].values[0]
try:
firstVal = round(firstVal, 2)
lastVal = round(lastVal, 2)
except:
pass
try:
firstHue = round(firstHue, 2)
lastHue = round(lastHue, 2)
except:
pass
title += '\nFirst:' + self.hue + ' ' + str(firstHue) + '=' + str(firstVal)
title += '\nLast:' + self.hue + ' ' + str(lastHue) + '=' + str(lastVal)
else:
try:
strMin = str(outMin)
strMax = str(outMax)
strMin = strMin[:6]
strMax = strMax[:6]
title += '\nFirst = ' + strMin
title += '\n last = ' + strMax
except:
title += '\nFirst = ' + outMin
title += '\n last = ' + outMax
# sns.distplot(data[xName], norm_hist=True, bins=50, kde=False)
histCol = self.palette
alpha=1
bins = min(max(int(len(data[self.geoX])/6),10),50)
#bins = int(bins/2)
density = not self.count
minV = self.data[self.geoX].min()
maxV = self.data[self.geoX].max()
try:
disV = abs(maxV - minV)
if disV < 5:
bins = 13 # int(disV/0.004)
except:
bins = 10 # int(disV/0.004)
if self.title == 'ghost':
histCol = 'gainsboro'
alpha=0.5
plt.hist(data[self.geoX], EdgeColor='k', bins=bins,color=histCol,alpha=alpha,density=density,label='ghost')
#sns.distplot(data[self.geoX], label='x', norm_hist=True, bins=50, kde=False,color='gainsboro')
else:
#if self.hue != '':
# splitList = data[self.hue].unique()
# for split in splitList:
# dfx = data[data[self.hue] == split]
# bins = max(int(len(dfx[self.geoX]) / 6),10)
# #plt.hist(dfx[self.geoX], EdgeColor='k', bins=bins, alpha=alpha, density=True)
# sns.distplot(dfx[self.geoX], label=split, norm_hist=True, bins=bins, kde=False,hist_kws=dict(alpha=0.5,EdgeColor='silver'))
# plt.legend()
#else:
#sns.distplot(data[self.geoX], label='', norm_hist=True, bins=bins, kde=False,hist_kws=dict(alpha=0.8,EdgeColor='silver'))
if self.range == []:
plt.hist(data[self.geoX], EdgeColor='k', bins=bins, color=histCol, density=density,alpha=alpha)
else:
plt.xlim(xmin=self.range[0], xmax=self.range[1])
plt.hist(data[self.geoX], EdgeColor='k', bins=bins, color=histCol, density=density, alpha=alpha)
plt.title(title)
#self.title = title
plt.xlabel(self.geoX)
if self.title != 'ghost':
dfdesc = self.data[self.geoX].describe()
rows = len(dfdesc.index)
colsNames = list(dfdesc.index)
html = "<table class='innertable'>\n"
html += "<tr>\n"
for r in range(0, rows):
html += "<td>" + str(colsNames[r]) + "</td>\n"
html += "</tr>\n"
html += "<tr>"
for r in range(0, rows):
header = colsNames[r]
html += "<td>"
try:
number = float(dfdesc[r])
strnumber = str(round(number, 1))
if abs(number) < 10:
strnumber = str(round(number, 3))
elif abs(number) < 100:
strnumber = str(round(number, 2))
elif abs(number) > 1000:
strnumber = str(int(round(number, 0)))
#print(header, number,strnumber)
html += strnumber
except:
html += str(dfdesc[r])
html += "</td>\n"
html += "</tr>\n"
html += "</table></p>\n"
return html
else:
return ''
def plotCsv(self):
html = '<p>' + self.title + '</p>'
html += "<table class='innertable'>\n"
try:
cols = self.data.columns
except:
dicd = {'-':self.data}
self.data = pd.DataFrame(dicd)
cols = ['-']
try:
idx = self.data.index
html += "<tr>\n"
html += "<td>" + "" + "</td>\n"
for col in cols:
html += "<td>" + str(col) + "</td>\n"
html += "</tr>\n"
rows = self.data.shape[0]
for r in range(0, rows):
html += "<tr>\n"
html += "<td>" + str(idx[r]) + "</td>\n"
for col in cols:
coldata = self.data[col].tolist()
cold = coldata[r]
try:
number = float(cold)
cold = str(round(number, 1))
if abs(number) < 10:
cold = str(round(number, 3))
elif abs(number) < 100:
cold = str(round(number, 2))
elif abs(number) > 1000:
cold = str(int(round(number, 0)))
except:
cold = coldata[r]
html += "<td>" + str(cold) + "</td>\n"
html += "</tr>\n"
except:
html += '<p>' + 'error' + '</p>'
html += "</table></p>\n"
return html
def plotCompare(self):
#Create outliers
#Data A
self.data = self.data.sort_values(by=[self.geoX])
outliersA =self.data.iloc[[0,-1]]
#print(outliersA)
pdbsA = outliersA['pdbCode'].values
chainsA = outliersA['chain'].values
ridsA = outliersA['rid'].values
tausA = outliersA[self.geoX].values
outMinA = ''
outMaxA = ''
if len(pdbsA) > 1:
outMinA = pdbsA[0] + ' ' + chainsA[0] + str(ridsA[0]) + ' ' + str(round(tausA[0],3))
outMaxA = pdbsA[1] + ' ' + chainsA[1] + str(ridsA[1]) + ' ' + str(round(tausA[1],3))
#Data B
self.data2 = self.data2.sort_values(by=[self.geoX])
outliersB = self.data2.iloc[[0, -1]]
#print(outliersB)
pdbsB = outliersB['pdbCode'].values
chainsB = outliersB['chain'].values
ridsB = outliersB['rid'].values
tausB = outliersB[self.geoX].values
outMinB = ''
outMaxB = ''
if len(pdbsA) > 1:
outMinB = pdbsB[0] + ' ' + chainsB[0] + str(ridsB[0]) + ' ' + str(round(tausB[0],3))
outMaxB = pdbsB[1] + ' ' + chainsB[1] + str(ridsB[1]) + ' ' + str(round(tausB[1],3))
dataA = self.data[self.geoX].values
dataB = self.data2[self.geoX].values
dataA.sort()
dataB.sort()
desc1A = self.data[self.geoX].describe()
desc1B = self.data2[self.geoX].describe()
meanA = round(dataA.mean(), 3)
meanB = round(dataB.mean(), 3)
medA = round(desc1A[5], 3)
medB = round(desc1B[5], 3)
sdA = round(dataA.std(), 3)
sdB = round(dataB.std(), 3)
countA = round(desc1A[0], 0)
countB = round(desc1B[0], 0)
#https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_1samp.html
from scipy import stats
'''
Use Mann-Whitney U (not necessarily gaussian)
Null hypothesis - the distributions of the 2 sets are identical
'''
u_statistic, p_value = stats.mannwhitneyu(dataA, dataB)
u_statistic = round(u_statistic, 1)
p_value = round(p_value, 5)
hypothesis = 'Null hypothesis: the distributions of the 2 sets of data are identical.'
method = 'Method: If the P-Value < 0.05 we will reject.'
evidence = 'Evidence: The P-Value = ' + str(p_value)
conclusion = 'Conclusion: No evidence to reject the null hypothesis.'
if p_value <0.05:
conclusion = 'Conclusion: We reject the null hypothesis, the distributions are not the same.'
html = "<h3>" + self.title + "</h3>"
html += "<p>Data Set A: " + self.descA
html += "<br/>Data Set B: " + self.descB + "</p>"
html += "<p>" + "Mann-Whitney U Test" + "</p>"
html += "<p>" + hypothesis
html += "<br/>" + method + "</p>"
html += "<table class='innertable'>\n"
html += "<tr><td><red>Stats measure</red></td><td><red>" + self.geoX + " A</red></td><td><red>" + self.geoX + " B</red></td></tr>"
html += "<tr><td>count</td><td>" + str(countA) + "</td><td>" + str(countB) + "</td></tr>"
html += "<tr><td>mean</td><td>" + str(meanA) + "</td><td>" + str(meanB) + "</td></tr>"
html += "<tr><td>median</td><td>" + str(medA) + "</td><td>" + str(medB) + "</td></tr>"
html += "<tr><td>sd</td><td>" + str(sdA) + "</td><td>" + str(sdB) + "</td></tr>"
html += "<tr><td>Min</td><td>" + outMinA + "</td><td>" + outMinB + "</td></tr>"
html += "<tr><td>Max</td><td>" + outMaxA + "</td><td>" + outMaxB + "</td></tr>"
html += "<tr><td>" + 'U Statistic =' + "</td><td>" + str(u_statistic) + "</td><td></td></tr>"
html += "<tr><td>" + 'P-Value =' + "</td><td>" + str(p_value) + "</td><td></td></tr>"
html += "</table>"
html += "<p>" + evidence + "</p>"
html += "<p>" + conclusion + "</p>"
return html
def plotSummary(self):
#Create outliers
#Data A
geoYs = self.data[self.geoY].values
geoYs = list(set(geoYs))
geoYs.sort()
#print(geoYs)
html = "<h3>" + self.title + "</h3>"
html += "<p>Data Set: " + self.descA + "</p>"
html += "<table class='innertable'>\n"
html += "<tr><td>GeoX</td><td>GeoY</td><td>MinHue</td><td>Min</td><td>MaxHue</td><td>Max</td><td>Mean</td><td>Sd</td><td>Count</td></tr>"
for geoY in geoYs:
qry = "" + self.geoY + " == '" + str(geoY) + "'"
#print(self.geoX,qry)
dataCut = self.data.query(qry)
dataCut = dataCut.sort_values(by=[self.geoX])
outliers =dataCut.iloc[[0,-1]]
#print(outliersA)
hues = outliers[self.hue].values
geos = outliers[self.geoX].values
minHue = ''
minVal = ''
maxHue = ''
maxVal = ''
if len(hues) > 1:
minHue = hues[0]
minVal = str(round(geos[0],3))
maxHue = hues[1]
maxVal = str(round(geos[1], 3))
geoVals = dataCut[self.geoX].values
geoVals.sort()
meanA = round(geoVals.mean(), 3)
sdA = round(geoVals.std(), 3)
count = len(dataCut[self.geoX].values)
html += "<tr><td>" + self.geoX + "</td><td>" + geoY + "</td><td>" + minHue + "</td><td>" + minVal +"</td><td>" + maxHue + "</td><td>" + maxVal + "</td><td>" + str(meanA) + "</td><td>" + str(sdA)+ "</td><td>" + str(count) + "</td></tr>"
html += "</table>"
return html
def plotScatter(self,fig, ax):
#fig, ax = plt.subplots()
ax.grid(b=True, which='major', color='Gainsboro', linestyle='-')
ax.set_axisbelow(True)
if self.categorical or self.hue == 'dssp':
#at some point I will change the hue data to numerical, but for now do nothing
a=1
if self.operation == 'ABS':
self.data = self.data[self.data[self.geoX] == abs(self.data[self.geoX])]
if self.sort == 'DESC':
self.data = self.data.sort_values(by=self.hue, ascending=False)
elif self.hue == 'resolution':
self.data = self.data.sort_values(by=self.hue, ascending=False)
elif self.plot == 'contact':
self.data = self.data.sort_values(by='ridA', ascending=False)
elif self.sort== 'ASC':
self.data = self.data.sort_values(by=self.hue, ascending=True)
elif self.sort == 'RAND':
self.data = self.data.sample(frac=1)
lw = 0.5
alpha = 0.65#0.65
#if the count is really low then we can have a greater alphs
if len(self.data[self.geoX]) < 100:
alpha = 1
ecol = 'grey'
if self.palette == 'gist_gray_r':
lw = 0 # this gives a crystollagraphic image look
ecol = 'grey'
alpha = 0.9
if self.title=='ghost':
alpha = 0.4
self.palette='Greys'
if self.hue == 'count':
alpha = 0.003
self.data['count'] = 0
self.palette = 'bone'
lw=0.1
self.vmin=0
self.vmax=0
if self.centre:
self.data[self.hue + '2'] = self.data[self.hue] ** 2
data = self.data.sort_values(by=self.hue + '2', ascending=True)
maxh = max(data[self.hue].max(), -1 * data[self.hue].min())
minh = maxh * -1
g = ax.scatter(data[self.geoX], data[self.geoY], c=data[self.hue], cmap=self.palette,
vmin=minh,vmax=maxh, edgecolor=ecol, alpha=alpha,linewidth=lw,s=20)
cb = fig.colorbar(g)
ax.set_xlabel(self.geoX)
ax.set_ylabel(self.geoY)
cb.set_label(self.hue)
elif self.vmin < self.vmax:
#data = self.data.sort_values(by=self.hue, ascending=True)
g = ax.scatter(self.data[self.geoX], self.data[self.geoY], c=self.data[self.hue], cmap=self.palette, vmin=self.vmin,
vmax=self.vmax, edgecolor=ecol, alpha=alpha,linewidth=lw,s=20)
cb = fig.colorbar(g)
ax.set_xlabel(self.geoX)
ax.set_ylabel(self.geoY)
cb.set_label(self.hue)
elif self.plot == 'contact':
alpha = 0.75
self.data['distanceinv'] = 1/(self.data['distance'] ** 3)*4000
if False:
#if self.categorical == False:
g = ax.scatter(self.data[self.geoX], self.data[self.geoY], c=self.data[self.hue],
cmap=self.palette,s=self.data['distanceinv'],edgecolor=ecol,alpha=alpha,linewidth=lw)
cb = plt.colorbar(g)
cb.set_label(self.hue)
else:
alpha = 0.65
im = sns.scatterplot(x=self.geoX, y=self.geoY, hue=self.hue, data=self.data, alpha=alpha,legend='brief',
palette=self.palette, size='distanceinv',edgecolor=ecol, linewidth=lw,vmax=3)
#https://stackoverflow.com/questions/53437462/how-do-i-remove-an-attribute-from-the-legend-of-a-scatter-plot
# EXTRACT CURRENT HANDLES AND LABELS
h, l = ax.get_legend_handles_labels()
# COLOR LEGEND (FIRST guess at size ITEMS) we don;t want to plot the distanceinc
huelen = len(self.data.sort_values(by=self.hue, ascending=True)[self.hue].unique())+1
col_lgd = plt.legend(h[:huelen], l[:huelen], loc='upper left',bbox_to_anchor=(1.05, 1), fancybox=True, shadow=True, ncol=1)
plt.gca().add_artist(col_lgd)
ax.set_xlabel('')
ax.set_ylabel('')
else:
if self.range != []:
plt.xlim(xmin=self.range[0], xmax=self.range[1])
plt.ylim(ymin=self.range[0], ymax=self.range[1])
plt.gca().set_aspect("equal")
#if self.categorical:
if False:
legend='brief'
try:
self.data[self.hue] =
|
pd.to_numeric(self.data[self.hue])
|
pandas.to_numeric
|
"""This script is designed to perform statistics of demographic information
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import pearsonr,spearmanr,kendalltau
import sys
sys.path.append(r'D:\My_Codes\LC_Machine_Learning\lc_rsfmri_tools\lc_rsfmri_tools_python')
import os
from eslearn.utils.lc_read_write_mat import read_mat, write_mat
#%% ----------------------------------Our center 550----------------------------------
uid_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\selected_550.txt'
scale_path_550 = r'D:\WorkStation_2018\SZ_classification\Scale\10-24大表.xlsx'
headmotion_file = r'D:\WorkStation_2018\SZ_classification\Scale\头动参数_1322.xlsx'
scale_data_550 = pd.read_excel(scale_path_550)
uid_550 = pd.read_csv(uid_path_550, header=None)
scale_selected_550 = pd.merge(uid_550, scale_data_550, left_on=0, right_on='folder', how='inner')
describe_bprs_550 = scale_selected_550.groupby('诊断')['BPRS_Total'].describe()
describe_age_550 = scale_selected_550.groupby('诊断')['年龄'].describe()
describe_duration_550 = scale_selected_550.groupby('诊断')['病程月'].describe()
describe_durgnaive_550 = scale_selected_550.groupby('诊断')['用药'].value_counts()
describe_sex_550 = scale_selected_550.groupby('诊断')['性别'].value_counts()
# Demographic
demographic_info_dataset1 = scale_selected_550[['folder', '诊断', '年龄', '性别', '病程月']]
headmotion = pd.read_excel(headmotion_file)
headmotion = headmotion[['Subject ID','mean FD_Power']]
demographic_info_dataset1 = pd.merge(demographic_info_dataset1, headmotion, left_on='folder', right_on='Subject ID', how='inner')
demographic_info_dataset1 = demographic_info_dataset1.drop(columns=['Subject ID'])
site_dataset1 = pd.DataFrame(np.zeros([len(demographic_info_dataset1),1]))
site_dataset1.columns = ['site']
demographic_dataset1_all = pd.concat([demographic_info_dataset1 , site_dataset1], axis=1)
demographic_dataset1_all.columns = ['ID','Diagnosis', 'Age', 'Sex', 'Duration', 'MeanFD', 'Site']
demographic_dataset1 = demographic_dataset1_all[['ID','Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']]
demographic_dataset1['Diagnosis'] = np.int32(demographic_dataset1['Diagnosis'] == 3)
# Duration and age
demographic_duration_dataset1 = demographic_dataset1_all[['Duration', 'Age']].dropna()
np.corrcoef(demographic_duration_dataset1['Duration'], demographic_duration_dataset1['Age'])
pearsonr(demographic_duration_dataset1['Duraton'], demographic_duration_dataset1['Age'])
#%% ----------------------------------BeiJing 206----------------------------------
uid_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100.xlsx'
scale_path_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\SZ_NC_108_100-WF.csv'
headmotion_file_206 = r'D:\WorkStation_2018\SZ_classification\Scale\北大精分人口学及其它资料\parameters\FD_power'
uid_to_remove = ['SZ010109','SZ010009']
scale_data_206 = pd.read_csv(scale_path_206)
scale_data_206 = scale_data_206.drop(np.array(scale_data_206.index)[scale_data_206['ID'].isin(uid_to_remove)])
scale_data_206['PANSStotal1'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['PANSStotal1'].values])
Pscore = pd.DataFrame(scale_data_206[['P1', 'P2', 'P3', 'P4', 'P4', 'P5', 'P6', 'P7']].iloc[:106,:], dtype = np.float64)
Pscore = np.sum(Pscore, axis=1).describe()
Nscore = pd.DataFrame(scale_data_206[['N1', 'N2', 'N3', 'N4', 'N4', 'N5', 'N6', 'N7']].iloc[:106,:], dtype=np.float64)
Nscore = np.sum(Nscore, axis=1).describe()
Gscore = pd.DataFrame(scale_data_206[['G1', 'G2', 'G3', 'G4', 'G4', 'G5', 'G6', 'G7', 'G8', 'G9', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16']].iloc[:106,:])
Gscore = np.array(Gscore)
for i, itemi in enumerate(Gscore):
for j, itemj in enumerate(itemi):
print(itemj)
if itemj.strip() != '':
Gscore[i,j] = np.float64(itemj)
else:
Gscore[i, j] = np.nan
Gscore = pd.DataFrame(Gscore)
Gscore = np.sum(Gscore, axis=1).describe()
describe_panasstotol_206 = scale_data_206.groupby('group')['PANSStotal1'].describe()
describe_age_206 = scale_data_206.groupby('group')['age'].describe()
scale_data_206['duration'] = np.array([np.float64(duration) if duration.strip() !='' else 0 for duration in scale_data_206['duration'].values])
describe_duration_206 = scale_data_206.groupby('group')['duration'].describe()
describe_sex_206 = scale_data_206.groupby('group')['sex'].value_counts()
# Demographic
uid = pd.DataFrame(scale_data_206['ID'])
uid['ID'] = uid['ID'].str.replace('NC','10');
uid['ID'] = uid['ID'].str.replace('SZ','20');
uid = pd.DataFrame(uid, dtype=np.int32)
demographic_info_dataset2 = scale_data_206[['group','age', 'sex']]
demographic_info_dataset2 = pd.concat([uid, demographic_info_dataset2], axis=1)
headmotion_name_dataset2 = os.listdir(headmotion_file_206)
headmotion_file_path_dataset2 = [os.path.join(headmotion_file_206, name) for name in headmotion_name_dataset2]
meanfd = []
for i, file in enumerate(headmotion_file_path_dataset2):
fd = np.loadtxt(file)
meanfd.append(np.mean(fd))
meanfd_dataset2 = pd.DataFrame(meanfd)
headmotion_name_dataset2 = pd.Series(headmotion_name_dataset2)
headmotion_name_dataset2 = headmotion_name_dataset2.str.findall('(NC.*[0-9]\d*|SZ.*[0-9]\d*)')
headmotion_name_dataset2 = [str(id[0]) if id != [] else 0 for id in headmotion_name_dataset2]
headmotion_name_dataset2 = pd.DataFrame([''.join(id.split('_')) if id != 0 else '0' for id in headmotion_name_dataset2])
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('NC','10');
headmotion_name_dataset2[0] = headmotion_name_dataset2[0].str.replace('SZ','20');
headmotion_name_dataset2 = pd.DataFrame(headmotion_name_dataset2, dtype=np.int32)
headmotion_name_dataset2 = pd.concat([headmotion_name_dataset2, meanfd_dataset2], axis=1)
headmotion_name_dataset2.columns = ['ID','meanFD']
demographic_dataset2 = pd.merge(demographic_info_dataset2, headmotion_name_dataset2, left_on='ID', right_on='ID', how='left')
site_dataset2 = pd.DataFrame(np.ones([len(demographic_dataset2),1]))
site_dataset2.columns = ['site']
demographic_dataset2 = pd.concat([demographic_dataset2, site_dataset2], axis=1)
demographic_dataset2.columns = ['ID', 'Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']
demographic_dataset2['Diagnosis'] = np.int32(demographic_dataset2['Diagnosis'] == 1)
duration_dataset2 = pd.concat([uid, scale_data_206['duration']], axis=1)
demographic_duration_dataset2 = pd.merge(duration_dataset2, demographic_dataset2, left_on='ID', right_on='ID')
demographic_duration_dataset2 = demographic_duration_dataset2.iloc[:106,:]
pearsonr(demographic_duration_dataset2['duration'], demographic_duration_dataset2['Age'])
#%% -------------------------COBRE----------------------------------
# Inputs
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC_COBRE' # all mat files directory
scale = r'H:\Data\精神分裂症\COBRE\COBRE_phenotypic_data.csv' # whole scale path
headmotion_file_COBRE = r'D:\WorkStation_2018\SZ_classification\Data\headmotion\cobre\HeadMotion.tsv'
duration_COBRE = r'D:\WorkStation_2018\SZ_classification\Scale\COBRE_duration.xlsx'
# Transform the .mat files to one .npy file
allmatname = os.listdir(matroot)
# Give labels to each subject, concatenate at the first column
allmatname = pd.DataFrame(allmatname)
allsubjname = allmatname.iloc[:,0].str.findall(r'[1-9]\d*')
allsubjname = pd.DataFrame([name[0] for name in allsubjname])
scale_data = pd.read_csv(scale,sep=',',dtype='str')
print(scale_data)
diagnosis = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')[['ID','Subject Type']]
scale_data = pd.merge(allsubjname,scale_data,left_on=0,right_on='ID')
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Control'] = 0
diagnosis['Subject Type'][diagnosis['Subject Type'] == 'Patient'] = 1
include_loc = diagnosis['Subject Type'] != 'Disenrolled'
diagnosis = diagnosis[include_loc.values]
allsubjname = allsubjname[include_loc.values]
scale_data_COBRE = pd.merge(allsubjname, scale_data, left_on=0, right_on=0, how='inner').iloc[:,[0,1,2,3,5]]
scale_data_COBRE['Gender'] = scale_data_COBRE['Gender'].str.replace('Female', '0')
scale_data_COBRE['Gender'] = scale_data_COBRE['Gender'].str.replace('Male', '1')
scale_data_COBRE['Subject Type'] = scale_data_COBRE['Subject Type'].str.replace('Patient', '1')
scale_data_COBRE['Subject Type'] = scale_data_COBRE['Subject Type'].str.replace('Control', '0')
scale_data_COBRE = pd.DataFrame(scale_data_COBRE, dtype=np.float64)
describe_age_COBRE = scale_data_COBRE.groupby('Subject Type')['Current Age'].describe()
describe_sex_COBRE = scale_data_COBRE.groupby('Subject Type')['Gender'].value_counts()
headmotion_COBRE = pd.read_csv(headmotion_file_COBRE,sep='\t', index_col=False)
headmotion_COBRE = headmotion_COBRE[['Subject ID', 'mean FD_Power']]
scale_data['ID'] = pd.DataFrame(scale_data['ID'], dtype=np.int32)
demographic_COBRE = pd.merge(scale_data, headmotion_COBRE, left_on='ID', right_on='Subject ID', how='inner')
demographic_COBRE = demographic_COBRE[['ID', 'Subject Type', 'Current Age', 'Gender', 'mean FD_Power']]
site_COBRE = pd.DataFrame(np.ones([len(demographic_COBRE),1]) + 1)
site_COBRE.columns = ['site']
demographic_COBRE = pd.concat([demographic_COBRE, site_COBRE], axis=1).drop([70,82])
demographic_COBRE['Gender'] = demographic_COBRE['Gender'] == 'Male'
demographic_COBRE[['Current Age', 'Gender']] = np.int32(demographic_COBRE[['Current Age', 'Gender']] )
demographic_COBRE.columns = ['ID', 'Diagnosis', 'Age', 'Sex', 'MeanFD', 'Site']
demographic_COBRE['Diagnosis'] = np.int32(demographic_COBRE['Diagnosis'] == 'Patient')
duration_COBRE = pd.read_excel(duration_COBRE)
duration_COBRE = duration_COBRE.iloc[:,[0,1,2]]
duration_COBRE = duration_COBRE.dropna()
duration_COBRE = pd.DataFrame(duration_COBRE, dtype=np.int32)
duration_COBRE[duration_COBRE == 9999] = None
duration_COBRE = duration_COBRE.dropna()
duration_COBRE['duration'] = duration_COBRE.iloc[:,1] - duration_COBRE.iloc[:,2]
duration_COBRE['duration'] =duration_COBRE['duration'] * 12
duration_COBRE.columns = ['ID', 'Age', 'Onset_age', 'Duration']
demographic_druation_COBRE = pd.merge(demographic_COBRE, duration_COBRE, left_on='ID', right_on='ID', how='inner')
# Correattion of duration and age
pearsonr(demographic_druation_COBRE['Duration'], demographic_druation_COBRE ['Age_x'])
#%% -------------------------UCLA----------------------------------
matroot = r'D:\WorkStation_2018\SZ_classification\Data\SelectedFC_UCLA'
scale = r'H:\Data\精神分裂症\ds000030\schizophrenia_UCLA_restfmri\participants.tsv'
headmotion_UCAL = r'D:\WorkStation_2018\SZ_classification\Data\headmotion\ucal\HeadMotion.tsv'
headmotion_UCAL_rest = r'D:\WorkStation_2018\SZ_classification\Data\headmotion\ucal\HeadMotion_rest.tsv'
allmatname = os.listdir(matroot)
allmatname = pd.DataFrame(allmatname)
allsubjname = allmatname.iloc[:,0].str.findall(r'[1-9]\d*')
allsubjname = pd.DataFrame(['sub-' + name[0] for name in allsubjname])
scale_data =
|
pd.read_csv(scale,sep='\t')
|
pandas.read_csv
|
import datetime
import numpy as np
import pandas as pd
from serenity.tickstore.tickstore import LocalTickstore, BiTimestamp
from pathlib import Path
# noinspection DuplicatedCode
def test_tickstore():
ts_col_name = 'ts'
tickstore = LocalTickstore(Path('./COINBASE_PRO_ONE_MIN_BINS'), timestamp_column=ts_col_name)
# ensure we start empty
assert_empty(tickstore)
# populate the tickstore for October with random timestamps and integers
for i in range(31):
start = pd.to_datetime('2019-10-1')
end = pd.to_datetime('2019-10-31')
ts_index = random_dates(start, end, 100)
ts_index.name = ts_col_name
ticks = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD'), index=ts_index)
tickstore.insert('BTC-USD', BiTimestamp(datetime.date(2019, 10, i+1)), ticks)
tickstore.insert('ETH-USD', BiTimestamp(datetime.date(2019, 10, i+1)), ticks)
# close and re-open
tickstore.close()
tickstore = LocalTickstore(Path('./COINBASE_PRO_ONE_MIN_BINS'), timestamp_column=ts_col_name)
# because timestamps are random the number of matches is not deterministic. is there a better way to test this?
df = tickstore.select('BTC-USD', start=datetime.datetime(2019, 10, 1), end=datetime.datetime(2019, 10, 15))
assert df.size > 0
# create a 2nd version of all rows
for i in range(31):
start =
|
pd.to_datetime('2019-10-1')
|
pandas.to_datetime
|
import numpy as np
import pytest
from pandas import DataFrame, SparseArray, SparseDataFrame, bdate_range
data = {
"A": [np.nan, np.nan, np.nan, 0, 1, 2, 3, 4, 5, 6],
"B": [0, 1, 2, np.nan, np.nan, np.nan, 3, 4, 5, 6],
"C": np.arange(10, dtype=np.float64),
"D": [0, 1, 2, 3, 4, 5, np.nan, np.nan, np.nan, np.nan],
}
dates = bdate_range("1/1/2011", periods=10)
# fixture names must be compatible with the tests in
# tests/frame/test_api.SharedWithSparse
@pytest.fixture
def float_frame_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
return DataFrame(data, index=dates)
@pytest.fixture
def float_frame():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; some entries are missing
"""
# default_kind='block' is the default
return SparseDataFrame(data, index=dates, default_kind="block")
@pytest.fixture
def float_frame_int_kind():
"""
Fixture for sparse DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D'] and default_kind='integer'.
Some entries are missing.
"""
return SparseDataFrame(data, index=dates, default_kind="integer")
@pytest.fixture
def float_string_frame():
"""
Fixture for sparse DataFrame of floats and strings with DatetimeIndex
Columns are ['A', 'B', 'C', 'D', 'foo']; some entries are missing
"""
sdf = SparseDataFrame(data, index=dates)
sdf["foo"] = SparseArray(["bar"] * len(dates))
return sdf
@pytest.fixture
def float_frame_fill0_dense():
"""
Fixture for dense DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']; missing entries have been filled with 0
"""
values = SparseDataFrame(data).values
values[np.isnan(values)] = 0
return
|
DataFrame(values, columns=["A", "B", "C", "D"], index=dates)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import CubicSpline
from scipy import integrate
import os
import pandas as pd
def generate_spiral(loops:float,rr:float,n:int):
thetas = 2*np.pi*np.linspace(0,loops,num=n)
r = rr*np.linspace(0,loops,num=n)
return np.stack([r*np.cos(thetas),r*np.sin(thetas)])
def plot_waypoints(xy,dr,r):
fig,ax = plt.subplots(nrows=2,sharex=True)
ax[0].plot(np.transpose(xy))
ax[0].set_ylabel("XY")
ax[1].plot(r)
ax[1].set_ylabel("Cumulative \ndistance")
ax[1].set_xlabel("Index")
fig.suptitle("Waypoints")
def plot_spline(cubic_spline,paramax,samples):
r = np.linspace(0,paramax,samples)
fig,ax = plt.subplots()
ax.plot(cubic_spline(r)[:,0],cubic_spline(r)[:,1])
def arc_length(cubic_spline,parameterization:np.ndarray):
"""
Parameters
----------
cubic_spline : scipy.interpolate._cubic.CubicSpline
Cubic spline object
parameterization : numpy.ndarray
Array of size N. Represents the parameterization values of the cubic spline.
Must be monotonically increasing.
Returns
-------
arclength : numpy.ndarray
Array of size N.
errs : numpy.ndarray
DESCRIPTION.
"""
df = lambda t : np.sqrt(np.sum(cubic_spline.derivative()(t)**2)) # arc-length
segments = len(parameterization)-1
sub_arclength = np.zeros((segments,))
errs = np.zeros((segments,))
for i in range(segments):
subf,suberr = integrate.quad(df,parameterization[i],parameterization[i+1])
sub_arclength[i] = subf
errs[i] = suberr
arclength = np.hstack((0,sub_arclength.cumsum()))
return arclength,errs
def alp_spline(xyz:np.ndarray):
ppdist = np.sqrt(np.sum(np.diff(xyz)**2,axis=0)) # point to point distance
non_zero_indices = ppdist>0
ppdist = ppdist[non_zero_indices]
xyz = xyz[:,np.hstack((True,non_zero_indices))]
linear_dist = np.hstack((0,ppdist.cumsum()))
ldp_cs = CubicSpline(linear_dist,np.transpose(xyz))
arclength,errs = arc_length(ldp_cs,linear_dist)
alp_cs = CubicSpline(arclength,np.transpose(xyz))
return linear_dist,ldp_cs,arclength,alp_cs
loop_count = 1
radius_multiplier = 1
waypoints = 50
#thetas = 2*np.pi*np.linspace(0,loop_count,num=10)
#xy = np.stack([20*np.cos(thetas),20*np.sin(thetas)])
#xy = generate_spiral(loop_count,radius_multiplier,waypoints)
#x,y = xy
"""Pull GNSS data. Putt in pandas"""
datapath = [f"C:/Users/RDGSLJDF/Desktop/analysis-scripts/data/openedbags/GpsSpikeData/VaqTrials{tt}/AnvelPos" for tt in range(1,3)]
df_list = []
for i,folder in enumerate(datapath):
posdata = [f for f in os.listdir(folder) if os.path.isfile(os.path.join(folder,f))]
for j,file in enumerate(posdata):
df = pd.read_csv(os.path.join(folder,file))
df["trial"] = i+1
df_list.append(df)
psdf =
|
pd.concat(df_list)
|
pandas.concat
|
"""
Open Power System Data
Timeseries Datapackage
imputation.py : fill functions for imputation of missing data.
"""
from datetime import datetime, date, timedelta
import pandas as pd
import numpy as np
import logging
logger = logging.getLogger(__name__)
logger.setLevel('INFO')
def find_nan(df, res_key, headers, patch=False):
'''
Search for missing values in a DataFrame and optionally apply further
functions on each column.
Parameters
----------
df : pandas.DataFrame
DataFrame to inspect and possibly patch
headers : list
List of strings indicating the level names of the pandas.MultiIndex
for the columns of the dataframe
patch : bool, default=False
If False, return unaltered DataFrame,
if True, return patched DataFrame
Returns
----------
patched: pandas.DataFrame
original df or df with gaps patched and marker column appended
nan_table: pandas.DataFrame
Contains detailed information about missing data
'''
nan_table = pd.DataFrame()
patched = pd.DataFrame()
marker_col = pd.Series(np.nan, index=df.index)
if df.empty:
return patched, nan_table
# Get the frequency/length of one period of df
one_period = pd.Timedelta(res_key)
for col_name, col in df.iteritems():
col = col.to_frame()
message = '| {:5.5} | {:6.6} | {:10.10} | {:10.10} | {:10.10} | '.format(
res_key, *col_name[0:4])
# make an empty list of NaN regions to use as default
nan_idx = pd.MultiIndex.from_arrays([
[0, 0, 0, 0],
['count', 'span', 'start_idx', 'till_idx']])
nan_list = pd.DataFrame(index=nan_idx, columns=col.columns)
# skip this column if it has no entries at all.
# This will also delete the column from the patched df
if col.empty:
continue
# tag all occurences of NaN in the data with True
# (but not before first or after last actual entry)
col['tag'] = (
(col.index >= col.first_valid_index()) &
(col.index <= col.last_valid_index()) &
col.isnull().transpose().as_matrix()
).transpose()
# make another DF to hold info about each region
nan_regs = pd.DataFrame()
# first row of consecutive region is a True preceded by a False in tags
nan_regs['start_idx'] = col.index[col['tag'] & ~
col['tag'].shift(1).fillna(False)]
# last row of consecutive region is a False preceded by a True
nan_regs['till_idx'] = col.index[
col['tag'] & ~
col['tag'].shift(-1).fillna(False)]
# if there are no NaNs, do nothing
if not col['tag'].any():
logger.info(message + 'nothing to patch in this column')
col.drop('tag', axis=1, inplace=True)
# else make a list of the NaN regions
else:
# how long is each region
nan_regs['span'] = (
nan_regs['till_idx'] - nan_regs['start_idx'] + one_period)
nan_regs['count'] = (nan_regs['span'] / one_period)
# sort the nan_regs DataFtame to put longest missing region on top
nan_regs = nan_regs.sort_values(
'count', ascending=False).reset_index(drop=True)
col.drop('tag', axis=1, inplace=True)
nan_list = nan_regs.stack().to_frame()
nan_list.columns = col.columns
if patch:
col, marker_col = choose_fill_method(
message,
col, col_name, nan_regs, df, marker_col, one_period)
if patched.empty:
patched = col
else:
patched = patched.combine_first(col)
if nan_table.empty:
nan_table = nan_list
else:
nan_table = nan_table.combine_first(nan_list)
# append the marker to the DataFrame
marker_col = marker_col.to_frame()
tuples = [('interpolated_values', '', '', '', '', '')]
marker_col.columns = pd.MultiIndex.from_tuples(tuples, names=headers)
patched =
|
pd.concat([patched, marker_col], axis=1)
|
pandas.concat
|
from collections import OrderedDict
import datetime
from datetime import timedelta
from io import StringIO
import json
import os
import numpy as np
import pytest
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, DatetimeIndex, Series, Timestamp, read_json
import pandas._testing as tm
_seriesd = tm.getSeriesData()
_tsd = tm.getTimeSeriesData()
_frame = DataFrame(_seriesd)
_intframe = DataFrame({k: v.astype(np.int64) for k, v in _seriesd.items()})
_tsframe = DataFrame(_tsd)
_cat_frame = _frame.copy()
cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * (len(_cat_frame) - 15)
_cat_frame.index = pd.CategoricalIndex(cat, name="E")
_cat_frame["E"] = list(reversed(cat))
_cat_frame["sort"] = np.arange(len(_cat_frame), dtype="int64")
_mixed_frame = _frame.copy()
def assert_json_roundtrip_equal(result, expected, orient):
if orient == "records" or orient == "values":
expected = expected.reset_index(drop=True)
if orient == "values":
expected.columns = range(len(expected.columns))
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:the 'numpy' keyword is deprecated:FutureWarning")
class TestPandasContainer:
@pytest.fixture(autouse=True)
def setup(self):
self.intframe = _intframe.copy()
self.tsframe = _tsframe.copy()
self.mixed_frame = _mixed_frame.copy()
self.categorical = _cat_frame.copy()
yield
del self.intframe
del self.tsframe
del self.mixed_frame
def test_frame_double_encoded_labels(self, orient):
df = DataFrame(
[["a", "b"], ["c", "d"]],
index=['index " 1', "index / 2"],
columns=["a \\ b", "y / z"],
)
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["split", "records", "values"])
def test_frame_non_unique_index(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
result = read_json(df.to_json(orient=orient), orient=orient)
expected = df.copy()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("orient", ["index", "columns"])
def test_frame_non_unique_index_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])
msg = f"DataFrame index must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
@pytest.mark.parametrize("orient", ["split", "values"])
@pytest.mark.parametrize(
"data",
[
[["a", "b"], ["c", "d"]],
[[1.5, 2.5], [3.5, 4.5]],
[[1, 2.5], [3, 4.5]],
[[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],
],
)
def test_frame_non_unique_columns(self, orient, data):
df = DataFrame(data, index=[1, 2], columns=["x", "x"])
result = read_json(
df.to_json(orient=orient), orient=orient, convert_dates=["x"]
)
if orient == "values":
expected = pd.DataFrame(data)
if expected.iloc[:, 0].dtype == "datetime64[ns]":
# orient == "values" by default will write Timestamp objects out
# in milliseconds; these are internally stored in nanosecond,
# so divide to get where we need
# TODO: a to_epoch method would also solve; see GH 14772
expected.iloc[:, 0] = expected.iloc[:, 0].astype(np.int64) // 1000000
elif orient == "split":
expected = df
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("orient", ["index", "columns", "records"])
def test_frame_non_unique_columns_raises(self, orient):
df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])
msg = f"DataFrame columns must be unique for orient='{orient}'"
with pytest.raises(ValueError, match=msg):
df.to_json(orient=orient)
def test_frame_default_orient(self, float_frame):
assert float_frame.to_json() == float_frame.to_json(orient="columns")
@pytest.mark.parametrize("dtype", [False, float])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_simple(self, orient, convert_axes, numpy, dtype, float_frame):
data = float_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = float_frame
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [False, np.int64])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_intframe(self, orient, convert_axes, numpy, dtype):
data = self.intframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = self.intframe.copy()
if (
numpy
and (is_platform_32bit() or is_platform_windows())
and not dtype
and orient != "split"
):
# TODO: see what is causing roundtrip dtype loss
expected = expected.astype(np.int32)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("dtype", [None, np.float64, np.int, "U3"])
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_str_axes(self, orient, convert_axes, numpy, dtype):
df = DataFrame(
np.zeros((200, 4)),
columns=[str(i) for i in range(4)],
index=[str(i) for i in range(200)],
dtype=dtype,
)
# TODO: do we even need to support U3 dtypes?
if numpy and dtype == "U3" and orient != "split":
pytest.xfail("Can't decode directly to array")
data = df.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy, dtype=dtype
)
expected = df.copy()
if not dtype:
expected = expected.astype(np.int64)
# index columns, and records orients cannot fully preserve the string
# dtype for axes as the index and column labels are used as keys in
# JSON objects. JSON keys are by definition strings, so there's no way
# to disambiguate whether those keys actually were strings or numeric
# beforehand and numeric wins out.
# TODO: Split should be able to support this
if convert_axes and (orient in ("split", "index", "columns")):
expected.columns = expected.columns.astype(np.int64)
expected.index = expected.index.astype(np.int64)
elif orient == "records" and convert_axes:
expected.columns = expected.columns.astype(np.int64)
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_categorical(self, orient, convert_axes, numpy):
# TODO: create a better frame to test with and improve coverage
if orient in ("index", "columns"):
pytest.xfail(f"Can't have duplicate index values for orient '{orient}')")
data = self.categorical.to_json(orient=orient)
if numpy and orient in ("records", "values"):
pytest.xfail(f"Orient {orient} is broken with numpy=True")
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.categorical.copy()
expected.index = expected.index.astype(str) # Categorical not preserved
expected.index.name = None # index names aren't preserved in JSON
if not numpy and orient == "index":
expected = expected.sort_index()
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_empty(self, orient, convert_axes, numpy, empty_frame):
data = empty_frame.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = empty_frame.copy()
# TODO: both conditions below are probably bugs
if convert_axes:
expected.index = expected.index.astype(float)
expected.columns = expected.columns.astype(float)
if numpy and orient == "values":
expected = expected.reindex([0], axis=1).reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_timestamp(self, orient, convert_axes, numpy):
# TODO: improve coverage with date_format parameter
data = self.tsframe.to_json(orient=orient)
result = pd.read_json(
data, orient=orient, convert_axes=convert_axes, numpy=numpy
)
expected = self.tsframe.copy()
if not convert_axes: # one off for ts handling
# DTI gets converted to epoch values
idx = expected.index.astype(np.int64) // 1000000
if orient != "split": # TODO: handle consistently across orients
idx = idx.astype(str)
expected.index = idx
assert_json_roundtrip_equal(result, expected, orient)
@pytest.mark.parametrize("convert_axes", [True, False])
@pytest.mark.parametrize("numpy", [True, False])
def test_roundtrip_mixed(self, orient, convert_axes, numpy):
if numpy and orient != "split":
pytest.xfail("Can't decode directly to array")
index = pd.Index(["a", "b", "c", "d", "e"])
values = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": [True, False, True, False, True],
}
df =
|
DataFrame(data=values, index=index)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
A gorgeous and self-contained training loop.
"""
import logging
import os
import tqdm
import pickle
from functools import partial
from collections import defaultdict
from contextlib import contextmanager
import numpy as np
import pandas as pd
import torch
import gin
from src.callbacks.callbacks import ModelCheckpoint, LambdaCallback, History, DumpTensorboardSummaries, BaseLogger
from src.utils import save_weights
logger = logging.getLogger(__name__)
def _construct_default_callbacks(model, optimizer, H, save_path, checkpoint_monitor, save_freq, custom_callbacks,
use_tb, save_history_every_k_examples):
callbacks = []
callbacks.append(BaseLogger())
callbacks.append(LambdaCallback(on_epoch_end=partial(_append_to_history_csv, H=H)))
callbacks.append(LambdaCallback(on_epoch_end=partial(_save_history_csv, save_path=save_path, H=H)))
callbacks.append(History(save_every_k_examples=save_history_every_k_examples))
callbacks.append(ModelCheckpoint(monitor=checkpoint_monitor,
save_best_only=True,
mode='max',
filepath=os.path.join(save_path, "model_best_val.pt")))
if save_freq > 0:
def save_weights_fnc(epoch, logs):
if epoch % save_freq == 0:
logger.info("Saving model from epoch " + str(epoch))
save_weights(model, optimizer, os.path.join(save_path, "model_last_epoch.pt"))
callbacks.append(LambdaCallback(on_epoch_end=save_weights_fnc))
if use_tb:
callbacks.append(DumpTensorboardSummaries())
callbacks.append(LambdaCallback(on_epoch_end=partial(_save_loop_state, save_callbacks=custom_callbacks,
save_path=save_path)))
return callbacks
def _save_loop_state(epoch, logs, save_path, save_callbacks):
loop_state = {"epochs_done": epoch, "callbacks": save_callbacks} # 0 index
## A small hack to pickle callbacks ##
if len(save_callbacks):
m, opt, md = save_callbacks[0].get_model(), save_callbacks[0].get_optimizer(), save_callbacks[0].get_meta_data()
for c in save_callbacks:
c.set_model(None, ignore=False) # TODO: Remove
c.set_optimizer(None)
c.set_params(None) # TODO: Remove
c.set_meta_data(None)
pickle.dump(loop_state, open(os.path.join(save_path, "loop_state.pkl"), "wb"))
if len(save_callbacks):
for c in save_callbacks:
c.set_model(m)
c.set_optimizer(opt)
c.set_meta_data(md)
def _save_history_csv(epoch, logs, save_path, H):
out = ""
for key, value in logs.items():
if isinstance(value, (int, float, complex, np.float32, np.float64)):
out += "{key}={value}\t".format(key=key, value=value)
logger.info(out)
logger.info("Saving history to " + os.path.join(save_path, "history.csv"))
pd.DataFrame(H).to_csv(os.path.join(save_path, "history.csv"), index=False)
def _append_to_history_csv(epoch, logs, H):
for key, value in logs.items():
if isinstance(value, (int, float, complex, np.float32, np.float64)):
if key not in H:
H[key] = [value]
else:
H[key].append(value)
# Epoch is 0 first, so 1 key. Etc
assert len(H[key]) == epoch + 1, "Len H[{}] is {}, expected {} ".format(key, len(H[key]), epoch + 1)
else:
pass
def _reload(model, optimizer, save_path, callbacks):
model_last_epoch_path = os.path.join(save_path, "model_last_epoch.pt")
loop_state_path = os.path.join(save_path, "loop_state.pkl")
history_csv_path = os.path.join(save_path, "history.csv")
if not os.path.exists(model_last_epoch_path) or not os.path.exists(loop_state_path):
logger.warning("Failed to find last epoch model or loop state")
return {}, 0
# Reload everything (model, optimizer, loop state)
logger.warning("Reloading weights!")
checkpoint = torch.load(model_last_epoch_path)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger.info("Reloading loop state!")
loop_state = pickle.load(open(loop_state_path, 'rb'))
logger.info("Reloading history!")
H =
|
pd.read_csv(history_csv_path)
|
pandas.read_csv
|
import pandas as pd
import numpy as np
import unittest
import math
# sys.path.append('src/main/python/')
# sys.path.append('../../main/python/')
from assess import *
class TestAssess(unittest.TestCase):
def test_difference(self):
X = pd.DataFrame([1, 2, 3])
Y = pd.DataFrame([4, 5, 6])
res = difference(X, Y)
self.assertTrue(pd.DataFrame([-3, -3, -3]).equals(res), res)
def test_differenceNormMinMax(self):
X = pd.DataFrame([4, 6, 8])
Y = pd.DataFrame([1, 2, 3])
res = minmaxnorm(difference(X, Y))
self.assertTrue(pd.DataFrame([0.0, 0.5, 1.0]).equals(res), res)
def test_ratio(self):
X =
|
pd.DataFrame([40, 60, 80])
|
pandas.DataFrame
|
#! /bin/python3
# compute_PCs.py takes
# RUN ON MASTODON--doesn't have memory issues there
# want local mean with #of non-missing values in either direction
# local mean with max number of positions to search in either direction
# global mean
# global mean by category
# filter out cases where there is no methylation (0 or 1 methylated values) and lots of missing values
import pandas as pd
import argparse
import os
from sklearn.decomposition import IncrementalPCA
def read_data(file_path):
'''
Parameters
----------
file_path : str
relative path to one chromosome
Returns
-------
'''
df_raw = pd.read_csv(file_path, sep = "\t")
if 'methylation_estimate' not in df_raw:
# point estimate for methylationg
df_raw['methylation_estimate'] = df_raw['methylated'] / df_raw['coverage']
df_raw.drop(columns=['methylated','chr', 'unmethylated'], inplace = True)
df_raw = df_raw.astype({'sample': 'uint8', 'methylation_estimate': 'float32', 'coverage': 'uint8'})
df = (df_raw.pivot_table(index=['pos'], columns=['sample'], values=['methylation_estimate', 'coverage']))
return df
def filter_too_many_nulls(df):
"""Drops positions that are more than half nulls
"""
num_na = df.isnull().sum(axis=1)
mean_val = df.sum(axis=1)
ix = (num_na < num_na_cut) & (mean_val > mean_cut)
return df[ix]
def impute_local_mean(df, group_ix, ws=50):
# Designed with methylated reads and coverage in mind...
'''imputes local mean by borrowin across groups
Args:
df: a data frame with
group_ix = same length as number of columns in df
'''
# Either mean then mean, or
# Minimum periods :=
mp = max(10, int(ws / 10))
df.rolling(window = ws, min_periods = mp)
return(None)
def run_pca(X, num_components = 2, is_incremental=True):
'''computes principal components incremntally
'''
#TODO: allow for normal PCA
ipca = IncrementalPCA(n_components = num_components, batch_size=10000)
X_ipca = ipca.fit_transform(X)
return X_ipca, ipca.explained_variance_ratio_
if __name__ == "__main__":
# argparsing,...
parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('--ifile', default = '../../data/cov-meth/chr22.tsv') #TODO: change to CSV in extract...
parser.add_argument('--odir', default = '../../data/prin-comps-array-samples/')
parser.add_argument('--filter_samples', action = 'store_true')
parser.add_argument('--filter_file', default = '../../data/meta/array-samples.csv')
args = parser.parse_args()
if not os.path.exists(args.odir):
os.makedirs(args.odir)
if args.filter_samples:
tmp = pd.read_csv(args.filter_file)['sample']
filter_list = [(x) for x in tmp]
df = read_data(args.ifile)
X = df['methylation_estimate'].dropna(axis=0).transpose() # pos is index so gets dropped (no need to do anything else)
valid_samples = list(set(X.index).intersection(filter_list))
X = X[X.index.isin(valid_samples)]
# Don't drop nas for coverage--replace with zeros...
Cov = df['coverage'].transpose()
Cov = Cov[Cov.index.isin(valid_samples)]
num_components = len(X)
print("Read in data frame, " + str(num_components) + " samples detected")
# PCA step
pca_out, var_exp = run_pca(X, num_components = num_components)
print("Computed PCs...")
col_names = ['PC' + str(x) for x in range(1, num_components + 1)]
pca_df = pd.DataFrame(pca_out, columns = col_names)
# Add some other columns
pca_df['sample'] = list(X.index)
pca_df['num_null'] = list(Cov.isnull().sum(axis=1))
pca_df['num_not_null'] = list(Cov.notnull().sum(axis=1))
pca_df['mean_methylation'] = list(X.mean(axis=1, skipna=True))
pca_df['mean_coverage'] = list(Cov.mean(axis=1, skipna=True))
pca_df['median_coverage'] = list(Cov.median(axis=1, skipna=True))
print("Computed summary statistics...")
# file paths
my_chr = os.path.basename(args.ifile).replace(".tsv", ".csv")
ofile = os.path.join(args.odir, my_chr)
pca_df.to_csv(ofile, index = False)
print("Wrote out " + ofile)
tmp = [var_exp, [i for i in range(1, num_components + 1)], [my_chr] * num_components]
var_df =
|
pd.DataFrame(tmp)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 02 13:56:53 2017
functions for plotting
@author: Boon
"""
import pandas as pd
import datetime
import numpy as np
import augfunc as af
#import xlsxwriter
from plotly.offline import download_plotlyjs, plot
from plotly.graph_objs import Bar, Layout, Scatter, Pie, Marker
#https://www.littlelives.com/img/identity/logo_littlelives_full_med.png
#%% General functions
def slicebytimeinterval(df,timeinterval,column='created_at_Date'):
if timeinterval[0]>timeinterval[1]:
print('Warning: timestart > timeend')
if not column=='created_at_Time':
sliceddf=df[(df[column] >= pd.to_datetime(timeinterval[0])) & (df[column] < pd.to_datetime(timeinterval[1]))]
else:
sliceddf=df[(df[column] >= timeinterval[0]) & (df[column] < timeinterval[1])]
return sliceddf
def expandtag(df,tagtype): #need to double check to see if truly duplicating properly--------------------------------------------------------
#use nested expandtag(expandtag(df,tagtype),tagtype) for both issue and school
if tagtype=='issue':
emptyrow=df[df['numissues']==0]#collect rows with issues equal to 0
filledrow=df[df['numissues']>0]#collect rows with issues greater than 1
elif tagtype=='school':
emptyrow=df[df['school']=='None']#collect rows with schools with none
filledrow=df[df['school']!='None']#collect rows with schools
#Build new df
newdf=[]
for index, row in filledrow.iterrows():
if type(row[tagtype])==unicode:
row[tagtype]=row[tagtype][1:-1].split(', ')
for multitag in row[tagtype]:
temprow=row.copy()#duplicate row
temprow[tagtype]=multitag#replace multi issue of duplicated row with single issue
newdf.append(temprow)
filledrow=pd.DataFrame(newdf)
expandeddf=emptyrow.append(filledrow) #recombine
expandeddf.sort_index(inplace=True) #sort
return expandeddf
def recogtf(tf,timebin):#for printing timeframe in context
timeframe=[7,30,180,365]#in days
tfstr=['Week','Month','6 Months','Year']
binout=af.bintime(pd.Timedelta(tf),'D',timebin,0)
binoutidx=[i for i,x in enumerate(timeframe) if x==binout]
return tfstr[binoutidx[0]],timeframe[binoutidx[0]]
#%% response and resolve pivottables for excel csv
def generatetagpivtbl(inputdf,columnname, timeinterval,forcecolumns=None):
#responsepivotdf=generatetagpivtbl(issueschoolexpandeddf,'s_response_bin',[timeframestartdt[0],timeframeenddt[0]])
#resolvepivotdf=generatetagpivtbl(issueschoolexpandeddf,'s_resolve_bin',[timeframestartdt[0],timeframeenddt[0]])
sliceddf=slicebytimeinterval(inputdf,timeinterval)
if sliceddf.empty:
raise ValueError('Empty sliceddf')
numconversations=len(sliceddf.convid.unique())
workindf=sliceddf[['issue',columnname]]
pivtable=workindf.pivot_table(index='issue', columns=columnname, aggfunc=len, fill_value=0)
sumoftags=pd.DataFrame(pivtable.transpose().sum())
pivtable['Total']=sumoftags
sumoftagsbycolumn=pd.DataFrame(pivtable.sum(),columns=['Grand Total'])
pivtable=pivtable.append(sumoftagsbycolumn.transpose())
if forcecolumns:
for colname in forcecolumns:
if colname not in pivtable.columns.values:
pivtable[colname]=0
#pivtable.sort_index(axis=1,inplace=True)
pivtable=pivtable[forcecolumns+['Total']]
return sliceddf, pivtable, numconversations
#%% generate pivotables for issues and adminname
def generatetagpivdf(inputdf, columnname, timeinterval):
#tagpivotdf,responsestats,numconversations=generatetagpivdf(issueschoolexpandeddf,'created_at_Date',[timeframestartdt[0],timeframeenddt[0]])
#adminpivotdf,responsestats,numconversations=generatetagpivdf(issueschoolexpandeddf,'adminname',[timeframestartdt[0],timeframeenddt[0]])
sliceddf, pivtable, numconversations=generatetagpivtbl(inputdf,columnname,timeinterval)
#get response stats
tagRpivotdf=sliceddf[['s_to_first_response',columnname]]
tagRpivotdfdes=tagRpivotdf.groupby(columnname).describe()
#tagRpivotdfs=tagRpivotdfdes.unstack().loc[:,(slice(None),['mean','max'])]
#responsestats=tagRpivotdfs['s_to_first_response'].transpose()
tagRpivotdfs=tagRpivotdfdes.s_to_first_response.unstack()[['mean','max']]
responsestats=tagRpivotdfs.transpose()
return pivtable, responsestats, numconversations
#%% generate pivottables for opentags
def generateopentagpivdf(rawinputdf, timeinterval,timescriptstart=datetime.datetime.now()): #use only sliced, not the augmented one
tfstart=timeinterval[0]
tfend=timeinterval[1]
tfdelta=tfend-tfstart
#have to remove those created on the last day of time interval
df=rawinputdf.copy()
'''
sliceddf=slicebytimeinterval(rawinputdf,timeinterval)#overallconvdf
#get those currently open earlier than of tfstart
currentlyopen=rawinputdf[rawinputdf['open']==1]
openbeforetf=slicebytimeinterval(currentlyopen,[pd.to_datetime(0).date(),tfstart])
#combine for processing
opentagconvdf=sliceddf.append(openbeforetf)
'''
#set all current open conversations to have last_closed to be time of running script.
#openconv=rawinputdf[rawinputdf['last_closed'].isnull()]
df.loc[df['last_closed'].isnull(), 'last_closed'] = timescriptstart#+pd.timedelta(1,'D')
#get all conversations closed before interval
closedbefore=slicebytimeinterval(df,[pd.to_datetime(0).date(), timeinterval[0]],'last_closed')
#get all conversations open after interval
openafter=slicebytimeinterval(df,[timeinterval[1],
|
pd.to_datetime(timescriptstart)
|
pandas.to_datetime
|
"""
File for preprocessing and augmenting data
"""
from bleach import clean
import pandas as pd
import argparse
from sklearn.model_selection import train_test_split
import preprocessor as p # forming a separate feature for cleaned tweets
from nlpaug.augmenter.word.synonym import SynonymAug
from nlpaug.augmenter.word.back_translation import BackTranslationAug
SPLIT_PROP = 0.25
parser = argparse.ArgumentParser(description='Arguments for preprocessing the data.')
parser.add_argument('-data_path', type=str, default='../datasets/tweet_emotions.csv',
help='path to where the data is stored.')
parser.add_argument('-augmentation', type=int, default=0,
help='Whether to augment the data or not.')
parser.add_argument('-last_k', type=int, default=6,
help='Which least populated columns to augment.')
parser.add_argument('-augmenter', type=str, default='synonym',
help='Which augmenter to use.')
def clean_tweets(df: pd.DataFrame) -> pd.DataFrame:
"""
Preprocess the tweets
"""
df.drop(['tweet_id'],axis=1,inplace=True)
df['content'] = df.content.apply(lambda x: p.clean(x))
return df
def augment(df:pd.DataFrame,last_k:int,augmenter='synonym')->pd.DataFrame:
"""
Function for word lvel synonym augmenting string data
in a DataFrame
"""
#create the augmenter
if augmenter=='synonym':
augmenter = SynonymAug(aug_p=0.2,aug_min=1,aug_max=4)
else:
#instantiate the backwards translation
augmenter = BackTranslationAug()
#loop over columns and add their augmented versions
for value in df.sentiment.value_counts().index.to_list()[-last_k:]:
df_part=df[df['sentiment']==value].copy()
df_part.content.apply(lambda x: augmenter.augment(x,num_thread=4))
df=pd.concat([df,df_part])
return df# TODO evaluate model and choose which features to keep
# TODO ADD requirements at the end
if __name__ == '__main__':
args = parser.parse_args()
df=pd.read_csv(args.data_path)
df=clean_tweets(df)
if args.augmentation:
df=augment(df,args.last_k,augmenter=args.augmenter)#TODO augment only the train set
X,y = df["content"], df["sentiment"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42,stratify = y)
df_train =
|
pd.concat([X_train,y_train],axis = 1)
|
pandas.concat
|
import can
import pandas as pd
import numpy as np
from collections import namedtuple
import param
import panel as pn
import holoviews as hv
can_message = namedtuple('can_message', 'timestamp channel arbitration_id dlc data')
def readCanUtilsLog(file):
for msg in can.io.CanutilsLogReader(file):
data = int.from_bytes(msg.data, 'little', signed=False)
yield can_message(msg.timestamp,
msg.channel,
msg.arbitration_id,
msg.dlc,
data)
def can_to_df(msgs):
columns = can_message._fields
df = pd.DataFrame(msgs, columns=columns).astype({'arbitration_id': 'int32',
'dlc': 'int8',
'data': 'uint64'})
return df
def save_df(df, filename, key):
with pd.HDFStore(filename, append=True) as store:
if key in store:
raise ValueError
store.append(f'/{key}', df, format='table', data_columns=['channel', 'arbitration_id'])
def bit_numbering_invert(b):
"""
Convert between lsb0 and msb0 CAN dbc numbering.
This operation is symmetric.
Reference: https://github.com/ebroecker/canmatrix/wiki/signal-Byteorder
:param b: bit number
:return: inverted bit number
"""
return b - (b % 8) + 7 - (b % 8)
def msbit2lsbit(b, length):
"""
Convert from lsbit of signal data to msbit of signal data, when bit numbering is msb0
Reference: https://github.com/ebroecker/canmatrix/wiki/signal-Byteorder
:param b: msbit in msb0 numbering
:param length: signal length in bits
:return: lsbit in msb0 numbering
"""
return b + length - 1
def extract_sig(data, startbit, length, endian, signed, is_float=False):
"""
Extract the raw signal value from a CAN message given the dbc startbit, length and endianess.
DBC bit numbering makes sense for little_endian: 0-63, the startbit is the lsb
For big_endian, the start bit is the msb, but still using the lsb numbering ... which is messed up
After accounting for the numbering, we can extract the signals using simple bit-shift and masks
Kvaser DB editor says start-bit is the lsbit, inconsistent with DBC 'spec'.
:param data: full 64 bit CAN payload
:param startbit: dbc signal startbit
:param length: dbc signal length in bits
:param endian: 'big_endian' or 'little_endian'
:param signed: dbc signal sign (True if signed)
:param is_float: dbc signal is float
:return: raw integer signal value
"""
mask = 2 ** length - 1
if endian == 'big_endian':
# Using msb numbering (msb = 0)
start = bit_numbering_invert(startbit)
shiftcount = 63 - msbit2lsbit(start, length)
else:
shiftcount = startbit
shifted = np.right_shift(data, shiftcount)
val = np.bitwise_and(shifted, mask)
if is_float:
assert length == 32, 'Invalid float length'
val = val.astype('uint32')
return val.view('f4')
if signed:
tmp = val[val >= 2**(length - 1)].copy()
tmp = -1 * (((tmp ^ mask) + 1) & mask).astype('int64')
val = val.astype('int64')
val[val >= 2 ** (length - 1)] = tmp
return val
class DecoderDict(dict):
# Simple dictionary subclass to decode signal enum values and handle missing values
def __missing__(self, key):
return str(key)
def decode_id_vectorized(df, dbc, ID, choice=True):
"""
Vectorized message decoder, decodes all signals in a dbc message
:param df: pandas DataFrame with columns like can_message namedtuple
:param dbc: cantools Database object
:param ID: Message arbitration_id
:param choice: decode raw values to signal choices or ont
:return: pandas DataFrame with decoded values
"""
msg = dbc.get_message_by_frame_id(ID)
if not msg:
raise KeyError(f'ID: {ID} not in dbc')
tmp = df[df['arbitration_id'] == ID]
if msg.is_multiplexed():
muxers = [s for s in msg.signals if s.is_multiplexer]
assert len(muxers) == 1, 'Only supports single multiplexer for now!'
return decode_mux(tmp, msg, choice=choice)
out = dict()
raw = tmp['data'].copy().values
for signal in msg.signals:
sig = decode_signal(raw, signal, choice=choice)
out[signal.name] = sig
decoded =
|
pd.DataFrame(out)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 20 21:05:00 2020
Revised on Thur Mar 18 16:04:00 2021
@author: Starlitnightly
New Version 1.2.3
"""
import itertools
import numpy as np
import pandas as pd
from upsetplot import from_memberships
from upsetplot import plot
def FindERG(data, depth=2, sort_num=20, verbose=False, figure=False):
'''
Find out endogenous reference gene
Parameters
----------
data:pandas.DataFrmae
DataFrame of data points with each entry in the form:['gene_id','sample1',...]
depth:int
Accuracy of endogenous reference gene,must be larger that 2
The larger the number, the fewer genes are screened out,Accuracy improvement
sort_num:int
The size of the peendogenous reference gener filter
When the sample is large, it is recommended to increase the value
verbose: bool
Make the function noisy, writing times and results.
Returns
-------
result:list
a list of endogenous reference gene
'''
lp=[]
if verbose:
import time,datetime
start = time.time()
if depth==1:
print('the depth must larger than 2')
return
if len(data.columns)<=2:
print('the number of samples must larger than 2')
return
if depth>(len(data.columns)):
print('depth larger than samples')
return
count=0
result=[]#result
bucket_size = 1000
for i in itertools.combinations(data.columns[0:depth], 2):
if verbose:
start = time.time()
count=count+1
test=data.replace(0,np.nan).dropna()
last_std=pd.DataFrame()
for k in range(0 ,len(data), bucket_size):
test1=test[i[0]].iloc[k:k + bucket_size]
test2=test[i[1]].iloc[k:k + bucket_size]
data_len=len(test1.values)
table1=np.array(test1.values.tolist()*data_len).reshape(data_len,data_len)
table2=pd.DataFrame(table1.T/table1)
table2.index=test1.index
table4=np.array(test2.values.tolist()*data_len).reshape(data_len,data_len)
table5=
|
pd.DataFrame(table4.T/table4)
|
pandas.DataFrame
|
from __future__ import division
from parameterized import parameterized
from six.moves import range
import numpy as np
import pandas as pd
import talib
from numpy.random import RandomState
from zipline.lib.adjusted_array import AdjustedArray
from zipline.pipeline.data import USEquityPricing
from zipline.pipeline.factors import (
BollingerBands,
Aroon,
FastStochasticOscillator,
IchimokuKinkoHyo,
LinearWeightedMovingAverage,
RateOfChangePercentage,
TrueRange,
MovingAverageConvergenceDivergenceSignal,
AnnualizedVolatility,
RSI,
)
from zipline.testing import check_allclose, parameter_space
from zipline.testing.fixtures import ZiplineTestCase
from zipline.testing.predicates import assert_equal
from .base import BasePipelineTestCase
class BollingerBandsTestCase(BasePipelineTestCase):
def closes(self, mask_last_sid):
data = self.arange_data(dtype=np.float64)
if mask_last_sid:
data[:, -1] = np.nan
return data
def expected_bbands(self, window_length, k, closes):
"""Compute the expected data (without adjustments) for the given
window, k, and closes array.
This uses talib.BBANDS to generate the expected data.
"""
lower_cols = []
middle_cols = []
upper_cols = []
ndates, nassets = closes.shape
for n in range(nassets):
close_col = closes[:, n]
if np.isnan(close_col).all():
# ta-lib doesn't deal well with all nans.
upper, middle, lower = [np.full(ndates, np.nan)] * 3
else:
upper, middle, lower = talib.BBANDS(
close_col,
window_length,
k,
k,
)
upper_cols.append(upper)
middle_cols.append(middle)
lower_cols.append(lower)
# Stack all of our uppers, middles, lowers into three 2d arrays
# whose columns are the sids. After that, slice off only the
# rows we care about.
where = np.s_[window_length - 1:]
uppers = np.column_stack(upper_cols)[where]
middles = np.column_stack(middle_cols)[where]
lowers = np.column_stack(lower_cols)[where]
return uppers, middles, lowers
@parameter_space(
window_length={5, 10, 20},
k={1.5, 2, 2.5},
mask_last_sid={True, False},
__fail_fast=True,
)
def test_bollinger_bands(self, window_length, k, mask_last_sid):
closes = self.closes(mask_last_sid=mask_last_sid)
mask = ~np.isnan(closes)
bbands = BollingerBands(window_length=window_length, k=k)
expected = self.expected_bbands(window_length, k, closes)
self.check_terms(
terms={
'upper': bbands.upper,
'middle': bbands.middle,
'lower': bbands.lower,
},
expected={
'upper': expected[0],
'middle': expected[1],
'lower': expected[2],
},
initial_workspace={
USEquityPricing.close: AdjustedArray(
data=closes,
adjustments={},
missing_value=np.nan,
),
},
mask=self.build_mask(mask),
)
def test_bollinger_bands_output_ordering(self):
bbands = BollingerBands(window_length=5, k=2)
lower, middle, upper = bbands
self.assertIs(lower, bbands.lower)
self.assertIs(middle, bbands.middle)
self.assertIs(upper, bbands.upper)
class AroonTestCase(ZiplineTestCase):
window_length = 10
nassets = 5
dtype = [('down', 'f8'), ('up', 'f8')]
@parameterized.expand([
(np.arange(window_length),
np.arange(window_length) + 1,
np.recarray(shape=(nassets,), dtype=dtype,
buf=np.array([0, 100] * nassets, dtype='f8'))),
(np.arange(window_length, 0, -1),
np.arange(window_length, 0, -1) - 1,
np.recarray(shape=(nassets,), dtype=dtype,
buf=np.array([100, 0] * nassets, dtype='f8'))),
(np.array([10, 10, 10, 1, 10, 10, 10, 10, 10, 10]),
np.array([1, 1, 1, 1, 1, 10, 1, 1, 1, 1]),
np.recarray(shape=(nassets,), dtype=dtype,
buf=np.array([100 * 3 / 9, 100 * 5 / 9] * nassets,
dtype='f8'))),
])
def test_aroon_basic(self, lows, highs, expected_out):
aroon = Aroon(window_length=self.window_length)
today = pd.Timestamp('2014', tz='utc')
assets = pd.Index(np.arange(self.nassets, dtype=np.int64))
shape = (self.nassets,)
out = np.recarray(shape=shape, dtype=self.dtype,
buf=np.empty(shape=shape, dtype=self.dtype))
aroon.compute(today, assets, out, lows, highs)
assert_equal(out, expected_out)
class TestFastStochasticOscillator(ZiplineTestCase):
"""
Test the Fast Stochastic Oscillator
"""
def test_fso_expected_basic(self):
"""
Simple test of expected output from fast stochastic oscillator
"""
fso = FastStochasticOscillator()
today = pd.Timestamp('2015')
assets = np.arange(3, dtype=np.float64)
out = np.empty(shape=(3,), dtype=np.float64)
highs = np.full((50, 3), 3, dtype=np.float64)
lows = np.full((50, 3), 2, dtype=np.float64)
closes = np.full((50, 3), 4, dtype=np.float64)
fso.compute(today, assets, out, closes, lows, highs)
# Expected %K
assert_equal(out, np.full((3,), 200, dtype=np.float64))
@parameter_space(seed=range(5))
def test_fso_expected_with_talib(self, seed):
"""
Test the output that is returned from the fast stochastic oscillator
is the same as that from the ta-lib STOCHF function.
"""
window_length = 14
nassets = 6
rng = np.random.RandomState(seed=seed)
input_size = (window_length, nassets)
# values from 9 to 12
closes = 9.0 + (rng.random_sample(input_size) * 3.0)
# Values from 13 to 15
highs = 13.0 + (rng.random_sample(input_size) * 2.0)
# Values from 6 to 8.
lows = 6.0 + (rng.random_sample(input_size) * 2.0)
expected_out_k = []
for i in range(nassets):
fastk, fastd = talib.STOCHF(
high=highs[:, i],
low=lows[:, i],
close=closes[:, i],
fastk_period=window_length,
fastd_period=1,
)
expected_out_k.append(fastk[-1])
expected_out_k = np.array(expected_out_k)
today = pd.Timestamp('2015')
out = np.empty(shape=(nassets,), dtype=np.float)
assets = np.arange(nassets, dtype=np.float)
fso = FastStochasticOscillator()
fso.compute(
today, assets, out, closes, lows, highs
)
assert_equal(out, expected_out_k, array_decimal=6)
class IchimokuKinkoHyoTestCase(ZiplineTestCase):
def test_ichimoku_kinko_hyo(self):
window_length = 52
today = pd.Timestamp('2014', tz='utc')
nassets = 5
assets = pd.Index(np.arange(nassets))
days_col = np.arange(window_length)[:, np.newaxis]
highs = np.arange(nassets) + 2 + days_col
closes = np.arange(nassets) + 1 + days_col
lows = np.arange(nassets) + days_col
tenkan_sen_length = 9
kijun_sen_length = 26
chikou_span_length = 26
ichimoku_kinko_hyo = IchimokuKinkoHyo(
window_length=window_length,
tenkan_sen_length=tenkan_sen_length,
kijun_sen_length=kijun_sen_length,
chikou_span_length=chikou_span_length,
)
dtype = [
('tenkan_sen', 'f8'),
('kijun_sen', 'f8'),
('senkou_span_a', 'f8'),
('senkou_span_b', 'f8'),
('chikou_span', 'f8'),
]
out = np.recarray(
shape=(nassets,),
dtype=dtype,
buf=np.empty(shape=(nassets,), dtype=dtype),
)
ichimoku_kinko_hyo.compute(
today,
assets,
out,
highs,
lows,
closes,
tenkan_sen_length,
kijun_sen_length,
chikou_span_length,
)
expected_tenkan_sen = np.array([
(53 + 43) / 2,
(54 + 44) / 2,
(55 + 45) / 2,
(56 + 46) / 2,
(57 + 47) / 2,
])
expected_kijun_sen = np.array([
(53 + 26) / 2,
(54 + 27) / 2,
(55 + 28) / 2,
(56 + 29) / 2,
(57 + 30) / 2,
])
expected_senkou_span_a = (expected_tenkan_sen + expected_kijun_sen) / 2
expected_senkou_span_b = np.array([
(53 + 0) / 2,
(54 + 1) / 2,
(55 + 2) / 2,
(56 + 3) / 2,
(57 + 4) / 2,
])
expected_chikou_span = np.array([
27.0,
28.0,
29.0,
30.0,
31.0,
])
assert_equal(
out.tenkan_sen,
expected_tenkan_sen,
msg='tenkan_sen',
)
assert_equal(
out.kijun_sen,
expected_kijun_sen,
msg='kijun_sen',
)
assert_equal(
out.senkou_span_a,
expected_senkou_span_a,
msg='senkou_span_a',
)
assert_equal(
out.senkou_span_b,
expected_senkou_span_b,
msg='senkou_span_b',
)
assert_equal(
out.chikou_span,
expected_chikou_span,
msg='chikou_span',
)
@parameter_space(
arg={'tenkan_sen_length', 'kijun_sen_length', 'chikou_span_length'},
)
def test_input_validation(self, arg):
window_length = 52
with self.assertRaises(ValueError) as e:
IchimokuKinkoHyo(**{arg: window_length + 1})
assert_equal(
str(e.exception),
'%s must be <= the window_length: 53 > 52' % arg,
)
class TestRateOfChangePercentage(ZiplineTestCase):
@parameterized.expand([
('constant', [2.] * 10, 0.0),
('step', [2.] + [1.] * 9, -50.0),
('linear', [2. + x for x in range(10)], 450.0),
('quadratic', [2. + x**2 for x in range(10)], 4050.0),
])
def test_rate_of_change_percentage(self, test_name, data, expected):
window_length = len(data)
rocp = RateOfChangePercentage(
inputs=(USEquityPricing.close,),
window_length=window_length,
)
today = pd.Timestamp('2014')
assets = np.arange(5, dtype=np.int64)
# broadcast data across assets
data = np.array(data)[:, np.newaxis] * np.ones(len(assets))
out = np.zeros(len(assets))
rocp.compute(today, assets, out, data)
assert_equal(out, np.full((len(assets),), expected))
class TestLinearWeightedMovingAverage(ZiplineTestCase):
def test_wma1(self):
wma1 = LinearWeightedMovingAverage(
inputs=(USEquityPricing.close,),
window_length=10
)
today = pd.Timestamp('2015')
assets = np.arange(5, dtype=np.int64)
data = np.ones((10, 5))
out = np.zeros(data.shape[1])
wma1.compute(today, assets, out, data)
assert_equal(out, np.ones(5))
def test_wma2(self):
wma2 = LinearWeightedMovingAverage(
inputs=(USEquityPricing.close,),
window_length=10
)
today = pd.Timestamp('2015')
assets = np.arange(5, dtype=np.int64)
data = np.arange(50, dtype=np.float64).reshape((10, 5))
out = np.zeros(data.shape[1])
wma2.compute(today, assets, out, data)
assert_equal(out, np.array([30., 31., 32., 33., 34.]))
class TestTrueRange(ZiplineTestCase):
def test_tr_basic(self):
tr = TrueRange()
today = pd.Timestamp('2014')
assets = np.arange(3, dtype=np.int64)
out = np.empty(3, dtype=np.float64)
highs = np.full((2, 3), 3.)
lows = np.full((2, 3), 2.)
closes = np.full((2, 3), 1.)
tr.compute(today, assets, out, highs, lows, closes)
assert_equal(out, np.full((3,), 2.))
class MovingAverageConvergenceDivergenceTestCase(ZiplineTestCase):
def expected_ewma(self, data_df, window):
# Comment copied from `test_engine.py`:
# XXX: This is a comically inefficient way to compute a windowed EWMA.
# Don't use it outside of testing. We're using rolling-apply of an
# ewma (which is itself a rolling-window function) because we only want
# to look at ``window_length`` rows at a time.
return data_df.rolling(window).apply(
lambda sub: pd.DataFrame(sub)
.ewm(span=window)
.mean()
.values[-1])
@parameter_space(seed=range(5))
def test_MACD_window_length_generation(self, seed):
rng = RandomState(seed)
signal_period = rng.randint(1, 90)
fast_period = rng.randint(signal_period + 1, signal_period + 100)
slow_period = rng.randint(fast_period + 1, fast_period + 100)
ewma = MovingAverageConvergenceDivergenceSignal(
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
)
assert_equal(
ewma.window_length,
slow_period + signal_period - 1,
)
def test_bad_inputs(self):
template = (
"MACDSignal() expected a value greater than or equal to 1"
" for argument %r, but got 0 instead."
)
with self.assertRaises(ValueError) as e:
MovingAverageConvergenceDivergenceSignal(fast_period=0)
self.assertEqual(template % 'fast_period', str(e.exception))
with self.assertRaises(ValueError) as e:
MovingAverageConvergenceDivergenceSignal(slow_period=0)
self.assertEqual(template % 'slow_period', str(e.exception))
with self.assertRaises(ValueError) as e:
MovingAverageConvergenceDivergenceSignal(signal_period=0)
self.assertEqual(template % 'signal_period', str(e.exception))
with self.assertRaises(ValueError) as e:
MovingAverageConvergenceDivergenceSignal(
fast_period=5,
slow_period=4,
)
expected = (
"'slow_period' must be greater than 'fast_period', but got\n"
"slow_period=4, fast_period=5"
)
self.assertEqual(expected, str(e.exception))
@parameter_space(
seed=range(2),
fast_period=[3, 5],
slow_period=[8, 10],
signal_period=[3, 9],
__fail_fast=True,
)
def test_moving_average_convergence_divergence(self,
seed,
fast_period,
slow_period,
signal_period):
rng = RandomState(seed)
nassets = 3
macd = MovingAverageConvergenceDivergenceSignal(
fast_period=fast_period,
slow_period=slow_period,
signal_period=signal_period,
)
today = pd.Timestamp('2016', tz='utc')
assets = pd.Index(np.arange(nassets))
out = np.empty(shape=(nassets,), dtype=np.float64)
close = rng.rand(macd.window_length, nassets)
macd.compute(
today,
assets,
out,
close,
fast_period,
slow_period,
signal_period,
)
close_df = pd.DataFrame(close)
fast_ewma = self.expected_ewma(
close_df,
fast_period,
)
slow_ewma = self.expected_ewma(
close_df,
slow_period,
)
signal_ewma = self.expected_ewma(
fast_ewma - slow_ewma,
signal_period
)
# Everything but the last row should be NaN.
self.assertTrue(signal_ewma.iloc[:-1].isnull().all().all())
# We're testing a single compute call, which we expect to be equivalent
# to the last row of the frame we calculated with pandas.
expected_signal = signal_ewma.values[-1]
np.testing.assert_almost_equal(
out,
expected_signal,
decimal=8
)
class RSITestCase(ZiplineTestCase):
@parameterized.expand([
# Test cases computed by doing:
# from numpy.random import seed, randn
# from talib import RSI
# seed(seed_value)
# data = abs(randn(15, 3))
# expected = [RSI(data[:, i])[-1] for i in range(3)]
(100, np.array([41.032913785966, 51.553585468393, 51.022005016446])),
(101, np.array([43.506969935466, 46.145367530182, 50.57407044197])),
(102, np.array([46.610102205934, 47.646892444315, 52.13182788538])),
])
def test_rsi(self, seed_value, expected):
rsi = RSI()
today = np.datetime64(1, 'ns')
assets = np.arange(3)
out = np.empty((3,), dtype=float)
np.random.seed(seed_value) # Seed so we get deterministic results.
test_data = np.abs(np.random.randn(15, 3))
out = np.empty((3,), dtype=float)
rsi.compute(today, assets, out, test_data)
check_allclose(expected, out)
def test_rsi_all_positive_returns(self):
"""
RSI indicator should be 100 in the case of 14 days of positive returns.
"""
rsi = RSI()
today = np.datetime64(1, 'ns')
assets = np.arange(1)
out = np.empty((1,), dtype=float)
closes = np.linspace(46, 60, num=15)
closes.shape = (15, 1)
rsi.compute(today, assets, out, closes)
self.assertEqual(out[0], 100.0)
def test_rsi_all_negative_returns(self):
"""
RSI indicator should be 0 in the case of 14 days of negative returns.
"""
rsi = RSI()
today = np.datetime64(1, 'ns')
assets = np.arange(1)
out = np.empty((1,), dtype=float)
closes = np.linspace(46, 32, num=15)
closes.shape = (15, 1)
rsi.compute(today, assets, out, closes)
self.assertEqual(out[0], 0.0)
def test_rsi_same_returns(self):
"""
RSI indicator should be the same for two price series with the same
returns, even if the prices are different.
"""
rsi = RSI()
today = np.datetime64(1, 'ns')
assets = np.arange(2)
out = np.empty((2,), dtype=float)
example_case = np.array([46.125, 47.125, 46.4375, 46.9375, 44.9375,
44.25, 44.625, 45.75, 47.8125, 47.5625, 47.,
44.5625, 46.3125, 47.6875, 46.6875])
double = example_case * 2
closes = np.vstack((example_case, double)).T
rsi.compute(today, assets, out, closes)
self.assertAlmostEqual(out[0], out[1])
class AnnualizedVolatilityTestCase(ZiplineTestCase):
"""
Test Annualized Volatility
"""
def test_simple_volatility(self):
"""
Simple test for uniform returns should generate 0 volatility
"""
nassets = 3
ann_vol = AnnualizedVolatility()
today = pd.Timestamp('2016', tz='utc')
assets = np.arange(nassets, dtype=np.float64)
returns = np.full((ann_vol.window_length, nassets),
0.004,
dtype=np.float64)
out = np.empty(shape=(nassets,), dtype=np.float64)
ann_vol.compute(today, assets, out, returns, 252)
expected_vol = np.zeros(nassets)
np.testing.assert_almost_equal(
out,
expected_vol,
decimal=8
)
def test_volatility(self):
"""
Check volatility results against values calculated manually
"""
nassets = 3
ann_vol = AnnualizedVolatility()
today =
|
pd.Timestamp('2016', tz='utc')
|
pandas.Timestamp
|
from math import sqrt
import numpy as np
import pandas as pd
pd.set_option('display.float_format', lambda x: '%.5f' % x) # pandas
pd.set_option('display.max_columns', 100)
pd.set_option('display.max_rows', 100)
pd.set_option('display.width', 600)
import statsmodels.formula.api as smf
sim_data = r'https://storage.googleapis.com/applied-economics/simulated_data.csv'
df_sim_data = pd.read_csv(sim_data, header=0, index_col=0, parse_dates=True).reset_index()
df_sim_data_est = df_sim_data[102:301]
df_sim_data_fore = df_sim_data[302:401]
"""
# Y, X, and scatter plot
aes(x, y)
ggplot(sim.data$est, aes(x = 102:301, y = y)) + geom_line() + theme_bw() + xlab('') + ylab('') + ggtitle('Y')
ggplot(sim.data$est, aes(x = 102:301, y = x)) + geom_line() + theme_bw() + xlab('') + ylab('') + ggtitle('X')
ggplot(sim.data$est, aes(x = x, y = y)) + geom_point() + theme_bw() + xlab('X') + ylab('Y')
"""
df_sim_data[['x', 'y']].plot.scatter(x='x', y='y')
df_sim_data[['x']].plot()
df_sim_data[['y']].plot()
## OLS
"""
ols.fit <- lm(y ~ x, data = sim.data$est)
print(xtable(ols.fit), floating = F) # LaTeX output
"""
ols_fit = smf.ols(formula='y ~ x', data=df_sim_data_est).fit()
print(ols_fit.summary())
# Forecasts
"""
yhat <- list()
yhat$y <- predict(ols.fit, newdata = sim.data$fore)
yhat$se <- sqrt(sum(ols.fit$residuals^2) / 198)
yhat$y.up <- yhat$y + 1.96 * yhat$se
yhat$y.low <- - 1.96 * yhat$se
"""
yhat = pd.DataFrame()
yhat['y_hat'] = ols_fit.predict(df_sim_data_fore)
yhat['se'] = sqrt(np.sum(ols_fit.resid ** 2) / 198)
yhat['up'] = yhat['y_hat'] + 1.96 * yhat['se']
yhat['low'] = yhat['y_hat'] - 1.96 * yhat['se']
yhat.plot()
# Plot - yhat1 / yhat1_up / yhat1_low
"""
yhat$y.rec <- yhat$y.recse <- rep(0, 100)
for (i in 1:100) {
ols.rec <- lm(y ~ x, data = sim.data$full[102:(300 + i)])
yhat$y.rec[i] <- predict(ols.rec, newdata = sim.data$full[301 + i])
yhat$y.recse[i] <- sqrt(sum(ols.rec$residuals^2) / (197 + i))
}
"""
y_plot = pd.concat([df_sim_data_fore[['y']], yhat[['y_hat']]], axis=1)
y_plot.plot()
## Recursive
df_rec = pd.DataFrame(index=range(0, 400), columns=['y_rec', 'y_recse'])
for i in range(1, 100):
ols_rec = smf.ols(formula='y ~ x', data=df_sim_data[101:(300 + i)]).fit()
df_rec['y_rec'][i + 300] = round(float(ols_rec.predict(df_sim_data[300 + i:(301 + i)])), 6)
df_rec['y_recse'][i + 300] = sqrt(np.sum(ols_rec.resid ** 2) / 197 + i)
# Plot - actual & recursive forecasts
df_plot = pd.concat([df_sim_data, df_rec], axis=1)
df_plot[275:399][['y', 'y_rec']].plot()
print(ols_rec.summary())
# define likelihood function
"""
Element Description
Dep. Variable Which variable is the response in the model
Model What model you are using in the fit
Method How the parameters of the model were calculated
No. Observations The number of observations (examples)
DF Residuals Degrees of freedom of the residuals. Number of observations - number of parameters
DF Model Number of parameters in the model (not including the constant term if present)
Element Description
R-squared The coefficient of determination. A statistical measure of how well the regression line approximates the real data points
Adj. R-squared The above value adjusted based on the number of observations and the degrees-of-freedom of the residuals
F-statistic A measure how significant the fit is. The mean squared error of the model divided by the mean squared error of the residuals
Prob (F-statistic) The probability that you would get the above statistic, given the null hypothesis that they are unrelated
Log-likelihood The log of the likelihood function.
AIC The Akaike Information Criterion. Adjusts the log-likelihood based on the number of observations and the complexity of the model.
BIC The Bayesian Information Criterion. Similar to the AIC, but has a higher penalty for models with more parameters.
Description The name of the term in the model
coef The estimated value of the coefficient
std err The basic standard error of the estimate of the coefficient. More sophisticated errors are also available.
t The t-statistic value. This is a measure of how statistically significant the coefficient is.
P > |t| P-value that the null-hypothesis that the coefficient = 0 is true. If it is less than the confidence level, often 0.05,
it indicates that there is a statistically significant relationship between the term and the response. [95.0% Conf. Interval]
The lower and upper values of the 95% confidence interval
Element Description
Skewness A measure of the symmetry of the data about the mean. Normally-distributed errors should be symmetrically distributed about the mean (equal amounts above and below the line).
Kurtosis A measure of the shape of the distribution. Compares the amount of data close to the mean with those far away from the mean (in the tails).
Omnibus D'Angostino's test. It provides a combined statistical test for the presence of skewness and kurtosis.
Prob(Omnibus) The above statistic turned into a probability
Jarque-Bera A different test of the skewness and kurtosis
Prob (JB) The above statistic turned into a probability
Durbin-Watson A test for the presence of autocorrelation (that the errors are not independent.) Often important in time-series analysis
Cond. No A test for multicollinearity (if in a fit with multiple parameters, the parameters are related with each other).
"""
### 1.12.1 Forecasting Euro Area GDP ###
ex2_regress_gdp = r'https://storage.googleapis.com/applied-economics/ex2_regress_gdp.csv'
df_eu_gdp_full = pd.read_csv(ex2_regress_gdp, header=0, index_col=0, parse_dates=True).reset_index()
## Full sample - 1996Q1 to 2013Q2
gdp_formula = ['y ~ ipr + su + pr + sr',
'y ~ ipr + su + sr',
'y ~ ipr + su',
'y ~ ipr + pr + sr']
fit = {}
df_fit = pd.DataFrame(index=range(0, 400), columns=['y_rec', 'y_recse'])
for i, model in enumerate(gdp_formula):
print(model)
fit[model] = smf.ols(formula=model, data=df_eu_gdp_full).fit()
print(fit[model].summary())
## Estimation sample - 1996Q1 to 2006Q4
"""
eu.gdp$est <- eu.gdp$full[1:44]
eu.gdp$fore <- eu.gdp$full[45:70]
gdp.est <- list()
for (model in 1:4) {
gdp.est[[model]] <- lm(gdp.formula[model], data = eu.gdp$est)
summary(gdp.est[[model]])
}
## Static and recursive forecasts
gdp.fore <- list()
gdp.rec <- list()
for (model in 1:4) {
gdp.fore[[model]] <- predict(gdp.est[[model]], newdata = eu.gdp$fore)
gdp.rec[[model]] <- rep(0, 26)
for (i in 1:26) {
print(eu.gdp$full[44 + i])
ols.rec <- lm(gdp.formula[model], data = eu.gdp$full[1:(43 + i)])
gdp.rec[[model]][i] <- predict(ols.rec, newdata = eu.gdp$full[44 + i])
}
}
"""
## Estimation sample - 1996Q1 to 2006Q4
df_eu_gdp_est = df_eu_gdp_full[0:44]
df_eu_gdp_fore = df_eu_gdp_full[44:70]
gdp_est = {}
for i, model in enumerate(gdp_formula):
gdp_est[model] = smf.ols(formula=model, data=df_eu_gdp_est).fit()
print(gdp_est[model].summary())
"""
## Static and recursive forecasts
gdp.fore < - list()
gdp.rec < - list()
for (model in 1:4) {
gdp.fore[[model]] < - predict(gdp.est[[model]], newdata=eu.gdp$fore)
gdp.rec[[model]] < - rep(0, 26)
for (i in 1:26) {
print(eu.gdp$full[44 + i])
ols.rec < - lm(gdp.formula[model], data=eu.gdp$full[1:(43 + i)])
gdp.rec[[model]][i] < - predict(ols.rec, newdata=eu.gdp$full[44 + i])
}
}
"""
## Static and recursive forecasts
gdp_fore = {}
gdp_rec = {}
df_gdp_rec = pd.DataFrame(index=df_eu_gdp_fore.date, columns=['{}'.format(f) for f in gdp_formula])
for i, model in enumerate(gdp_formula):
gdp_fore[model] = gdp_est[model].predict(df_eu_gdp_fore)
gdp_rec[model] = [0] * 26
for i in range(0, 26):
ols_rec = smf.ols(formula=model, data=df_eu_gdp_full[0: (44 + i)]).fit()
df_gdp_rec['{}'.format(model)][df_eu_gdp_full.loc[[44 + i]].date] = float(ols_rec.predict(df_eu_gdp_full.loc[[44 + i]]))
# Plots - actual & forecasts
df_eu_gdp_plot = pd.concat([df_eu_gdp_full.set_index('date'), df_gdp_rec], axis=1)
pred_columns = ['y'] + ['{}'.format(f) for f in gdp_formula]
df_eu_gdp_plot[df_eu_gdp_fore.date.min(): df_eu_gdp_fore.date.max()][pred_columns].plot()
"""
# RMSE & MAE
gdp.rec$Y <- cbind(gdp.rec[[1]], gdp.rec[[2]], gdp.rec[[3]], gdp.rec[[4]])
RMSE <- sqrt(colSums((gdp.rec$Y - eu.gdp$fore[, y])^2) / 26)
MAE <- colSums(abs(gdp.rec$Y - eu.gdp$fore[, y])) / 26
error.mat <- rbind(RMSE, MAE)
"""
# RMSE & MAE
df_RMSE = df_gdp_rec.apply(lambda x: sqrt(((x - df_eu_gdp_fore['y'].values) ** 2).sum() / 26))
df_MSE = df_gdp_rec.apply(lambda x: (x - df_eu_gdp_fore['y'].values).abs().sum() / 26)
df_error = pd.concat([df_RMSE, df_MSE], axis=1)
df_error.columns = ['RMSE', 'MSE']
df_error
### 1.12.2 Forecating US GDP ###
"""
us.gdp <- list()
us.gdp$full <- fread('ex2_regress_gdp_us.csv')
us.gdp$full[, date := as.Date(date, format = '%m/%d/%Y')]
"""
ex2_regress_gdp_us = r'https://storage.googleapis.com/applied-economics/ex2_regress_gdp_us.csv'
df_us_gdp_full =
|
pd.read_csv(ex2_regress_gdp_us, header=0, index_col=0, parse_dates=True)
|
pandas.read_csv
|
import pandas as pd
artists =
|
pd.read_csv("lyrics6genre/artists-data.csv", index_col="Link")
|
pandas.read_csv
|
import time
import numpy as np
from loguru import logger
import psycopg2.extras as extras
import os
import pandas as pd
import functools
logger.remove(0)
logger.add("sampling.log", level="DEBUG", enqueue=True, mode="w")
def timeit(f_py=None, to_log=None):
assert callable(f_py) or f_py is None
def _decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
result = func(*args, **kwargs)
end = time.time()
if to_log:
logger.debug(
"Function '{}' executed in {:f} s", func.__name__, end - start
)
else:
print(f"| Finished in {end-start:.2f}s.")
return result
return wrapper
return _decorator(f_py) if callable(f_py) else _decorator
@timeit(to_log=True)
def get_load_data(
demand_scenario_id: int,
force_download=False,
**kwargs,
):
""" Query the load data from the database"""
fname = f"load_data-{demand_scenario_id}.csv"
if not os.path.exists(fname) or force_download:
df = read_from_db(
table_name="demand_timeseries",
where_clause=f"demand_scenario_id = '{demand_scenario_id}'",
**kwargs
)
df = df.sort_values(["load_zone_id", "raw_timepoint_id"])
df["date"] = df["timestamp_utc"].dt.strftime("%Y-%m-%d").values
df.to_csv(fname, index=False)
else:
df = pd.read_csv(fname, parse_dates=["timestamp_utc"])
return df
def insert_to_db(
table_name: str,
columns: list,
values,
db_conn,
schema,
id_column=None,
id_var=None,
overwrite=False,
verbose=None,
**kwargs,
):
# Safety check if no DB connection is passed
if not db_conn:
raise SystemExit(
"No connection to DB provided. Check if you passed it correctly"
)
# Convert columns to a single string to pass it into the query
columns = ",".join(columns)
# Default queries.
# NOTE: We can add new queries on this section
search_query = f"""
select {id_column} from {schema}.{table_name} where {id_column} = {id_var};
"""
default_query = f"""
insert into {schema}.{table_name}({columns}) values %s;
"""
clear_query = f"""
delete from {schema}.{table_name} where {id_column} = {id_var};
"""
print(f"+ {table_name}: ")
# Start transaction with DB
with db_conn:
with db_conn.cursor() as curs:
# Check if ID is in database
curs.execute(search_query)
data = curs.fetchall()
if data and overwrite:
if verbose:
print(data)
print(values)
print("| Data exists. Overwritting data")
curs.execute(clear_query)
extras.execute_values(curs, default_query, values)
elif not data:
print("| Inserting new data to DB.")
if verbose:
print(values)
extras.execute_values(curs, default_query, values)
else:
raise SystemExit(
f"\nValue {id_var} for {id_column} already exists on table {table_name}. Use another one."
)
...
def read_from_db(
table_name: str,
db_conn,
schema,
where_clause: str = None,
columns: list = None,
verbose=False,
**kwargs
):
if not db_conn:
raise SystemExit(
"No connection to DB provided. Check if you passed it correctly"
)
print(f" | Reading from {table_name}")
columns = "*" if columns is None else ",".join(columns)
query = f"""
SELECT {columns}
FROM {schema}.{table_name}
"""
if where_clause is not None:
query += f" WHERE {where_clause}"
query += ";"
if verbose:
print(query)
return pd.read_sql_query(query, db_conn)
def get_peak_days(data, freq: str = "MS", verbose: bool = False):
df = data.copy()
# Get timestamp of monthly peak
df = df.set_index("timestamp_utc")
peak_idx = df.groupby(
|
pd.Grouper(freq="MS")
|
pandas.Grouper
|
"""
Test module for runpandas types i.e Sessions
"""
import os
import pytest
from pandas import Timestamp, concat
from runpandas import read_dir, reader
from runpandas.types import columns
from runpandas.exceptions import RequiredColumnError
pytestmark = pytest.mark.stable
@pytest.fixture
def dirpath(datapath):
return datapath("io", "data")
@pytest.fixture
def multi_frame(dirpath):
sessions_dir = os.path.join(dirpath, "samples")
activities = [activity for activity in read_dir(sessions_dir)]
keys = [act.start for act in activities]
multi_frame =
|
concat(activities, keys=keys, names=["start", "time"], axis=0)
|
pandas.concat
|
import pandas as pd
import numpy as np
from suzieq.utils import SchemaForTable, humanize_timestamp, Schema
from suzieq.engines.base_engine import SqEngineObj
from suzieq.sqobjects import get_sqobject
from suzieq.db import get_sqdb_engine
from suzieq.exceptions import DBReadError, UserQueryError
import dateparser
from datetime import datetime
from pandas.core.groupby import DataFrameGroupBy
class SqPandasEngine(SqEngineObj):
def __init__(self, baseobj):
self.ctxt = baseobj.ctxt
self.iobj = baseobj
self.summary_row_order = []
self._summarize_on_add_field = []
self._summarize_on_add_with_query = []
self._summarize_on_add_list_or_count = []
self._summarize_on_add_stat = []
self._summarize_on_perdevice_stat = []
self._dbeng = get_sqdb_engine(baseobj.ctxt.cfg, baseobj.table, '',
None)
@property
def all_schemas(self) -> Schema:
return self.ctxt.schemas
@property
def schema(self) -> SchemaForTable:
return self.iobj.schema
@property
def cfg(self):
return self.iobj._cfg
@property
def table(self):
return self.iobj._table
def _get_ipvers(self, value: str) -> int:
"""Return the IP version in use"""
if ':' in value:
ipvers = 6
elif '.' in value:
ipvers = 4
else:
ipvers = ''
return ipvers
def _handle_user_query_str(self, df: pd.DataFrame,
query_str: str) -> pd.DataFrame:
"""Handle user query, trapping errors and returning exception
Args:
df (pd.DataFrame): The dataframe to run the query on
query_str (str): pandas query string
Raises:
UserQueryError: Exception if pandas query aborts with errmsg
Returns:
pd.DataFrame: dataframe post query
"""
if query_str:
if query_str.startswith('"') and query_str.endswith('"'):
query_str = query_str[1:-1]
try:
df = df.query(query_str).reset_index(drop=True)
except Exception as ex:
raise UserQueryError(ex)
return df
def get_valid_df(self, table: str, **kwargs) -> pd.DataFrame:
"""The heart of the engine: retrieving the data from the backing store
Args:
table (str): Name of the table to retrieve the data for
Returns:
pd.DataFrame: The data as a pandas dataframe
"""
if not self.ctxt.engine:
print("Specify an analysis engine using set engine command")
return pd.DataFrame(columns=["namespace", "hostname"])
# Thanks to things like OSPF, we cannot use self.schema here
sch = SchemaForTable(table, self.all_schemas)
phy_table = sch.get_phy_table_for_table()
columns = kwargs.pop('columns', ['default'])
addnl_fields = kwargs.pop('addnl_fields', [])
view = kwargs.pop('view', self.iobj.view)
active_only = kwargs.pop('active_only', True)
hostname = kwargs.get('hostname', [])
fields = sch.get_display_fields(columns)
key_fields = sch.key_fields()
drop_cols = []
if columns == ['*']:
drop_cols.append('sqvers')
aug_fields = sch.get_augmented_fields()
if 'timestamp' not in fields:
fields.append('timestamp')
if 'active' not in fields+addnl_fields:
addnl_fields.append('active')
drop_cols.append('active')
# Order matters. Don't put this before the missing key fields insert
for f in aug_fields:
dep_fields = sch.get_parent_fields(f)
addnl_fields += dep_fields
for fld in key_fields:
if fld not in fields+addnl_fields:
addnl_fields.insert(0, fld)
drop_cols.append(fld)
for f in addnl_fields:
if f not in fields:
# timestamp is always the last field
fields.insert(-1, f)
if self.iobj.start_time:
try:
start_time = int(dateparser.parse(
self.iobj.start_time.replace('last night', 'yesterday'))
.timestamp()*1000)
except Exception as e:
print(f"ERROR: invalid time {self.iobj.start_time}: {e}")
return pd.DataFrame()
else:
start_time = ''
if self.iobj.start_time and not start_time:
# Something went wrong with our parsing
print(f"ERROR: unable to parse {self.iobj.start_time}")
return pd.DataFrame()
if self.iobj.end_time:
try:
end_time = int(dateparser.parse(
self.iobj.end_time.replace('last night', 'yesterday'))
.timestamp()*1000)
except Exception as e:
print(f"ERROR: invalid time {self.iobj.end_time}: {e}")
return pd.DataFrame()
else:
end_time = ''
if self.iobj.end_time and not end_time:
# Something went wrong with our parsing
print(f"ERROR: Unable to parse {self.iobj.end_time}")
return pd.DataFrame()
table_df = self._dbeng.read(
phy_table,
'pandas',
start_time=start_time,
end_time=end_time,
columns=fields,
view=view,
key_fields=key_fields,
**kwargs
)
if not table_df.empty:
# hostname may not have been filtered if using regex
if hostname:
hdf_list = []
for hn in hostname:
df1 = table_df.query(f"hostname.str.match('{hn}')")
if not df1.empty:
hdf_list.append(df1)
if hdf_list:
table_df = pd.concat(hdf_list)
else:
return pd.DataFrame(columns=table_df.columns.tolist())
if view == "all" or not active_only:
table_df.drop(columns=drop_cols, inplace=True)
else:
table_df = table_df.query('active') \
.drop(columns=drop_cols)
if 'timestamp' in table_df.columns and not table_df.empty:
table_df['timestamp'] = humanize_timestamp(
table_df.timestamp, self.cfg.get('analyzer', {})
.get('timezone', None))
return table_df
def get(self, **kwargs) -> pd.DataFrame:
"""The default get method for all tables
Use this for a table if nothing special is desired. No table uses
this routine today.
Raises:
NotImplementedError: If no table has been defined
Returns:
pd.DataFrame: pandas dataframe of the object
"""
if not self.iobj.table:
raise NotImplementedError
user_query = kwargs.pop('query_str', '')
df = self.get_valid_df(self.iobj.table, **kwargs)
df = self._handle_user_query_str(df, user_query)
return df
def get_table_info(self, table: str, **kwargs) -> dict:
"""Returns information about the data available for a table
Used by table show command exclusively.
Args:
table (str): Name of the table about which info is desired
Returns:
dict: The desired data as a dictionary
"""
# You can't use view from user because we need to see all the data
# to compute data required.
kwargs.pop('view', None)
all_time_df = self.get_valid_df(table, view='all', **kwargs)
times = all_time_df['timestamp'].unique()
ret = {'firstTime': all_time_df.timestamp.min(),
'latestTime': all_time_df.timestamp.max(),
'intervals': len(times),
'allRows': len(all_time_df),
'namespaces': self._unique_or_zero(all_time_df, 'namespace'),
'deviceCnt': self._unique_or_zero(all_time_df, 'hostname')}
return ret
def _get_table_sqobj(self, table: str, start_time: str = None,
end_time: str = None, view=None):
"""Normalize pulling data from other tables into this one function
Typically pulling data involves calling get_sqobject with a bunch of
parameters that need to be passed to it, that a caller can forget to
pass. A classic example is passing the view, start-time and end-time
which is often forgotten. This function fixes this issue.
Args:
table (str): The table to retrieve the info from
verb (str): The verb to use in the get_sqobject call
"""
return get_sqobject(table)(
context=self.ctxt,
start_time=start_time or self.iobj.start_time,
end_time=end_time or self.iobj.end_time,
view=view or self.iobj.view)
def _unique_or_zero(self, df: pd.DataFrame, col: str) -> int:
"""Returns the unique count of a column in a dataframe or 0
Args:
df (pd.DataFrame): The dataframe to use
col (str): The column name to use
Returns:
int: Count of unique values
"""
if col in df.columns:
return df[col].nunique()
else:
return 0
def summarize(self, **kwargs):
"""Summarize the info about this resource/service.
There is a pattern of how to do these
use self._init_summarize():
- creates self.summary_df, which is the initial pandas dataframe
based on the table
- creates self.nsgrp of data grouped by namespace
- self.ns is the dict to add data to which will be turned into a
dataframe and then returned
if you want to simply take a field and run a pandas functon, then use
self._add_field_to_summary
at the end of te summarize
return pd.DataFrame(self.ns).convert_dtypes()
If you don't override this, then you get a default summary of all columns
"""
self._init_summarize(self.iobj._table, **kwargs)
if self.summary_df.empty:
return self.summary_df
self._gen_summarize_data()
self._post_summarize()
return self.ns_df.convert_dtypes()
def unique(self, **kwargs) -> pd.DataFrame:
"""Return the unique elements as per user specification
Raises:
ValueError: If len(columns) != 1
Returns:
pd.DataFrame: Pandas dataframe of unique values for given column
"""
count = kwargs.pop("count", 0)
query_str = kwargs.get('query_str', '')
columns = kwargs.pop("columns", None)
if query_str:
getcols = ['*']
else:
getcols = columns
column = columns[0]
df = self.get(columns=getcols, **kwargs)
if df.empty:
return df
# check if column we're looking at is a list, and if so explode it
if df.apply(lambda x: isinstance(x[column], np.ndarray), axis=1).all():
df = df.explode(column).dropna(how='any')
if not count:
return (pd.DataFrame({f'{column}': df[column].unique()}))
else:
r = df[column].value_counts()
return (pd.DataFrame({column: r})
.reset_index()
.rename(columns={column: 'numRows',
'index': column})
.sort_values(column))
def analyze(self, **kwargs):
raise NotImplementedError
def aver(self, **kwargs):
raise NotImplementedError
def top(self, **kwargs):
"""Default implementation of top.
The basic fields this assumes are present include the "what" keyword
which contains the name of the field we're getting the transitions on,
the "n" field which tells the count of the top entries you're
looking for, and the reverse field which tells whether you're looking
for the largest (default, and so reverse is False) or the smallest(
reverse is True). This invokes the default object's get routine. It
is upto the caller to ensure that the desired column is in the output.
"""
what = kwargs.pop("what", None)
reverse = kwargs.pop("reverse", False)
sqTopCount = kwargs.pop("count", 5)
if not what:
return
|
pd.DataFrame()
|
pandas.DataFrame
|
#! /usr/bin/env python
import maple
import maple.utils as utils
import maple.audio as audio
from maple.owner_recordings import OwnerRecordings
import time
import numpy as np
import pandas as pd
import pyaudio
import argparse
import datetime
import sounddevice as sd
from pathlib import Path
from scipy.io.wavfile import read as wav_read
from scipy.io.wavfile import write as wav_write
class Stream(object):
def __init__(self):
self.p = pyaudio.PyAudio()
self._stream = self.p.open(
format = pyaudio.paInt16,
channels = 1,
rate = maple.RATE,
input = True,
frames_per_buffer = maple.CHUNK,
input_device_index = utils.get_mic_id(),
start = False, # To read from stream, self.stream.start_stream must be called
)
def __enter__(self):
if not self._stream.is_active():
self._stream.start_stream()
return self
def __exit__(self, exc_type, exc_val, traceback):
self._stream.stop_stream()
def close(self):
"""Close the stream gracefully"""
if self._stream.is_active():
self._stream.stop_stream()
self._stream.close()
self.p.terminate()
class Detector(object):
def __init__(self, start_thresh, end_thresh, num_consecutive, seconds, dt, hang_time, wait_timeout, quiet=True):
"""Manages the detection of events
Parameters
==========
start_thresh : float
The pressure that must exceeded for a data point to be considered as the start of an
event.
end_thresh : float
The pressure value that the pressure must dip below for a data point to be considered as
the end of an event.
num_consecutive : int
The number of frames needed that must consecutively be above the threshold to be
considered the start of an event.
seconds : float
The number of seconds that must pass after the `end_thresh` condition is met in order
for the event to end. If, during this time, the `start_thresh` condition is met, the
ending of the event will be cancelled.
dt : float
The inverse sampling frequency, i.e the time captured by each frame.
hang_time : float
If an event lasts this long (seconds), the flag self.hang is set to True
wait_timeout : float
If no event occurs in this amount of time (seconds), self.timeout is set to True
quiet : bool
If True, nothing is sent to stdout
"""
self.quiet = quiet
self.dt = dt
self.start_thresh = start_thresh
self.end_thresh = end_thresh
self.seconds = seconds
self.num_consecutive = num_consecutive
self.hang_time = datetime.timedelta(seconds=hang_time)
self.wait_timeout = datetime.timedelta(seconds=wait_timeout)
self.reset()
def update_event_states(self, pressure):
"""Update event states based on their current states plus the pressure of the current frame"""
if self.in_event and self.timer.timedelta_to_checkpoint(checkpoint_key='start') > self.hang_time:
# Event has lasted more than self.hang_time seconds
self.hang = True
if not self.in_event and self.timer.timedelta_to_checkpoint(checkpoint_key=0) > self.wait_timeout:
self.timeout = True
if self.event_started:
self.event_started = False
if self.in_event:
if self.in_off_transition:
if self.off_time > self.seconds:
self.in_event = False
self.in_off_transition = False
self.event_finished = True
elif pressure > self.start_thresh:
self.in_off_transition = False
else:
self.off_time += self.dt
else:
if pressure < self.end_thresh:
self.in_off_transition = True
self.off_time = 0
else:
pass
else:
if self.in_on_transition:
# Not in event
if self.on_counter >= self.num_consecutive:
self.in_event = True
self.in_on_transition = False
self.event_started = True
elif pressure > self.start_thresh:
self.on_counter += 1
else:
self.in_on_transition = False
self.frames = []
else:
if pressure > self.start_thresh:
self.in_on_transition = True
self.on_counter = 0
else:
# Silence
pass
def print_to_stdout(self):
"""Prints to standard out to create a text-based stream of event detection"""
if self.quiet:
return
if self.in_event:
if self.in_off_transition:
msg = ' | '
else:
msg = ' |||'
else:
if self.in_on_transition:
msg = ' | '
else:
msg = ''
if self.event_started:
msg = '####### EVENT START #########'
elif self.event_finished:
msg = '####### EVENT END #########'
print(msg)
def reset(self):
"""Reset event states and storage buffer"""
self.in_event = False
self.in_off_transition = False
self.in_on_transition = False
self.event_finished = False
self.event_started = False
self.hang = False
self.timeout = False
self.timer = utils.Timer()
self.frames = []
def append_to_buffer(self, data):
if self.in_event or self.in_on_transition:
self.frames.append(data)
def process(self, data):
"""Takes in data and updates event transition variables if need be"""
# Calculate pressure of frame
pressure = utils.calc_mean_pressure(data)
self.update_event_states(pressure)
if self.event_started:
self.timer.make_checkpoint('start')
elif self.event_finished:
self.timer.make_checkpoint('finish')
# Write to stdout if not self.quiet
self.print_to_stdout()
# Append to buffer
self.append_to_buffer(data)
def get_event_data(self):
return np.concatenate(self.frames)
class Monitor(object):
def __init__(self, args = argparse.Namespace()):
self.args = args
A = lambda x: self.args.__dict__.get(x, None)
self.quiet = A('quiet') or False
self.calibration_time = A('calibration_time') or 3 # How many seconds is calibration window
self.event_start_threshold = A('event_start_threshold') or 4 # standard deviations above background noise to start an event
self.event_end_threshold = A('event_end_threshold') or 4 # standard deviations above background noise to end an event
self.background_mean_preset = A('background_mean_preset')
self.background_std_preset = A('background_std_preset')
self.seconds = A('seconds') or 0.25 # see Detector docstring
self.num_consecutive = A('num_consecutive') or 4 # see Detector docstring
self.hang_time = A('hang_time') or 20 # see Detector docstring
self.wait_timeout = A('wait_timeout') or 10 # see Detector docstring
self.stream = None
self.background = None
self.background_std = None
self.detector = None
self.event_recs = {}
self.num_events = 0
self.dt = maple.CHUNK/maple.RATE # Time between each sample
def read_chunk(self):
"""Read a chunk from the stream and cast as a numpy array"""
return np.fromstring(self.stream._stream.read(maple.CHUNK), dtype=maple.ARRAY_DTYPE)
def calibrate_background_noise(self):
"""Establish a background noise
Samples a small segment of background noise for noise removal.
Notes
=====
- In a perfect world this method calibrates the self.background and self.background_std
attributes, however I have not developed a robust enough calibration system.
"""
print(f'Starting {self.calibration_time} second calibration.')
# Number of chunks in running window based on self.calibration_time
running_avg_domain = int(self.calibration_time / self.dt)
audio_chunks = []
with self.stream:
for i in range(running_avg_domain):
chunk = self.read_chunk()
audio_chunks.append(chunk)
self.background_audio = np.concatenate(audio_chunks)
self.background = self.background_mean_preset
self.background_std = self.background_std_preset
print('Calibration done.')
def setup(self):
self.stream = Stream()
self.recalibrate()
def recalibrate(self):
self.calibrate_background_noise()
# Recast the start and end thresholds in terms of pressure values
start_thresh = self.background + self.event_start_threshold * self.background_std
end_thresh = self.background + self.event_end_threshold * self.background_std
self.detector = Detector(
start_thresh = start_thresh,
end_thresh = end_thresh,
seconds = self.seconds,
num_consecutive = self.num_consecutive,
hang_time = self.hang_time,
wait_timeout = self.wait_timeout,
dt = self.dt,
quiet = self.quiet,
)
def wait_for_event(self, timeout=False, denoise=True):
"""Waits for an event
Records the event, and returns the event audio as numpy array.
Parameters
==========
timeout : bool, False
If True, returns None after self.detector.wait_timeout seconds passes without detecting
the start of an event.
"""
self.detector.reset()
with self.stream:
while True:
self.detector.process(self.read_chunk())
if self.detector.event_finished:
break
if self.detector.hang:
print('Event hang... Recalibrating')
self.recalibrate()
return self.wait_for_event()
if timeout and self.detector.timeout:
return None
event_audio = self.detector.get_event_data()
return audio.denoise(event_audio, self.background_audio) if denoise else event_audio
def stream_pressure_and_pitch_to_stdout(self, data):
"""Call for every chunk to create a primitive stream plot of pressure and pitch to stdout
Pitch is indicated with 'o' bars, amplitude is indicated with '-'
"""
pressure = utils.calc_mean_pressure(data)
bars = "-"*int(1000*pressure/2**16)
print("%05d %s" % (pressure, bars))
w = np.fft.fft(data)
freqs = np.fft.fftfreq(len(data))
peak = abs(freqs[np.argmax(w)] * maple.RATE)
bars="o"*int(3000*peak/2**16)
print("%05d %s" % (peak, bars))
class Responder(object):
def __init__(self, args = argparse.Namespace(quiet=False)):
A = lambda x: args.__dict__.get(x, None)
self.praise = A('praise')
if self.praise is None: self.praise = False
self.praise_max_events = A('praise_max_events') or 10
self.praise_max_pressure_sum = A('praise_max_pressure_sum') or 0.01
self.praise_response_window = A('praise_response_window') or 2
self.praise_cooldown = A('praise_cooldown') or 2
# These event classes do not decrease praise likelihood or increase scold likelihood
self.neutral_classes = [
'play',
]
self.scold = A('scold')
if self.scold is None: self.scold = False
self.scold_trigger = A('scold_trigger') or 0.03
self.scold_scratch_door_count = A('scold_scratch_door_count') or 5
self.scold_bark_count = A('scold_bark_count') or 10
self.scold_consec_bark_count = A('scold_consec_bark_count') or 3
self.scold_response_window = A('scold_response_window') or 0.5
self.scold_cooldown = A('scold_cooldown') or 5
# FIXME not implemented
self.warn = A('warn')
if self.warn is None: self.warn = False
self.warn_response_window = A('warn_response_window') or 0.25
self.warn_cooldown = A('warn_cooldown') or 1
# Cast everything as datetime
self.response_window = datetime.timedelta(minutes=max([
self.warn_response_window,
self.scold_response_window,
self.praise_response_window,
]))
self.warn_response_window = datetime.timedelta(minutes=self.warn_response_window)
self.scold_response_window = datetime.timedelta(minutes=self.scold_response_window)
self.praise_response_window = datetime.timedelta(minutes=self.praise_response_window)
self.warn_cooldown = datetime.timedelta(minutes=self.warn_cooldown)
self.scold_cooldown = datetime.timedelta(minutes=self.scold_cooldown)
self.praise_cooldown = datetime.timedelta(minutes=self.praise_cooldown)
self.owner = OwnerRecordings()
self.owner.load()
self.events_in_window =
|
pd.DataFrame({}, columns=maple.db_structure['events']['names'])
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 12 10:00:50 2020
@author: <NAME>
This script:
To do:
Remove seasonal option? And new_seasonal
Remove all s3 and s4 stuff
Make a dataframe of total glacier runoff beforehand, takes too long to have the whole spatial nc
Explain N in hydrographs name
Done:
Files needed:
Directories needed in run_dir:
Files
glacier_dailybasinsum
glaciers_nc
Figures
Output (where hydrographs are )
"""
import os
from os.path import join
import subprocess
import numpy.ma as ma
import numpy as np
import matplotlib.pyplot as plt
import time
import datetime
import pandas as pd
import xarray as xr
import hydroeval as he
import glob
import scipy.stats as stats
from matplotlib import rcParams, cycler
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
#%% Functions
def months_slice(year,hemisphere,seasonal):
""" Create a slice of all months in one hydrological year
Southern hemispere is months earlier
Seasonal boolean is to only consider summer months"""
if seasonal ==True:
if hemisphere == 'North':
frommonth = '03'
tomonth = '10'
return slice(str(year)+'-'+frommonth,str(year)+'-'+tomonth)
elif hemisphere == 'South':
frommonth='09'
tomonth ='04'
return slice(str(year-1)+'-'+frommonth,str(year)+'-'+tomonth)
elif seasonal ==False:
if hemisphere =='North':
return slice(str(year-1)+'-10',str(year)+'-09')
if hemisphere =='South':
return slice(str(year-1)+'-04',str(year)+'-03')
def make_space_above(axes, topmargin=1):
""" increase figure size to make topmargin (in inches) space for
titles, without changing the axes sizes"""
fig = axes.flatten()[0].figure
s = fig.subplotpars
w, h = fig.get_size_inches()
figh = h - (1-s.top)*h + topmargin
fig.subplots_adjust(bottom=s.bottom*h/figh, top=1-topmargin/figh)
fig.set_figheight(figh)
def FD_curve(data):
""" Flow duration curve calculation"""
y = data.sort_values(ascending=False).values
x= np.cumsum(y)/np.nansum(y) *100
return x,y
def load_hg2(GRDC_no, full_daterange):
hg2_path = join(RUN_DIR,'Files','Q_obs',GRDC_no+'_Q_Day.Cmd.txt')
hg = pd.read_csv(hg2_path,
delimiter=';',
skiprows=36,
index_col=0,
parse_dates=True,
skipinitialspace=True,
usecols=[0,2])
hg = hg.rename(columns = {'Value':'hg'})
hg_full = pd.DataFrame(data=hg,index=full_date_range)
hg_full.index = hg_full.index.rename('time')
return hg_full.hg.to_xarray()
def RMSE(a,b):
return np.sqrt(((a-b)**2).mean())
def BE(mod,obs,benchmark):
"""Calculation of the Benchmark efficiency (comparison
between the coupled model and the benchmark against observations)"""
return 1-(np.nansum((mod-obs)**2)/np.nansum((obs-benchmark)**2))
#%% INPUTS
RUN_DIR = r'd:\Documents\Master vakken\Thesis\Code'
os.chdir(RUN_DIR)
NC_DIR =r'd:\Documents\Master vakken\Thesis\Code\Files\glaciers_nc'
FIG_DIR =join(RUN_DIR,'Figures')
basin_info = pd.read_csv(join(
RUN_DIR,'Files','basin_info_45min.csv'),index_col = 0)
### 25 basins used in the paper
# BASIN_NAMES = ['RHONE']
BASIN_NAMES = basin_info[basin_info['suitable']=='y'].index
# BASIN_NAMES = ['AMAZON','IRRAWADDY','MACKENZIE',
# 'OB','YUKON','ALSEK', 'CLUTHA', 'COLUMBIA', 'COPPER', 'DANUBE', 'DRAMSELV',
# 'FRASER', 'GLOMA',
# 'KUSKOKWIM', 'NASS', 'NEGRO', 'OELFUSA',
# 'RHINE', 'RHONE', 'SKAGIT', 'SKEENA', 'STIKINE','SUSITNA',
# 'TAKU', 'THJORSA'] #minus Indus, Kalixaelven, Nelson, Joekulsa, <NAME>, Lule
### Basins with routing problems
# BASIN_NAMES = ['JOEKULSA','NELSON','SANTA_CRUZ','LULE','KALIXAELVEN]
###Large Basins
# BASIN_NAMES = ['AMAZON','IRRAWADDY','MACKENZIE','OB','YUKON']
###Basins with a second station
# BASIN_NAMES = ['CLUTHA','COLUMBIA','RHINE','SUSITNA','DANUBE']
MODEL_SETUPS = ['2','0','1']
MODEL_NAMES ={'0':'Modelled (Benchmark)',
'1':'Modelled (Bare)',
'2':'Modelled (Coupled)'}
SEASONAL = False #False for whole year, true for only summer
MAIN_PLOT =['s0','s1','s2'] #Which model settings to plot
only_obsyears =True #Consider only years for which GRDC is available
PLOT_BOOL = True
save_figs = False
# new_seasonal =False #Consider only months above certain glacier runoff threshold
calendar_day =False #Calculate Caldendar day bencmark (Schaefli&Gupta2007)
GHM_NAME ='PCRG'
GG_GCM ='HadGEM2-ES'
GG_rcp ='rcp26'
Fromyear =2001
Untilyear =2012
OF_list = []
normdiflist=[]
FD_list = []
NRD_list = []
MBE_list = []
HG_list = []
glacier_sum_list =[]
Qobs_list = []
#%%
for Basin_name in BASIN_NAMES:
# Load hydrographs
print (Basin_name)
if Basin_name in ['CLUTHA','NEGRO','AMAZON','SANTA_CRUZ']:
hemisphere = 'South'
daterange = pd.date_range(str(Fromyear-1)+'-04-01',
str(Untilyear)+'-03-31')
else:
hemisphere = 'North'
daterange = pd.date_range(str(Fromyear-1)+'-10-01',
str(Untilyear)+'-09-30')
#Load GloGEM glacier runoff for analysis
glacier_sum_path = join(RUN_DIR,'Files','glacier_dailybasinsum',Basin_name+'_glacsum.nc')
nc_path = join(NC_DIR,'_'.join([Basin_name,
GG_GCM,
GG_rcp,
'2000',
'2016',
'R.nc']))
nc_obs =xr.open_dataset(nc_path).sel(time=daterange)
if not os.path.isfile(glacier_sum_path):
glacier_sum = nc_obs.R.sum(axis=(1,2))/(24*60*60)
glacier_sum.to_netcdf(glacier_sum_path)
else:
glacier_sum = xr.open_dataarray(glacier_sum_path)
glacier_sum_list.append(glacier_sum)
# if new_seasonal==True:
# nc_df = glacier_sum.to_dataframe()
# nc_df.pop('spatial_ref')
# nc_df.pop('height')
# R_months = nc_df.groupby(nc_df.index.month).sum()
# R_fracs = R_months/R_months.sum()>0.001
# min_month = R_fracs.index[R_fracs.R].min()
# max_month = R_fracs.index[R_fracs.R].max()
# daterange = daterange[(daterange.month>min_month)&
# (daterange.month<max_month)]
# nc_obs =nc_obs.sel(time=daterange)
# glacier_sum = nc_obs.R.sum(axis=(1,2))/(24*60*60)
#Load GRDC observations from .nc files
if Basin_name in ['CLUTHA','COLUMBIA','RHINE','SUSITNA','DANUBE']:
#NC files still has observations at basin mouth instead of more upstream
full_date_range=
|
pd.to_datetime(nc_obs.time.data)
|
pandas.to_datetime
|
import unittest
from unittest import TestCase
import env
import data
from eventgraphs import EventGraph, BadInputError
import pandas as pd
from pandas.testing import assert_frame_equal
DATASETS = [data.directed,
data.directed_hyper,
data.directed_hyper_single,
data.undirected_hyper,
data.extra_columns,
data.string_labels]
class IOTests(TestCase):
"""
Tests the input and output functionality of EventGraph class.
"""
def from_pandas(self, dataset):
""""""
df =
|
pd.DataFrame(dataset)
|
pandas.DataFrame
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.