prompt (string, length 19-1.03M) | completion (string, length 4-2.12k) | api (string, length 8-90) |
---|---|---|
import re
from datetime import datetime
import discord
from secret import TOKEN
import plotly.express as px
import pandas as pd
client = discord.Client()
@client.event
async def on_ready():
# await count_tower()
await count_counting()
async def count_counting():
channel = client.get_channel(586777485023379467)
counts = list()
last = 0
async for message in channel.history(limit=1000000):
number_part = re.search(r"^-?\d+", message.content.strip())
if number_part is None:
print(message.content.strip())
else:
number = int(number_part[0])
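# If the step from the previous count is implausible (more than 1 away) but the
# negated value is within 1 of the previous count, assume a dropped minus sign.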
if (not (1 >= number-last >= -1)) and (1 >= -number-last >= -1):
number *= -1
user = message.author.name
timestamp = message.created_at
counts.append((timestamp, user, number))
last = number
data = pd.DataFrame(counts, columns=["time", "user", "number"])
fig = px.line(data, x="time", y="number", title="#counting")
# fig.add_vline(x=datetime(year=2019, month=12, day=12).timestamp(), line_dash="dash", annotation_text="Ryu calls for a crusade to +100", annotation_position="top left")
# fig.add_vline(x=datetime(year=2020, month=2, day=12).timestamp(), line_dash="dash", annotation_text="Cryusade reaches +100", annotation_position="top left")
# fig.add_vline(x=datetime(year=2021, month=7, day=26).timestamp(), line_dash="dash", annotation_text="Blue issues counting challenge", annotation_position="top left")
fig.add_hline(y=42, line_dash="dash", annotation_text="+42", annotation_position="top left")
fig.add_hline(y=69, line_dash="dash", annotation_text="+69", annotation_position="top left")
# fig.add_vrect(x0="2019-12-12", x1="2020-02-12",
# annotation_text="Cryusade", annotation_position="top left",
# fillcolor="green", opacity=0.25, line_width=0)
fig.show()
async def count_tower():
channel = client.get_channel(401200182525558785)
counts = list()
async for message in channel.history(limit=1000000):
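# Extract the floor number from messages like "floor 123: ...": either a leading
# word then digits, or "... floor <n>", followed by ":", ";", or another word.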
number_part = re.search(r"(?:^\W*[a-zA-z]*\W*|.*floor )(\d+)\**(?:[:;].*|\s*[a-zA-z])", message.content.strip().lower())
if number_part is None:
if message.content.strip().lower().startswith("floor"):
number = None
else:
print(message.content.strip())
continue
else:
number = int(number_part[1])
user = message.author.name
timestamp = message.created_at
counts.append((timestamp, user, number))
data = | pd.DataFrame(counts, columns=["time", "user", "number"]) | pandas.DataFrame |
import numpy as np
import pandas as pd
import pytest
import ray
from ray.ml.preprocessor import PreprocessorNotFittedException
from ray.ml.preprocessors import (
StandardScaler,
MinMaxScaler,
OrdinalEncoder,
OneHotEncoder,
LabelEncoder,
SimpleImputer,
Chain,
)
def test_standard_scaler():
"""Tests basic StandardScaler functionality."""
col_a = [-1, 0, 1, 2]
col_b = [1, 1, 5, 5]
col_c = [1, 1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = StandardScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
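# The expected std values are population standard deviations (ddof=0),
# e.g. std([1, 1, 5, 5]) == 2.0.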
assert scaler.stats_ == {
"mean(B)": 3.0,
"mean(C)": 1.0,
"std(B)": 2.0,
"std(C)": 0.0,
}
# Transform data.
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [-1.0, -1.0, 1.0, 1.0]
processed_col_c = [0.0, 0.0, 0.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.0, 1.0, 2.0]
pred_processed_col_c = [-1.0, 0.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_min_max_scaler():
"""Tests basic MinMaxScaler functionality."""
col_a = [-1, 0, 1]
col_b = [1, 3, 5]
col_c = [1, 1, None]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
scaler = MinMaxScaler(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
scaler.transform(ds)
# Fit data.
scaler.fit(ds)
assert scaler.stats_ == {"min(B)": 1, "max(B)": 5, "min(C)": 1, "max(C)": 1}
transformed = scaler.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [0.0, 0.5, 1.0]
processed_col_c = [0.0, 0.0, None]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, 3]
pred_col_b = [3, 5, 7]
pred_col_c = [0, 1, 2]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = scaler.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0.5, 1.0, 1.5]
pred_processed_col_c = [-1.0, 0.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
def test_ordinal_encoder():
"""Tests basic OrdinalEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
encoder = OrdinalEncoder(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [2, 0, 1, 0]
processed_col_c = [0, 2, 1, 2]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = encoder.transform_batch(pred_in_df)
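# Categories not seen during fit ("other" in B, 20 in C) are encoded as None.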
pred_processed_col_a = pred_col_a
pred_processed_col_b = [0, 2, None]
pred_processed_col_c = [2, 0, None]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = OrdinalEncoder(["A"])
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_one_hot_encoder():
"""Tests basic OneHotEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "hot", "cold"]
col_c = [1, 10, 5, 10]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
encoder = OneHotEncoder(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {
"unique_values(B)": {"cold": 0, "hot": 1, "warm": 2},
"unique_values(C)": {1: 0, 5: 1, 10: 2},
}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b_cold = [0, 1, 0, 1]
processed_col_b_hot = [0, 0, 1, 0]
processed_col_b_warm = [1, 0, 0, 0]
processed_col_c_1 = [1, 0, 0, 0]
processed_col_c_5 = [0, 0, 1, 0]
processed_col_c_10 = [0, 1, 0, 1]
expected_df = pd.DataFrame.from_dict(
{
"A": processed_col_a,
"B_cold": processed_col_b_cold,
"B_hot": processed_col_b_hot,
"B_warm": processed_col_b_warm,
"C_1": processed_col_c_1,
"C_5": processed_col_c_5,
"C_10": processed_col_c_10,
}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "yellow", None]
pred_col_b = ["cold", "warm", "other"]
pred_col_c = [10, 1, 20]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = encoder.transform_batch(pred_in_df)
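# Categories not seen during fit ("other" in B, 20 in C) get all-zero indicator
# columns; column A is not encoded and passes through unchanged.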
pred_processed_col_a = ["blue", "yellow", None]
pred_processed_col_b_cold = [1, 0, 0]
pred_processed_col_b_hot = [0, 0, 0]
pred_processed_col_b_warm = [0, 1, 0]
pred_processed_col_c_1 = [0, 1, 0]
pred_processed_col_c_5 = [0, 0, 0]
pred_processed_col_c_10 = [1, 0, 0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B_cold": pred_processed_col_b_cold,
"B_hot": pred_processed_col_b_hot,
"B_warm": pred_processed_col_b_warm,
"C_1": pred_processed_col_c_1,
"C_5": pred_processed_col_c_5,
"C_10": pred_processed_col_c_10,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = OneHotEncoder(["A"])
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_label_encoder():
"""Tests basic LabelEncoder functionality."""
col_a = ["red", "green", "blue", "red"]
col_b = ["warm", "cold", "cold", "hot"]
col_c = [1, 2, 3, 4]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
encoder = LabelEncoder("A")
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
encoder.transform(ds)
# Fit data.
encoder.fit(ds)
assert encoder.stats_ == {"unique_values(A)": {"blue": 0, "green": 1, "red": 2}}
# Transform data.
transformed = encoder.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = [2, 1, 0, 2]
processed_col_b = col_b
processed_col_c = col_c
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = ["blue", "red", "yellow"]
pred_col_b = ["cold", "unknown", None]
pred_col_c = [10, 20, None]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = encoder.transform_batch(pred_in_df)
pred_processed_col_a = [0, 2, None]
pred_processed_col_b = pred_col_b
pred_processed_col_c = pred_col_c
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test null behavior.
null_col = [1, None]
nonnull_col = [1, 1]
null_df = pd.DataFrame.from_dict({"A": null_col})
null_ds = ray.data.from_pandas(null_df)
nonnull_df = pd.DataFrame.from_dict({"A": nonnull_col})
nonnull_ds = ray.data.from_pandas(nonnull_df)
null_encoder = LabelEncoder("A")
# Verify fit fails for null values.
with pytest.raises(ValueError):
null_encoder.fit(null_ds)
null_encoder.fit(nonnull_ds)
# Verify transform fails for null values.
with pytest.raises(ValueError):
null_encoder.transform(null_ds)
null_encoder.transform(nonnull_ds)
# Verify transform_batch fails for null values.
with pytest.raises(ValueError):
null_encoder.transform_batch(null_df)
null_encoder.transform_batch(nonnull_df)
def test_simple_imputer():
col_a = [1, 1, 1, np.nan]
col_b = [1, 3, None, np.nan]
col_c = [1, 1, 1, 1]
in_df = pd.DataFrame.from_dict({"A": col_a, "B": col_b, "C": col_c})
ds = ray.data.from_pandas(in_df)
imputer = SimpleImputer(["B", "C"])
# Transform with unfitted preprocessor.
with pytest.raises(PreprocessorNotFittedException):
imputer.transform(ds)
# Fit data.
imputer.fit(ds)
assert imputer.stats_ == {"mean(B)": 2.0, "mean(C)": 1.0}
# Transform data.
transformed = imputer.transform(ds)
out_df = transformed.to_pandas()
processed_col_a = col_a
processed_col_b = [1.0, 3.0, 2.0, 2.0]
processed_col_c = [1, 1, 1, 1]
expected_df = pd.DataFrame.from_dict(
{"A": processed_col_a, "B": processed_col_b, "C": processed_col_c}
)
assert out_df.equals(expected_df)
# Transform batch.
pred_col_a = [1, 2, np.nan]
pred_col_b = [1, 2, np.nan]
pred_col_c = [None, None, None]
pred_in_df = pd.DataFrame.from_dict(
{"A": pred_col_a, "B": pred_col_b, "C": pred_col_c}
)
pred_out_df = imputer.transform_batch(pred_in_df)
pred_processed_col_a = pred_col_a
pred_processed_col_b = [1.0, 2.0, 2.0]
pred_processed_col_c = [1.0, 1.0, 1.0]
pred_expected_df = pd.DataFrame.from_dict(
{
"A": pred_processed_col_a,
"B": pred_processed_col_b,
"C": pred_processed_col_c,
}
)
assert pred_out_df.equals(pred_expected_df)
# Test "most_frequent" strategy.
most_frequent_col_a = [1, 2, 2, None, None, None]
most_frequent_col_b = [None, "c", "c", "b", "b", "a"]
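# "b" and "c" each appear twice in column B; the assertion below shows the tie
# resolves to "b".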
most_frequent_df = pd.DataFrame.from_dict(
{"A": most_frequent_col_a, "B": most_frequent_col_b}
)
most_frequent_ds = ray.data.from_pandas(most_frequent_df)
most_frequent_imputer = SimpleImputer(["A", "B"], strategy="most_frequent")
most_frequent_imputer.fit(most_frequent_ds)
assert most_frequent_imputer.stats_ == {
"most_frequent(A)": 2.0,
"most_frequent(B)": "b",
}
most_frequent_transformed = most_frequent_imputer.transform(most_frequent_ds)
most_frequent_out_df = most_frequent_transformed.to_pandas()
most_frequent_processed_col_a = [1.0, 2.0, 2.0, 2.0, 2.0, 2.0]
most_frequent_processed_col_b = ["b", "c", "c", "b", "b", "a"]
most_frequent_expected_df = pd.DataFrame.from_dict(
{"A": most_frequent_processed_col_a, "B": most_frequent_processed_col_b}
)
assert most_frequent_out_df.equals(most_frequent_expected_df)
# Test "constant" strategy.
constant_col_a = ["apple", None]
constant_df = pd.DataFrame.from_dict({"A": constant_col_a})
constant_ds = ray.data.from_pandas(constant_df)
with pytest.raises(ValueError):
SimpleImputer(["A"], strategy="constant")
constant_imputer = SimpleImputer(
["A", "B"], strategy="constant", fill_value="missing"
)
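# The "constant" strategy requires fill_value (hence the ValueError above) and
# is applied below without a prior fit.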
constant_transformed = constant_imputer.transform(constant_ds)
constant_out_df = constant_transformed.to_pandas()
constant_processed_col_a = ["apple", "missing"]
constant_expected_df = | pd.DataFrame.from_dict({"A": constant_processed_col_a}) | pandas.DataFrame.from_dict |
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
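# Example: Timestamp("2011-01-01 12:00", tzinfo=fixed_off) should carry a fixed
# -07:00 offset.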
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
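# 09:50 US/Eastern is 13:50 UTC during DST (EDT, UTC-4) and 14:50 UTC in winter
# (EST, UTC-5), hence hours [13, 14, 13].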
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
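# Hour 2 is absent from the expected values: clocks spring forward from 01:59 EST
# straight to 03:00 EDT.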
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
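# Hour 1 appears twice in the expected values: clocks fall back from 01:59 EDT to
# 01:00 EST.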
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 000:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect the internal values
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
# note: this test checks that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
# can't localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
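# The two ambiguous "01:00" entries are resolved explicitly: the first as DST
# (EDT), the second as standard time (EST).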
localized = di.tz_localize(tz, ambiguous=is_dst)
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
msg = "Length of ambiguous bool-array must be the same size as vals"
with pytest.raises(Exception, match=msg):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
# construction with an nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
msg = (
"The nonexistent argument must be one of "
"'raise', 'NaT', 'shift_forward', 'shift_backward' "
"or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = | date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC") | pandas.date_range |
from uff.ic.mell.sentimentembedding.modelos.modelo import Modelo
from uff.ic.mell.sentimentembedding.vocabularios.vocabulario import Vocabulario
import importlib
twokenize = importlib.import_module("uff.ic.mell.sentimentembedding.utils.ark-twokenize-py.twokenize")
import pandas as pd
import torch
from enum import Enum
import numpy as np
class ModeloEstatico(Modelo):
# TWOKENIZER tokenizes with the twokenize library,
# SPACE splits the sentence on whitespace
TOKENIZADOR = Enum("TOKENIZADOR", "TWOKENIZER SPACE")
def __init__(self, name:str, vocabulario:Vocabulario, defaultToken:str, tokenizador:TOKENIZADOR):
super().__init__(name)
self.vocabulario = vocabulario
self.defaultToken = defaultToken
self.tokenizador = tokenizador
def embTexts(self, dataSeries: pd.Series, **kwargs) -> pd.DataFrame:
retorno = []
for i, sentence in enumerate(dataSeries):
embed = self.getWordsEmbeddings(sentence, self.defaultToken)
#print(embed)
avg_emb = self.avgEmbeddings(embed)
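# getWordsEmbeddings and avgEmbeddings are defined elsewhere (not shown in this
# snippet); presumably the former looks each token up in self.vocabulario, using
# defaultToken for out-of-vocabulary words, and the latter averages the vectors,
# e.g. np.mean(np.stack(word_vectors), axis=0).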
retorno.append(avg_emb)
return | pd.DataFrame(retorno) | pandas.DataFrame |
"""
Tests for CBMonthEnd, CBMonthBegin, SemiMonthEnd, and SemiMonthBegin in offsets
"""
from datetime import (
date,
datetime,
)
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
from pandas._libs.tslibs.offsets import (
CBMonthBegin,
CBMonthEnd,
CDay,
SemiMonthBegin,
SemiMonthEnd,
)
from pandas import (
DatetimeIndex,
Series,
_testing as tm,
date_range,
)
from pandas.tests.tseries.offsets.common import (
Base,
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tests.tseries.offsets.test_offsets import _ApplyCases
from pandas.tseries import offsets as offsets
from pandas.tseries.holiday import USFederalHolidayCalendar
class CustomBusinessMonthBase:
def setup_method(self, method):
self.d = datetime(2008, 1, 1)
self.offset = self._offset()
self.offset1 = self.offset
self.offset2 = self._offset(2)
def test_eq(self):
assert self.offset2 == self.offset2
def test_mul(self):
pass
def test_hash(self):
assert hash(self.offset2) == hash(self.offset2)
def test_roundtrip_pickle(self):
def _check_roundtrip(obj):
unpickled = tm.round_trip_pickle(obj)
assert unpickled == obj
_check_roundtrip(self._offset())
_check_roundtrip(self._offset(2))
_check_roundtrip(self._offset() * 2)
def test_copy(self):
# GH 17452
off = self._offset(weekmask="Mon Wed Fri")
assert off == off.copy()
class TestCustomBusinessMonthEnd(CustomBusinessMonthBase, Base):
_offset = CBMonthEnd
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthEnd>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthEnds>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 2, 29)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthEnd(10).rollback(self.d) == datetime(2007, 12, 31)
def testRollforward1(self):
assert CBMonthEnd(10).rollforward(self.d) == datetime(2008, 1, 31)
def test_roll_date_object(self):
offset = CBMonthEnd()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 8, 31)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 28)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthEnd(), datetime(2008, 1, 31), True),
(CBMonthEnd(), datetime(2008, 1, 1), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, d, expected = case
assert_is_on_offset(offset, d, expected)
apply_cases: _ApplyCases = [
(
CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
(
2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 2, 29),
datetime(2008, 2, 7): datetime(2008, 3, 31),
},
),
(
-CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 12, 31),
datetime(2008, 2, 8): datetime(2008, 1, 31),
},
),
(
-2 * CBMonthEnd(),
{
datetime(2008, 1, 1): datetime(2007, 11, 30),
datetime(2008, 2, 9): datetime(2007, 12, 31),
},
),
(
CBMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 2, 7): datetime(2008, 2, 29),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthEnd(10)
assert result == datetime(2013, 7, 31)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthEnd() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 29)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 5, 31)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]
bm_offset = CBMonthEnd(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 30)
assert dt + 2 * bm_offset == datetime(2012, 2, 27)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
from pandas.tseries.holiday import USFederalHolidayCalendar
hcal = USFederalHolidayCalendar()
freq = CBMonthEnd(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=freq).tolist()[
0
] == datetime(2012, 1, 31)
class TestCustomBusinessMonthBegin(CustomBusinessMonthBase, Base):
_offset = CBMonthBegin
def test_different_normalize_equals(self):
# GH#21404 changed __eq__ to return False when `normalize` does not match
offset = self._offset()
offset2 = self._offset(normalize=True)
assert offset != offset2
def test_repr(self):
assert repr(self.offset) == "<CustomBusinessMonthBegin>"
assert repr(self.offset2) == "<2 * CustomBusinessMonthBegins>"
def test_call(self):
with tm.assert_produces_warning(FutureWarning):
# GH#34171 DateOffset.__call__ is deprecated
assert self.offset2(self.d) == datetime(2008, 3, 3)
def testRollback1(self):
assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)
def testRollback2(self):
assert CBMonthBegin(10).rollback(self.d) == datetime(2008, 1, 1)
def testRollforward1(self):
assert CBMonthBegin(10).rollforward(self.d) == datetime(2008, 1, 1)
def test_roll_date_object(self):
offset = CBMonthBegin()
dt = date(2012, 9, 15)
result = offset.rollback(dt)
assert result == datetime(2012, 9, 3)
result = offset.rollforward(dt)
assert result == datetime(2012, 10, 1)
offset = offsets.Day()
result = offset.rollback(dt)
assert result == datetime(2012, 9, 15)
result = offset.rollforward(dt)
assert result == datetime(2012, 9, 15)
on_offset_cases = [
(CBMonthBegin(), datetime(2008, 1, 1), True),
(CBMonthBegin(), datetime(2008, 1, 31), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
apply_cases: _ApplyCases = [
(
CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 2, 7): datetime(2008, 3, 3),
},
),
(
2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 3, 3),
datetime(2008, 2, 7): datetime(2008, 4, 1),
},
),
(
-CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 12, 3),
datetime(2008, 2, 8): datetime(2008, 2, 1),
},
),
(
-2 * CBMonthBegin(),
{
datetime(2008, 1, 1): datetime(2007, 11, 1),
datetime(2008, 2, 9): datetime(2008, 1, 1),
},
),
(
CBMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 7): datetime(2008, 2, 1),
},
),
]
@pytest.mark.parametrize("case", apply_cases)
def test_apply(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
def test_apply_large_n(self):
dt = datetime(2012, 10, 23)
result = dt + CBMonthBegin(10)
assert result == datetime(2013, 8, 1)
result = dt + CDay(100) - CDay(100)
assert result == dt
off = CBMonthBegin() * 6
rs = datetime(2012, 1, 1) - off
xp = datetime(2011, 7, 1)
assert rs == xp
st = datetime(2011, 12, 18)
rs = st + off
xp = datetime(2012, 6, 1)
assert rs == xp
def test_holidays(self):
# Define a TradingDay offset
holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]
bm_offset = CBMonthBegin(holidays=holidays)
dt = datetime(2012, 1, 1)
assert dt + bm_offset == datetime(2012, 1, 2)
assert dt + 2 * bm_offset == datetime(2012, 2, 3)
@pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")
def test_datetimeindex(self):
hcal = USFederalHolidayCalendar()
cbmb = CBMonthBegin(calendar=hcal)
assert date_range(start="20120101", end="20130101", freq=cbmb).tolist()[
0
] == datetime(2012, 1, 3)
class TestSemiMonthEnd(Base):
_offset = SemiMonthEnd
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 31),
datetime(2008, 1, 15),
datetime(2008, 1, 31),
datetime(2008, 2, 15),
datetime(2008, 2, 29),
datetime(2008, 3, 15),
datetime(2008, 3, 31),
datetime(2008, 4, 15),
datetime(2008, 4, 30),
datetime(2008, 5, 15),
datetime(2008, 5, 31),
datetime(2008, 6, 15),
datetime(2008, 6, 30),
datetime(2008, 7, 15),
datetime(2008, 7, 31),
datetime(2008, 8, 15),
datetime(2008, 8, 31),
datetime(2008, 9, 15),
datetime(2008, 9, 30),
datetime(2008, 10, 15),
datetime(2008, 10, 31),
datetime(2008, 11, 15),
datetime(2008, 11, 30),
datetime(2008, 12, 15),
datetime(2008, 12, 31),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthEnd(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthEnd() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SM")
exp = DatetimeIndex(dates, freq="SM")
tm.assert_index_equal(result, exp)
offset_cases = []
offset_cases.append(
(
SemiMonthEnd(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 20),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2007, 1, 20),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 16): datetime(2008, 1, 31),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 15),
},
)
)
offset_cases.append(
(
SemiMonthEnd(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 16),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 1, 31),
datetime(2006, 12, 29): datetime(2006, 12, 31),
datetime(2006, 12, 31): datetime(2006, 12, 31),
datetime(2007, 1, 1): datetime(2007, 1, 16),
},
)
)
offset_cases.append(
(
SemiMonthEnd(2),
{
datetime(2008, 1, 1): datetime(2008, 1, 31),
datetime(2008, 1, 31): datetime(2008, 2, 29),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 31): datetime(2007, 1, 31),
datetime(2007, 1, 1): datetime(2007, 1, 31),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 11, 30),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 30): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2007, 1, 4): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
SemiMonthEnd(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 5, 31),
datetime(2008, 3, 15): datetime(2008, 2, 15),
datetime(2008, 12, 31): datetime(2008, 11, 30),
datetime(2006, 12, 29): datetime(2006, 11, 30),
datetime(2006, 12, 14): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
# https://github.com/pandas-dev/pandas/issues/34580
offset, cases = case
s = DatetimeIndex(cases.keys())
exp = DatetimeIndex(cases.values())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
tm.assert_index_equal(result, exp)
with tm.assert_produces_warning(FutureWarning):
result = offset.apply_index(s)
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 31), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 1), False),
(datetime(2008, 2, 29), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthEnd(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-31 00:15:00", tz="US/Central"),
Timestamp("2000-02-29", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthEnd()
result2 = SemiMonthEnd() + s
exp = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
class TestSemiMonthBegin(Base):
_offset = SemiMonthBegin
offset1 = _offset()
offset2 = _offset(2)
def test_offset_whole_year(self):
dates = (
datetime(2007, 12, 15),
datetime(2008, 1, 1),
datetime(2008, 1, 15),
datetime(2008, 2, 1),
datetime(2008, 2, 15),
datetime(2008, 3, 1),
datetime(2008, 3, 15),
datetime(2008, 4, 1),
datetime(2008, 4, 15),
datetime(2008, 5, 1),
datetime(2008, 5, 15),
datetime(2008, 6, 1),
datetime(2008, 6, 15),
datetime(2008, 7, 1),
datetime(2008, 7, 15),
datetime(2008, 8, 1),
datetime(2008, 8, 15),
datetime(2008, 9, 1),
datetime(2008, 9, 15),
datetime(2008, 10, 1),
datetime(2008, 10, 15),
datetime(2008, 11, 1),
datetime(2008, 11, 15),
datetime(2008, 12, 1),
datetime(2008, 12, 15),
)
for base, exp_date in zip(dates[:-1], dates[1:]):
assert_offset_equal(SemiMonthBegin(), base, exp_date)
# ensure .apply_index works as expected
s = DatetimeIndex(dates[:-1])
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = SemiMonthBegin() + s
exp = DatetimeIndex(dates[1:])
tm.assert_index_equal(result, exp)
# ensure generating a range with DatetimeIndex gives same result
result = date_range(start=dates[0], end=dates[-1], freq="SMS")
exp = DatetimeIndex(dates, freq="SMS")
tm.assert_index_equal(result, exp)
offset_cases = [
(
SemiMonthBegin(),
{
datetime(2008, 1, 1): datetime(2008, 1, 15),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 15),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 15),
datetime(2006, 12, 1): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(day_of_month=20),
{
datetime(2008, 1, 1): datetime(2008, 1, 20),
datetime(2008, 1, 15): datetime(2008, 1, 20),
datetime(2008, 1, 21): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 14): datetime(2006, 12, 20),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 1): datetime(2007, 1, 20),
datetime(2006, 12, 1): datetime(2006, 12, 20),
datetime(2006, 12, 15): datetime(2006, 12, 20),
},
),
(
SemiMonthBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 1, 15),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 2): datetime(2006, 12, 15),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(0, day_of_month=16),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 1, 16): datetime(2008, 1, 16),
datetime(2008, 1, 15): datetime(2008, 1, 16),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2006, 12, 29): datetime(2007, 1, 1),
datetime(2006, 12, 31): datetime(2007, 1, 1),
datetime(2007, 1, 5): datetime(2007, 1, 16),
datetime(2007, 1, 1): datetime(2007, 1, 1),
},
),
(
SemiMonthBegin(2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 15),
datetime(2006, 12, 1): datetime(2007, 1, 1),
datetime(2006, 12, 29): datetime(2007, 1, 15),
datetime(2006, 12, 15): datetime(2007, 1, 15),
datetime(2007, 1, 1): datetime(2007, 2, 1),
datetime(2007, 1, 16): datetime(2007, 2, 15),
datetime(2006, 11, 1): datetime(2006, 12, 1),
},
),
(
SemiMonthBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 15),
datetime(2008, 6, 30): datetime(2008, 6, 15),
datetime(2008, 6, 14): datetime(2008, 6, 1),
datetime(2008, 12, 31): datetime(2008, 12, 15),
datetime(2006, 12, 29): datetime(2006, 12, 15),
datetime(2006, 12, 15): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 15),
},
),
(
SemiMonthBegin(-1, day_of_month=4),
{
datetime(2007, 1, 1): datetime(2006, 12, 4),
datetime(2007, 1, 4): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 6, 4),
datetime(2008, 12, 31): datetime(2008, 12, 4),
datetime(2006, 12, 5): datetime(2006, 12, 4),
datetime(2006, 12, 30): datetime(2006, 12, 4),
datetime(2006, 12, 2): datetime(2006, 12, 1),
datetime(2007, 1, 1): datetime(2006, 12, 4),
},
),
(
SemiMonthBegin(-2),
{
datetime(2007, 1, 1): datetime(2006, 12, 1),
datetime(2008, 6, 30): datetime(2008, 6, 1),
datetime(2008, 6, 14): datetime(2008, 5, 15),
datetime(2008, 12, 31): datetime(2008, 12, 1),
datetime(2006, 12, 29): datetime(2006, 12, 1),
datetime(2006, 12, 15): datetime(2006, 11, 15),
datetime(2007, 1, 1): datetime(2006, 12, 1),
},
),
]
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
@pytest.mark.parametrize("case", offset_cases)
def test_apply_index(self, case):
offset, cases = case
s = DatetimeIndex(cases.keys())
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = offset + s
exp = DatetimeIndex(cases.values())
tm.assert_index_equal(result, exp)
on_offset_cases = [
(datetime(2007, 12, 1), True),
(datetime(2007, 12, 15), True),
(datetime(2007, 12, 14), False),
(datetime(2007, 12, 31), False),
(datetime(2008, 2, 15), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
dt, expected = case
assert_is_on_offset(SemiMonthBegin(), dt, expected)
@pytest.mark.parametrize("klass", [Series, DatetimeIndex])
def test_vectorized_offset_addition(self, klass):
s = klass(
[
Timestamp("2000-01-15 00:15:00", tz="US/Central"),
Timestamp("2000-02-15", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
exp = klass(
[
Timestamp("2000-02-01 00:15:00", tz="US/Central"),
Timestamp("2000-03-01", tz="US/Central"),
],
name="a",
)
tm.assert_equal(result, exp)
tm.assert_equal(result2, exp)
s = klass(
[
Timestamp("2000-01-01 00:15:00", tz="US/Central"),
Timestamp("2000-02-01", tz="US/Central"),
],
name="a",
)
with tm.assert_produces_warning(None):
# GH#22535 check that we don't get a FutureWarning from adding
# an integer array to PeriodIndex
result = s + SemiMonthBegin()
result2 = SemiMonthBegin() + s
import numpy as np
import scipy as sc
import scipy.linalg as spl
import scipy.stats as ss
import pandas as pd
import sys, os, csv
import matplotlib.pyplot as plt  # needed by plot_nfl_round below
import grad_utils as model
import cv_utils
import opt_utils
import ks_utils as ks
def max_change(beta):
'''
get the maximal change in rank between neighboring timepoints based on beta
'''
T,N = beta.shape
arg = np.array([ss.rankdata(-beta[ii]) for ii in range(T)])
return np.max(abs(arg[1:] - arg[:-1]))
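# Small illustration (not part of the original pipeline; commented out so it does
# not run on import): for a hypothetical 2x3 beta matrix in which the top-ranked
# item swaps places with the bottom one, the maximal rank change is 2.
# example_beta = np.array([[0.9, 0.5, 0.1], [0.1, 0.5, 0.9]])
# max_change(example_beta)  # -> 2.0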
def plot_nfl_round(beta, team_id,season):
T, N = beta.shape
year = range(1,17)
f = plt.figure(1, figsize = (6,4))
for i in range(N):
plt.plot(year,beta[:,i], label=team_id['name'][i], color = np.random.rand(3,))
plt.xlabel("round")
plt.ylabel("latent parameter")
plt.legend(loc='upper left', bbox_to_anchor=(1, 1, 1, 0),prop={'size': 5})
plt.ticklabel_format(style='plain',axis='x',useOffset=False)
f.savefig("nfl_round_"+str(season)+".pdf", bbox_inches='tight')
def get_elo_rank_season(elo_all, season):
elo_season = elo_all.iloc[np.where(elo_all['season'] == season)]
elo_season = elo_season[pd.isnull(elo_season['playoff'])]
a = elo_season[['team1','elo1_post']]
a.columns = ['team','elo']
a = a.reset_index()
b = elo_season[['team2','elo2_post']]
b.columns = ['team','elo']
b = b.reset_index()
c = pd.concat([a,b])
import numpy as np
import pandas as pd
import shapely.wkt
from sklearn.metrics import pairwise_distances
from sklearn.metrics import mean_squared_error
import init
import constants as cn
from coordinate import Coordinate
def proximity_ratio(df_destinations):
"""
Calculate proximity ratio and summarize by blockgroups
input:
df_destination - data frame with distance, trip-level data
output:
df_blockgroup - data frame with origin blockgroup and proximity ratio
"""
lower_bound = cn.BASKET_EVAL_PROX_MIN # 2 miles
upper_bound = cn.BASKET_EVAL_PROX_MAX # 10 miles
# Ratio of trips under 2 miles to trips between 2 and 10 miles
df_destinations['dist_under_2'] = np.where(df_destinations[cn.DISTANCE] < lower_bound, 1, 0)
df_destinations['dist_2_to_10'] = np.where((df_destinations[cn.DISTANCE] >= lower_bound) & (df_destinations[cn.DISTANCE] < upper_bound), 1, 0)
df_blockgroup = df_destinations.groupby([cn.ORIGIN], as_index=False).agg({'dist_under_2':sum,'dist_2_to_10':sum})
# Remove rows with zero denominators
df_blockgroup = df_blockgroup[df_blockgroup['dist_2_to_10'] != 0]
# Make new column with the data
df_blockgroup[cn.PROX_RATIO] = df_blockgroup['dist_under_2'] / df_blockgroup['dist_2_to_10']
return df_blockgroup[[cn.ORIGIN, cn.PROX_RATIO]]
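# Hypothetical usage sketch (illustration only; commented out so it does not run on
# import). The input only needs the cn.ORIGIN and cn.DISTANCE columns: one trip under
# 2 miles and two trips between 2 and 10 miles yield a proximity ratio of 0.5.
# example_trips = pd.DataFrame({cn.ORIGIN: ['bg1'] * 3, cn.DISTANCE: [1.2, 3.5, 8.0]})
# proximity_ratio(example_trips)  # -> one row for 'bg1' with a proximity ratio of 0.5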
def vert_hori_ratio(df_destinations, df_blockgroup):
"""
Calculate the ratio between vertical distance and horizontal distance, for each blockgroup
input:
df_destination - data frame with distance, trip-level data
output:
result_merged - df_blockgroup with a mean vertical/horizontal ratio column added
"""
df_destinations[cn.VERT_HORI_RATIO] = pd.DataFrame(np.abs( (df_destinations['dest_lat'] - df_destinations['orig_lat']) /
(df_destinations['dest_lon'] - df_destinations['orig_lon']) ))
df_blockgroup2 = df_destinations.groupby([cn.ORIGIN], as_index=False)[cn.VERT_HORI_RATIO].mean()
result_merged = pd.merge(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
return result_merged
def average_distance(df_destinations, df_blockgroup):
"""
Calculate average travel distance of each blockgroup
input:
df_destination - data frame with distance, trip-level data
output:
result_merged - df_blockgroup with an average travel distance column added
"""
df_blockgroup2 = df_destinations.groupby([cn.ORIGIN], as_index=False)[cn.DISTANCE].mean()
result_merged = pd.merge(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
result_merged.rename(columns = {cn.DISTANCE: cn.AVG_DIST}, inplace=True)
return result_merged
def dist_from_cc(df):
"""
Helper function to create a new column in a DataFrame with dist to city center
"""
coordinate = Coordinate(df['dest_lat'], df['dest_lon'])
city_center = Coordinate(cn.CITY_CENTER[0], cn.CITY_CENTER[1])
return city_center.haversine_distance(coordinate)
def distance_from_citycenter(df_destinations, df_blockgroup):
"""
Calculate Euclidean distance of destination from the city center
input:
df_destination - data frame with distance, trip-level data
output:
result_merged - df_blockgroup with a mean distance-from-city-center column added
"""
df_destinations['distance_from_citycenter_val'] = df_destinations.apply(dist_from_cc, axis=1)
df_blockgroup2 = df_destinations.groupby([cn.ORIGIN], as_index=False)['distance_from_citycenter_val'].mean()
result_merged = pd.merge(left=df_blockgroup, right=df_blockgroup2, how='inner', left_on=cn.ORIGIN, right_on=cn.ORIGIN)
result_merged.rename(columns = {'distance_from_citycenter_val': 'distance_from_citycenter_test'}, inplace=True)
return result_merged
def prepare_psrc(psrc_raw):
"""
This code calculates four features and adds them to the original PSRC data.
It just needs to be run once, as we are using it without filtering.
input:
PSRC data (even though we call it raw, its column names are changed and latitude and longitudes are added)
output:
PSRC data with proximity ratio, vert_hori_ratio, average distance, and distance from city center
"""
psrc_blockgroup = proximity_ratio(psrc_raw)
with_vert_hori_ratio = vert_hori_ratio(psrc_raw, psrc_blockgroup)
with_average_distance = average_distance(psrc_raw, with_vert_hori_ratio)
with_distance_from_citycenter = distance_from_citycenter(psrc_raw, with_average_distance)
result_merged = with_distance_from_citycenter
result_merged.sort_values(by=[cn.ORIGIN])
return result_merged
def calculate_features(google_input, basket_combination):
"""
This calculates three features using google API data; need to run separately for each basket combination
It just needs to be run for every basket combination, as we are filtering it every time.
input:
Google API data, backet combination
output:
Google API data with proximity ratio, vert_hori_ratio, average distance, and distance from city center
"""
# Filter to match basket parameters based on rank (distance from destination)
filtered_data = google_input
for i in range(len(cn.BASKET_CATEGORIES)):
filtered_data = filtered_data[(filtered_data['class'] != cn.BASKET_CATEGORIES[i]) | (filtered_data['rank'] <= basket_combination[i])]
# FEATURES: PROXIMITY RATIO, VERTICAL/HORIZONTAL TRAVEL DISTANCES, AVERAGE DISTANCE TO DESTINATION
# Creating google results
with_proximity_ratio = proximity_ratio(filtered_data.copy())
with_vert_hori_ratio = vert_hori_ratio(filtered_data.copy(), with_proximity_ratio)
with_average_distance = average_distance(filtered_data.copy(), with_vert_hori_ratio)
with_distance_from_citycenter = distance_from_citycenter(filtered_data.copy(), with_average_distance)
final_result = with_distance_from_citycenter.sort_values(by = [cn.ORIGIN])
return final_result
def calculate_mse(psrc_output, google_input):
"""
This calculates three features for each basket combination, saves MSE to compare Google API with PSRC
input:
PSRC wth features, Google API data without features
output:
Basket combinations, MSEs for each basket
"""
score = []
combinations = []
for x in cn.BASKET_COMBOS:
if sum(x) == cn.BASKET_SIZE:
# To do a faster test run, comment out the above and use the following:
# if sum(x) == 40:
combinations.append(x)
df_google = calculate_features(google_input, list(x))
googled_psrc = psrc_output.loc[psrc_output[cn.ORIGIN].isin(df_google[cn.ORIGIN])]
proximity_ratio_mse = mean_squared_error(df_google[cn.PROX_RATIO], googled_psrc[cn.PROX_RATIO])
vert_hori_ratio_mse = mean_squared_error(df_google[cn.VERT_HORI_RATIO], googled_psrc[cn.VERT_HORI_RATIO])
average_distance_mse = mean_squared_error(df_google[cn.AVG_DIST], googled_psrc[cn.AVG_DIST])
distance_from_citycenter_mse = mean_squared_error(df_google['distance_from_citycenter_test'], googled_psrc['distance_from_citycenter_test'])
mses = (proximity_ratio_mse, vert_hori_ratio_mse, average_distance_mse, distance_from_citycenter_mse)
score.append(mses)
if (len(combinations) % 5000 == 0):
print("Still Processing..")
print("Idx+1 of combination is: ", len(combinations))
print("Total number of combinations: " + str(len(combinations)))
print()
final_mses = pd.DataFrame(score, columns = ['from_proximity_ratio', 'from_vert_hori_ratio', 'from_average_distance', 'from_distance_citycenter'])
final_mses['rank_from_proximity_ratio'] = final_mses['from_proximity_ratio'].rank(ascending=1)
final_mses['rank_from_vert_hori_ratio'] = final_mses['from_vert_hori_ratio'].rank(ascending=1)
final_mses['rank_from_average_distance'] = final_mses['from_average_distance'].rank(ascending=1)
final_mses['rank_from_distance_citycenter'] = final_mses['from_distance_citycenter'].rank(ascending=1)
final_combinations = pd.DataFrame(combinations, columns = cn.BASKET_CATEGORIES)
best_loc = final_mses['rank_from_average_distance'].idxmin()
print("Choose the following combination: \n")
print("The index of the best basket is: ", best_loc)
print(final_combinations.loc[best_loc])
return final_combinations, final_mses
# Load PSRC data and pre-process
psrc_rawdat = pd.read_csv(cn.PSRC_FP, dtype={cn.ORIGIN: str, cn.DESTINATION: str})
psrc_rawdat[cn.DISTANCE] = pd.to_numeric(psrc_rawdat[cn.DISTANCE], errors='coerce')
# Load Google API data
input_destinations = pd.read_csv(cn.RAW_DIR + 'GoogleMatrix_Places_Dist.csv', dtype={cn.ORIGIN: str})
input_destinations.rename(columns = {'lat': 'dest_lat', 'lng': 'dest_lon', 'orig_lng': 'orig_lon'}, inplace=True)
# Load blockgroup data with latitude and longitudes; will be merged with Google API
blockgroup_mapping = pd.read_csv(cn.PROCESSED_DIR + 'SeattleCensusBlockGroups.csv', dtype={'tract_blkgrp': str})
print("blockgroup_mapping is loaded!")
blockgroup_mapping['tract_blkgrp'] = '530330' + blockgroup_mapping['tract_blkgrp']
orig_pts = blockgroup_mapping.centroid.apply(shapely.wkt.loads)
blockgroup_mapping['orig_lon'] = pd.DataFrame([kk.x for kk in orig_pts])
blockgroup_mapping['orig_lat'] = pd.DataFrame([kk.y for kk in orig_pts])
#!/usr/local/bin/env python3.7
# -*- coding: utf-8; py-indent-offset:4 -*-
###############################################################################
#
# The MIT License (MIT)
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
###############################################################################
"""Helper module to manipulate csv and pandas Dataframe"""
import csv
import pandas as pd
import datetime as dt
from functools import wraps
from statsmodels.tsa.stattools import adfuller
import os.path
import numpy as np
def ordinal_date(function):
"""Wrapper to add an ordinal date"""
@wraps(function)
def wrapper(cls,date_name,date_debut,date_fin, name_,directory,asset, ordinal_name,is_fx,dup_col):
series_ = function(cls,date_name,date_debut,date_fin, name_,directory,asset, ordinal_name,is_fx,dup_col)
series_.Date = pd.to_datetime(series_.Date)
series_[ordinal_name] = pd.to_datetime(series_[date_name]).map(dt.datetime.toordinal)
return series_
return wrapper
class ManipData():
"""Class to manipulate data"""
@classmethod
def __init__(cls,dir_,file_name,extension =""):
cls.dir_ = dir_ #directory
cls.filename = file_name
cls.extension_ = extension #if there is an extension added to the filename
@classmethod
def write_csv_(cls, dir_output, name_out, add_doc = "", is_walkfoward = False, **kwargs):
""" Write data to a csv
Parameters
----------
dir_output : str
directory where we want our data to be written
name_out : str
name of the output file
is_walkfoward : bool
says whether we are doing a walk-forward analysis. If `True`, we have to create separate training and test files
**kwargs : keyword param
dictionary with keys and items to be written in the file
"""
if is_walkfoward:
write_type = 'a'
func = 'writer.writerow'
else :
write_type = 'w'
func = 'str'
with open(dir_output + name_out + add_doc + ".csv" , write_type, newline='') as f:
writer = csv.writer(f)
eval(func)('')
for key, item in kwargs.items():
writer.writerow([key,item])
@classmethod
def erase_content(cls):
"""Method to erase contents of a csv file"""
filename = cls.dir_ + cls.filename + cls.extension_ + ".csv"
if os.path.isfile(filename):
with open(filename,"w+") as f:
f.close()
@classmethod
@ordinal_date
def csv_to_pandas(cls, date_name,date_debut,date_fin, name_,directory,asset, ordinal_name = '',is_fx = False,
dup_col = None):
"""Return the csv to a pandas Dataframe
The function removes NaN values with `series_.dropna()` and removes the data for when the market is closed with
`series_.drop_duplicates()`
"""
if is_fx:
dateparse = lambda x: dt.datetime.strptime(x, '%d.%m.%Y %H:%M:%S')
else :
dateparse = None
series_ = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# Best viewed locally in a Jupyter notebook or online in <a href="https://nbviewer.jupyter.org/github/codykingham/noun_semantics/blob/master/analysis.ipynb">Jupyter Notebook Viewer</a>
#
# # Analysis of Noun Semantics in the Hebrew Bible
# ## <NAME>
#
# In this notebook, I compare the syntactic contexts of the top 200 most frequent nouns in the Hebrew Bible. This notebook essentially walks through my process and includes limited commentary throughout. Full descriptions borrowed from the paper will soon be transferred to here as well.
# In[1]:
get_ipython().system(' echo "last updated:"; date')
# In[3]:
# ETCBC's BHSA data
from tf.fabric import Fabric
from tf.app import use
# stats & data-containers
import collections, math, re, random, csv
import pandas as pd
import numpy as np
import scipy.stats as stats
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import pairwise_distances
from kneed import KneeLocator # https://github.com/arvkevi/kneed
# data visualizations
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib import rcParams
rcParams['font.serif'] = ['Times New Roman']
from IPython.display import HTML, display, Image
from adjustText import adjust_text # fixes overlapping scatterplot annotations
# custom modules
#from pyscripts.contextcount import ContextCounter, ContextTester
from pyscripts.contextparameters import deliver_params
from pyscripts.deliver_data import deliver_data
# prep the Hebrew syntax data
name = 'noun_semantics'
hebrew_data = ['~/github/etcbc/{}/tf/c'.format(direc) for direc in ('bhsa','lingo/heads', 'heads', 'phono')] # data dirs
load_features = '''
typ phono lex_utf8 lex
voc_lex_utf8 voc_lex gloss
freq_lex pdp sp ls
language
rela number function
vs vt
code label
head obj_prep sem_set nhead
heads noun_heads
'''
# TF load statements
TF = Fabric(locations=hebrew_data)
api = TF.load(load_features)
B = use('bhsa', api=api, hoist=globals(), silent=True) # Bhsa functions for search and visualizing text
# In[4]:
def reverse_hb(heb_text):
'''
Reverses character order of right-to-left (Hebrew) text
so it displays properly in matplotlib.
'''
return ''.join(reversed(heb_text))
def show_word_list(word_nodes, joiner=' |', title=''):
'''
Displays Hebrew for a pipe-separated list of word nodes
Good for seeing lexemes without taking up screen space.
'''
formatted = joiner.join(T.text(node) for node in word_nodes)
display(HTML(formatted))
def show_subphrases(phrase, direction=L.d):
'''
A simple function to print subphrases
and their relations to each other.
'''
for sp in direction(phrase, 'subphrase'):
mother = E.mother.f(sp)[0] if E.mother.f(sp) else ''
mother_text = T.text(mother)
print('-'*7 + str(sp) + '-'*16)
print()
print(f'{T.text(sp)} -{F.rela.v(sp)}-> {mother_text}')
print(f'nodes: {sp} -{F.rela.v(sp)}-> {mother}')
print(f'slots: {L.d(sp, "word")} -{F.rela.v(sp)}-> {L.d(mother or 0, "word")}')
print('-'*30)
# ## Corpus Size
#
# Below is the number of words included in the corpus of BHSA.
# In[5]:
len(list(F.otype.s('word')))
# ## Demonstrating the Collocational Principle
#
# Here is a query for all nouns that serve as the object to the verb אכל "to eat". This query demonstrates how the collocation patterns of syntactic context can be informative for semantic meaning. This is the driving principle behind this project.
# In[6]:
eat_obj = '''
clause
phrase function=Pred
word pdp=verb lex=>KL[
phrase function=Objc
<head- w1:word pdp=subs
lex
w2:word
w1 = w2
'''
eat_obj = B.search(eat_obj)
eaten_lexs = collections.Counter(T.text(r[5]) for r in eat_obj)
for word, count in eaten_lexs.most_common(10):
print(f'{count}\t{word}')
# ## Define a Target Noun Set
#
# *Insert discussion about the semantic relationship between iconicity and frequency with regards to the most frequent noun lexemes in the HB.*
# In[7]:
raw_search = '''
lex language=Hebrew sp=subs
'''
raw_nouns = B.search(raw_search)
# Now we order the results on the basis of lexeme frequency.
# In[8]:
raw_terms_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in raw_nouns), reverse=True)
# Below we have a look at the top 50 terms from the selected set. Pay attention to the feature `ls`, i.e. "lexical set." This feature gives us some rudimentary semantic information about the nouns and their usual functions, and it suggests that some additional restrictions are necessary for the noun selection procedure. Note especially that several of these nouns are used in adjectival or prepositional roles (e.g. כל ,אחד, אין, תחת).
# In[9]:
raw_nnodes = [res[1] for res in raw_terms_ordered] # isolate the word nodes of the sample
B.displaySetup(extraFeatures={'ls', 'freq_lex'}) # config B to display ls and freq_lex
# display lexeme data
# for i, node in enumerate(raw_nnodes[:50]):
# B.prettyTuple((node,), seq=i)
# Based on the nouns that are present, we should make some key exclusions. Many substantives have more functional or adjectival roles. Undesirable categories include copulative nouns (`nmcp`, e.g. אין), cardinal numbers (`card`), and potential prepositions (`ppre`, e.g. תחת). The `ls` category of potential adverb (`padv`) contains desirable nouns like יום, but also more functionally adverbial nouns like עוד. Thus there is a range of adverbial tendencies within this category. Because it may prove interesting to see these tendencies play out in the data, we decide to keep these instances.
#
# To be sure, the very phenomenon of "functional" versus "nominal" is worthy of further, quantitative investigation. The `ls` feature is an experimental and incomplete feature in the ETCBC, and this is precisely the kind of shortcoming this present work seeks to address. Nouns and adverbs likely sit along a sliding scale of adverbial tendencies, with adverbs nearly always functioning in such a role, and nouns exhibiting various statistical tendencies. But due to the scope of this investigation, we limit ourselves to mainly nominal words with a small inclusion of some adverbial-like substantives.
#
# We can eliminate more functional nouns by restricting the possible lexical set (`ls`) values. Below we apply those restrictions to the search template. In the case of certain quantifiers such as כל there is an `ls` feature of distributive noun (`nmdi`), yet this feature is likewise applied to nouns such as אח ("brother"). So it is undesirable to exclude all of these cases. Thus we depend, instead, on an additional filter list that excludes quantifiers.
#
# A few terms such as דרך and עבר are eliminated because the ETCBC labels them as potential prepositions. This is a speculative classification, so we define a separate parameter in the template that preserves these terms.
# In[10]:
exclude = '|'.join(('KL/', 'M<V/', 'JTR/', 'M<FR/', 'XYJ/')) # exclude quantifiers
include = '|'.join(('padv', 'nmdi')) # ok ls features
keep = '|'.join(('DRK/', '<BR/'))
'''
Below is a TF search query for three cases:
One is a lexeme with included ls features.
The second is a lexeme with a null ls feature.
The third is lexemes we want to prevent from being excluded.
For all cases we exclude excluded lexemes.
'''
select_noun_search = f'''
lex language=Hebrew
/with/
sp=subs ls={include} lex#{exclude}
/or/
sp=subs ls# lex#{exclude}
/or/
sp=subs lex={keep}
/-/
'''
select_nouns = B.search(select_noun_search)
noun_dat_ordered = sorted(((F.freq_lex.v(res[0]), res[0]) for res in select_nouns), reverse=True)
nnodes_ordered = list(noun_dat[1] for noun_dat in noun_dat_ordered)
filtered_lexs = list(node for node in raw_nnodes if node not in nnodes_ordered)
print(f'\t{len(raw_nouns) - len(select_nouns)} results filtered out of raw noun list...')
print('\tfiltered lexemes shown below:')
show_word_list(filtered_lexs)
# ### Plot the Nouns in Order of Frequency
#
# Now that we have obtained a filtered noun-set, we must decide a cut-off point at which to limit the present analysis. Below we plot the attested nouns and their respective frequencies.
# In[14]:
# plot data
y_freqs = [lex_data[0] for lex_data in noun_dat_ordered]
x_rank = [i+1 for i in range(0, len(y_freqs))]
title = 'Noun Frequencies in the Hebrew Bible'
xlabel = 'Noun Rank'
ylabel = 'Noun Frequency'
# first plot
plt.figure(figsize=(8, 4))
plt.plot(x_rank, y_freqs, color='black', linewidth=1)
plt.title(title + f' (ranks 1-{len(x_rank)})', size=10)
plt.xlabel(xlabel, size=10)
plt.ylabel(ylabel, size=10)
plt.plot()
plt.show()
# We zoom in closer to view ranks 1-1000...
#
# *Consider using a subplot here with 4 different zooms*
# In[16]:
# second plot
plt.figure(figsize=(8, 4))
plt.plot(x_rank[:1000], y_freqs[:1000], color='black', linewidth=1)
plt.xlabel(xlabel, size=10)
plt.ylabel(ylabel, size=10)
plt.axvline(200, color='red', linewidth=0.8, linestyle='--')
plt.savefig('results/plots/noun_frequencies1-1000.svg', format='svg', bbox_inches='tight') # save the plot (without title)
plt.title(title + f' (ranks 1-1000)', size=10)
plt.show()
# This curve is typical of Zipf's law:
#
# > Zipf's law states that given some corpus of natural language utterances, the frequency of any word is inversely proportional to its rank in the frequency table ([wikipedia](https://en.wikipedia.org/wiki/Zipf%27s_law))
#
# The curve sharply "elbows" at around rank 15. Between ranks 50-100 there is still an appreciable drop-off. The curve starts to significantly flatten after 200. We thus decide an arbitrary cut-off point at rank 200, based on the fact that the curve does not show any significant leveling after this point.
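# As a quick illustration of the Zipf pattern (not part of the original analysis), the
# observed frequencies can be compared with an idealized C/rank curve scaled to the most
# frequent noun; the two should roughly track each other.
zipf_ideal = [y_freqs[0] / rank for rank in x_rank]
plt.figure(figsize=(8, 4))
plt.plot(x_rank[:1000], y_freqs[:1000], color='black', linewidth=1, label='observed')
plt.plot(x_rank[:1000], zipf_ideal[:1000], color='grey', linewidth=1, linestyle='--', label='idealized C/rank')
plt.xlabel(xlabel, size=10)
plt.ylabel(ylabel, size=10)
plt.legend()
plt.show()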
# In[17]:
target_nouns = nnodes_ordered[:200]
tnoun_instances = set(word for lex in target_nouns for word in L.d(lex, 'word'))
show_word_list(target_nouns) # temporary comment out while bug is fixed
print(f'\n{len(tnoun_instances)} nouns ready for searches')
# In[18]:
nouns_text_freqs = sorted(
((F.voc_lex_utf8.v(L.d(noun,'word')[0]), F.freq_lex.v(noun))
for noun in target_nouns), key=lambda k: k[-1], reverse=True
)
# In[19]:
', '.join(f'{noun}' for noun, freq in nouns_text_freqs)
# ## Strategy for Context Selection
#
# See [pyscripts/contextparameters.py](pyscripts/contextparameters.py) for the full delineation of these patterns and to see how they've been selected and tokenized.
# In[20]:
contexts = deliver_params(tnoun_instances, tf=api)
# In[21]:
data = deliver_data(contexts, tf=TF)
# Let's have a look at the first example...
# In[22]:
data[0]
# Now we put the data into a dataframe. We also export the dataframe for reference.
# In[23]:
data_df = pd.DataFrame(data)
data_df.set_index('clause', inplace=True)
data_df.to_csv('dataset.csv') # export dataset
data_df.head()
# #### Random Samples of the Data
# In[15]:
# randomized = [r for r in counts.search2result['T.const→ lex (with article separation)']]
# random.shuffle(randomized)
# In[16]:
# B.show(randomized, end=50, condenseType='phrase', withNodes=True, extraFeatures={'sem_set'})
# <hr>
#
# ### Excursus: Checking Context Tags and Gathering Examples
#
# In this section I will inspect the tokens that are generated and counted, as well as pull out some examples and their counts for the presentation.
# In[17]:
# patterns = {'funct.-> st.verb.lex': '\D*\.-> \D*\.\D*\[',
# 'funct.prep-> st.verb.lex': '\D*\.\D+\-> \D*\.\D*\['}
# token_examps = collections.defaultdict(list)
# for token in counts.data.index:
# for query, pattern in patterns.items():
# if re.match(pattern, token):
# token_examps[query].append(token)
# for query in token_examps:
# random.shuffle(token_examps[query])
# examples = token_examps[query][:10]
# targets = list()
# # get example target nouns
# for ex in examples:
# ex_target = counts.data.loc[ex].sort_values(ascending=False).index[0]
# targets.append(ex_target)
# show_random = [f'target: {target} \t {ex}' for target, ex in zip(targets, examples)]
# print('QUERY: ', query)
# print('-'*5)
# print('\n'.join(show_random))
# print('-'*20, '\n')
# Now some more specific counts...
# In[18]:
counts.data['לב.n1']['T.Objc→ זכה.v1.piel'].sum()
# In[19]:
counts.data['פתח.n1']['T.Cmpl→ עמד.v1.qal'].sum()
# In[20]:
counts.data['אישׁ.n1']['T.Subj→ פקד.v1.hit'].sum()
# In[21]:
counts.data['שׁער.n1']['T.Loca→ שׁית.v1.qal'].sum()
# In[22]:
counts.data['גוי.n1']['T.ב.Adju→ אמר.v1.qal'].sum()
# In[23]:
counts.data['יד.n1']['T.מן.Cmpl→ ישׁע.v1.hif'].sum()
# In[24]:
counts.data['עת.n1']['T.ב.Time→ נתן.v1.nif'].sum()
# In[25]:
counts.data['דרך.n1']['T.ל.Cmpl→ פנה.v1.qal'].sum()
# <hr>
# #### Examining the Dataset
#
# Below we look at the number of dimensions in the data:
# In[26]:
counts.data.shape
# And a sample of the data is below, sorted on the results of אלהים in order to bring up interesting examples.
# In[27]:
counts.data.sort_values(ascending=False, by='אלהים.n1').head(10)
# Next we look at a few example counts:
# In[28]:
pd.DataFrame(counts.data['אלהים.n1'][counts.data['אלהים.n1'] > 0].sort_values(ascending=False)).head(15)
# This gives a good idea of the content of the co-occurrence counts.
# #### Various Tag Searches Below
#
# Below I isolate a few tags of interest to serve as examples in the paper.
#
# **TODO:** Extract and display all the exact examples.
# In[29]:
prec = [tag for tag in counts.data.index if 'PreC' in tag and 'אישׁ.n1' in tag]
prec
# In[30]:
target = 'עלה.n1'
target_counts = counts.data[target][counts.data[target]>0].sort_values(ascending=False)
prec_contexts = target_counts[target_counts.index.str.contains('ל.PreC')]
prec_contexts
# ## Adjusting the Counts
#
# We will apply two primary adjustments:
#
# 1. We drop co-occurrences that are unique to a noun. The dropped observations will thus be considered outliers. While these items are useful for describing the uniqueness of a given lexeme, they are unhelpful for drawing comparisons between our sets.
# 2. We convert the counts into a measure of statistical significance. For this we use Fisher's exact test, which is well suited to datasets with counts of less than 5; our matrix is likely to have many such counts. The resulting p-values, where p < 0.05 marks a statistically significant colexeme, will be log-transformed, and values that fall below expected frequencies will be negatively transformed.
# ### Remove Co-occurrence Outliers
#
# We will remove colexemes/bases that occur with only one target noun. This is done by subtracting the row total from each item in the row. Any 0 value in a row means that that row has a unique colexeme that only occurs with one target noun (we will call that a `hapax_colex` here). We will remove these rows further down.
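# A toy example (not the real data) of the check described above: after subtracting each
# row's total, a 0 appears exactly where a single target noun carries the whole row,
# i.e. where a context is attested with only one noun.
toy_counts = pd.DataFrame({'noun_a': [3, 0], 'noun_b': [2, 4]}, index=['ctx1', 'ctx2'])
toy_counts.sub(toy_counts.sum(1), axis=0)  # ctx2 shows a 0 under noun_b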
# In[31]:
colex_counts = counts.data.sum(1)
remaining_counts = counts.data.sub(colex_counts, axis=0) # subtract colex_counts
hapax_colex = remaining_counts[(remaining_counts == 0).any(1)] # select rows that have a 0 value anywhere
# Below is an example just to make sure we've selected the right indices. The value has been manually chosen from `hapax_colex`.
# In[32]:
counts.data.loc['T.Adju→ אכל.v1.pual'].sort_values(ascending=False).head()
# Indeed this context tag is only attested with חרב, thus it is not useful for drawing meaningful comparisons to this noun. Below we see that there are `8191` other such basis elements. We remove these data points in the next cell and name the new dataset `data`.
# In[33]:
hapax_colex.shape
# In[34]:
data = counts.data.drop(labels=hapax_colex.index, axis=0)
print(f'New data dimensions: {data.shape}')
print(f'New total observations: {data.sum().sum()}')
print(f'Observations removed: {counts.data.sum().sum() - data.sum().sum()}')
# Random example to make sure there are no unique colexemes in the new dataset:
# In[35]:
data.loc['T.Adju→ בוא.v1.hif'].sort_values(ascending=False).head(5)
# #### Check for Orphaned Target Nouns
#
# I want to see if any target nouns in the dataset now have 0 basis observations (i.e. "orphaned") as a result of our data pruning. The test below shows that there are no columns in the table with a sum of 0.
# In[36]:
data.loc[:, (data == 0).all(0)].shape
# ### How many zero counts are there?
#
# The raw count matrix has a lot of sparsity. Here's how many zeros there are. We also count other values.
# In[37]:
unique_values, value_counts = np.unique(data.values, return_counts=True)
unique_counts = pd.DataFrame.from_dict(dict(zip(unique_values, value_counts)), orient='index', columns=['count'])
display(HTML('<h5>Top 10 Unique Values and Their Counts in Dataset</h5>'))
unique_counts.head(10)
# In[38]:
zero = unique_counts.loc[0.0][0]
non_zero = unique_counts[unique_counts.index > 0].sum()[0]
non_zero_ratio, zero_ratio = non_zero / (non_zero+zero), zero / (non_zero+zero)
print(f'Number of zero count variables: {zero} ({round(zero_ratio, 2)})')
print(f'Number of non-zero count variables: {non_zero} ({round(non_zero_ratio, 2)})')
# Below the number of observed counts is given:
# In[39]:
data.sum().sum()
# ### Apply Fisher's Exact Test
#
# Now we apply the Fisher's exact test to the dataset. This involves supplying values to a 2x2 contingency table that is fed to `scipy.stats.fisher_exact`: `a` is the joint count of a target noun and a context, `b` is the remaining count for that noun, `c` the remaining count for that context, and `d` the remainder of all observations.
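# A toy demonstration of that transformation with made-up numbers (not drawn from the
# dataset): Fisher's exact test on a single hypothetical 2x2 table, with the log10 p-value
# signed by whether the observed count exceeds the expected count.
toy_table = np.array([[8, 92], [40, 4860]])
toy_odds, toy_p = stats.fisher_exact(toy_table)
toy_expected = toy_table[0].sum() * toy_table[:, 0].sum() / toy_table.sum()
toy_score = -np.log10(toy_p) if toy_table[0, 0] >= toy_expected else np.log10(toy_p)
print(round(toy_score, 2))  # a large positive score indicates attraction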
# #### Number of Datapoints To Iterate Over
#
# The Fisher's exact test takes some time to run. That is because it must iterate over a lot of pairs. The number is printed below.
# In[40]:
print(data.shape[0]*data.shape[1])
# #### Apply the Tests
#
# The whole run takes 5.5-6.0 minutes on a 2017 Macbook pro.
# In[41]:
# data for contingency tables
target_obs = data.apply(lambda col: col.sum(), axis=0, result_type='broadcast') # total target lexeme observations
colex_obs = data.apply(lambda col: col.sum(), axis=1, result_type='broadcast') # total colexeme/basis observations
total_obs = data.sum().sum() # total observations
# preprocess parts of contingency formula;
# NB: a_matrix = data
b_matrix = target_obs.sub(data)
c_matrix = colex_obs.sub(data)
d_matrix = pd.DataFrame.copy(data, deep=True)
d_matrix[:] = total_obs
d_matrix = d_matrix.sub(data+b_matrix+c_matrix)
fisher_transformed = collections.defaultdict(lambda: collections.defaultdict())
i = 0 # counter for messages
indent(reset=True) # TF utility for timed messages
info('applying Fisher\'s test to dataset...')
indent(level=1, reset=True)
for lex in data.columns:
for colex in data.index:
a = data[lex][colex]
b = b_matrix[lex][colex]
c = c_matrix[lex][colex]
d = d_matrix[lex][colex]
contingency = np.matrix([[a, b], [c, d]])
oddsratio, pvalue = stats.fisher_exact(contingency)
fisher_transformed[lex][colex] = pvalue
i += 1
if i % 100000 == 0: # update message every 100,000 iterations
info(f'finished iteration {i}...')
indent(level=0)
info(f'DONE at iteration {i}!')
fisherdata = pd.DataFrame(fisher_transformed)
# In[42]:
fisherdata.head(10)
# ### log10 transformation
# In[43]:
expectedfreqs = (data+b_matrix) * (data+c_matrix) / (data+b_matrix+c_matrix+d_matrix)
fishertransf = collections.defaultdict(lambda: collections.defaultdict())
indent(reset=True)
info('applying log10 transformation to Fisher\'s data...')
for lex in data.columns:
for colex in data.index:
observed_freq = data[lex][colex]
exp_freq = expectedfreqs[lex][colex]
pvalue = fisherdata[lex][colex]
if observed_freq < exp_freq:
logv = np.log10(pvalue)
fishertransf[lex][colex] = logv
else:
logv = -np.log10(pvalue)
fishertransf[lex][colex] = logv
info('finished transformations!')
fishertransf = pd.DataFrame(fishertransf)
# The Fisher's test has produced zero values, indicating a very high degree of attraction between a lexeme and a colexeme. A log-transformed zero equals `infinity`. Below those values are isolated.
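# A quick sanity check of that problem (illustration only): numpy turns a zero p-value
# into an infinite score under the log transformation (with a divide-by-zero warning).
print(-np.log10(0.0))  # inf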
# In[44]:
display(HTML('<h5>contexts x nouns with a p-value of 0 :</h5>'))
inf_nouns = fishertransf.columns[(fishertransf == np.inf).any()]
inf_data = [] # inf data contains column/index information needed to assign the new values
for inf_noun in inf_nouns:
inf_noun2context = pd.DataFrame(fishertransf[inf_noun][fishertransf[inf_noun] == np.inf])
inf_data.append(inf_noun2context)
display(inf_noun2context)
# In this case the Fisher's test has returned a zero value. A p-value of 0 means that the likelihood that אלהים and יהוה are *not* dependent variables is essentially null. We can thus reject the null hypothesis that the two values are not related. There is, rather, a maximum level of confidence that these two values *are* interrelated. The `np.inf` value that results from `-log10(0)` is not viable for calculating vector distances. Thus, we need to substitute an arbitrary, but appropriate, value. Below we access the lowest non-zero p-values in the dataset.
# In[45]:
minimum_pvalues = fisherdata.min()[fisherdata.min() > 0].sort_values()
minmin_noun = minimum_pvalues.index[0]
minmin_context = fisherdata[minimum_pvalues.index[0]].sort_values().index[0]
minimum_pvalues.head(10)
# The minimum noun x context score is shown below.
# In[46]:
minmin_noun
# In[47]:
minmin_context
# The small p-value listed above is used to substitute the infinite values below.
# In[48]:
# make the substitutions
for inf_dat in inf_data:
for noun in inf_dat.columns:
for context in inf_dat.index:
print(f'adjusting infinite score for {noun}')
new_pvalue, new_transf = fisherdata[minmin_noun][minmin_context], fishertransf[minmin_noun][minmin_context]
fisherdata[noun][context] = new_pvalue
print(f'\tpvalue updated to {new_pvalue}')
fishertransf[noun][context] = new_transf
print(f'\ttransformed pvalue updated to {new_transf}')
# Below we double-check that all infinite values have been removed. The test should read `False`.
# In[49]:
# infinites in dataset?
bool(len(fishertransf[(fishertransf == np.inf).any(1)].index))
# ### Comparing Raw and Adjusted Counts
#
# What kinds of counts are "upvoted" and "downvoted" in the adjusted numbers? This information is helpful for gaining insight into the adjustment process and the efficacy of its results.
#
# Below I isolate and compare counts for a set of key lexemes: מלך "king", עיר "city", and חכמה "wisdom". The counts are analyzed by comparing context tag rankings and looking for those contexts which are most affected (i.e. have the most absolute differences) by the changes.
# In[50]:
examine_nouns = ['מלך.n1', 'עיר.n1', 'חכמה.n1']
context_rankings = {}
# gather context rankings into dataframes
for noun in examine_nouns:
# make raw context DF, sorted, with columns count and rank
rawcounts = pd.DataFrame(data[noun].values,
columns=['count'],
index=data.index).sort_values(ascending=False, by='count')
rawcounts['rank'] = np.arange(len(rawcounts))+1 # add column "rank"
# make adjusted context DF, sorted, with columns count and rank
adjcounts = pd.DataFrame(fishertransf[noun].values,
columns=['count'],
index=fishertransf.index).sort_values(ascending=False, by='count')
adjcounts['rank'] = np.arange(len(adjcounts))+1
# put both DFs into dict mapped to noun
context_rankings[noun]={'raw':rawcounts, 'adj':adjcounts}
# print for each noun a report on top up/downgrades
for noun, rankset in context_rankings.items():
raw, adj = rankset['raw'], rankset['adj']
upgrades = pd.DataFrame((raw['rank']-adj['rank']).sort_values(ascending=False))
downgrades = pd.DataFrame((raw['rank']-adj['rank']).sort_values())
upgrades.columns, downgrades.columns = [['difference']]*2
upgrades['previous rank'], downgrades['previous rank'] = [raw['rank']]*2
upgrades['new rank'], downgrades['new rank'] = [adj['rank']]*2
display(HTML(f'<h3>{noun}</h3>'))
print('top 10 raw counts:')
display(raw.head(10))
print('top 10 adjusted counts:')
display(adj.head(10))
print('top 10 rank upgrades')
display(upgrades.head(10))
print('top 10 rank downgrades')
display(downgrades.head(10))
print('-'*40)
print()
# #### Export Data for מלך for Paper
# In[51]:
context_rankings['מלך.n1']['raw'].head(10).to_csv('spreadsheets/king_top10_raw.csv')
round(context_rankings['מלך.n1']['adj'].head(10), 2).to_csv('spreadsheets/king_top10_adj.csv')
# #### Extracting Specific Examples for the Paper (on מלך) to Illustrate Count Adjustments
#
# Below the four separate parts of the contingency table are extracted for מלך "king". These were previously calculated above
# In[52]:
data['מלך.n1']['T.Objc→ נתן.v1.qal'] # A
# In[53]:
b_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # B
# In[54]:
c_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # C
# In[55]:
d_matrix['מלך.n1']['T.Objc→ נתן.v1.qal'] # D
# Where do the 10 cases happen?
# In[56]:
passages = []
for res in counts.target2basis2result['מלך.n1']['T.Objc→ נתן.v1.qal']:
passages.append('{} {}:{}'.format(*T.sectionFromNode(res[0])))
print('; '.join(passages))
# What is the result of the Fisher's test?
# In[57]:
round(fisherdata['מלך.n1']['T.Objc→ נתן.v1.qal'], 4)
# What is the value of the expected count?
# In[58]:
round(expectedfreqs['מלך.n1']['T.Objc→ נתן.v1.qal'], 2)
# In[59]:
round(fishertransf['מלך.n1']['T.Objc→ נתן.v1.qal'], 2)
# How has the rank changed?
# In[60]:
context_rankings['מלך.n1']['raw'].loc['T.Objc→ נתן.v1.qal']
# In[61]:
context_rankings['מלך.n1']['adj'].loc['T.Objc→ נתן.v1.qal']
# <hr>
#
# #### Excursus: A Random Sample Examined
#
# We saw that the model seems to be succeeding at isolating intuitive associations with קול. Let's look at another example at random, in this case the noun ארץ ("land"). Below are the transformed p-values for that noun.
# In[62]:
fishertransf['ארץ.n1'].sort_values(ascending=False).head(10)
# The most associated variables include cases where ארץ is an object to the verb ירשׁ, where ארץ serves as the complement from which something is brought (hifil of יצא and hifil of עלה), frequent constructs with עם "people", the participle of ישב "inhabitant(s)", and ממלכה "kingdom", as well as other satisfying and expected occasions of use. These examples show that the model is working well.
# <hr>
# ## Comparing the Nouns
#
# The nouns are now ready to be compared. I will do so in two ways.
#
# 1. Principal Component Analysis — We have a semantic space with 4,218 dimensions. That is a lot of potential angles from which to compare the vectors. One method that is commonly used in semantic space analysis is principal component analysis, or **PCA**. PCA is a dimensionality reduction method that reduces a multi-dimensional vector to two coordinates in an imagined space that capture the greatest variance between the nouns. We can visualize that space by plotting the two coordinates on an X and Y axis.
# 2. Cosine Similarity — This measure allows us to compare the vectors on the basis of their trajectories. This method is particularly well-suited for semantic spaces because it ignores differences in frequency and compares, rather, the closeness of relationship between two sets of frequencies. A minimal sketch of this measure follows below.
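# Before turning to PCA, here is a minimal sketch of the cosine measure described in
# point 2 (made-up vectors, illustration only): two context profiles with the same
# proportions but different overall frequencies still receive a similarity of 1.0.
vec_a = np.array([2.0, 0.0, 4.0])
vec_b = np.array([1.0, 0.0, 2.0])
print(vec_a @ vec_b / (np.linalg.norm(vec_a) * np.linalg.norm(vec_b)))  # 1.0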
# ### PCA Analysis
#
# We want to apply PCA in order to plot nouns in an imaginary space. The goal is to use the visualization to identify patterns and groups amongst the 199 target nouns. Nouns that are more similar should fall within the same general areas relative to the origin (0, 0). PCA seeks to identify the maximum variance amongst the vector spaces.
# In[63]:
pca = PCA(10) # PCA with 10 principal components
noun_fit = pca.fit(fishertransf.T.values) # get coordinates
pca_nouns = noun_fit.transform(fishertransf.T.values)
plt.figure(figsize=(8, 6))
sns.barplot(x=np.arange(10)+1, y=noun_fit.explained_variance_ratio_[:10])
plt.xlabel('Principal Component', size=20)
plt.ylabel('Ratio of Explained Variance', size=20)
plt.title('Ratio of Explained Variance for Principal Components 1-10 (Scree Plot)', size=20)
plt.show()
# Variance accounted for by PC1 and PC2:
# In[64]:
noun_fit.explained_variance_ratio_[0]+noun_fit.explained_variance_ratio_[1]
# The plot above, also called a scree plot, tells us that the first two principal components only account for 12% of the total variance in the dataset. Thus the PCA noun space is rather noisy. This may be explained by the fact that we are combining many different kinds of syntactic contexts into one dataset, and it may also be due to the rather spread-out nature of lexical data.
#
# Below we extract the top 25 features which are most influential for the first two principal components.
# In[65]:
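# The loadings below scale each principal component's weights by the square root of its
# explained variance, so a loading reflects how strongly a given context contributes to that PC.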
loadings = noun_fit.components_.T * np.sqrt(noun_fit.explained_variance_)
loadings = pd.DataFrame(loadings.T, index=np.arange(10)+1, columns=data.index)
# In[66]:
pc1_loadings = pd.DataFrame(loadings.loc[1].sort_values(ascending=False))
pc2_loadings = pd.DataFrame(loadings.loc[2].sort_values(ascending=False))
pc1_loadings_above0 = pc1_loadings[pc1_loadings[1] > 0.1] # isolate loadings > 0.1
# automatically detect elbow in graph:
elbow = KneeLocator(x=np.arange(pc1_loadings_above0.shape[0]),
y=pc1_loadings_above0[1].values,
curve='convex',
direction='decreasing').knee
# plot it all
plt.figure(figsize=(8, 6))
plt.plot(pc1_loadings_above0.values)
plt.title('Loading Scores > 0.1 by Rank for Principal Component 1', size=20)
plt.ylabel('Loading Score', size=20)
plt.xlabel('Rank', size=20)
plt.xticks(np.arange(pc1_loadings_above0.shape[0], step=20), size=20)
plt.yticks(size=20)
plt.axvline(elbow, color='red') # plot elbow with red line
plt.show()
# #### Top PCX Loadings and Scores (for data exploration)
# In[67]:
# pcx_loadings = pd.DataFrame(loadings.loc[4].sort_values(ascending=False)) # for experiments
# pcx_loadings.head(25)
# #### Top 25 PC1 Loadings and Scores
# In[68]:
pc1_loadings.round(2).head(25).to_csv('spreadsheets/PC1_loadings.csv')
pc1_loadings.head(25)
# #### PC1 Verb Contexts and Loadings
# In[69]:
pc1_loadings[pc1_loadings.index.str.contains('v1')].round(2).head(15).to_csv('spreadsheets/top15_animate_verbs.csv')
top_pc1_loadings = pc1_loadings[pc1_loadings[1] >= 0.30]
pc1_loadings[pc1_loadings.index.str.contains('v1')].head(15)
# #### Looking at T.ל.Cmpl→ לקח.v1.qal
#
# This is an interesting top verbal context. Is it related to marriage situations?
# In[70]:
take_contexts = [r for r in counts.basis2result['T.ל.Cmpl→ לקח.v1.qal']]
random.seed(213214) # shuffle random, preserve state
random.shuffle(take_contexts)
B.show(take_contexts, condenseType='clause', withNodes=True, end=5)
display(HTML(f'<h4>...{len(take_contexts)-5} other results cutoff...'))
# In[71]:
'; '.join(['{} {}:{}'.format(*T.sectionFromNode(r[0])) for r in sorted(take_contexts)])
# In[72]:
len(take_contexts)
# #### PC2 Loadings, top 25
# In[73]:
pc2_loadings.head(25)
# In[74]:
def plot_PCA(pca_nouns,
zoom=tuple(),
noun_xy_dict=False,
save='',
annotate=True,
title='',
components=(pca_nouns[:,0], pca_nouns[:,1])):
'''
Plots a PCA noun space.
Function is useful for presenting various zooms on the data.
'''
x, y = components
# plot coordinates
plt.figure(figsize=(12, 10))
plt.scatter(x, y)
if zoom:
xmin, xmax, ymin, ymax = zoom
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
if title:
plt.title(title, size=18)
plt.xlabel('PC1', size=18)
plt.ylabel('PC2', size=18)
plt.axhline(color='red', linestyle=':')
plt.axvline(color='red', linestyle=':')
# annotate points
if annotate:
noun_xy = {} # for noun_dict
noun_lexs = [f'{reverse_hb(F.voc_lex_utf8.v(counts.target2lex[n]))}' for n in fishertransf.columns]
for i, noun in enumerate(noun_lexs):
noun_x, noun_y = x[i], y[i]
noun_xy[fishertransf.columns[i]] = (noun_x, noun_y)
if zoom: # to avoid annotating outside of field of view (makes plot small)
if any([noun_x < xmin, noun_x > xmax, noun_y < ymin, noun_y > ymax]):
continue # skip noun
plt.annotate(noun, xy=(noun_x, noun_y), size='18')
if save:
plt.savefig(save, dpi=300, bbox_inches='tight')
plt.show()
if noun_xy_dict:
return noun_xy
test_components = (pca_nouns[:,0], pca_nouns[:,1])
# #### Whole PCA Space
# In[75]:
pca_nouns_xy = plot_PCA(pca_nouns, noun_xy_dict=True, save='plots/PCA_whole.png', components=test_components)
# We can already see some interesting tendencies in the data. קול and דבר are grouped in the same quadrant. In the upper right quadrant we see בן and בת. The lower left quadrant presents a particularly interesting match: יד "hand" and אלהים "God".
#
# We zoom in closer below to have a better look at the tendencies.
# #### Main Cluster of PCA space
# In[76]:
plot_PCA(pca_nouns, zoom=((-3, 3, -2.5, 1)), save='plots/PCA_main.png')
# ### ~Animate Nouns
# Note that nouns in the lower right quadrant tend to be people, while on the lower left there are primarily things.
#
# The plot below shows person nouns.
# In[77]:
plot_PCA(pca_nouns, zoom=((-0.1, 5, -2.5, 0.1)), save='plots/PCA_~animates')
# Let's see what nouns to the right of the y axis have most in common. This could corroborate the intuition that the nouns on the right are personal.
#
# First we isolate the nouns with an x-axis value > 0. Those are shown below; they are obviously personal nouns.
# In[78]:
nouns_xy = pd.DataFrame.from_dict(pca_nouns_xy, orient='index', columns=['x', 'y'])
possibly_animate = pd.DataFrame(nouns_xy[nouns_xy.x > 0])
possibly_animate['gloss'] = [F.gloss.v(counts.target2lex[targ]) for targ in possibly_animate.index]
possibly_animate = possibly_animate.reindex(['gloss', 'x', 'y'], axis=1)
# In[79]:
x_animate = pd.DataFrame(possibly_animate.drop('y', axis=1).sort_values(ascending=False, by='x'))
round(x_animate,2).to_csv('spreadsheets/animate_x.csv')
print(f'total number of ~animate nouns {x_animate.shape[0]}')
x_animate
# #### Why בגד?
#
# Why has בגד "garment" made it into the set? We compare the top loading scores against the top scores for בגד.
# In[80]:
def cf_PC_Noun(pc_loadings, noun_counts, noun, pc_name='PC1', ascending=False):
'''
Compares PC loadings and noun counts.
Returns a DF containing the top common
counts sorted on the PC.
'''
top_cts = noun_counts[noun][noun_counts[noun]>0] # isolate non-zero counts
pc_word = pc_loadings.copy() # make copy of PC loadings for modifications
pc_word.columns = [pc_name] # rename col to PCX
pc_word[noun] = top_cts[[i for i in top_cts.index if i in pc_word.index]] # add new column for noun
pc_word = pc_word[pc_word[noun] > 0].sort_values(by='PC1', ascending=ascending) # remove zero counts completely, sort
return pc_word
bgd_pc1 = cf_PC_Noun(pc1_loadings, fishertransf, 'בגד.n1')
bgd_pc1[bgd_pc1.PC1 >= 0.3].round(2).to_csv('spreadsheets/BGD_pc1.csv')
bgd_pc1[bgd_pc1.PC1 >= 0.3]
# Show passages for coord relations for paper:
# In[81]:
etcbc2sbl = {
'Genesis': 'Gen', 'Exodus': 'Exod', 'Leviticus': 'Lev', 'Numbers': 'Num',
'Deuteronomy': 'Deut', 'Joshua': 'Josh', 'Judges': 'Judg', '1_Samuel': '1 Sam', '2_Samuel': '2 Sam',
'1_Kings': '1 Kgs', '2_Kings': '2 Kgs', 'Isaiah': 'Isa', 'Jeremiah': 'Jer', 'Ezekiel': 'Ezek',
'Hosea': 'Hos', 'Joel': 'Joel', 'Amos': 'Amos', 'Obadiah': 'Obad', 'Jonah': 'Jonah', 'Micah': 'Mic',
'Nahum': 'Nah', 'Habakkuk': 'Hab', 'Zephaniah': 'Zeph', 'Haggai': 'Hag', 'Zechariah': 'Zech',
'Malachi': 'Mal', 'Psalms': 'Ps', 'Job': 'Job', 'Proverbs': 'Prov', 'Ruth': 'Ruth',
'Song_of_songs': 'Song', 'Ecclesiastes': 'Eccl', 'Lamentations': 'Lam', 'Esther': 'Esth',
'Daniel': 'Dan', 'Ezra': 'Ezra', 'Nehemiah': 'Neh', '1_Chronicles': '1 Chr', '2_Chronicles': '2 Chr'}
def formatPassages(resultslist):
'''
Formats biblical passages with SBL style
for a list of results.
'''
book2ch2vs = collections.defaultdict(lambda: collections.defaultdict(set))
for result in resultslist:
book, chapter, verse = T.sectionFromNode(result[0])
book = etcbc2sbl[book]
book2ch2vs[book][chapter].add(str(verse))
# assemble in to readable passages list
passages = []
for book, chapters in book2ch2vs.items():
ch_verses = []
for chapter, verses in chapters.items():
verses = ', '.join(f'{chapter}:{verse}' for verse in sorted(verses))
ch_verses.append(verses)
passage = f'{book} {", ".join(ch_verses)}'
passages.append(passage)
return '; '.join(passages)
def collectPassages(contextslist, targetnoun):
'''
Collects and returns neatly
formatted passages
for use in the paper.
'''
# map the passages with dicts to avoid repeats
results = sorted(res for context in contextslist for res in counts.target2basis2result[targetnoun][context])
return formatPassages(results)
bgd_mixContexts = ['']
collectPassages(bgd_pc1.head(4).index[bgd_pc1.head(4).index.str.contains('coord')], 'בגד.n1')
# In[82]:
# B.show(counts.target2basis2result['בגד.n1']['T.coord→ אהרן.n1'], condenseType='phrase', withNodes=True)
# Now we find the context tags that are highest in the set. We pull the upper quartile (75th percentile) of the context tags to see which ones are most shared across these nouns.
# In[83]:
animate_context = fishertransf[possibly_animate.index].quantile(0.75, axis=1).sort_values(ascending=False)
pd.DataFrame(animate_context.head(15))
# #### PCA Space: Focus on Bordering ~Animate Nouns
# In[84]:
plot_PCA(pca_nouns, zoom=((-0.5, 0.5, -1.5, -1)), save='plots/PCA_~animate_border')
# In[85]:
nouns_xy[(nouns_xy.x < 0) & (nouns_xy.x > -0.4)].sort_values(ascending=False, by='x')
# Verbs are the greatest distinguishing factor here, with אמר, בוא,נתן, לקח and others serving a big role. מות "die" also plays a role. These are definitely contexts we could expect with animate nouns.
# ### ~Inanimate Nouns
#
# The nouns to the left of the y axis appear to be mostly inanimate.
# In[86]:
plot_PCA(pca_nouns, zoom=((-2, 0, -2.5, 0)), title='PCA Space: ~Inanimate Noun Cluster')
# Below we pull the tendencies for the nouns with a PC1 < 0. These nouns appear to be impersonal in nature.
# In[87]:
possibly_inanimate = pd.DataFrame(nouns_xy[(nouns_xy.x < 0) & (nouns_xy.y < 0)])
possibly_inanimate['gloss'] = [F.gloss.v(counts.target2lex[targ]) for targ in possibly_inanimate.index]
possibly_inanimate = possibly_inanimate.reindex(['gloss', 'x', 'y'], axis=1)
x_inanimate = pd.DataFrame(possibly_inanimate.drop('y', axis=1).sort_values(by='x'))
round(x_inanimate,2).head(x_animate.shape[0]).to_csv('spreadsheets/inanimate_x.csv')
print(f'Number of total ~inanimates: {x_inanimate.shape[0]}')
print(f'Top ~inanimates: ')
x_inanimate.head(x_animate.shape[0])
# ### Top Influencing ~inanimate Contexts
# In[88]:
pc1_loadings.tail(25).sort_values(by=1).round(2).to_csv('spreadsheets//PC1_loadings_negative.csv')
pc1_loadings.tail(25).sort_values(by=1)
# #### What about מלאך?
#
# Why is מלאך rated in this list of mostly "inanimates"?
# In[89]:
pc_mlak = cf_PC_Noun(pc1_loadings, fishertransf, 'מלאך.n1', ascending=True)
pc_mlak[pc_mlak.PC1 <= -0.2].round(2).to_csv('spreadsheets/MLAK_pc1.csv')
pc_mlak.head(10)
# Note that several of the top 4 contexts are related to אלהים. We pull a few examples with אלהים out for use in the paper.
# In[90]:
collectPassages(['T.אחר.n1.Cmpl→ הלך.v1.qal'], 'אלהים.n1')
# In[91]:
collectPassages(['T.אחר.n1.Cmpl→ הלך.v1.qal'], 'מלאך.n1')
# In[92]:
collectPassages(['אחר.n2.atr→ T'], 'מלאך.n1')
# In[93]:
collectPassages(['T.appo→ אלהים.n1'], 'מלאך.n1')
# The next plot shows nouns to the left of the y-origin. Note especially the terms between y(-0.5) and y(0.0). These are more conceptual nouns. This same trajectory extends up into the far parts of the upper left quadrant through דבר and קול.
# Here is a closer look at the larger cluster near the left side of the y-origin.
# In[94]:
plot_PCA(pca_nouns, zoom=((-0.5, -0.1, -1.5, -1)))
# Moving over one more notch:
# In[95]:
plot_PCA(pca_nouns, zoom=((-1, -0.5, -2, -0.5)))
# ### ~Perception Nouns?
#
# The upper left quadrant contains a number of interesting terms that appear to be mostly abstract. These nouns appear to be related in some sense to perceptions:
# In[96]:
plot_PCA(pca_nouns, zoom=((-2, 0.05, -0.05, 1)), save='plots/PCA_~perception_nouns')
# Below are the most common contexts for these nouns.
# In[97]:
perceptions = nouns_xy[(nouns_xy.x < 0) & (nouns_xy.y > 0)]
perception_contexts = fishertransf[perceptions.index].quantile(0.75, axis=1).sort_values(ascending=False).head(15)
pd.DataFrame(perception_contexts)
# Many perception-related contexts can be seen here, namely when the noun is a direct object to verbs such as שׁמע "hear", ידע "know", ראה "see", מצא "find", and שׁכח "forget".
# ## Experiment in Metaphor Detection
#
# If the contexts of the animate nouns are queried against the inanimate nouns, is it possible to detect cases of metaphorical extension in the dataset?
# In[98]:
# get top 25 animate verbal contexts with Subj roles:
animate_verbal_contexts = pc1_loadings[pc1_loadings.index.str.contains('v') & pc1_loadings.index.str.contains('Subj')].head(25)
print(f'number of verbal contexts searched: {animate_verbal_contexts.shape[0]}')
metaphors = [] # metaphor data here
for i, ia_noun in enumerate(x_inanimate[1:].head(40).index): # go through top 40
# skip these nouns:
if ia_noun in {'אלהים.n1', 'מלאך.n1'}:
continue
# find attested, common contexts
contexts = cf_PC_Noun(animate_verbal_contexts, fishertransf, ia_noun)
if contexts.shape[0]: # a match is found
# gather row data with columns of [noun, context, hits, passages, example]
for context in contexts.index:
results = counts.target2basis2result[ia_noun][context] # get results from searches
hits = len(results)
passages = formatPassages(results)
example = T.text(results[0][0])
metaphors.append([ia_noun, context, hits, passages, example, f'({formatPassages([(results[0][0],)])})'])
metaphors = pd.DataFrame(metaphors, columns=['noun', 'context', 'hits', 'passages', 'example', ' '])
metaphors.to_csv('spreadsheets/metaphors.csv')
display(HTML('<h4>Detected Metaphors</h4>'))
metaphors
# ## Cosine Similarity Analysis
# In[99]:
distances_raw = pairwise_distances(fishertransf.T.values, metric='cosine')
distances = pd.DataFrame(distances_raw, columns=fishertransf.columns, index=fishertransf.columns)
similarities = distances.apply(lambda n: 1-n) # convert distance matrix to similarities.
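# Quick sanity check on the conversion above (illustrative): cosine distance lies in [0, 2],
# so the similarities should fall in [-1, 1], with each noun maximally similar to itself.
assert np.allclose(np.diag(similarities.values), 1.0)
assert similarities.values.max() <= 1.0 + 1e-9 and similarities.values.min() >= -1.0 - 1e-9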
# ### Export Spreadsheet Similarity Data
# In[100]:
for i, term in enumerate(data.sum().sort_values(ascending=False).index):
these_sims = pd.DataFrame(similarities[term].sort_values(ascending=False)[1:])
# export to simple data
these_sims.to_csv(f'../easydata/similarities/{i+1}Rank_{term}.csv')
# #### Export Big Table for Paper (top 20 terms)
# In[101]:
sim_rows = []
three_nouns = []
n_top = 20
for i, term in enumerate(data.sum().sort_values(ascending=False).head(n_top).index):
    three_nouns.append(term)
    if len(three_nouns) < 3 and i < n_top - 1:
        continue
    # flush a row of (up to) three nouns; the original loop dropped every fourth term
    # prep header with extra columns
    header = [x for tup in zip([''] * len(three_nouns), three_nouns) for x in tup]
    sim_rows.append(header)
    noun_scores = []
    for noun in three_nouns:
        top_sims = similarities[noun].sort_values(ascending=False)[1:].head(5)
        noun_scores.append(list(zip(top_sims.index, top_sims.values)))
    score_rows = []
    for sim_group in zip(*noun_scores):
        score_rows.append([x for simdat in sim_group for x in simdat])
    sim_rows.extend(score_rows)
    sim_rows.append([''])
    three_nouns = []
with open('spreadsheets/top20_sims.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerows(sim_rows)
# ### Plot Top 5 Similarity for all Terms
#
# Below we visualize the top 5 similarity scores for all of the nouns.
# In[102]:
plt.figure(figsize=(22, 210))
for i, noun in enumerate(similarities.columns):
sims = similarities[noun].sort_values(ascending=False).head(6)[1:]
sim_values = sims.values
lexemes = [reverse_hb(F.voc_lex_utf8.v(counts.target2lex[noun])) for noun in sims.index]
glosses = [counts.target2gloss[noun] for noun in sims.index]
lex_glosses = [f'{lex} \'{gloss}\'' for lex, gloss in zip(lexemes, glosses)]
sims_new = pd.DataFrame(sim_values, index=lex_glosses)
# make plots
x = np.arange(sims.shape[0])
plt.subplot(50,4,i+1)
plt.plot(sims_new.values)
plt.scatter(x=x, y=sims_new.values, color='red')
plt.title(f'Top 5 Similar to {reverse_hb(F.voc_lex_utf8.v(counts.target2lex[noun]))}', size=30)
plt.xticks(x, lexemes, size=26)
plt.yticks(size=14)
plt.ylim(0, 1)
plt.tight_layout()
# ### Distribution Principle Example for Paper: מים and יין
# In[145]:
pos_water = data['מים.n1'][data['מים.n1'] > 0] # get non-zero counts
pos_wine = data['יין.n1'][data['יין.n1'] > 0]
winewater_common = set(pos_water.index) & set(pos_wine.index) # retrieve data using the non-zero indices in common
winewater = data.loc[winewater_common][['מים.n1', 'יין.n1']]
winewater = winewater.sort_values(by=winewater.sum().sort_values(ascending=False).index[0], ascending=False) # sort on largest sum column
winewater
# Where is "washed WITH wine"?
# In[148]:
B.show(counts.target2basis2result['יין.n1']['T.ב.Cmpl→ כבס.v1.piel'], condenseType='clause')
# In[153]:
T.text(L.u(counts.target2basis2result['יין.n1']['T.ב.Cmpl→ כבס.v1.piel'][0][0], 'verse')[0])
# But מים is much more frequent with the "washing" frame, using a different context:
# In[155]:
data['מים.n1'].sort_values(ascending=False).head(10)
# ### Build Toy Example to Illustrate Similarity Algorithms
#
# Select three words: two that are strongly related and a third that is only loosely related. I have manually selected לחם "bread", בשׂר "flesh", and זהב "gold", along with two contexts involving object use with אכל and נתן.
# In[103]:
ex_targets = ['לחם.n1', 'בשׂר.n1', 'זהב.n1']
ex_contexts = ['T.Objc→ אכל.v1.qal', 'T.Objc→ נתן.v1.qal']
sim_example = data[ex_targets].loc[ex_contexts]
sim_example.to_csv('spreadsheets/sim_example.csv')
sim_example
# #### Make Overly Simple Distance Matrix
# In[104]:
sim_dist = collections.defaultdict(lambda: collections.defaultdict(int))
for noun in sim_example:
for conoun in sim_example:
sim_dist[noun][conoun] = abs(sim_example[noun] - sim_example[conoun]).sum()
sim_dist = pd.DataFrame(sim_dist, columns=sim_example.columns, index=sim_example.columns)
sim_dist.to_csv('spreadsheets/sim_ex_dist.csv')
sim_dist
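# Note (illustrative): the hand-rolled matrix above is just the city-block (L1) distance between
# the example columns; pairwise_distances(sim_example.T.values, metric='manhattan') would
# reproduce sim_dist.values.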
# #### Example Sorted Distances
# In[105]:
pd.DataFrame(sim_dist['לחם.n1'].sort_values()).to_csv('spreadsheets/sim_ex_lxm.csv')
pd.DataFrame(sim_dist['בשׂר.n1'].sort_values()).to_csv('spreadsheets/sim_ex_bfr.csv')
# #### Compare Attestations
# In[106]:
data['לחם.n1'].sum()
# In[107]:
data['בשׂר.n1'].sum()
# #### Build Cosine Similarity Example for Paper
# In[108]:
plt.figure(figsize=(8, 6))
x = sim_example.values[0]
y = sim_example.values[1]
labels = sim_example.columns
plt.scatter(x, y)
for xn, yn in zip(x, y):
plt.plot([0, xn], [0, yn], 'ro-', color='blue')
for xn, yn, labeln in zip(x, y, labels):
plt.annotate(reverse_hb(labeln.split('.')[0])+'.n1', (xn, yn), size=22)
#plt.xlabel(sim_example.index[0], size=22) # commented out because the Hebrew gets messed up
#plt.ylabel(sim_example.index[1], size=22)
plt.xticks(np.arange(90, step=10))
plt.yticks(np.arange(24, step=2))
plt.savefig('plots/sim_example.png', dpi=300, bbox_inches='tight')
plt.show()
# #### Angles
#
# Get angles to draw. Will do it manually to save time.
# In[109]:
# src: https://stackoverflow.com/questions/2827393/angles-between-two-n-dimensional-vectors-in-python
def dotproduct(v1, v2):
return sum((a*b) for a, b in zip(v1, v2))
def length(v):
return math.sqrt(dotproduct(v, v))
def angle(v1, v2):
return math.acos(dotproduct(v1, v2) / (length(v1) * length(v2)))
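# Minimal check of the helpers above (illustrative): orthogonal vectors should be 90 degrees apart.
print(angle([1, 0], [0, 1]) * 180 / math.pi)  # -> 90.0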
# In[110]:
for target in sim_example.columns:
for cotarget in sim_example.columns:
if target == cotarget:
continue
else:
print(f'{target} -> {cotarget}')
rad = angle(sim_example[target].values, sim_example[cotarget].values)
print(rad * 180 / math.pi)
print()
# Cosine Measure applied below.
# In[111]:
simex_dist = pairwise_distances(sim_example.T.values, metric='cosine')
simex_dist = pd.DataFrame(simex_dist, columns=sim_example.columns, index=sim_example.columns)
simex_dist.to_csv('spreadsheets/sim_ex_dist.csv')
simex_dist
# In[112]:
simex_sim = 1 - simex_dist
simex_sim.to_csv('spreadsheets/sim_ex_sim.csv')
simex_sim
# Export an example for the presentation, נחשׁת "copper", the result of which is improved in the semantic network experiment further below.
# In[113]:
save_figs = ['נחשׁת.n1', 'זהב.n1']
for noun in save_figs:
sims = similarities[noun].sort_values(ascending=False).head(6)[1:]
sim_values = sims.values
lexemes = [reverse_hb(F.voc_lex_utf8.v(counts.target2lex[noun])) for noun in sims.index]
glosses = [counts.target2gloss[noun] for noun in sims.index]
lex_glosses = [f'{lex} \'{gloss}\'' for lex, gloss in zip(lexemes, glosses)]
noun_text = F.voc_lex_utf8.v(counts.target2lex[noun])
# make plots
x = np.arange(sims.shape[0])
plt.plot(sim_values)
plt.scatter(x=x, y=sim_values, color='red')
plt.title(f'Top 5 Similarities for {reverse_hb(noun_text)}', size=30)
plt.xticks(x, lexemes, size=20)
plt.ylabel('% Similar', size=20)
plt.ylim(0, 1)
plt.savefig(f'plots/similarity_{noun_text}', dpi=300, bbox_inches='tight')
plt.show()
# Look at the commonalities between bronze and holiness, skin, and work.
# In[114]:
surprising_bronze = fishertransf[['נחשׁת.n1', 'קדשׁ.n1', 'עור.n2', 'עץ.n1']]
surprising_bronze.quantile(0.75, axis=1).sort_values(ascending=False).head(15)
# In[115]:
expected_bronze = fishertransf[['נחשׁת.n1', 'כסף.n1', 'זהב.n1']]
expected_bronze.quantile(0.75, axis=1).sort_values(ascending=False).head(15)
# ## Gephi Network Visualization
#
# The plots above are helpful for looking at individual words and their top similar terms, but they do not really give a sense of the bigger picture, nor of the connections between terms. For instance, רחב and ארך are each other's most similar noun, but that relationship, and their mutual relationships to other terms, cannot be seen clearly in the individual plots.
#
# We can better visualize word relationships by graphing them as nodes connected by edges in a semantic network. The similarity scores can be used to weight the edges. A visual plotting studio like [Gephi](https://gephi.org) can then be used to lay the nodes and edges out in a visual space.
#
# There are a couple of challenges to overcome before the nodes and edges can be exported for Gephi. The most important is deciding which edges to export. It is not practical to plot 199x199 edges, and in any case not all noun x noun comparisons have positive similarities (many are negative). That simplifies at least one aspect of edge selection: we weed out all relationships with negative similarity scores. Below, the number of positive similarity ratings per noun is shown.
# In[116]:
for lex in similarities.columns:
positives = similarities[lex][similarities[lex] > 0]
print(lex, positives.shape[0])
# The best cut-off point for edge values is the point of maximum curvature, known as the "knee" (or, for a decreasing curve like these, an "elbow"). `kneed` is a Python package implementing the Kneedle algorithm, which detects this point automatically ([here](https://github.com/arvkevi/kneed)). The detector is demonstrated below, where the red lines mark the detected elbows. These points will be used as the cut-off points for the edge exports.
# In[117]:
plt.figure(figsize=(22, 210))
for i, noun in enumerate(similarities.columns):
sims = similarities[noun][similarities[noun] > 0].sort_values(ascending=False)[1:]
x = np.arange(sims.shape[0])
y = sims.values
# detect elbow
elbow = KneeLocator(x, y, curve='convex', direction='decreasing').knee
# make plots
plt.subplot(50,4,i+1)
plt.plot(x, y)
plt.title(f'{reverse_hb(F.voc_lex_utf8.v(counts.target2lex[noun]))}', size=30)
plt.ylim(0, 0.8)
plt.axvline(elbow, color='red')
plt.tight_layout()
# ### Export Elbow Edges
#
# Use `kneed` to detect the ideal edge cut-off point.
# In[118]:
node_maps = {}
nodes = []
edges = []
# make nodes
ID = 0
for noun in similarities:
label = F.voc_lex_utf8.v(counts.target2lex[noun])
nodes.append([ID, label])
node_maps[noun] = ID
ID += 1
# make edges
for noun in similarities.columns:
positive_sims = similarities[noun][similarities[noun] > 0].sort_values(ascending=False)[1:]
# detect elbow
x = np.arange(positive_sims.shape[0])
y = positive_sims.values
elbow = KneeLocator(x, y, curve='convex', direction='decreasing').knee
edge_sims = positive_sims.head(elbow)
for conoun in edge_sims.index:
source = node_maps[noun]
target = node_maps[conoun]
weight = similarities[noun][conoun]
edges.append([source, target, weight])
# export csv's
with open('gephidata/sim_nodes_elbows.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(['ID', 'Label'])
writer.writerows(nodes)
with open('gephidata/sim_edges_elbows.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(['Source', 'Target', 'Weight'])
writer.writerows(edges)
# # TODO: Re-run Gephi with corrected data and import new images
#
# The results presented below will change slightly since I have found some mistakes in the context parameters. To be fixed soon. -C, 29 Dec 2018
# ## Gephi Results
#
# Below the results of the gephi graph are displayed. Force-Atlas was used to arrange the nodes. The results show discernible neighborhoods of similar nouns.
#
# <img src="gephidata/graphs/full_graph.png">
# ## Examining Semantic Networks
#
# Below we look closer at the neighborhoods found in the semantic network. Specifically, we want to see what features give rise to the similarities that are registered.
# ### Group 1
#
# <img src="gephidata/graphs/group1.png">
# In[119]:
group1_targets = ['חק.n1', 'שׁבת.n1', 'תורה.n1', 'מצוה.n1',
'חקה.n1', 'עדות.n1', 'משׁפט.n1']
group1_tendencies = fishertransf[group1_targets].quantile(0.75, axis=1).sort_values(ascending=False).head(15)
| pd.DataFrame(group1_tendencies) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 2 09:20:37 2021
Compiles CSAS data into SQLite DB
@author: buriona,tclarkin
"""
import sys
import os
from pathlib import Path
import pandas as pd
import numpy as np
import sqlite3
import sqlalchemy as sql
import zipfile
from zipfile import ZipFile
from requests import get as r_get
from io import StringIO
# Load directories and defaults
this_dir = Path(__file__).absolute().resolve().parent
#this_dir = Path('C:/Programs/shread_dash/database/CSAS')
ZIP_IT = False
ZIP_FRMT = zipfile.ZIP_LZMA
DEFAULT_DATE_FIELD = 'date'
DEFAULT_ARCH_DIR = Path(this_dir,'csas_archive')
DEFAULT_CSV_DIR = Path(this_dir,'data')
DEFAULT_DB_DIR = this_dir
COL_TYPES = {
'date': str,'site':str,'type':str,'albedo':float,'snwd':float,'temp':float,'flow':float
}
# Define functions
def compose_date(years, months=1, days=1, weeks=None, hours=None, minutes=None,
seconds=None, milliseconds=None, microseconds=None, nanoseconds=None):
"""
for composing dates from arrays of specified quantities (years, months, etc)
output: datetime array
"""
years = np.asarray(years) - 1970
months = np.asarray(months) - 1
days = np.asarray(days) - 1
types = ('<M8[Y]', '<m8[M]', '<m8[D]', '<m8[W]', '<m8[h]',
'<m8[m]', '<m8[s]', '<m8[ms]', '<m8[us]', '<m8[ns]')
vals = (years, months, days, weeks, hours, minutes, seconds,
milliseconds, microseconds, nanoseconds)
return sum(np.asarray(v, dtype=t) for t, v in zip(types, vals)
if v is not None)
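# Example (illustrative): day-of-year 32 of 2021 at 13:00
#   compose_date(years=[2021], days=[32], hours=[13])
#   -> array(['2021-02-01T13'], dtype='datetime64[h]')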
def process_csas_archive(data_dir=DEFAULT_CSV_DIR,csas_archive=DEFAULT_ARCH_DIR,verbose=False):
"""
process csas archive data (.csv in DEFAULT_ARCH_DIR) from https://snowstudies.org/archived-data/
output: none in consul, formatted .csv files to DEFAULT_CSV_DIR
"""
# Check for output directory:
if os.path.isdir(data_dir) is False:
os.mkdir(data_dir)
print('Preparing processed csas .csv files for database creation...')
# Check for input data
if os.path.isdir(csas_archive) is False:
print("No archive data provided...")
return
if csas_archive.glob("*.csv") is []:
print("No archive data provided...")
return
for archive_file in csas_archive.glob("*.csv"):
if "dust" in str(archive_file):
continue
else:
file = str(archive_file).replace(str(csas_archive),"").replace("\\","")
site = file.split("_")[0]
if verbose:
print(f'Processing {archive_file.name}...')
df_in = pd.read_csv(archive_file)
# Create output df
df_out = pd.DataFrame(index=df_in.index)
df_out["site"] = site
# Check dates
if "24hr" in str(archive_file):
dtype = "dv"
dates = compose_date(years=df_in.Year, days=df_in.DOY)
if "1hr" in str(archive_file):
dtype = "iv"
dates = compose_date(years=df_in.Year, days=df_in.DOY, hours=df_in.Hour / 100)
df_out["type"] = dtype
# Check for albedo
if ("PyDwn_Unfilt_W" in str(df_in.columns)) and ("PyUp_Unfilt_W" in str(df_in.columns)):
df_out["albedo"] = df_in["PyDwn_Unfilt_W"] / df_in["PyUp_Unfilt_W"]
df_out.loc[df_out["albedo"] > 1, "albedo"] = 1
df_out.loc[df_out["albedo"] < 0, "albedo"] = 0
else:
df_out["albedo"] = np.nan
# Check for snow depth
if ("Sno_Height_M" in str(df_in.columns)):
df_out["snwd"] = df_in["Sno_Height_M"]*3.281*12 # Convert to inches
else:
df_out["snwd"] = np.nan
# Check for temperature
if dtype=="dv":
if ("UpAir_Avg_C" in str(df_in.columns)):
df_out["temp"] = df_in["UpAir_Avg_C"]*9/5+32 # Convert to F
elif ("Air_Max_C" in str(df_in.columns)) & ("Air_Min_C" in str(df_in.columns)):
df_out["temp"] =(df_in["Air_Max_C"]/2+df_in["Air_Min_C"]/2)*9/5+32 # Convert to F
else:
df_out["temp"] = np.nan
elif dtype=="iv":
if ("UpAir_Max_C" in str(df_in.columns)):
df_out["temp"] = df_in["UpAir_Max_C"]*9/5+32 # Convert to F
elif ("Air_Max_C" in str(df_in.columns)):
df_out["temp"] = df_in["Air_Max_C"]*9/5+32 # Convert to F
else:
df_out["temp"] = np.nan
# Check for flow
if ("Discharge" in str(df_in.columns)):
df_out["flow"] = df_in["Discharge_CFS"]
else:
df_out["flow"] = np.nan
df_out.index = dates
df_out.to_csv(Path(data_dir,file),index_label="date")
def process_csas_live(data_dir=DEFAULT_CSV_DIR,verbose=False):
"""
process csas live data from https://snowstudies.org/current-conditions/
output: none in consul, formatted .csv files to DEFAULT_CSV_DIR
"""
# Check for output directory:
if os.path.isdir(data_dir) is False:
os.mkdir(data_dir)
csas_sites = ["SBSP","SASP","PTSP","SBSG"]
csas_dtypes = ["iv","dv"]
# Set filepath extension for dtype
for dtype in csas_dtypes:
if verbose:
print(f'Processing {dtype} data...')
if dtype == "iv":
ext = "Hourly.php"
if dtype == "dv":
ext = "Daily.php"
for site in csas_sites:
if verbose:
print(f'for {site}...')
# Construct url
site_url = "https://www.snowstudies.info/NRTData/" + site + "Full" + ext
print(site_url)
# Import
failed = True
tries = 0
while failed:
try:
csv_str = r_get(site_url,timeout=None,verify=False).text
failed = False
except TimeoutError:
raise Exception("Timeout; Data unavailable?")
tries += 1
print(tries)
if tries > 15:
return
csv_io = StringIO(csv_str)
try:
f = pd.read_html(csv_io)
except ValueError:
return
df_in = f[1]
            if df_in is None or df_in.empty:
                if verbose:
                    print("Data not available")
                return
if dtype == "dv":
dates = compose_date(years=df_in.Year,days=df_in.Day)
if dtype == "iv":
dates = compose_date(years=df_in.Year, days=df_in.Day,hours=df_in.Hour/100)
df_out = pd.DataFrame(index=df_in.index,
columns=["site","type","albedo","snwd","temp","flow"])
df_out["site"] = site
df_out["type"] = dtype
for col in df_in.columns:
# Check for albedo and solar radiation
if "Albedo" in col:
df_out["albedo"] = df_in[col]
if "Solar Radiation-Up" in col:
df_out["radup"] = df_in[col]
if "Solar Radiation-Down" in col:
df_out["raddn"] = df_in[col]
# Check for snow depth
if "Snow Depth" in col:
df_out["snwd"] = df_in[col]*3.281*12
df_out.loc[df_out["snwd"] > 109, 'snwd'] = np.nan # 109 is common error value
df_out["snwd"] = df_out["snwd"].interpolate(limit=3)
# Check for temp
if ("Air Temperature" in col) & ("(C" in col):
df_out["temp"] = df_in[col]*9/5+32
# Check for flow
if "Discharge" in col:
df_out["flow"] = df_in[col]
# Fix albedo
if (all(pd.isna(df_out["albedo"]))==True) and ("radup" in df_out.columns) and ("raddn" in df_out.columns):
df_out["albedo"] = df_out["raddn"] / df_out["radup"]
df_out.loc[df_out["albedo"]>1,'albedo'] = 1
df_out.loc[df_out["albedo"]<0,"albedo"] = 0
if ("radup" in df_out.columns):
df_out = df_out.drop(labels=["radup"],axis=1)
if ("raddn" in df_out.columns):
df_out = df_out.drop(labels=["raddn"], axis=1)
# Add date index
df_out.index=dates
file = f"{site}_{dtype}_live.csv"
df_out.to_csv(Path(data_dir,file),index_label="date")
def get_dfs(data_dir=DEFAULT_CSV_DIR,verbose=False):
"""
Get and merge dataframes imported using functions
"""
csas_iv_df_list = []
csas_dv_df_list = []
print('Preparing .csv files for database creation...')
for data_file in data_dir.glob('*.csv'):
if verbose:
print(f'Adding {data_file.name} to dataframe...')
df = pd.read_csv(
data_file,
usecols=COL_TYPES.keys(),
parse_dates=['date'],
dtype=COL_TYPES
)
if not df.empty:
csas_iv_df_list.append(
df[df['type'] == 'iv'].drop(columns='type').copy()
)
csas_dv_df_list.append(
df[df['type'] == 'dv'].drop(columns='type').copy()
)
df_csas_iv = pd.concat(csas_iv_df_list)
df_csas_iv.name = 'csas_iv'
df_csas_dv = pd.concat(csas_dv_df_list)
df_csas_dv.name = 'csas_dv'
print(' Success!!!\n')
return {'csas_iv':df_csas_iv,'csas_dv':df_csas_dv}
def get_unique_dates(tbl_name, db_path, date_field=DEFAULT_DATE_FIELD):
"""
Get unique dates from shread data, to ensure no duplicates
"""
if not db_path.is_file():
return pd.DataFrame(columns=[DEFAULT_DATE_FIELD])
db_con_str = f'sqlite:///{db_path.as_posix()}'
eng = sql.create_engine(db_con_str)
with eng.connect() as con:
try:
unique_dates = pd.read_sql(
f'select distinct {date_field} from {tbl_name}',
con
).dropna()
except Exception:
return pd.DataFrame(columns=[DEFAULT_DATE_FIELD])
return pd.to_datetime(unique_dates[date_field])
def write_db(df, db_path=DEFAULT_DB_DIR, if_exists='replace', check_dups=False,
zip_db=ZIP_IT, zip_frmt=ZIP_FRMT, verbose=False):
"""
Write dataframe to database
"""
sensor = df.name
print(f'Creating sqlite db for {df.name}...\n')
print(' Getting unique site names...')
site_list = | pd.unique(df['site']) | pandas.unique |
import sys
import unique_count
from math import sqrt
from math import atanh
from math import erf
from matplotlib import pyplot as plt
from scipy.stats import spearmanr
from pathlib import Path
from excel_processing import read_mutation_files
from excel_processing import parse_endometrium_mutations
import pandas as pd
from decimal import *
getcontext().prec = 100
def recursive_table_dump_header(table):
result = []
if isinstance(table, dict):
for key in table.keys():
if not isinstance(table[key], dict):
result += [str(key)]
result += recursive_table_dump_header(table[key])
return result
def recursive_table_dump_lines(table, lines):
line = []
if isinstance(table, dict):
for key in table.keys():
line += recursive_table_dump_lines(table[key], lines)
else:
line += [str(table)]
lines.append(line)
return line
def recursive_table_dump(table, output):
with open(output, "w") as csv_file:
lines = []
header = ";".join(recursive_table_dump_header(table))
recursive_table_dump_lines(table, lines)
csv_file.write(header + "\n")
for line in lines:
csv_file.write(";".join(line) + "\n")
def count_by_hla_by_mutation(data, hlas, mutations, min_distance=0):
"""
Reads a pandas dataframe of epitopes, counting distinct epitopes by HLA.
"""
result = {}
for mutation in mutations:
result[mutation] = {}
dictionaries, _ = unique_count.filter_same(data, min_distance=min_distance,
by_hla=mutation, length_total=200)
for hla in hlas:
result[mutation][hla] = 0
for candidate_dict in dictionaries:
result[mutation][candidate_dict["HLA"]] += 1
return result
def m1m2count(data):
"""
Reads the counts of epitopes for -1 and -2 mutations by HLA in a dataset.
"""
hlas = list(data["HLA"].sort_values().unique())
mutations = list(data["ID"].sort_values().unique())
m_1 = [mut for mut in mutations if mut.endswith("_m1")]
m_2 = [mut for mut in mutations if mut.endswith("_m2")]
m_1_counts = count_by_hla_by_mutation(data, hlas, m_1)
m_2_counts = count_by_hla_by_mutation(data, hlas, m_2)
return m_1_counts, m_2_counts, hlas, m_1, m_2
def m1m2count_merge(data):
"""
Reads the total counts of epitopes for -1 and -2 mutations a dataset.
"""
m_1_counts, m_2_counts, hlas, m_1, m_2 = m1m2count(data)
m_1_total = {}
m_2_total = {}
for mutation in m_1:
m_1_total[mutation] = 0
for mutation in m_2:
m_2_total[mutation] = 0
for mutation in m_1:
for hla in hlas:
m_1_total[mutation] += m_1_counts[mutation][hla]
for mutation in m_2:
for hla in hlas:
m_2_total[mutation] += m_2_counts[mutation][hla]
return m_1_total, m_2_total, hlas, m_1, m_2
def std_normal_cdf(x):
return 0.5 + 0.5 * erf(x / sqrt(2))
def fisher_transform(x):
return atanh(x)
def fisher_Z(x, x0, N):
return (fisher_transform(x) - fisher_transform(x0)) * sqrt(N - 3)
def pcc_test(pcc_val, N, two_sided=False):
Z = fisher_Z(pcc_val, 0, N)
cdf = std_normal_cdf(-abs(Z))
if two_sided:
return 2 * cdf
else:
return cdf
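# Example (illustrative): a sample correlation of 0.6 over N = 20 pairs gives a two-sided
# p-value of roughly 0.004 under this Fisher z approximation: pcc_test(0.6, 20, two_sided=True)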
def pcc(x, y, indexes):
"""Pearson's correlation coefficient."""
mean_x = sum([x[idx] for idx in indexes if x[idx] > 0.0]) / len(indexes)
mean_y = sum([y[idx] for idx in indexes if y[idx] > 0.0]) / len(indexes)
sum_sq_x = sum(map(lambda idx: (x[idx] - mean_x) ** 2 if x[idx] > 0.0 else 0.0, indexes))
sum_sq_y = sum(map(lambda idx: (y[idx] - mean_y) ** 2 if y[idx] > 0.0 else 0.0, indexes))
prod_sum = sum(map(lambda idx: (x[idx] - mean_x) * (y[idx] - mean_y) if y[idx] > 0.0 and x[idx] > 0.0 else 0.0, indexes))
return (prod_sum + 1e-6) / sqrt(sum_sq_x * sum_sq_y + 1e-6)
def m1m2p1p2_correlate(data, probabilities):
"""
Correlates the mutation probability with the number of epitopes for that
mutation.
"""
m_1_total, m_2_total, hlas, m_1, m_2 = m1m2count_merge(data)
p_m = probabilities
    indexes_m_1 = [key for key in p_m.keys() if key.endswith("_m1") and key in m_1]
indexes_m_2 = [key for key in p_m.keys() if key.endswith("_m2") and key in m_2]
for index in indexes_m_1:
renamed = index[:-3] + "_m2"
if renamed in m_2_total:
continue
m_2_total[renamed] = 0
for index in indexes_m_2:
renamed = index[:-3] + "_m1"
if renamed in m_1_total:
continue
m_1_total[renamed] = 0
indexes = [key[:-3] for key in indexes_m_1]
indexes += [key[:-3] for key in indexes_m_2]
indexes = list(sorted(set(indexes)))
mq = [m_2_total[idx + "_m2"] / m_1_total[idx + "_m1"] for idx in indexes if p_m[idx + "_m1"] != 0 and m_1_total[idx + "_m1"] != 0]
pq = [p_m[idx + "_m2"] / p_m[idx + "_m1"] for idx in indexes if p_m[idx + "_m1"] != 0 and m_1_total[idx + "_m1"] != 0]
pcc_1 = pcc(m_1_total, p_m, indexes_m_1)
pcc_2 = pcc(m_2_total, p_m, indexes_m_2)
pcc_q = pcc(mq, pq, list(range(len(pq))))
pval_1 = pcc_test(pcc_1, len(indexes_m_1), True)
pval_2 = pcc_test(pcc_2, len(indexes_m_2), True)
pval_q = pcc_test(pcc_q, len(pq), True)
return pcc_1, pcc_2, pcc_q, pval_1, pval_2, pval_q
def dump_full(table, output):
cohorts = [
"European Caucasian",
"USA African American",
"USA Hispanic",
"Japan",
"Germany"
]
strengths = ["strong", "weak", "garbage"]
mms = [1, 2]
result = []
for cohort in cohorts:
object_cohort = table[cohort]
for strength in strengths:
object_strength = object_cohort[strength]
for mm in mms:
object_mm = object_strength[mm]
for candidate in object_mm.keys():
if candidate == "mm":
continue
object_candidate = object_mm[candidate]
for hla in object_candidate.keys():
if not (hla in ["GEDS", "IRS", "candidate"]):
object_hla = object_candidate[hla]
object_eds = object_hla["EDS"]
object_epds = object_hla["EPDS"]
data = [cohort, strength, mm, candidate, hla]
for idx in range(0, 100, 10):
data.append(object_eds[idx * 0.01])
for idx in range(0, 100, 10):
data.append(object_epds[idx * 0.01])
for idx in range(0, 100, 10):
data.append(object_candidate["GEDS"][idx * 0.01])
for idx in range(0, 100, 10):
data.append(object_candidate["IRS"][idx * 0.01])
result.append(data)
header = ["cohort", "strength", "mm", "candidate", "hla"]
for idx in range(0, 100, 10):
header.append(f"EDS: {idx} %")
for idx in range(0, 100, 10):
header.append(f"EPDS: {idx} %")
for idx in range(0, 100, 10):
header.append(f"GEDS: {idx} %")
for idx in range(0, 100, 10):
header.append(f"IRS: {idx} %")
with open(output, "w") as csv_file:
csv_file.write(";".join(header) + "\n")
for line in result:
csv_file.write(";".join(list(map(lambda x: str(x), line))) + "\n")
def doGEDS(p, hlas, freqs, nepitopes):
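    # Sketch of what the loop below computes: for each allele h with n_h predicted epitopes,
    # pi_h = 1 - (1 - p)^n_h is the chance that at least one epitope is presented, and
    # F_h = 1 - (1 - f_h)^2 is the carrier frequency; the per-locus products prod_A and prod_B
    # are then combined by inclusion-exclusion, P(A) + P(B) - P(A)P(B).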
prod_A = Decimal(1.0)
prod_B = Decimal(1.0)
prob = Decimal(p)
for raw_hla, freq in zip(list(freqs["Allele"]),
list(freqs["Allele Frequency"])):
hla = f"HLA-{raw_hla}".replace("*", "")
frequency = Decimal(freq)
if hla in hlas:
nh = nepitopes[hla] if hla in nepitopes.keys() else 0
nh = Decimal(nh)
pih = Decimal(1) - (Decimal(1) - prob) ** (nh)
Fh = Decimal(1) - (Decimal(1) - frequency) ** 2
term = Decimal(1) - Fh * pih
if hla.startswith("HLA-A"):
prod_A *= term
else:
prod_B *= term
result = (Decimal(1) - prod_A) + (Decimal(1) - prod_B) - (Decimal(1) - prod_A) * (Decimal(1) - prod_B)
return float(result)
if __name__ == "__main__":
assert len(sys.argv) == 4
DO_SORT = int(sys.argv[1])
DO_POSTPROC = int(sys.argv[2])
DO_ANALYSIS = int(sys.argv[3])
# Eval:
if DO_SORT:
data_full = unique_count.read_data(Path("table_dump_colon.csv"), delimiter=";")
data_full = data_full.sort_values(["GEDS: 50 %"], ascending=False)
data_wanted = data_full[data_full["strength"] == "weak"]
data_wanted.to_csv("table_sorted_strong_GEDS.csv")
# Get some numbers:
if DO_POSTPROC:
# Load datasets:
data_strong = unique_count.read_data(Path("./NetMHCPanOutput/cMNR_peptides.csv.strong.csv"), delimiter=",")
data_weak = unique_count.read_data(Path("./NetMHCPanOutput/cMNR_peptides.csv.weak.csv"), delimiter=",")
data_garbage = unique_count.read_data(Path("./NetMHCPanOutput/cMNR_peptides.csv.garbage.csv"), delimiter=",")
data_weak = pd.concat([data_strong, data_weak])
data_garbage = | pd.concat([data_weak, data_garbage]) | pandas.concat |
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
import pandas as pd
import time
import numpy as np
from joblib import Parallel, delayed, parallel_backend
from blis import Solar, Grid, Battery, SBGS, monteCarloInputs
import os
import multiprocessing
# =====================
# Function to enable parameter sweep
# =====================
def parameterSweep(inputs):
# Record time to solve
t0 = time.time()
# Load_Data - Expected Columns (units): DatetimeUTC (UTC format), t (min), dt (min), demand (MW), solar (MW)
data = pd.read_csv('data063.csv')
# Scale data to match provided pvSize
data.solar = data.solar * inputs.pvSize / 32.3
# Solar Plant - All inputs are optional (default values shown below)
solar = Solar(plantType='PV', capacity=inputs.pvSize, cost_install=2004., cost_OM_fix=22.02)
# Battery Storage - All inputs are optional (default values shown below)
batt = Battery(capacity=inputs.capacity, rateMax=inputs.rate, roundTripEff=inputs.eff,
cost_install=inputs.costInstall, cost_OM_fix=inputs.costOM,
initCharge=0.0)
# Grid Electricity Supply - All inputs are optional (default values shown below)
grid = Grid(capacity=1000.0, maxEmissions=0.5, emissionCurve_hr=np.linspace(1, 24, 24),
emissionCurve_pct=np.linspace(100, 100, 24), cost_OM_var=100.0)
# Create SBGS (controller is built-in), data is only required inputs, all other components will revert to default if not specified
hres = SBGS(data, solar=solar, batt=batt, grid=grid, i=0.02, n=20)
# Run Simulation
results = hres.run()
# Display Elapsed Time
t1 = time.time()
    print("Time Elapsed: " + str(round(t1 - t0, 2)) + " s")
# Combine inputs and results into output and then return
output = pd.concat([inputs, results], axis=0)
return output
# =====================
# Main Program
# =====================
if __name__ == '__main__':
# ==============
# User Inputs
# ==============
studyName = "results_sizing"
# Monte Carlo Case Inputs (uses excel, each sheet is a separate study)
xls_filename = "inputs_sizing.xlsx"
# sheetnames = ["CAES", "BATT", "UTES", "Flywheel"]
sheetnames = ["CAES"]
# Specify number of iterations per case
iterations = 500 # To test
# iterations = 100 # Used in article
# Number of cores to use
num_cores = multiprocessing.cpu_count() - 1 # Consider saving one for other processes
# ==============
# Run Simulations
# ==============
all_outputs = []
count = 0
# Iterate each Monte Carlo case
for sheetname in sheetnames:
inputs = monteCarloInputs(xls_filename, sheetname, iterations)
# Perform Simulations (Run all plant variations in parallel)
with parallel_backend('multiprocessing', n_jobs=num_cores):
output = Parallel(verbose=10)(
delayed(parameterSweep)(inputs.loc[index]) for index in range(iterations))
# Add output to all_outputs
all_outputs = all_outputs + output
# Move output to dataframe and save (if iterations greater than 10)
if iterations > 10:
df = pd.DataFrame(output)
df.to_csv(studyName + '_pt' + str(count) + '.csv')
count = count + 1
# Combine outputs into single dataframe and save
df = | pd.DataFrame(all_outputs) | pandas.DataFrame |
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time,datetime
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.wait import WebDriverWait
import pymysql
from pandas import DataFrame
from imageio import imread
import matplotlib.pyplot as plt
import jieba
from wordcloud import WordCloud,ImageColorGenerator
class wyy(): # fetch comments from NetEase Cloud Music and build a word cloud
def __init__(self):
self.url = 'https://music.163.com/'
self.chrome_options = Options()
self.chrome_options.add_argument('--headless')
self.chrome_options.add_argument('--disable-gpu')
self.people = {'names':[], 'comments':[], 'dates':[], 'votes':[], 'replied_names':[], 'replied_comments':[]}
        self.isCN = 1 # enable Chinese word segmentation by default
        self.back_coloring_path = r'D:\ciyun\background_image\mouse.jpg' # background image path; the chosen image determines whether the word cloud takes on its shape
        self.text_path = r'D:\ciyun\use.txt' # path of the text to analyze
        self.font_path = r'D:\ciyun\youyuan.ttf' # path of a Chinese font
        self.stopwords_path = r'D:\ciyun\stopwords.txt' # stop-word list
        self.imagename1 = r'D:\ciyun\defaultcolor.png' # output image 1 (default colors)
        self.imagename2 = r'D:\ciyun\colorbyimage.png' # output image 2 (colored from the image)
        self.my_word_list = [] # extra words to add to jieba's dictionary; none added here, extend as needed
def search(self,name):
        'Search for a song by title and artist.'
        #self.driver = webdriver.Chrome() # open a visible browser instead
        driver = webdriver.Chrome(chrome_options = self.chrome_options) # headless Chrome
driver.get(self.url)
time.sleep(0.5)
        driver.set_window_size(1280,800) # enlarge the window in headless mode so the 'srch' element below can be located
put = driver.find_element_by_id("srch")
put.send_keys(name)
time.sleep(0.5)
put.send_keys(Keys.ENTER)
time.sleep(2)
wait = WebDriverWait(driver,10)
wait.until(EC.presence_of_element_located((By.ID,'g_iframe')))
        driver.switch_to_frame('g_iframe') # the page uses an iframe, so switch into it before reading
time.sleep(1)
        put = driver.find_element_by_class_name('fst') # select the "Songs" (单曲) result list
put.click()
wait = WebDriverWait(driver,10)
wait.until(EC.presence_of_element_located((By.CLASS_NAME,'w0')))
music = driver.find_element_by_class_name('w0')
music = music.find_element_by_class_name('text')
music_lyrics = driver.find_element_by_class_name('w1').text
music_name = driver.find_element_by_class_name('w0').text
print('您搜索到的音乐是 '+music_name+' '+music_lyrics)
        # the song title may be followed by extra elements, hence the try below
try:
music = music.find_element_by_class_name('s-fc7')
except:
pass
music.click()
time.sleep(1)
return driver
    def lyrics_download(self,name): # download the lyrics of a song
try:
driver = self.search(name)
content = driver.page_source
            content = content.replace('<br />','\n') # make the output friendlier
html = BeautifulSoup(content,'lxml')
lyrics = html.find(id = 'lyric-content').text
            lyrics = '\n'.join(lyrics.split('\n')[:-1]) # drop the trailing '展开' ("expand") text
finally:
driver.close()
return lyrics
def download_next_page(self,driver):
        'Fetch the HTML of the next page of comments.'
time.sleep(0.5)
next_page = driver.find_element_by_class_name('znxt')
time.sleep(0.5)
        driver.execute_script("window.scrollTo(0,document.body.scrollHeight)") # scroll down to the paging button to mimic a real user
next_page.click()
wait = WebDriverWait(driver,10)
        wait.until(EC.presence_of_element_located((By.CLASS_NAME,'itm'))) # make sure the page has finished loading
content = driver.page_source
content = content.replace('<br />','\n')
html = BeautifulSoup(content,'lxml')
return html
def download_previous_page(self,driver):
        'Fetch the HTML of the previous page of comments.'
time.sleep(1)
previous_page = driver.find_element_by_class_name('zprv')
previous_page.click()
wait = WebDriverWait(driver,10)
wait.until(EC.presence_of_element_located((By.CLASS_NAME,'itm')))
content = driver.page_source
content = content.replace('<br />','\n')
html = BeautifulSoup(content,'lxml')
return html
def change_time(self,time):
        'Normalize timestamps to %Y-%m-%d %H:%M; very old comments only display a date.'
now = datetime.datetime.now()
day = now.strftime('%Y-%m-%d')
year = now.strftime('%Y')
        # convert the timestamp to a uniform format
        if '昨天' in time: # '昨天' means "yesterday"
            yesterday = (now + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')
            time = time.replace('昨天', yesterday + ' ')
elif '前' in time:
minut = int(time[:time.index('分')])
time = (now + datetime.timedelta(minutes=-minut)).strftime('%Y-%m-%d %H:%M')
elif len(time) == 5:
time = day + ' ' + time
elif time.index('月') == 1:
time = time.replace('月','-').replace('日','')
time = year+ '-' + time
elif '年' in time:
time = time.replace('年','-').replace('月','-').replace('日','')
else:
print('不明时间格式')
return None
return time
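        # Examples (illustrative, relative to "now"): '15分钟前' becomes now minus 15 minutes in
        # '%Y-%m-%d %H:%M'; '昨天08:30' becomes yesterday's date followed by ' 08:30'.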
def change_vote(self,vote):
        'Normalize the comment vote count to an int.'
try:
change = vote[vote.index('(')+1:vote.index(')')]
if '万' in change:
change = int(float(change[:change.index('万')])*10000)
else:
change = int(change)
except:
change = 0
return change
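        # Examples (illustrative): change_vote('(35)') -> 35; change_vote('(1.2万)') -> 12000;
        # anything without a parenthesised count falls back to 0.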
def one_page_comments_download(self,html):
        "Collect each commenter's name, content, time, vote count, and the replied-to name and content."
persons = html.find_all(class_ = 'itm')
for person in persons:
comment = person.find(class_ = 'cnt').text
name = comment[:comment.index(':')]
comment = comment[comment.index(':')+1:]
date = person.find(class_ = 'time')
date = self.change_time(date.text)
vote = person.find(class_ = 'rp')
try:
vote = vote.text[vote.text.index('(')+1:vote.text.index(')')]
vote = int(vote)
except ValueError:
vote = 0
try:
replied_comment = person.find(class_ = 'que').text
                if '删除' in replied_comment: # handles "该评论已被删除" (the comment has been deleted)
replied_comment = replied_comment
replied_name = None
else:
replied_name = replied_comment[:replied_comment.index(':')]
replied_comment = replied_comment[replied_comment.index(':')+1:]
except AttributeError as e:
replied_comment = None
replied_name = None
self.people['names'].append(name)
self.people['comments'].append(comment)
self.people['dates'].append(date)
self.people['votes'].append(vote)
self.people['replied_names'].append(replied_name)
self.people['replied_comments'].append(replied_comment)
    def great_comments(self,name = '等你下课 周杰伦'): # only fetch the most-upvoted comments (at most 15)
        'Get the featured comments on the first page (at most 15).'
try:
browser = self.search(name)
wait = WebDriverWait(browser,10)
wait.until(EC.presence_of_element_located((By.CLASS_NAME,'itm')))
content = browser.page_source
content = content.replace('<br />','\n')
html = BeautifulSoup(content,'lxml')
great = {'names':[], 'comments':[], 'dates':[], 'votes':[], 'replied_names':[], 'replied_comments':[]}
persons = html.find_all(class_ = 'itm')
for person in persons[0:15]:
comment = person.find(class_ = 'cnt').text
name = comment[:comment.index(':')]
comment = comment[comment.index(':')+1:]
date = person.find(class_ = 'time')
date = self.change_time(date.text)
vote = person.find(class_ = 'rp')
vote = self.change_vote(vote.text)
                if vote < 10: # a song may not have 15 sufficiently upvoted comments
break
try:
replied_comment = person.find(class_ = 'que').text
replied_name = replied_comment[:replied_comment.index(':')]
replied_comment = replied_comment[replied_comment.index(':')+1:]
except AttributeError as e:
replied_comment = None
replied_name = None
#print(e)
great['names'].append(name)
great['comments'].append(comment)
great['dates'].append(date)
great['votes'].append(vote)
great['replied_names'].append(replied_name)
great['replied_comments'].append(replied_comment)
print('获取了点赞数最多的评论')
finally:
browser.close()
return great
def save_mysql(self,people):
        'Store the collected data in the MySQL database.'
        db = pymysql.connect(host = 'localhost', port = 3306 ,user = 'root', passwd = '<PASSWORD>', db = 'your db', charset='utf8mb4') # use utf8mb4 so emoji and other symbols can be stored
cursor = db.cursor()
sql1 = 'USE text'
sql2 = 'INSERT INTO wyycomments (name, own_comment, vote, date, replied_name, replied_comment) VALUES (%s,%s,%s,%s,%s,%s)'
for i in range(len(people['names'])):
try:
cursor.execute(sql1)
cursor.execute(sql2,(people['names'][i],people['comments'][i],people['votes'][i],people['dates'][i],people['replied_names'][i],people['replied_comments'][i]))
cursor.connection.commit()
except Exception as e:
print(e)
db.rollback()
continue
cursor.close()
db.close()
def save_csv(self,people):
        'Store the collected data in a CSV file.'
people = | DataFrame(people) | pandas.DataFrame |
'''
calculate intrinsic economic value of a property based on buy or rent decision indifference (arbitrage)
Rent = Price * mortgage_rate
       + DEPRECIATION_RATE * min(Building, Price)
       - growth * Land
       + Price * property_tax
       - (Price - 24k) * tax_bracket
       + Price * mortgage_insurance
'''
### PATHES to change
HOST = "/home/invisement/PROJECTS/inVisement2/apps-workshop/"
INPUT_PATH = HOST + "data/"
OUTPUT_PATH = HOST + "data/"
def main():
''' read all input files, prepare housing table, calculate intrinsic value, add return columns, and save output '''
housing_table = read_and_join_input_files()
housing_table = prepare_housing_valuation_table(housing_table)
housing_table['intrinsic house value'] = calculate_intrinsic_value (housing_table).round()
housing_table["total return"] = (housing_table['intrinsic house value'] - housing_table['house price'])/housing_table['house price']
housing_table['net annual return'] = housing_table['total return'] * (housing_table['mortgage rate']+PMI_RATE)
housing_table['annual return'] = housing_table['net annual return'] + housing_table['mortgage rate'] + PMI_RATE
return prune_and_save (housing_table)
### LIBRARIES
import datetime, pandas as pd, numpy as np
### CONSTANTS
PMI_RATE = 0.01 # private mortgage insurance rate
FEDERAL_INCOME_TAX_RATE = 0.30 # federal tax bracket rate
DEPRECIATION_RATE = 0.03 # home annual depreciation rate for a base home (building)
STANDARD_TAX_DEDUCTION = 24000
MAX_MORTGAGE_CREDIT = 1000000 # maximum allowance for tax deduction on mortgage
BASE_QUANTILE = 0.3
### INPUTS
PRICE_FILE = INPUT_PATH + "house price by county.csv"
RENT_FILE = INPUT_PATH + "rent by county.csv"
MORTGAGE_FILE = INPUT_PATH + "mortgage rate 30 year fixed.csv"
GROWTH_FILE = INPUT_PATH + "nominal gdp growth.csv"
TAX_FILE = INPUT_PATH + "property tax by fips.csv"
### OUTPUTS
HOUSING_FILE = OUTPUT_PATH + "housing valuation.csv"
LATEST_HOUSING_FILE = OUTPUT_PATH + "latest housing valuation.csv"
### FUNCTIONS
def read_and_join_input_files (base_quantile=BASE_QUANTILE):
price = pd.read_csv(PRICE_FILE, dtype=str)
price['house price'] = pd.to_numeric(price['house price'])
price['fips'] = price['state fips'].str.zfill(2) + price['county fips'].str.zfill(3)
price = price.filter(['fips', 'state', 'county', 'date', 'house price'])
rent = pd.read_csv(RENT_FILE, dtype=str)
rent['rent'] = | pd.to_numeric(rent['rent']) | pandas.to_numeric |
'''
Epidemiological contact-tracing (close-contact) query
'''
from settings_class import settings_obj
import pandas as pd
from util.logger import logger
from sqlalchemy import desc
from datetime import datetime,timedelta
def get_forward_backward_time(current_time, minutes):
    # compute the earlier (forward) and later (backward) time bounds around current_time
if isinstance(current_time,str):
current = datetime.strptime(current_time, "%Y-%m-%d %H:%M:%S")
else:
current =current_time
backward_time = current + timedelta(minutes=minutes)
forward_time = current - timedelta(minutes=minutes)
return forward_time, backward_time
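# Example (illustrative): a 30-minute window around 2022-03-01 12:00:00
#   get_forward_backward_time("2022-03-01 12:00:00", 30)
#   -> (datetime(2022, 3, 1, 11, 30), datetime(2022, 3, 1, 12, 30))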
def get_forward_contact(forward_time, current_time, location_condition_str, user_id, conn,table_obj,close_contact_record_num):
'''
    Search earlier (forward-in-time) records first: rows between forward_time and current_time, at the same campus and location, ordered by time descending and limited to close_contact_record_num.
:return:
'''
    # fetch up to close_contact_record_num records within the limited time window
s = f"conn.session.query({settings_obj.result_filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]!=user_id, table_obj.c[settings_obj.time_field_name]>=forward_time, table_obj.c[settings_obj.time_field_name]<=current_time, {location_condition_str}).order_by(desc(table_obj.c[settings_obj.time_field_name])).limit(close_contact_record_num).all()"
records = eval(s)
forward_results = [list(result) for result in records]
return forward_results
def get_backward_contact(backward_time,current_time,location_condition_str,user_id,conn,table_obj,close_contact_record_num):
'''
    Then search later (backward-in-time) records: transaction rows between current_time and backward_time, at the same campus and location, ordered by time ascending and limited to close_contact_record_num.
:param close_contact_record_num:
:param backward_time:
:param current_time:
:param location_condition_str:
:param user_id:
:param conn:
:return:
'''
    # fetch up to close_contact_record_num records within the limited time window
s =f"conn.session.query({settings_obj.result_filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]!=user_id, table_obj.c[settings_obj.time_field_name]>=current_time, table_obj.c[settings_obj.time_field_name]<=backward_time, {location_condition_str}).order_by(table_obj.c[settings_obj.time_field_name]).limit(close_contact_record_num).all()"
records = eval(s)
backward_results = [list(result) for result in records]
return backward_results
def get_backward_contacts(backward_time, current_time, location_condition_str, user_id, conn,table_obj, close_contact_record_num=settings_obj.close_contact_people_num, backward_result_df=None):
if backward_result_df is None:
backward_result_df = pd.DataFrame(columns=settings_obj.result_filed_names)
backward_results = get_backward_contact(backward_time, current_time, location_condition_str, user_id, conn,table_obj, close_contact_record_num)
if len(backward_results)>0:
tem_df = pd.DataFrame(backward_results,columns=backward_result_df.columns)
backward_result_df = pd.concat([backward_result_df, tem_df], ignore_index=True)
        backward_result_df.sort_values(by=settings_obj.time_field_name, inplace=True, ascending=True) # sort ascending by time
if len(backward_results)>=close_contact_record_num:
close_contact_record_num = settings_obj.close_contact_people_num - backward_result_df[settings_obj.user_id_field_name].nunique()
if close_contact_record_num>0 and len(backward_result_df)>0:
current_time = backward_result_df.loc[len(backward_result_df)-1,settings_obj.time_field_name]
user_id = backward_result_df.loc[len(backward_result_df)-1,settings_obj.user_id_field_name]
backward_result_df = get_backward_contacts(backward_time,current_time, location_condition_str, user_id, conn,table_obj,close_contact_record_num=close_contact_record_num,backward_result_df=backward_result_df)
return backward_result_df
def get_forward_contacts(forward_time, current_time, location_condition_str, user_id, conn, table_obj, close_contact_record_num=settings_obj.close_contact_people_num, forward_result_df=None):
'''
    Recursive query: keep paging through earlier records until enough distinct contacts are collected.
:param table_obj:
:param forward_time:
:param current_time:
:param location_condition_str:
:param user_id:
:param conn:
:param close_contact_record_num:
:param forward_result_df:
    :return: a pandas DataFrame of contact records
'''
if forward_result_df is None:
forward_result_df = pd.DataFrame(columns=settings_obj.result_filed_names)
forward_results = get_forward_contact(forward_time, current_time, location_condition_str, user_id, conn,table_obj,close_contact_record_num)
if len(forward_results)>0:
tem_df = pd.DataFrame(forward_results, columns=forward_result_df.columns)
forward_result_df = pd.concat([forward_result_df, tem_df], ignore_index=True)
        forward_result_df.sort_values(by=settings_obj.time_field_name, inplace=True, ascending=True) # sort ascending by time
if len(forward_results)>=close_contact_record_num:
            # at this point every record in the [forward_time, current_time] window has been pulled
close_contact_record_num = settings_obj.close_contact_people_num - forward_result_df[settings_obj.user_id_field_name].nunique()
if close_contact_record_num>0 and len(forward_result_df)>0:
current_time = forward_result_df.loc[0,settings_obj.time_field_name]
user_id = forward_result_df.loc[0,settings_obj.user_id_field_name]
forward_result_df = get_forward_contacts(forward_time,current_time, location_condition_str, user_id, conn, table_obj,close_contact_record_num=close_contact_record_num,forward_result_df=forward_result_df)
return forward_result_df
def change_time_type(time,table_obj):
'''
    Convert the time value to the type expected by the table's time column.
    :param time: either a str or a datetime
:param table_obj:
:return:
'''
if isinstance(time,datetime) or isinstance(time,str):
if table_obj.c[settings_obj.time_field_name].type.python_type==datetime:
if isinstance(time,str):
return datetime.strptime(time,'%Y-%m-%d %H:%M:%S')
else:
if isinstance(time,datetime):
return str(time)
return time
def change_userid_type(user_id,table_obj):
if table_obj.c[settings_obj.user_id_field_name].type.python_type==int:
return int(user_id)
return str(user_id)
def get_contact(user_id, conn, table_obj):
    # run the close-contact query for user_id
    # first, fetch user_id's own transaction records between flow_tone_start_time and flow_tone_end_time
    # user_id's own records are also part of the output, so the query selects every field in
    # location_filed_names, time_field_name and result_filed_names
flow_tone_start_time = change_time_type(settings_obj.flow_tone_start_time, table_obj)
flow_tone_end_time = change_time_type(settings_obj.flow_tone_end_time, table_obj)
user_id = change_userid_type(user_id,table_obj)
s = f"conn.session.query({settings_obj.filed_names_str}).filter(table_obj.c[settings_obj.user_id_field_name]== user_id, table_obj.c[settings_obj.time_field_name]>=flow_tone_start_time, table_obj.c[settings_obj.time_field_name]<=flow_tone_end_time).order_by(table_obj.c[settings_obj.time_field_name]).all()"
records = eval(s)
result_df = pd.DataFrame(columns=settings_obj.result_filed_names)
for record in records:
        # for each transaction record, look up related records at the same location within
        # close_contact_time minutes before and after, capped at close_contact_people_num people
current_time = record[settings_obj.time_field_name]
forward_time, backward_time = get_forward_backward_time(current_time, settings_obj.close_contact_time)
forward_time = change_time_type(forward_time, table_obj)
backward_time = change_time_type(backward_time, table_obj)
current_time = change_time_type(current_time, table_obj)
location_condition_list = []
for location_filed_name in settings_obj.location_filed_names:
location_condition = f"table_obj.c['{location_filed_name}']=='{record[location_filed_name]}'"
location_condition_list.append(location_condition)
location_condition_str = ", ".join(location_condition_list)
        # first search earlier records
forward_results = get_forward_contacts(forward_time,current_time,location_condition_str,user_id,conn,table_obj)
        forward_results.drop_duplicates(subset=[settings_obj.user_id_field_name], keep ='last', inplace = True) # de-duplicate by user
        # then search later records
backward_results = get_backward_contacts(backward_time,current_time,location_condition_str,user_id,conn,table_obj)
backward_results.drop_duplicates(subset=[settings_obj.user_id_field_name], keep ='first', inplace = True)
        # combine the results
result_df = | pd.concat([result_df, forward_results], ignore_index=True) | pandas.concat |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
| assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False) | pandas.util.testing.assert_frame_equal |
from classes.handlers.ParamsHandler import ParamsHandler
from classes.handlers.PIDExtractor import PIDExtractor
from typing import List
import pandas as pd
import os
class DataHandler:
def __init__(self, mode: str, output_folder: str, extraction_method: str):
self.mode = mode
self.output_folder = output_folder
self.extraction_method = extraction_method
self.pid_file_paths = None
self.dataset_name = ParamsHandler.load_parameters('settings')['dataset']
def load_data(self, tasks: List) -> dict:
tasks_data = {task: None for task in tasks}
self.pid_file_paths = {task: os.path.join('assets', self.dataset_name, 'PIDs', self.mode + '_' + self.extraction_method + '_' + task + '_pids.csv') for task in tasks}
# extract PIDs
PIDExtractor(mode=self.mode, extraction_method=self.extraction_method, output_folder=self.output_folder,
pid_file_paths=self.pid_file_paths, dataset_name=self.dataset_name).get_list_of_pids(tasks=tasks)
for task in tasks:
print(task)
task_path = os.path.join(self.dataset_name, task)
params = ParamsHandler.load_parameters(task_path)
modalities = params['modalities']
feature_set = params['feature_sets']
modality_data = {modality: None for modality in modalities}
for modality in modalities:
modality_data[modality] = self.get_data(task, modality, feature_set, self.pid_file_paths[task])
tasks_data[task] = modality_data
return tasks_data
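    # Example usage (illustrative; the mode, folder and task names depend on the configured dataset):
    #   handler = DataHandler(mode='single_tasks', output_folder='results', extraction_method='default')
    #   tasks_data = handler.load_data(tasks=['CookieTheft', 'Reading'])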
@staticmethod
def get_data(task: str, modality: str, feature_set: dict, pid_file_path: str) -> dict:
dataset_name = ParamsHandler.load_parameters('settings')['dataset']
feature_path = os.path.join(dataset_name, 'feature_sets')
feature_subsets_path = os.path.join(feature_path, 'feature_subsets')
data_path = os.path.join('datasets', dataset_name)
# get pids from a saved file, which was created by get_list_of_pids based on the conditions given to it
pids = pd.read_csv(pid_file_path)
# initializing the dataset as the list of PIDs
dataset = pids
final_features = []
features = list(feature_set.values())
# unpacking all features from their feature sets into final_features
for feat in features:
features_subset = ParamsHandler.load_parameters(os.path.join(feature_path, feat))
final_features.extend(features_subset)
if modality == 'eye':
for feat in final_features:
to_select = ['interview']
if feat.startswith('eye'):
print("--", feat)
to_select.extend(ParamsHandler.load_parameters(os.path.join(feature_subsets_path, feat)))
eye_data = pd.read_csv(os.path.join(data_path, feat + '.csv'))
eye_dataset = eye_data.loc[eye_data['task'] == task]
eye_dataset = eye_dataset[to_select]
dataset = pd.merge(dataset, eye_dataset, on='interview')
elif modality == 'speech':
            # Merge the NLP data files. No need to do this inside the loop, as that adds time
text_data = pd.read_csv(os.path.join(data_path, 'text.csv'))
acoustic_data = pd.read_csv(os.path.join(data_path, 'acoustic.csv'))
if dataset_name == 'canary':
task_mod_dict = {'CookieTheft': 1, 'Reading': 2, 'Memory': 3}
task_mod = task_mod_dict[task]
lang_merged = pd.merge(text_data, acoustic_data, on=['interview', 'task'])
elif dataset_name == 'dementia_bank':
discourse_data = pd.read_csv(os.path.join(data_path, 'discourse.csv'))
demographic_data = pd.read_csv(os.path.join(data_path, 'demographic.csv'))
lang_merged = pd.merge(text_data, acoustic_data, on=['interview'])
lang_merged = pd.merge(lang_merged, discourse_data, on=['interview'])
lang_merged = | pd.merge(lang_merged, demographic_data, on=['interview']) | pandas.merge |
from datetime import datetime, timedelta
import pandas as pd
from pytz import timezone, utc
def create_dataset() -> pd.DataFrame:
now = datetime.utcnow()
ts = | pd.Timestamp(now) | pandas.Timestamp |
""" test fancy indexing & misc """
from datetime import datetime
import re
import weakref
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer_dtype,
)
import pandas as pd
from pandas import (
DataFrame,
Index,
NaT,
Series,
date_range,
offsets,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.api import Float64Index
from pandas.tests.indexing.common import _mklbl
from pandas.tests.indexing.test_floats import gen_obj
# ------------------------------------------------------------------------
# Indexing test cases
class TestFancy:
"""pure get/set item & fancy indexing"""
def test_setitem_ndarray_1d(self):
# GH5508
# len of indexer vs length of the 1d ndarray
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
# invalid
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df.loc[df.index[2:5], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
# valid
df.loc[df.index[2:6], "bar"] = np.array([2.33j, 1.23 + 0.1j, 2.2, 1.0])
result = df.loc[df.index[2:6], "bar"]
expected = Series(
[2.33j, 1.23 + 0.1j, 2.2, 1.0], index=[3, 4, 5, 6], name="bar"
)
tm.assert_series_equal(result, expected)
def test_setitem_ndarray_1d_2(self):
# GH5508
# dtype getting changed?
df = DataFrame(index=Index(np.arange(1, 11)))
df["foo"] = np.zeros(10, dtype=np.float64)
df["bar"] = np.zeros(10, dtype=complex)
msg = "Must have equal len keys and value when setting with an iterable"
with pytest.raises(ValueError, match=msg):
df[2:5] = np.arange(1, 4) * 1j
def test_getitem_ndarray_3d(
self, index, frame_or_series, indexer_sli, using_array_manager
):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
msgs = []
if frame_or_series is Series and indexer_sli in [tm.setitem, tm.iloc]:
msgs.append(r"Wrong number of dimensions. values.ndim > ndim \[3 > 1\]")
if using_array_manager:
msgs.append("Passed array should be 1-dimensional")
if frame_or_series is Series or indexer_sli is tm.iloc:
msgs.append(r"Buffer has wrong number of dimensions \(expected 1, got 3\)")
if using_array_manager:
msgs.append("indexer should be 1-dimensional")
if indexer_sli is tm.loc or (
frame_or_series is Series and indexer_sli is tm.setitem
):
msgs.append("Cannot index with multidimensional key")
if frame_or_series is DataFrame and indexer_sli is tm.setitem:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, pd.IntervalIndex) and indexer_sli is tm.iloc:
msgs.append("Index data must be 1-dimensional")
if isinstance(index, (pd.TimedeltaIndex, pd.DatetimeIndex, pd.PeriodIndex)):
msgs.append("Data must be 1-dimensional")
if len(index) == 0 or isinstance(index, pd.MultiIndex):
msgs.append("positional indexers are out-of-bounds")
msg = "|".join(msgs)
potential_errors = (IndexError, ValueError, NotImplementedError)
with pytest.raises(potential_errors, match=msg):
idxr[nd3]
def test_setitem_ndarray_3d(self, index, frame_or_series, indexer_sli):
# GH 25567
obj = gen_obj(frame_or_series, index)
idxr = indexer_sli(obj)
nd3 = np.random.randint(5, size=(2, 2, 2))
if indexer_sli is tm.iloc:
err = ValueError
msg = f"Cannot set values with ndim > {obj.ndim}"
else:
err = ValueError
msg = "|".join(
[
r"Buffer has wrong number of dimensions \(expected 1, got 3\)",
"Cannot set values with ndim > 1",
"Index data must be 1-dimensional",
"Data must be 1-dimensional",
"Array conditional must be same shape as self",
]
)
with pytest.raises(err, match=msg):
idxr[nd3] = 0
def test_getitem_ndarray_0d(self):
# GH#24924
key = np.array(0)
# dataframe __getitem__
df = DataFrame([[1, 2], [3, 4]])
result = df[key]
expected = Series([1, 3], name=0)
tm.assert_series_equal(result, expected)
# series __getitem__
ser = Series([1, 2])
result = ser[key]
assert result == 1
def test_inf_upcast(self):
# GH 16957
# We should be able to use np.inf as a key
# np.inf should cause an index to convert to float
# Test with np.inf in rows
df = DataFrame(columns=[0])
df.loc[1] = 1
df.loc[2] = 2
df.loc[np.inf] = 3
# make sure we can look up the value
assert df.loc[np.inf, 0] == 3
result = df.index
expected = Float64Index([1, 2, np.inf])
tm.assert_index_equal(result, expected)
def test_setitem_dtype_upcast(self):
# GH3216
df = DataFrame([{"a": 1}, {"a": 3, "b": 2}])
df["c"] = np.nan
assert df["c"].dtype == np.float64
df.loc[0, "c"] = "foo"
expected = DataFrame(
[{"a": 1, "b": np.nan, "c": "foo"}, {"a": 3, "b": 2, "c": np.nan}]
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("val", [3.14, "wxyz"])
def test_setitem_dtype_upcast2(self, val):
# GH10280
df = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3),
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left = df.copy()
left.loc["a", "bar"] = val
right = DataFrame(
[[0, val, 2], [3, 4, 5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_integer_dtype(left["foo"])
assert is_integer_dtype(left["baz"])
def test_setitem_dtype_upcast3(self):
left = DataFrame(
np.arange(6, dtype="int64").reshape(2, 3) / 10.0,
index=list("ab"),
columns=["foo", "bar", "baz"],
)
left.loc["a", "bar"] = "wxyz"
right = DataFrame(
[[0, "wxyz", 0.2], [0.3, 0.4, 0.5]],
index=list("ab"),
columns=["foo", "bar", "baz"],
)
tm.assert_frame_equal(left, right)
assert is_float_dtype(left["foo"])
assert is_float_dtype(left["baz"])
def test_dups_fancy_indexing(self):
# GH 3455
df = tm.makeCustomDataframe(10, 3)
df.columns = ["a", "a", "b"]
result = df[["b", "a"]].columns
expected = Index(["b", "a", "a"])
tm.assert_index_equal(result, expected)
def test_dups_fancy_indexing_across_dtypes(self):
# across dtypes
df = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]], columns=list("aaaaaaa"))
df.head()
str(df)
result = DataFrame([[1, 2, 1.0, 2.0, 3.0, "foo", "bar"]])
result.columns = list("aaaaaaa")
# TODO(wesm): unused?
df_v = df.iloc[:, 4] # noqa
res_v = result.iloc[:, 4] # noqa
tm.assert_frame_equal(df, result)
def test_dups_fancy_indexing_not_in_order(self):
# GH 3561, dups not in selected order
df = DataFrame(
{"test": [5, 7, 9, 11], "test1": [4.0, 5, 6, 7], "other": list("abcd")},
index=["A", "A", "B", "C"],
)
rows = ["C", "B"]
expected = DataFrame(
{"test": [11, 9], "test1": [7.0, 6], "other": ["d", "c"]}, index=rows
)
result = df.loc[rows]
tm.assert_frame_equal(result, expected)
result = df.loc[Index(rows)]
tm.assert_frame_equal(result, expected)
rows = ["C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
# see GH5553, make sure we use the right indexer
rows = ["F", "G", "H", "C", "B", "E"]
with pytest.raises(KeyError, match="not in index"):
df.loc[rows]
def test_dups_fancy_indexing_only_missing_label(self):
# List containing only missing label
dfnu = DataFrame(np.random.randn(5, 3), index=list("AABCD"))
with pytest.raises(
KeyError,
match=re.escape(
"\"None of [Index(['E'], dtype='object')] are in the [index]\""
),
):
dfnu.loc[["E"]]
# ToDo: check_index_type can be True after GH 11497
@pytest.mark.parametrize("vals", [[0, 1, 2], list("abc")])
def test_dups_fancy_indexing_missing_label(self, vals):
# GH 4619; duplicate indexer with missing label
df = DataFrame({"A": vals})
with pytest.raises(KeyError, match="not in index"):
df.loc[[0, 8, 0]]
def test_dups_fancy_indexing_non_unique(self):
# non unique with non unique selector
df = DataFrame({"test": [5, 7, 9, 11]}, index=["A", "A", "B", "C"])
with pytest.raises(KeyError, match="not in index"):
df.loc[["A", "A", "E"]]
def test_dups_fancy_indexing2(self):
# GH 5835
# dups on index and missing values
df = DataFrame(np.random.randn(5, 5), columns=["A", "B", "B", "B", "A"])
with pytest.raises(KeyError, match="not in index"):
df.loc[:, ["A", "B", "C"]]
def test_dups_fancy_indexing3(self):
# GH 6504, multi-axis indexing
df = DataFrame(
np.random.randn(9, 2), index=[1, 1, 1, 2, 2, 2, 3, 3, 3], columns=["a", "b"]
)
expected = df.iloc[0:6]
result = df.loc[[1, 2]]
tm.assert_frame_equal(result, expected)
expected = df
result = df.loc[:, ["a", "b"]]
tm.assert_frame_equal(result, expected)
expected = df.iloc[0:6, :]
result = df.loc[[1, 2], ["a", "b"]]
tm.assert_frame_equal(result, expected)
def test_duplicate_int_indexing(self, indexer_sl):
# GH 17347
ser = Series(range(3), index=[1, 1, 3])
expected = Series(range(2), index=[1, 1])
result = indexer_sl(ser)[[1]]
tm.assert_series_equal(result, expected)
def test_indexing_mixed_frame_bug(self):
# GH3492
df = DataFrame(
{"a": {1: "aaa", 2: "bbb", 3: "ccc"}, "b": {1: 111, 2: 222, 3: 333}}
)
# this works, new column is created correctly
df["test"] = df["a"].apply(lambda x: "_" if x == "aaa" else x)
# this does not work, ie column test is not changed
idx = df["test"] == "_"
temp = df.loc[idx, "a"].apply(lambda x: "-----" if x == "aaa" else x)
df.loc[idx, "test"] = temp
assert df.iloc[0, 2] == "-----"
def test_multitype_list_index_access(self):
# GH 10610
df = DataFrame(np.random.random((10, 5)), columns=["a"] + [20, 21, 22, 23])
with pytest.raises(KeyError, match=re.escape("'[26, -8] not in index'")):
df[[22, 26, -8]]
assert df[21].shape[0] == df.shape[0]
def test_set_index_nan(self):
# GH 3586
df = DataFrame(
{
"PRuid": {
17: "nonQC",
18: "nonQC",
19: "nonQC",
20: "10",
21: "11",
22: "12",
23: "13",
24: "24",
25: "35",
26: "46",
27: "47",
28: "48",
29: "59",
30: "10",
},
"QC": {
17: 0.0,
18: 0.0,
19: 0.0,
20: np.nan,
21: np.nan,
22: np.nan,
23: np.nan,
24: 1.0,
25: np.nan,
26: np.nan,
27: np.nan,
28: np.nan,
29: np.nan,
30: np.nan,
},
"data": {
17: 7.9544899999999998,
18: 8.0142609999999994,
19: 7.8591520000000008,
20: 0.86140349999999999,
21: 0.87853110000000001,
22: 0.8427041999999999,
23: 0.78587700000000005,
24: 0.73062459999999996,
25: 0.81668560000000001,
26: 0.81927080000000008,
27: 0.80705009999999999,
28: 0.81440240000000008,
29: 0.80140849999999997,
30: 0.81307740000000006,
},
"year": {
17: 2006,
18: 2007,
19: 2008,
20: 1985,
21: 1985,
22: 1985,
23: 1985,
24: 1985,
25: 1985,
26: 1985,
27: 1985,
28: 1985,
29: 1985,
30: 1986,
},
}
).reset_index()
result = (
df.set_index(["year", "PRuid", "QC"])
.reset_index()
.reindex(columns=df.columns)
)
tm.assert_frame_equal(result, df)
def test_multi_assign(self):
# GH 3626, an assignment of a sub-df to a df
df = DataFrame(
{
"FC": ["a", "b", "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": list(range(6)),
"col2": list(range(6, 12)),
}
)
df.iloc[1, 0] = np.nan
df2 = df.copy()
mask = ~df2.FC.isna()
cols = ["col1", "col2"]
dft = df2 * 2
dft.iloc[3, 3] = np.nan
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": Series([0, 1, 4, 6, 8, 10]),
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
# frame on rhs
df2.loc[mask, cols] = dft.loc[mask, cols]
tm.assert_frame_equal(df2, expected)
# with an ndarray on rhs
# coerces to float64 because values has float64 dtype
# GH 14001
expected = DataFrame(
{
"FC": ["a", np.nan, "a", "b", "a", "b"],
"PF": [0, 0, 0, 0, 1, 1],
"col1": [0.0, 1.0, 4.0, 6.0, 8.0, 10.0],
"col2": [12, 7, 16, np.nan, 20, 22],
}
)
df2 = df.copy()
df2.loc[mask, cols] = dft.loc[mask, cols].values
tm.assert_frame_equal(df2, expected)
def test_multi_assign_broadcasting_rhs(self):
# broadcasting on the rhs is required
df = DataFrame(
{
"A": [1, 2, 0, 0, 0],
"B": [0, 0, 0, 10, 11],
"C": [0, 0, 0, 10, 11],
"D": [3, 4, 5, 6, 7],
}
)
expected = df.copy()
mask = expected["A"] == 0
for col in ["A", "B"]:
expected.loc[mask, col] = df["D"]
df.loc[df["A"] == 0, ["A", "B"]] = df["D"]
tm.assert_frame_equal(df, expected)
# TODO(ArrayManager) setting single item with an iterable doesn't work yet
# in the "split" path
@td.skip_array_manager_not_yet_implemented
def test_setitem_list(self):
# GH 6043
# iloc with a list
df = DataFrame(index=[0, 1], columns=[0])
df.iloc[1, 0] = [1, 2, 3]
df.iloc[1, 0] = [1, 2]
result = DataFrame(index=[0, 1], columns=[0])
result.iloc[1, 0] = [1, 2]
tm.assert_frame_equal(result, df)
def test_string_slice(self):
# GH 14424
# string indexing against datetimelike with object
        # dtype should properly raise KeyError
df = DataFrame([1], Index([pd.Timestamp("2011-01-01")], dtype=object))
assert df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="'2011'"):
df.loc["2011", 0]
def test_string_slice_empty(self):
# GH 14424
df = DataFrame()
assert not df.index._is_all_dates
with pytest.raises(KeyError, match="'2011'"):
df["2011"]
with pytest.raises(KeyError, match="^0$"):
df.loc["2011", 0]
def test_astype_assignment(self):
# GH4312 (iloc)
df_orig = DataFrame(
[["1", "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2].astype(np.int64)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.iloc[:, 0:2] = df.iloc[:, 0:2]._convert(datetime=True, numeric=True)
expected = DataFrame(
[[1, 2, "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
# GH5702 (loc)
df = df_orig.copy()
df.loc[:, "A"] = df.loc[:, "A"].astype(np.int64)
expected = DataFrame(
[[1, "2", "3", ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
df = df_orig.copy()
df.loc[:, ["B", "C"]] = df.loc[:, ["B", "C"]].astype(np.int64)
expected = DataFrame(
[["1", 2, 3, ".4", 5, 6.0, "foo"]], columns=list("ABCDEFG")
)
tm.assert_frame_equal(df, expected)
def test_astype_assignment_full_replacements(self):
# full replacements / no nans
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.iloc[:, 0] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
df = DataFrame({"A": [1.0, 2.0, 3.0, 4.0]})
df.loc[:, "A"] = df["A"].astype(np.int64)
expected = DataFrame({"A": [1, 2, 3, 4]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("indexer", [tm.getitem, tm.loc])
def test_index_type_coercion(self, indexer):
# GH 11836
# if we have an index type and set it with something that looks
        # to numpy like the same, but actually is not
# (e.g. setting with a float or string '0')
# then we need to coerce to object
# integer indexes
for s in [Series(range(5)), Series(range(5), index=range(1, 6))]:
assert s.index.is_integer()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
exp = s.index
if 0 not in s:
exp = Index(s.index.tolist() + [0])
tm.assert_index_equal(s2.index, exp)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
for s in [Series(range(5), index=np.arange(5.0))]:
assert s.index.is_floating()
s2 = s.copy()
indexer(s2)[0.1] = 0
assert s2.index.is_floating()
assert indexer(s2)[0.1] == 0
s2 = s.copy()
indexer(s2)[0.0] = 0
tm.assert_index_equal(s2.index, s.index)
s2 = s.copy()
indexer(s2)["0"] = 0
assert s2.index.is_object()
class TestMisc:
def test_float_index_to_mixed(self):
df = DataFrame({0.0: np.random.rand(10), 1.0: np.random.rand(10)})
df["a"] = 10
expected = DataFrame({0.0: df[0.0], 1.0: df[1.0], "a": [10] * 10})
tm.assert_frame_equal(expected, df)
def test_float_index_non_scalar_assignment(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df.loc[df.index[:2]] = 1
expected = DataFrame({"a": [1, 1, 3], "b": [1, 1, 5]}, index=df.index)
tm.assert_frame_equal(expected, df)
def test_loc_setitem_fullindex_views(self):
df = DataFrame({"a": [1, 2, 3], "b": [3, 4, 5]}, index=[1.0, 2.0, 3.0])
df2 = df.copy()
df.loc[df.index] = df.loc[df.index]
tm.assert_frame_equal(df, df2)
def test_rhs_alignment(self):
# GH8258, tests that both rows & columns are aligned to what is
# assigned to. covers both uniform data-type & multi-type cases
def run_tests(df, rhs, right_loc, right_iloc):
# label, index, slice
lbl_one, idx_one, slice_one = list("bcd"), [1, 2, 3], slice(1, 4)
lbl_two, idx_two, slice_two = ["joe", "jolie"], [1, 2], slice(1, 3)
left = df.copy()
left.loc[lbl_one, lbl_two] = rhs
tm.assert_frame_equal(left, right_loc)
left = df.copy()
left.iloc[idx_one, idx_two] = rhs
tm.assert_frame_equal(left, right_iloc)
left = df.copy()
left.iloc[slice_one, slice_two] = rhs
| tm.assert_frame_equal(left, right_iloc) | pandas._testing.assert_frame_equal |
# %%
import pandas as pd
from zhconv import convert
from scipy import stats
from fuzzywuzzy import fuzz
from time import perf_counter
import searchconsole
from datetime import datetime
from datetime import timedelta
# --------------DATA RETRIEVING---------------
# no credentials saved, do not save credentials
#account = searchconsole.authenticate(client_config='client_secrets.json')
# no credentials saved, want to save credentials
#account = searchconsole.authenticate(client_config='client_secrets.json', serialize = 'credentials.json')
# credentials saved as credentials.json
account = searchconsole.authenticate(client_config='client_secrets.json',
credentials='credentials.json')
# webproperty must match what's shown on Google Search Console
webproperty = account['******'] # website url
start = datetime.strptime("******", "%Y-%m-%d") # modify start date
end = datetime.strptime("******", "%Y-%m-%d") # modify end date
df = pd.DataFrame()
while start != end:
start_datetime = datetime.strftime(
start, "%Y-%m-%d")
# interval = 1 day
shifted_datetime = datetime.strftime(start + timedelta(days=1), "%Y-%m-%d")
report = webproperty.query.range(
start_datetime, shifted_datetime).dimension("query").get()
df1 = pd.DataFrame(report.rows)
df1['date'] = datetime.strftime(start, "%Y-%m-%d")
df = pd.concat([df, df1])
print(f"Trend of {start} retrived")
start = start + timedelta(days=1)
print("=========================")
print("ALL DATA RETRIVED")
print("=========================")
df
# df.to_csv('trend.csv', index=False)
# --------------DATA RETRIEVING FINISHED---------------
# %%
# --------------DATA PREPARATION---------------
# unify characters, merge similar keywords
# Merge split words and convert Traditional Chinese to Simplified Chinese -> 'modified_query'
df['modified_query'] = df['query'].apply(lambda x: convert(x, 'zh-cn'))
df['modified_query'] = df['modified_query'].apply(lambda x: x.replace(" ", ""))
# Identify similar keywords
# option 1: fuzzy match words
# TODO: use fuzzywuzzy's process module instead of iterating row by row (see the sketch below):
# https://towardsdatascience.com/fuzzywuzzy-find-similar-strings-within-one-column-in-a-pandas-data-frame-99f6c2a0c212
# http://jonathansoma.com/lede/algorithms-2017/classes/fuzziness-matplotlib/fuzzing-matching-in-pandas-with-fuzzywuzzy/
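# (Hedged sketch, added; not part of the original pipeline.) The TODO above could be
# addressed with fuzzywuzzy's `process` helpers instead of the O(n^2) double loop, e.g.:
#   from fuzzywuzzy import process
#   choices = similar['modified_query'].tolist()
#   best_match = similar['modified_query'].apply(
#       lambda q: process.extractOne(q, choices, scorer=fuzz.token_sort_ratio))
# `extractOne` returns the best (match, score) pair for each query (the query itself
# would still need to be excluded), and the same score threshold of 80 could then be
# applied to decide which rows to merge.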
similar = df[['modified_query', 'query']
].drop_duplicates(subset='modified_query')
similar1 = similar
# record time
timer1 = perf_counter()
for index, row in similar1.iterrows(): # for each row in second df
for index1, row1 in similar1.iterrows(): # loop through the whole second df
r = fuzz.token_sort_ratio(row['modified_query'],
row1['modified_query'])
        if r > 80: # 80 is for a conservative result
# if match, add it into main df
similar.loc[index1, 'aggregated_query'] = row['modified_query']
# and drop the row in second df
similar1 = similar1.drop(index1, axis=0)
print(f"{len(similar1)} rows remain")
print("=========================")
print(
f"{len(similar['aggregated_query'].unique())} unique keywords identified")
print("=========================")
timer2 = perf_counter()
print(f"Identifying Keywords: {timer2 - timer1} Seconds")
# put identified keywords back to df
df = pd.merge(df, similar, how='left', on='modified_query')
# record time
timer3 = perf_counter()
print(f"Total running time: {timer3 - timer1} Seconds")
df.to_csv('prepared.csv')
df
# option 2: count words frequency
# --------------DATA PREPARATION FINISHED---------------
# %%
# --------------LEFT PATH---------------
# sum clicks and impressions for each keyword, to find the top keywords that are worth analyzing
# extract columns and sum
total_count = df[['aggregated_query', 'clicks', 'impressions']]
total_count = total_count.groupby('aggregated_query').sum().reset_index()
# pick fixed number of rows
# TODO: switch to dynamic picking
top_clicks = total_count.nlargest(
int(round(len(total_count) * 0.2)), ['clicks'])
top_impressions = total_count.nlargest(
int(round(len(total_count) * 0.2)), ['impressions'])
# test df
#hc_hi = pd.merge(top_clicks, top_impressions, how='inner', on=['modified_query'])[['modified_query', 'clicks_x', 'impressions_x']].rename(columns={'clicks_x': 'clicks', 'impressions_x': 'impressions'})
# categorization
hc_hi = top_clicks['aggregated_query'][top_clicks['aggregated_query'].isin(
top_impressions['aggregated_query'])] # rows in both top results
hc_li = top_clicks['aggregated_query'][~top_clicks['aggregated_query'].isin(
top_impressions['aggregated_query'])] # rows in clicks top results but not impressions top results
lc_hi = top_impressions['aggregated_query'][~top_impressions['aggregated_query'].isin(
top_clicks['aggregated_query'])] # rows in impressions top results but not clicks top results
total_count.loc[total_count['aggregated_query'].isin(
hc_hi), 'category'] = 'hc_hi'
total_count.loc[total_count['aggregated_query'].isin(
hc_li), 'category'] = 'hc_li'
total_count.loc[total_count['aggregated_query'].isin(
lc_hi), 'category'] = 'lc_hi'
category_df = total_count.dropna(subset=['category'])
category_df
#%%
#TODO:
# split keywords, drop duplicated, and get Google Trends
# --------------LEFT PATH END---------------
# %%
# --------------MIDDLE PATH---------------
# Aggregate modified query on the same day
merged_df = df[['aggregated_query', 'clicks', 'impressions', 'date']]
# identify keywords repeated in the same day and sum them
duplicated = merged_df[merged_df.duplicated(
['aggregated_query', 'date'], keep=False)]
duplicated = duplicated.groupby(['aggregated_query', 'date']).sum()[
['clicks', 'impressions']].reset_index()
# concat with non-repeat keywords
not_duplicated = merged_df.drop_duplicates(
['aggregated_query', 'date'], keep=False)
merged_df = pd.concat([duplicated, not_duplicated])
# recalculate CTR
merged_df['CTR'] = merged_df['clicks'] / merged_df['impressions']
merged_df
# %%
# Filter merged_df by category list
filtered_df = pd.merge(merged_df, category_df, how='inner', on=['aggregated_query'])[
['aggregated_query', 'date', 'clicks_x', 'impressions_x', 'CTR', 'category']].rename(columns={'clicks_x': 'clicks', 'impressions_x': 'impressions'})
# %%
# simple linear regression
# test not to group
slope_df = filtered_df
slope_df['date'] = pd.to_datetime(
slope_df['date']).map(datetime.toordinal)
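# Note (added): with dates mapped to ordinal day numbers, the slopes fitted below are
# expressed in clicks (or impressions) per day.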
# clicks and impressions both uptrend = uptrend
# mixed = sideways
# both downtrend = downtrend
# clicks linear regression
clicks = pd.DataFrame(
columns=['aggregated_query', 'slope', 'intercept', 'r_value', 'p_value', 'std_err'])
for query in slope_df['aggregated_query'].unique():
df_query = slope_df[slope_df['aggregated_query'] == query]
slope, intercept, r_value, p_value, std_err = stats.linregress(
df_query['date'], df_query['clicks'])
clicks = clicks.append({"aggregated_query": query, "slope": slope, "intercept": intercept,
"r_value": r_value, "p_value": p_value, "std_err": std_err}, ignore_index=True)
clicks = clicks[clicks['p_value'] < 0.1]
clicks.sort_values(
'slope', ascending=False, inplace=True)
# impressions linear regression
impressions = pd.DataFrame(
columns=['aggregated_query', 'slope', 'intercept', 'r_value', 'p_value', 'std_err'])
for query in slope_df['aggregated_query'].unique():
df_query = slope_df[slope_df['aggregated_query'] == query]
slope, intercept, r_value, p_value, std_err = stats.linregress(
df_query['date'], df_query['impressions'])
impressions = impressions.append({"aggregated_query": query, "slope": slope, "intercept": intercept,
"r_value": r_value, "p_value": p_value, "std_err": std_err}, ignore_index=True)
impressions = impressions[impressions['p_value'] < 0.1]
impressions.sort_values(
'slope', ascending=False, inplace=True)
# merge and identify trend
slope = pd.merge(clicks, impressions, how='outer', on='aggregated_query')
for index, row in slope.iterrows():
if row['slope_x'] > 0 and row['slope_y'] > 0:
slope.loc[index, 'trend'] = 'uptrend'
elif row['slope_x'] < 0 and row['slope_y'] < 0:
slope.loc[index, 'trend'] = 'downtrend'
else:
slope.loc[index, 'trend'] = 'sideways trend' # including clicks or impressions p value > required value
slope
"""
CTR version instead of clicks
# CTR and impressions both uptrend = uptrend
# mixed = sideways
# both downtrend = downtrend
# CTR linear regression
ctr = pd.DataFrame(
columns=['aggregated_query', 'slope', 'intercept', 'r_value', 'p_value', 'std_err'])
for query in slope_df['aggregated_query'].unique():
df_query = slope_df[slope_df['aggregated_query'] == query]
slope, intercept, r_value, p_value, std_err = stats.linregress(
df_query['date'], df_query['CTR'])
ctr = ctr.append({"aggregated_query": query, "slope": slope, "intercept": intercept,
"r_value": r_value, "p_value": p_value, "std_err": std_err}, ignore_index=True)
ctr = ctr[ctr['p_value'] < 0.1]
ctr.sort_values(
'slope', ascending=False, inplace=True)
# impressions linear regression
impressions = pd.DataFrame(
columns=['aggregated_query', 'slope', 'intercept', 'r_value', 'p_value', 'std_err'])
for query in slope_df['aggregated_query'].unique():
df_query = slope_df[slope_df['aggregated_query'] == query]
slope, intercept, r_value, p_value, std_err = stats.linregress(
df_query['date'], df_query['impressions'])
impressions = impressions.append({"aggregated_query": query, "slope": slope, "intercept": intercept,
"r_value": r_value, "p_value": p_value, "std_err": std_err}, ignore_index=True)
impressions = impressions[impressions['p_value'] < 0.1]
impressions.sort_values(
'slope', ascending=False, inplace=True)
# merge and identify trend
slope = pd.merge(ctr, impressions, how='outer', on='aggregated_query')
for index, row in slope.iterrows():
if row['slope_x'] > 0 and row['slope_y'] > 0:
slope.loc[index, 'trend'] = 'uptrend'
elif row['slope_x'] < 0 and row['slope_y'] < 0:
slope.loc[index, 'trend'] = 'downtrend'
else:
slope.loc[index, 'trend'] = 'sideways trend' # including clicks or impressions p value > required value
slope
"""
#%%
# TODO: Google Trends cross-reference
# FINAL OUTPUT 1:
slope
# --------------MIDDLE PATH---------------
# %%
# --------------RIGHT PATH---------------
# create a lookup table mapping aggregated/modified queries back to the original queries
query_lockup = df[['aggregated_query', 'modified_query', 'query_x']].drop_duplicates(
['aggregated_query', 'modified_query', 'query_x']).sort_values(['aggregated_query', 'modified_query']).reset_index(drop=True)
# # pivot
# query_lockup['count'] = None
# for index, row in query_lockup.iterrows():
# if index == 0:
# row['count'] = 1
# elif row['aggregated_query'] != query_lockup.loc[index - 1, 'aggregated_query']:
# row['count'] = 1
# elif row['aggregated_query'] == query_lockup.loc[index - 1, 'aggregated_query']:
# row['count'] = query_lockup.loc[index - 1, 'count'] + 1
# query_lockup['count'].astype(int)
# query_df = query_lockup.pivot(
# index='aggregated_query', columns='count', values='query_x').reset_index()
# query_df.columns.name = None
query_lockup.columns = ['aggregated_query', 'modified_query', 'original_query']
# filter keywords with identified trends
trend = | pd.merge(query_lockup, slope, how='left', on='aggregated_query') | pandas.merge |
# JACSNET Evaluation
# Author: <NAME> 04.11.19
# get libraries
import sys
import numpy as np
import pandas as pd
import scipy
import csv
import matplotlib.pyplot as plt
import tensorflow as tf
import itertools
import librosa
import librosa.display
import keras
from keras.models import Model
from keras.layers import *
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from keras import optimizers
import keras.backend as K
import museval
import musdb
import norbert
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix, roc_auc_score, accuracy_score, roc_curve, precision_recall_curve, f1_score, auc, average_precision_score
from scipy.optimize import brentq
from scipy.interpolate import interp1d
from model import UNETmodule, RECOVmodule
from utils import checkNoise
def custom_loss_wrapper_a(mask):
def custom_loss_a(y_true, y_pred):
mae = K.mean(K.abs(np.multiply(mask, y_pred) - y_true), axis=-1)
y_true = K.clip(y_true, K.epsilon(), 1)
y_pred = K.clip(np.multiply(mask, y_pred), K.epsilon(), 1)
KL = K.sum(y_true * K.log(y_true / y_pred), axis=-1)
return mae + (0.5*KL)
return custom_loss_a
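# Note (added): the wrapper closes over `mask` -- the mixture spectrogram fed as the
# network input (see `loss_funcs` in main below) -- so the predicted soft masks are
# applied to the mixture before being compared with the target, using MAE plus a
# 0.5-weighted KL-divergence term.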
def binary_focal_loss(gamma=2., alpha=.25):
"""
Binary form of focal loss.
FL(p_t) = -alpha * (1 - p_t)**gamma * log(p_t)
where p = sigmoid(x), p_t = p or 1 - p depending on if the label is 1 or 0, respectively.
References:
https://arxiv.org/pdf/1708.02002.pdf
Usage:
model.compile(loss=[binary_focal_loss(alpha=.25, gamma=2)], metrics=["accuracy"], optimizer=adam)
"""
def binary_focal_loss_fixed(y_true, y_pred):
"""
:param y_true: A tensor of the same shape as `y_pred`
:param y_pred: A tensor resulting from a sigmoid
:return: Output tensor.
"""
pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
epsilon = K.epsilon()
# clip to prevent NaN's and Inf's
pt_1 = K.clip(pt_1, epsilon, 1. - epsilon)
pt_0 = K.clip(pt_0, epsilon, 1. - epsilon)
return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
-K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
return binary_focal_loss_fixed
def mulphase(inp, phase):
voc_spec = np.abs(inp)
voc_spec = np.multiply(voc_spec, phase)
return voc_spec
def roc_auc_score_FIXED(y_true, y_pred):
if len(np.unique(y_true)) == 1: # bug in roc_auc_score
return accuracy_score(y_true, np.rint(y_pred))
return roc_auc_score(y_true, y_pred)
def eval_metrics(y_true, y_pred):
fpr, tpr, threshold = roc_curve(y_true, y_pred, pos_label=1)
eer = brentq(lambda x : 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
auc_res = roc_auc_score_FIXED(y_true, y_pred)
return auc_res, eer
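# Note (added): brentq above solves 1 - x - tpr(x) = 0 along the ROC curve, i.e. it finds
# the operating point where the false-positive rate equals the false-negative rate, which
# is the standard definition of the equal error rate (EER).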
def plot_confusion_matrix(cm, classes, title, ax):
ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > cm.max() / 2. else "black")
tick_marks = np.arange(len(classes))
ax.set_xticks(tick_marks), ax.xaxis.set_ticklabels(classes)
ax.set_yticks(tick_marks), ax.yaxis.set_ticklabels(classes)
ax.set_xlabel('Predicted')
ax.set_ylabel('Truth')
ax.set_title(title)
ax.grid(False)
def plot_multiclass_confusion_matrix(y_true, y_pred, label_to_class, save_plot=False):
fig, axes = plt.subplots(int(np.ceil(len(label_to_class) / 2)), 2, figsize=(5, 5))
axes = axes.flatten()
for i, conf_matrix in enumerate(multilabel_confusion_matrix(y_true, y_pred)):
tn, fp, fn, tp = conf_matrix.ravel()
f1 = 2 * tp / (2 * tp + fp + fn + sys.float_info.epsilon)
recall = tp / (tp + fn + sys.float_info.epsilon)
precision = tp / (tp + fp + sys.float_info.epsilon)
plot_confusion_matrix(
np.array([[tp, fn], [fp, tn]]),
classes=['+', '-'],
title=f'Label: {label_to_class[i]}\nf1={f1:.5f}\nrecall={recall:.5f}\nprecision={precision:.5f}',
ax=axes[i]
)
plt.tight_layout()
plt.show()
if save_plot:
plt.savefig('confusion_matrices.png', dpi=50)
def main(args):
# Parameters
seed = 3
num_classes = 4
num_epochs = 100
drop_prob = 0
learning_rate = 1e-4
window_size = 2048
hop_size = 512
window = np.blackman(window_size)
# Model Architecture
inputs = Input(shape=[1025, 94, 1])
UNETout1 = UNETmodule(inputs, num_classes, drop_prob)
sep_sources = Activation('softmax', name='sep_sources')(UNETout1)
UNETout2 = UNETmodule(sep_sources, 1, drop_prob)
recov_input = Activation('sigmoid', name='recov_input')(UNETout2)
sourceclass = GlobalAveragePooling2D()(sep_sources)
sourceclass = Dense(128, activation='relu')(sourceclass)
sourceclass = Dense(128, activation='relu')(sourceclass)
sourceclass = Dense(128, activation='relu')(sourceclass)
sourceclass = Dense(num_classes)(sourceclass)
sourceclass = Activation('sigmoid', name='sourceclass')(sourceclass)
# Train Model Architecture
loss_funcs = {
"sep_sources": custom_loss_wrapper_a(mask = inputs),
"sourceclass": binary_focal_loss(),
"recov_input": custom_loss_wrapper_a(mask = inputs)
}
lossWeights = {"sep_sources": 10, "sourceclass": 0.01, "recov_input": 10}
optimizer = optimizers.Adam(lr=learning_rate)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', patience=5)
model = Model(inputs=inputs, outputs=[sep_sources, sourceclass, recov_input])
model.compile(loss=loss_funcs, optimizer=optimizer, loss_weights=lossWeights)
# Load Model
model.load_weights("model_weightsv2.hdf5")
# initiate musdb
mus = musdb.DB(root_dir="D:/Data/Musdb18")
# load the testing tracks
tracks = mus.load_mus_tracks(subsets=['test'])
SDR_results = []
SIR_results = []
ISR_results = []
SAR_results = []
classresults = []
classresults_hat = []
labelresults = []
duration = 3
for track in tracks:
print(track.name)
audio, sr = librosa.load(str('D:/Data/Musdb18/mixturev2/test/' + track.name + '.wav'))
# audio = librosa.util.normalize(audio, norm=np.inf, axis=None)
dur = librosa.get_duration(y=audio, sr=sr)
for i in range(0, int(dur - duration), duration):
labelclass = [0, 0, 0, 0]
# STFT Mixture
mixture, sr = librosa.load(str('D:/Data/Musdb18/mixturev2/test/' + track.name + '.wav'), offset=i, duration=duration, mono=True, sr=16000)
orig = librosa.core.stft(mixture, hop_length=hop_size, n_fft=window_size, window=window)
magnitude, phase = librosa.magphase(orig)
orig_norm = 2 * magnitude / np.sum(window)
X = np.reshape(orig_norm, (1, 1025, 94, 1))
X = X.astype('float32')
(sources, sourceclass, inp) = model.predict(X, batch_size=1)
classresults.append(sourceclass)
sourceclass_hat = np.around(sourceclass, decimals = 1)
sourceclass_hat[sourceclass_hat <= 0.5] = 0
sourceclass_hat[sourceclass_hat > 0.5] = 1
classresults_hat.append(sourceclass_hat)
sources_reshape = np.reshape(sources, (1025, 94, 1, 4))
orig_reshape = np.reshape(orig, (1025, 94, 1))
source_spec = norbert.wiener(sources_reshape, orig_reshape, use_softmask=True)
inp_reshape = np.reshape(inp, (1025, 94, 1, 1))
inp = norbert.wiener(inp_reshape, orig_reshape, use_softmask=False)
inp_spec = np.reshape(inp, (1025, 94))
#inp_spec = mulphase(target_pred_mag_inp, phase)
voc_spec = np.reshape(source_spec[:,:,:,0], (1025, 94))
bas_spec = np.reshape(source_spec[:,:,:,1], (1025, 94))
dru_spec = np.reshape(source_spec[:,:,:,2], (1025, 94))
oth_spec = np.reshape(source_spec[:,:,:,3], (1025, 94))
# Get ground truth
gt_voc, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/vocals.wav'), offset=i, duration=3, mono=True, sr=16000)
if ((checkNoise(gt_voc) > 0.05)): labelclass[0] = 1
# gt_voc_down = scipy.signal.decimate(gt_voc, 2)
gt_voc_final = np.reshape(gt_voc, (1, gt_voc.shape[0], 1))
gt_bas, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/bass.wav'), offset=i, duration=3, mono=True, sr=16000)
if ((checkNoise(gt_bas) > 0.05)): labelclass[1] = 1
# gt_bas_down = scipy.signal.decimate(gt_bas, 2)
gt_bas_final = np.reshape(gt_bas, (1, gt_bas.shape[0], 1))
gt_dru, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/drums.wav'), offset=i, duration=3, mono=True, sr=16000)
if ((checkNoise(gt_dru) > 0.05)): labelclass[2] = 1
# gt_dru_down = scipy.signal.decimate(gt_dru, 2)
gt_dru_final = np.reshape(gt_dru, (1, gt_dru.shape[0], 1))
gt_others, sr = librosa.load(str('D:/Data/Musdb18/groundtruth/test/' + track.name + '/other.wav'), offset=i, duration=3, mono=True, sr=16000)
if ((checkNoise(gt_others) > 0.05)): labelclass[3] = 1
# gt_others_down = scipy.signal.decimate(gt_others, 2)
gt_others_final = np.reshape(gt_others, (1, gt_others.shape[0], 1))
gt_inp_final = np.reshape(mixture, (1, mixture.shape[0], 1))
labelresults.append(labelclass)
# Get predictions
vocals = librosa.core.istft(voc_spec, hop_length=hop_size, length=gt_voc_final.shape[1], window=window, win_length=window_size)
# vocals = scipy.signal.wiener(vocals)
vocals = np.reshape(vocals, (1, vocals.shape[0], 1))
bass = librosa.core.istft(bas_spec, hop_length=hop_size, length=gt_bas_final.shape[1], window=window, win_length=window_size)
# bass = scipy.signal.wiener(bass)
bass = np.reshape(bass, (1, bass.shape[0], 1))
drums = librosa.core.istft(dru_spec, hop_length=hop_size, length=gt_dru_final.shape[1], window=window, win_length=window_size)
# drums = scipy.signal.wiener(drums)
drums = np.reshape(drums, (1, drums.shape[0], 1))
others = librosa.core.istft(oth_spec, hop_length=hop_size, length=gt_others_final.shape[1], window=window, win_length=window_size)
# others = scipy.signal.wiener(others)
others = np.reshape(others, (1, others.shape[0], 1))
# if(sourceclass[0][0] <= 0.5):
# vocals = np.zeros((gt_voc_final.shape[1], 1))
# # vocals.fill(0.04)
# vocals = np.reshape(vocals, (1, vocals.shape[0], 1))
# else:
# vocals = librosa.core.istft(voc_spec, hop_length=hop_size, length=gt_voc_final.shape[1], window=window)
# # vocals = scipy.signal.wiener(vocals)
# vocals = np.reshape(vocals, (1, vocals.shape[0], 1))
#
# if(sourceclass[0][1] <= 0.5):
# bass = np.zeros((gt_bas_final.shape[1], 1))
# # bass.fill(0.04)
# bass = np.reshape(bass, (1, bass.shape[0], 1))
# else:
# bass = librosa.core.istft(bas_spec, hop_length=hop_size, length=gt_bas_final.shape[1], window=window)
# # bass = scipy.signal.wiener(bass)
# bass = np.reshape(bass, (1, bass.shape[0], 1))
#
# if(sourceclass[0][2] <= 0.5):
# drums = np.zeros((gt_dru_final.shape[1], 1))
# # drums.fill(0.04)
# drums = np.reshape(drums, (1, drums.shape[0], 1))
# else:
# drums = librosa.core.istft(dru_spec, hop_length=hop_size, length=gt_dru_final.shape[1], window=window)
# # drums = scipy.signal.wiener(drums)
# drums = np.reshape(drums, (1, drums.shape[0], 1))
#
# if(sourceclass[0][3] <= 0.5):
# others = np.zeros((gt_others_final.shape[1], 1))
# # others.fill(0.04)
# others = np.reshape(others, (1, others.shape[0], 1))
# else:
# others = librosa.core.istft(oth_spec, hop_length=hop_size, length=gt_others_final.shape[1], window=window)
# # others = scipy.signal.wiener(others)
# others = np.reshape(others, (1, others.shape[0], 1))
recov = librosa.core.istft(inp_spec, hop_length=hop_size, length=gt_inp_final.shape[1], window=window, win_length=window_size)
# recov = scipy.signal.wiener(recov)
recov = np.reshape(recov, (1, recov.shape[0], 1))
all_zeros = np.all(gt_voc<=0) or np.all(gt_bas<=0) or np.all(gt_dru<=0) or np.all(gt_others<=0) or np.all(vocals<=0) or np.all(bass<=0) or np.all(drums<=0) or np.all(others<=0)
# print(all_zeros)
if all_zeros == False:
# noise_thresh = checkNoise(gt_voc)
#
# if noise_thresh > 0.05:
# noise_thresh = checkNoise(gt_bas)
#
# if noise_thresh > 0.05:
# noise_thresh = checkNoise(gt_dru)
#
# if noise_thresh > 0.05:
# Evaluate
REF = np.concatenate((gt_voc_final, gt_bas_final, gt_dru_final, gt_others_final, gt_inp_final), axis=0)
EST = np.concatenate((vocals, bass, drums, others, recov), axis=0)
[SDR, ISR, SIR, SAR] = museval.evaluate(REF, EST, win=52565, hop=52565)
SDR_results.append(SDR)
ISR_results.append(ISR)
SIR_results.append(SIR)
SAR_results.append(SAR)
y_true = np.array(labelresults, dtype=float)
y_pred = np.array(classresults, dtype=float)
y_pred = np.reshape(y_pred, (y_pred.shape[0], y_pred.shape[2]))
y_pred_hat = np.array(classresults_hat, dtype=float)
y_pred_hat = np.reshape(y_pred_hat, (y_pred_hat.shape[0], y_pred_hat.shape[2]))
auc_voc, eer_voc = eval_metrics(y_true[:,0], y_pred[:,0])
auc_bas, eer_bas = eval_metrics(y_true[:,1], y_pred[:,1])
auc_dru, eer_dru = eval_metrics(y_true[:,2], y_pred[:,2])
auc_oth, eer_oth = eval_metrics(y_true[:,3], y_pred[:,3])
target_names = ['Vocals', 'Bass', 'Drums', 'Others']
plot_multiclass_confusion_matrix(y_true, y_pred_hat, target_names, save_plot=False)
SDR_array = np.array(SDR_results)
SDR_array = np.reshape(SDR_array, (SDR_array.shape[0], SDR_array.shape[1]))
SDR_df = pd.DataFrame(SDR_array)
SDR_df.to_csv('SDR_revise1.csv')
ISR_array = np.array(ISR_results)
ISR_array = np.reshape(ISR_array, (ISR_array.shape[0], ISR_array.shape[1]))
ISR_df = pd.DataFrame(ISR_array)
ISR_df.to_csv('ISR_revise1.csv')
SIR_array = np.array(SIR_results)
SIR_array = np.reshape(SIR_array, (SIR_array.shape[0], SIR_array.shape[1]))
SIR_df = | pd.DataFrame(SIR_array) | pandas.DataFrame |
import pandas as pd
import numpy as np
from itertools import chain,repeat
f = open("test3_preds.txt",errors='ignore')
f2 = open("test3.txt",errors='ignore')
l2 = f2.readlines()
l2 = [x.strip() for x in l2]
l = f.readlines()
l = [x.strip() for x in l]
f2 = open("test3.txt")
cols = ["Seed Text",'<Comedy>', '<Action>', '<Adventure>', '<Crime>', '<Drama>', '<Fantasy>', '<Horror>', '<Music>', '<Romance>', '<Sci-Fi>', '<Thriller>']
df = {}
df["Seed Text"] = l2
l3 = np.array(l).reshape(20,11).T
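# (Assumption, added for clarity) test3_preds.txt is expected to hold, for each of the
# 20 seed texts in test3.txt, 11 consecutive generations -- one per genre in cols[1:] --
# so reshape(20, 11).T gives one row per genre, aligned with those genre columns.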
for i in range(1,len(cols)):
c = cols[i]
l4 = [x.replace(c,'') for x in l3[i-1]]
df[c] = l4
df = pd.DataFrame(df)
df2 = []
for i in range(len(df)):
for c in cols[1:]:
df2.append((c,df.loc[i,"Seed Text"],df.loc[i,c]))
df2 = | pd.DataFrame(df2,columns=["Genre","Seed Text","Generated"]) | pandas.DataFrame |
import glob
import numpy as np
import scipy
import os
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from joblib import dump
import pandas as pd
from multiprocessing.pool import ThreadPool
from pyhydra.utils import check_symmetric, launch_svc
__author__ = "<NAME>"
__copyright__ = "Copyright 2019-2020 The CBICA & SBIA Lab"
__credits__ = ["<NAME>, <NAME>"]
__license__ = "See LICENSE file"
__version__ = "0.1.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def consensus_clustering(clustering_results, k):
"""
    This function performs consensus clustering on a co-occurrence matrix
    :param clustering_results: array of shape (num_subjects, num_iterations) whose entry (i, r) is the cluster
        assignment of subject i in iteration r; these votes are used to build the co-occurrence matrix
    :param k: the desired number of clusters
:return:
"""
num_pt = clustering_results.shape[0]
cooccurence_matrix = np.zeros((num_pt, num_pt))
for i in range(num_pt - 1):
for j in range(i + 1, num_pt):
cooccurence_matrix[i, j] = sum(clustering_results[i, :] == clustering_results[j, :])
cooccurence_matrix = np.add(cooccurence_matrix, cooccurence_matrix.transpose())
## here is to compute the Laplacian matrix
Laplacian = np.subtract(np.diag(np.sum(cooccurence_matrix, axis=1)), cooccurence_matrix)
Laplacian_norm = np.subtract(np.eye(num_pt), np.matmul(np.matmul(np.diag(1 / np.sqrt(np.sum(cooccurence_matrix, axis=1))), cooccurence_matrix), np.diag(1 / np.sqrt(np.sum(cooccurence_matrix, axis=1)))))
## replace the nan with 0
Laplacian_norm = np.nan_to_num(Laplacian_norm)
## check if the Laplacian norm is symmetric or not, because matlab eig function will automatically check this, but not in numpy or scipy
if check_symmetric(Laplacian_norm):
## extract the eigen value and vector
## matlab eig equivalence is eigh, not eig from numpy or scipy, see this post: https://stackoverflow.com/questions/8765310/scipy-linalg-eig-return-complex-eigenvalues-for-covariance-matrix
## Note, the eigenvector is not unique, thus the matlab and python eigenvector may be different, but this will not affect the results.
evalue, evector = scipy.linalg.eigh(Laplacian_norm)
else:
# evalue, evector = np.linalg.eig(Laplacian_norm)
raise Exception("The Laplacian matrix should be symmetric here...")
## check if the eigen vector is complex
if np.any(np.iscomplex(evector)):
evalue, evector = scipy.linalg.eigh(Laplacian)
## create the kmean algorithm with sklearn
kmeans = KMeans(n_clusters=k, n_init=20).fit(evector.real[:, 0: k])
final_predict = kmeans.labels_
return final_predict
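# Usage sketch (added, hedged): `clustering_results` is expected to be an
# (n_subjects, n_repetitions) array of per-repetition cluster labels, e.g.
#   final_labels = consensus_clustering(np.column_stack(labels_per_repetition), k=3)
# where `labels_per_repetition` is a list of 1-D label arrays, one per CV repetition.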
def cv_cluster_stability(result, k):
"""
    To compute the adjusted Rand index across the different pairs of folds of the repeated 2-fold CV
:param result:
:return:
"""
num_pair = 0
aris = []
if k == 1:
adjusted_rand_index = 0 ## note, here, we manually set it to be 0, because it does not make sense when k==1. TODO, need to clarify if there is really heterogeneity in the data, i.e., k == 1 or k>1
else:
for i in range(result.shape[1] - 1):
for j in range(i+1, result.shape[1]):
num_pair += 1
non_zero_index = np.all(result[:, [i, j]], axis=1)
pair_result = result[:, [i, j]][non_zero_index]
ari = adjusted_rand_score(pair_result[:, 0], pair_result[:, 1])
aris.append(ari)
adjusted_rand_index = np.mean(np.asarray(aris))
return adjusted_rand_index
def hydra_solver_svm_tl(num_component, num_component_former, num_repetition, X, y, k, output_dir, num_iteration, tol, balanced, predefined_c, n_threads, num_run):
"""
    This is the main function of HYDRA, which finds the convex polytope using a supervised classification fashion.
    :param num_repetition: the index of the current CV repetition. This is helpful to reconstruct the model and also to monitor the progress
    :param X: corrected training data features
    :param y: training data labels
    :param k: hyperparameter for the desired number of clusters in patients
    :param output_dir: path to the output directory
:return: the optimal model
"""
index_pt = np.where(y == 1)[0] # index for PTs
index_cn = np.where(y == -1)[0] # index for CNs
### initialize the final weight for the polytope from the former C
weight_file = os.path.join(output_dir, 'clustering_run' + str(num_run-1), 'component_' + str(num_component_former), str(k) + '_clusters', 'tsv', 'weight_sample_cv_' + str(num_repetition) + '.tsv')
weight_sample = pd.read_csv(weight_file, sep='\t').to_numpy()
    ## cluster assignment is based on these SVM scores across the different SVMs/hyperplanes
svm_scores = np.zeros((weight_sample.shape[0], weight_sample.shape[1]))
update_weights_pool = ThreadPool(n_threads)
for j in range(num_iteration):
for m in range(k):
sample_weight = np.ascontiguousarray(weight_sample[:, m])
if np.count_nonzero(sample_weight[index_pt]) == 0:
print("Cluster dropped, meaning that all PT has been assigned to one single hyperplane in iteration: %d" % (j-1))
svm_scores[:, m] = np.asarray([np.NINF] * (y.shape[0]))
else:
results = update_weights_pool.apply_async(launch_svc, args=(X, y, predefined_c, sample_weight, balanced))
weight_coef = results.get()[0]
intesept = results.get()[1]
## Apply the data again the trained model to get the final SVM scores
svm_scores[:, m] = (np.matmul(weight_coef, X.transpose()) + intesept).transpose().squeeze()
final_predict = np.argmax(svm_scores[index_pt], axis=1)
        ## decide the convergence of the polytope based on the tolerance
weight_sample_hold = weight_sample.copy()
# after each iteration, first set the weight of patient rows to be 0
weight_sample[index_pt, :] = 0
# then set the pt's weight to be 1 for the assigned hyperplane
for n in range(len(index_pt)):
weight_sample[index_pt[n], final_predict[n]] = 1
        ## check the loss compared to the tolerance for the stopping criterion
loss = np.linalg.norm(np.subtract(weight_sample, weight_sample_hold), ord='fro')
print("The loss is: %f" % loss)
if loss < tol:
print("The polytope has been converged for iteration %d in finding %d clusters" % (j, k))
break
update_weights_pool.close()
update_weights_pool.join()
## after deciding the final convex polytope, we refit the training data once to save the best model
weight_sample_final = np.zeros((y.shape[0], k))
## change the weight of PTs to be 1, CNs to be 1/k
# then set the pt's weight to be 1 for the assigned hyperplane
for n in range(len(index_pt)):
weight_sample_final[index_pt[n], final_predict[n]] = 1
weight_sample_final[index_cn] = 1 / k
update_weights_pool_final = ThreadPool(n_threads)
for o in range(k):
sample_weight = np.ascontiguousarray(weight_sample_final[:, o])
if np.count_nonzero(sample_weight[index_pt]) == 0:
print("Cluster dropped, meaning that the %d th hyperplane is useless!" % (o))
else:
results = update_weights_pool_final.apply_async(launch_svc, args=(X, y, predefined_c, sample_weight, balanced))
## save the final model for the k SVMs/hyperplanes
if not os.path.exists(
os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models')):
os.makedirs(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models'))
dump(results.get()[2],
os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component),
str(k) + '_clusters', 'models',
'svm-' + str(o) + '_last_repetition.joblib'))
update_weights_pool_final.close()
update_weights_pool_final.join()
y[index_pt] = final_predict + 1
if not os.path.exists(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv')):
os.makedirs(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv'))
## save the assigned weight for each subject across k-fold
columns = ['hyperplane' + str(i) for i in range(k)]
weight_sample_df = pd.DataFrame(weight_sample_final, columns=columns)
weight_sample_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'weight_sample_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
## save the final_predict_all
columns = ['y_hat']
y_hat_df = pd.DataFrame(y, columns=columns)
y_hat_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'y_hat_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
## save the pt index
columns = ['pt_index']
pt_df = pd.DataFrame(index_pt, columns=columns)
pt_df.to_csv(os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(num_component), str(k) + '_clusters', 'tsv', 'pt_index_cv_' + str(num_repetition) + '.tsv'), index=False, sep='\t', encoding='utf-8')
return y
def cluster_stability_across_resolution(c, c_former, output_dir, k_continuing, num_run, stop_tol=0.98):
"""
    To evaluate the stability of clustering across two different values of C for the stopping criterion.
Args:
c:
c_former:
output_dir:
k_continuing:
num_run:
stop_tol:
Returns:
"""
## read the output of current C and former Cs
cluster_ass1 = os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(c), 'clustering_assignment.tsv')
ass1_df = pd.read_csv(cluster_ass1, sep='\t')
ass1_df = ass1_df.loc[ass1_df['diagnosis'] == 1]
cluster_ass2 = os.path.join(output_dir, 'clustering_run' + str(num_run-1), 'component_' + str(c_former), 'clustering_assignment.tsv')
ass2_df = pd.read_csv(cluster_ass2, sep='\t')
ass2_df = ass2_df.loc[ass2_df['diagnosis'] == 1]
df_final = pd.DataFrame(columns=['C', 'K', 'num_run'])
k_continuing_update = []
k_converged = []
for i in k_continuing:
ari = adjusted_rand_score(ass1_df['assignment_' + str(i)], ass2_df['assignment_' + str(i)])
print("For k == %d, run %d got ARI == %f compared to former run" % (i, num_run, ari))
if ari < stop_tol and num_run:
k_continuing_update.append(i)
else:
print("Model has been converged or stop at the max iteration: C == %d, K == %d and run == %d" % (c, i, num_run))
k_converged.append(i)
df_row = pd.DataFrame(columns=['C', 'K', 'num_run'])
df_row.loc[len(['C', 'K', 'num_run'])] = [c, i, num_run]
df_final = df_final.append(df_row)
if len(k_converged) != 0:
df_final.to_csv(os.path.join(output_dir, 'results_convergence_run' + str(num_run) + '.tsv'), index=False, sep='\t', encoding='utf-8')
return k_continuing_update, k_converged
def summary_clustering_result_multiscale(output_dir, k_min, k_max):
"""
This is a function to summarize the clustering results
    :param output_dir: path to the output directory
    :param k_min: minimum number of clusters
    :param k_max: maximum number of clusters
:return:
"""
clu_col_list = ['assignment_' + str(e) for e in range(k_min, k_max)]
df_clusters = pd.DataFrame(columns=clu_col_list)
## read the convergence tsv
convergence_tsvs = [f for f in glob.glob(output_dir + "/results_convergence_*.tsv", recursive=True)]
for tsv in convergence_tsvs:
df_convergence = pd.read_csv(tsv, sep='\t')
        ## sort by K
df_convergence = df_convergence.sort_values(by=['K'])
for i in range(df_convergence.shape[0]):
k = df_convergence['K'].tolist()[i]
num_run = df_convergence['num_run'].tolist()[i]
C = df_convergence['C'].tolist()[i]
cluster_file = os.path.join(output_dir, 'clustering_run' + str(num_run), 'component_' + str(C), 'clustering_assignment.tsv')
df_cluster = pd.read_csv(cluster_file, sep='\t')
if i == 0:
df_header = df_cluster.iloc[:, 0:3]
assign = df_cluster['assignment_' + str(k)]
df_clusters['assignment_' + str(k)] = assign
    ## concatenate the header
df_assignment = pd.concat((df_header, df_clusters), axis=1)
## save the result
df_assignment.to_csv(os.path.join(output_dir, 'results_cluster_assignment_final.tsv'), index=False, sep='\t', encoding='utf-8')
def shift_list(c_list, index):
"""
This is a function to reorder a list to have all posibility by putting each element in the first place
Args:
c_list: list to shift
index: the index of which element to shift
Returns:
"""
new_list = c_list[index:] + c_list[:index]
return new_list
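# Example (added): shift_list(['a', 'b', 'c', 'd'], 2) returns ['c', 'd', 'a', 'b'];
# calling it with every index in turn enumerates all rotations of the list.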
def consensus_clustering_across_c(output_dir, c_list, k_min, k_max):
"""
This is for consensus learning at the end across different Cs
Args:
output_dir:
c_list:
Returns:
"""
k_list = list(range(k_min, k_max+1))
for k in k_list:
for i in c_list:
clu_col_list = ['c_' + str(i) + '_assignment_' + str(e) for e in k_list]
df_clusters = pd.DataFrame(columns=clu_col_list)
tsv = os.path.join(output_dir, 'initialization_c_' + str(i), 'results_cluster_assignment_final.tsv')
df = pd.read_csv(tsv, sep='\t')
if i == c_list[0]:
df_header = df.iloc[:, 0:3]
df_clusters['c_' + str(i) + '_assignment_' + str(k)] = df['assignment_' + str(k)]
if i == c_list[0]:
df_final = df_clusters
else:
df_final = | pd.concat([df_final, df_clusters], axis=1) | pandas.concat |
import numpy as np
import pandas as pd
def calc_humidity_ratio(rh_percent, dry_bulb_C, patm_mbar):
"""
convert relative humidity to moisture content
Based on https://www.vaisala.com/sites/default/files/documents/Humidity_Conversion_Formulas_B210973EN.pdf
"""
patm_hPa = patm_mbar
A, m, Tn = get_phycometric_constants(dry_bulb_C)
T_dry = dry_bulb_C
p_ws_hPa = A * 10 ** ((m * T_dry) / (T_dry + Tn))
p_w_hPa = p_ws_hPa * rh_percent / 100
B_kgperkg = 0.6219907
x_kgperkg = B_kgperkg * p_w_hPa / (patm_hPa - p_w_hPa)
return x_kgperkg
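# Worked example (added; values are approximate): at 25 degC, 50 % RH and standard
# pressure (1013.25 mbar) the Magnus-type formula above gives p_ws ~ 31.7 hPa, so
# p_w ~ 15.8 hPa and x ~ 0.622 * 15.8 / (1013.25 - 15.8) ~ 0.0099 kg/kg, i.e. roughly
# 10 g of water vapour per kg of dry air:
#   calc_humidity_ratio(rh_percent=50, dry_bulb_C=25, patm_mbar=1013.25)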
def calc_h_sen(dry_bulb_C):
"""
    Calc the sensible specific enthalpy component of moist air (kJ/kg)
"""
CPA_kJ_kgC = 1.006
h_kJ_kg = dry_bulb_C * CPA_kJ_kgC
return h_kJ_kg
def calc_h_lat(dry_bulb_C, humidity_ratio_out_kgperkg):
"""
    Calc the latent specific enthalpy component of moist air (kJ/kg)
    :param dry_bulb_C: dry-bulb temperature in degC
    :param humidity_ratio_out_kgperkg: humidity ratio in kg of water vapour per kg of dry air
:return:
"""
CPW_kJ_kgC = 1.84
h_we_kJ_kg = 2501
h_kJ_kg = humidity_ratio_out_kgperkg * (dry_bulb_C * CPW_kJ_kgC + h_we_kJ_kg)
return h_kJ_kg
def get_phycometric_constants(T_C):
if -20 <= T_C <= 50:
m = 7.591386
Tn = 240.7263
A = 6.116441
elif -70 <= T_C <= 0:
m = 9.778707
Tn = 273.1466
A = 6.114742
    else:
        # guard against returning unbound values for temperatures outside the correlation ranges
        raise ValueError('dry-bulb temperature {} C is outside the supported range (-70 to 50 C)'.format(T_C))
    return A, m, Tn
def calc_enthalpy_gradient_sensible(dry_bulb_C, dry_bulb_C_base_cooling):
H_sen_outdoor_kjperkg = calc_h_sen(dry_bulb_C)
# Cooling case
AH_sensible_kJperKg = H_sen_outdoor_kjperkg - calc_h_sen(dry_bulb_C_base_cooling)
if AH_sensible_kJperKg > 0.0:
return abs(AH_sensible_kJperKg)
else:
return 0.0
def calc_enthalpy_gradient_latent(dry_bulb_C,
x_kgperkg,
dry_bulb_C_base_cooling,
x_kgperkg_base_cooling):
H_latent_outdoor_kjperkg = calc_h_lat(dry_bulb_C, x_kgperkg)
# Cooling case
AH_latent_kJperKg = H_latent_outdoor_kjperkg - calc_h_lat(dry_bulb_C_base_cooling, x_kgperkg_base_cooling)
if AH_latent_kJperKg > 0.0:
return abs(AH_latent_kJperKg)
else:
return 0.0
def daily_enthalpy_gradients_daily_data(dry_bulb_C: np.array,
rh_percent: np.array):
    # Enthalpy gradients
dry_bulb_C_base_heating = 18.3
dry_bulb_C_base_cooling = 21.3
rh_percent_base_heating = 40
rh_percent_base_cooling = 70
patm_mbar = 1013.25
patm_mbar_base = 1013.25
    # FEATURE 3 and 4: sensible enthalpy gradients for the heating and cooling seasons, kJ/kg:
DEG_C_kJperKg, \
DEG_H_kJperKg = np.vectorize(calc_enthalpy_gradient_sensible, otypes=[np.float32, np.float32])(dry_bulb_C,
dry_bulb_C_base_heating,
dry_bulb_C_base_cooling)
    # FEATURE 5 and 6: latent enthalpy gradients (humidification/dehumidification) for the heating and cooling seasons, kJ/kg:
x_kgperkg_base_heating = calc_humidity_ratio(rh_percent_base_heating, dry_bulb_C_base_heating, patm_mbar_base)
x_kgperkg_base_cooling = calc_humidity_ratio(rh_percent_base_cooling, dry_bulb_C_base_cooling, patm_mbar_base)
x_kgperkg = np.vectorize(calc_humidity_ratio, otypes=[np.float32])(rh_percent, dry_bulb_C, patm_mbar)
DEG_DEHUM_kJperKg, \
DEG_HUM_kJperKg = np.vectorize(calc_enthalpy_gradient_latent, otypes=[np.float32, np.float32])(dry_bulb_C,
x_kgperkg,
dry_bulb_C_base_heating,
dry_bulb_C_base_cooling,
x_kgperkg_base_heating,
x_kgperkg_base_cooling)
return DEG_C_kJperKg, DEG_H_kJperKg, DEG_DEHUM_kJperKg, DEG_HUM_kJperKg
def daily_enthalpy_gradients_hourly_data(timestamp,
dry_bulb_C,
rh_percent):
    # Enthalpy gradients
dry_bulb_C_base_cooling = 21
rh_percent_base_cooling = 50
patm_mbar = 1013.25
patm_mbar_base = 1013.25
    # FEATURE 3 and 4: sensible enthalpy gradient for the cooling season, kJ/kg:
DEG_C_kJperKg = np.vectorize(calc_enthalpy_gradient_sensible)(dry_bulb_C, dry_bulb_C_base_cooling)
    # FEATURE 5 and 6: latent enthalpy gradient (dehumidification) for the cooling season, kJ/kg:
x_kgperkg_base_cooling = calc_humidity_ratio(rh_percent_base_cooling, dry_bulb_C_base_cooling, patm_mbar_base)
x_kgperkg = np.vectorize(calc_humidity_ratio)(rh_percent, dry_bulb_C, patm_mbar)
DEG_DEHUM_kJperKg = np.vectorize(calc_enthalpy_gradient_latent)(dry_bulb_C,
x_kgperkg,
dry_bulb_C_base_cooling,
x_kgperkg_base_cooling)
result = | pd.DataFrame({'DEG_C_kJperKg': DEG_C_kJperKg, 'DEG_DEHUM_kJperKg': DEG_DEHUM_kJperKg}, index=timestamp) | pandas.DataFrame |
from __future__ import division
import pytest
import numpy as np
from datetime import timedelta
from pandas import (
Interval, IntervalIndex, Index, isna, notna, interval_range, Timestamp,
Timedelta, compat, date_range, timedelta_range, DateOffset)
from pandas.compat import lzip
from pandas.tseries.offsets import Day
from pandas._libs.interval import IntervalTree
from pandas.tests.indexes.common import Base
import pandas.util.testing as tm
import pandas as pd
@pytest.fixture(scope='class', params=['left', 'right', 'both', 'neither'])
def closed(request):
return request.param
@pytest.fixture(scope='class', params=[None, 'foo'])
def name(request):
return request.param
class TestIntervalIndex(Base):
_holder = IntervalIndex
def setup_method(self, method):
self.index = | IntervalIndex.from_arrays([0, 1], [1, 2]) | pandas.IntervalIndex.from_arrays |
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 10:28:58 2020
@author: <NAME>
"""
import pandas as pd
import os
import time
import re
def get_path(path):
"""返回文件夹下所有文件地址:file_path列表"""
g = os.walk(path)
file_path = []
for path, dir_list, file_list in g:
for file_name in file_list:
if (re.search('xlsx$', file_name) != None) and (re.search('~\$', file_name) == None):
                    # keep only .xlsx file paths, and skip Excel's temporary lock files (names starting with ~$)
file_path.append(os.path.join(path, file_name))
return file_path
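# Example (illustrative, hypothetical folder): get_path('./reports') returns every .xlsx path under
# './reports', e.g. ['./reports/a.xlsx', './reports/sub/b.xlsx']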
def read_excel(path):
"""每个Excel表存储为一个excel变量(以sheet名为键的字典)"""
p = get_path(path)
num_file = len(p)
excel = [[] for _ in range(num_file)]
for i,p0 in zip(range(num_file),p):
excel[i] = pd.read_excel(p0, header=None, sheet_name=None)
return excel
def sheets_name(excel):
"""返回所有sheet的名字"""
return list(excel[0].keys())
def concat_one_sheet(excel,sheetname):
"""合并一张sheet"""
sheet = []
for i in range(len(excel)):
sheet.append(excel[i][sheetname])
return | pd.concat(sheet) | pandas.concat |
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.contrib import messages
import tweepy
from textblob import TextBlob
import pandas as pd
import numpy as np
import urllib.request
import re
import os
# Home and Intro page
def homepage(request):
return render(request, "main/home.html", {})
# Returns the authenticated Tweepy api
def get_tweepy():
try:
consumer_key = os.environ.get('C_K')
consumer_secret = os.environ.get('C_S')
access_token = os.environ.get('A_T')
access_token_secret = os.environ.get('A_T_S')
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, wait_on_rate_limit = True)
return api
except Exception as e:
print(e)
# returns the cleaned tweet text
def cleanTxt(text):
#removes @mentions
text = re.sub(r'@[A-Za-z0-9]+', '', text)
#removes hashtags
text = re.sub(r'#', '', text)
#removes RT
text = re.sub(r'RT[\s]+', '', text)
#removes hyper link
text = re.sub(r'https?:\/\/\S+', '', text)
return text
# Returns polarity of the sentiments
def getPolarity(text):
return TextBlob(text).sentiment.polarity
# Analyzes the polarity of the sentiments
def getAnalysis(score):
if score < 0:
return 'Negative'
elif score == 0:
return "Neutral"
else:
return 'Positive'
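# Illustrative: getAnalysis(-0.3) -> 'Negative', getAnalysis(0.0) -> 'Neutral', getAnalysis(0.4) -> 'Positive'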
# result page shows the analyzation of the data
def result(request):
if request.method == "POST":
try:
# Gets the text from the input bar
text = request.POST.get('text')
api = get_tweepy()
            # Searches recent English tweets (up to the requested count of 1200) that contain the text
cursor = api.search(text, count=1200, lang="en")
# Adds Tweets found to a set to remove duplicate ones
Tweets = set()
for t in cursor:
Tweets.add(t.text)
# Creates a dataframe with a column for the tweets found
df = pd.DataFrame(data=[tweet for tweet in Tweets], columns=['Tweets'])
| pd.set_option('display.max_colwidth', None) | pandas.set_option |
__author__ = 'John'
import numpy as np;
import random
import math
import scipy.sparse.bsr
from sklearn.cross_validation import train_test_split, KFold
from numpy.linalg import inv
from sklearn.decomposition import ProjectedGradientNMF
from itertools import groupby
import itertools
import similarity
import cf;
import nmf_analysis
import content
import wlas
import evaluate
import pop_rec
import pandas as pd
from sklearn import cross_validation
class one_class:
def __init__(self, filename = None, learner =None, X=None):
self.learner = learner; #X is the matrix that we are dealing with. The rows are items and columns are users.
#training_data #binary mat determining which is enteries are in the training set
#testing_data #binary mat determining which is enteries are in the testing set
self.writing_string =""
self.filename = filename
#partitions data into training and testing data by percentage
def cv(self, k):
#output: gives you a list of indices
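        # ones (observed entries) and zeros (unobserved entries) are split into folds separately,
        # so every fold keeps roughly the same ratio of observed to unobserved entries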
X = self.learner.X
#find indices of ones and put them into training/testing sets
ones_x, ones_y = np.nonzero(X[: ,:] == 1)
one_coord =np.array([ones_x, ones_y]);
one_coord = one_coord.T
np.random.shuffle(one_coord)
kf_ones = cross_validation.KFold(one_coord.shape[0], n_folds=k)
#find indices of ones and put them into training/testing sets
zero_x, zero_y = np.nonzero(X[: ,:] == 0)
zero_coord = np.array([zero_x, zero_y]);
zero_coord = zero_coord.T
np.random.shuffle(zero_coord)
kf_zeros = cross_validation.KFold(zero_coord.shape[0], n_folds=k)
training = list()
testing = list()
for ones, zeros in zip(kf_ones, kf_zeros):
training.append(np.concatenate((one_coord[ones[0]], zero_coord[zeros[0]]),axis=0))
testing.append(np.concatenate((one_coord[ones[1]], zero_coord[zeros[1]]),axis=0))
#This makes the training set
#create a numpy array
return (training, testing)
def split_training(self, k, training):
#output: returns to you a kfold validation split in the training set
np.random.shuffle(training)
kf_indices = cross_validation.KFold(training.shape[0], n_folds=k)
#X = self.learner.X
training_set = list()
validation_set = list()
for training_ind, validation_ind in kf_indices:
training_set.append(training[training_ind])
validation_set.append(training[validation_ind])
return (training_set, validation_set)
def cv_parameter_tuning(self, k, learner_dict=None, fun_list = None, filename = None): #next time add testing indices as variable
#put results into a dictionary
training_ind, testing_ind=self.cv(k)
self.results = dict()
self.iteration = 0
#save everything to a csv file
for test in testing_ind:
self.recursive_parameter_tuning(self.learner, test, learner_dict =learner_dict, fun_list=fun_list)
self.iteration = self.iteration +1
writeup = | pd.DataFrame(self.results) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 5 16:37:53 2019
@author: sdenaro
"""
import pandas as pd
import numpy as np
def setup(year,operating_horizon,perfect_foresight):
#read generator parameters into DataFrame
df_gen = pd.read_csv('PNW_data_file/generators.csv',header=0)
zone = ['PNW']
##time series of load for each zone
df_load = pd.read_csv('../Stochastic_engine/Synthetic_demand_pathflows/Sim_hourly_load.csv',header=0)
df_load = df_load[zone]
df_load = df_load.loc[year*8760:year*8760+8759,:]
df_load = df_load.reset_index(drop=True)
##time series of operational reserves for each zone
rv= df_load.values
reserves = np.zeros((len(rv),1))
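    # operating reserves are set to 4% of the total hourly load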
for i in range(0,len(rv)):
reserves[i] = np.sum(rv[i,:])*.04
df_reserves = pd.DataFrame(reserves)
df_reserves.columns = ['reserves']
##daily hydropower availability
df_hydro = pd.read_csv('Hydro_setup/PNW_dispatchable_hydro.csv',header=0)
##time series of wind generation for each zone
df_wind = pd.read_csv('../Stochastic_engine/Synthetic_wind_power/wind_power_sim.csv',header=0)
df_wind = df_wind.loc[:,'PNW']
df_wind = df_wind.loc[year*8760:year*8760+8759]
df_wind = df_wind.reset_index()
##time series solar for each TAC
df_solar = pd.read_csv('PNW_data_file/solar.csv',header=0)
##daily time series of dispatchable imports by path
df_imports = pd.read_csv('Path_setup/PNW_dispatchable_imports.csv',header=0)
##hourly time series of exports by zone
df_exports = pd.read_csv('Path_setup/PNW_exports.csv',header=0)
##daily time series of dispatchable imports by path
forecast_days = ['fd1','fd2','fd3','fd4','fd5','fd6','fd7']
#must run resources (LFG,ag_waste,nuclear)
df_must = pd.read_csv('PNW_data_file/must_run.csv',header=0)
#natural gas prices
df_ng = pd.read_excel('../Stochastic_engine/Gas_prices/NG.xlsx', header=0)
df_ng = df_ng[zone]
df_ng = df_ng.loc[year*365:year*365+364,:]
df_ng = df_ng.reset_index()
#california imports hourly minimum flows
df_PNW_import_mins = pd.read_csv('Path_setup/PNW_path_mins.csv', header=0)
#california hydro hourly minimum flows
df_PNW_hydro_mins = | pd.read_csv('Hydro_setup/PNW_hydro_mins.csv', header=0) | pandas.read_csv |
import requests
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
from datetime import timedelta
import datetime as dt
from pandas.io.json import json_normalize
import warnings
warnings.filterwarnings('ignore')
class PR:
def __init__(self, repos):
self.repos = repos
token = 'token'
self.headers = {'Authorization': f'token {token}'}
self.configure_pandas()
self.df = self.init_df()
def init_df(self):
try:
dfs = []
for repo in self.repos:
url = f'https://api.github.com/repos/filetrust/{repo}/pulls'
res = requests.get(url, headers=self.headers, params={'state': 'all'}).json()
data = json_normalize(res, max_level=1)
temp_df = pd.DataFrame(data)
temp_df['repo'] = repo
dfs.append(temp_df)
df = | pd.concat(dfs, ignore_index=True) | pandas.concat |
from argparse import ArgumentParser
import os
import pandas as pd
import csv
import zipfile
import re
# command line arguments parser
parser = ArgumentParser()
parser.add_argument(dest='type', type=str, default='all', help="Which data type to convert? [all|tweets|chess|music|shakespeare|javascript|typescript|json|html]")
parser.add_argument('--data-dir', '-d', dest='data_dir', type=str, default='../datasets/', help="Path to the directory containing the raw data.")
parser.add_argument('--output-dir', '-o', dest='output_dir', type=str, default='./data/', help="Path to the output directory.")
parser.add_argument('--short-filename', '-s', dest='short_filename', type=str, default='false', help="Does not include parameter info in filename.")
parser.add_argument('--postfix', '-p', dest='postfix', type=str, default='', help="Postfix is appended to the filename stem before the suffix and parameter info (if applicable).")
parser.add_argument('--num-samples', '-n', dest='num_samples', type=int, default=1000, help="Max number of samples to be exported.")
parser.add_argument('--max-length', dest='max_length', type=int, default=2000, help="Max length of a sample.")
parser.add_argument('--min-length', dest='min_length', type=int, default=10, help="Min length of a sample.")
parser.add_argument('--preserve-lines', dest='preserve_lines', type=str, default='false', help="Preserve line breaks in data and don't collapse the whole sample into a single line (except html).")
parser.add_argument('--preserve-form', dest='preserve_form', type=str, default='false', help="Preserve original form including linebreaks and comments (javascript and typescript only), and urls (tweets only)")
args = parser.parse_args()
# form requires newlines to be preserved
if args.preserve_form == 'true':
args.preserve_lines = 'true'
# collapsing sample into one line requires form not to be preserved
if args.preserve_lines == 'false':
args.preserve_form = 'false'
# set postfix for output files if short-filename is false
if args.postfix != '':
args.postfix = '_' + args.postfix
if args.short_filename == 'false':
args.postfix += f'_n{args.num_samples}_min{args.min_length}_max{args.max_length}'
if args.preserve_lines == 'false':
args.postfix += '_nolines'
else:
args.postfix += '_lines'
if args.preserve_form == 'false':
args.postfix += '_noform'
else:
args.postfix += '_form'
# print arguments to show values in use
print(args)
# helper to use code samples in zip file
def process_zip(name, regs, args):
with open(os.path.join(args.output_dir, name + args.postfix + '.txt'), 'w+') as fh:
with zipfile.ZipFile(os.path.join(args.data_dir, name + '.zip'), 'r') as z:
cnt = 0
for entry in z.namelist():
text = z.read(entry).decode('utf-8')
for reg, sub in regs.items():
text = re.sub(reg, sub, text, flags=re.DOTALL)
if len(text) > args.min_length and len(text) <= args.max_length:
sample = text.strip() + "\n"
if args.preserve_form == 'true':
sample += "\n\n"
fh.write(sample)
cnt += 1
if cnt >= args.num_samples:
break
# dataset from: https://dataverse.harvard.edu/dataset.xhtml?persistentId=doi%3A10.7910%2FDVN%2FKJEBIL
if args.type in ['all','tweets']: # parse trump tweets
print('prepare tweet data set...')
df1 = pd.read_json(os.path.join(args.data_dir, 'realdonaldtrump-1.ndjson'), lines=True)
df2 = pd.read_json(os.path.join(args.data_dir, 'realdonaldtrump-2.ndjson'), lines=True)
df = | pd.concat([df1, df2], sort=True) | pandas.concat |
####
#
# The MIT License (MIT)
#
# Copyright 2021, 2022 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
####
import os
import re
import pandas as pd
import glob
from typing import Dict, Optional, Tuple
from pandas.api.types import is_float, is_bool, is_integer
# ----------------------------------------------------------------------------------------------------------------------
# Helper scripts to load the result of our experiments
# ----------------------------------------------------------------------------------------------------------------------
def dict2fn(d: Dict, sep: str = "__", pref: Optional[str] = None, ext: Optional[str] = None) -> str:
"""
:param d: dictionary, input (key, value)-pairs
:param sep: string, used to separate the key=value pairs in the filename
:param pref: string, added as prefix to the filename. The prefix and rest of the filename is separated by the string
specified in the 'sep' parameter
:param ext: string, added as extension to the filename. The extension is separated by the systems file-extension
separator, e.g. '.'.
:return: string, compiled filename
"""
out = []
for k, v in sorted(d.items()):
if v is None:
out.append("{}".format(k))
else:
out.append("{}={}".format(k, v))
out = sep.join(out)
if pref is not None:
out = sep.join([pref, out])
if ext is not None:
out = os.extsep.join([out, ext])
return out
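# Illustrative: dict2fn({"ds": "bio", "mode": "test"}, pref="res", ext="tsv") -> "res__ds=bio__mode=test.tsv"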
def fn2dict(params: str) -> Tuple[Dict, bool, Optional[str], Optional[str]]:
"""
Roughly the inverse function of "dict2fn".
:param params: string, filename to split up
:return:
"""
param_name_pattern = re.compile(r'(?:[a-zA-Z0-9]+_?)+=')
# Debugging outputs do have an additional prefix which we need to remove
if params.startswith("debug__"):
params = params.removeprefix("debug__")
is_debug = True
pref = "debug"
else:
# FIXME: we only support "debug" as prefix, here
is_debug = False
pref = None
# Get the file extension (assume that there is no "." in the filename that does NOT separate the extension)
_ext_idx = params.find(os.extsep)
if _ext_idx >= 0:
ext = params[(_ext_idx + 1):]
else:
ext = None
# Split the filename and extract the (key, value)-pairs
ks = [m.removesuffix("=") for m in param_name_pattern.findall(params)]
vs = [v.removesuffix("__") for v in param_name_pattern.split(params) if len(v) > 0]
assert len(ks) == len(vs)
# Construct the output dictionary
out = {}
for k, v in zip(ks, vs):
if is_bool(v):
out[k] = bool(v)
elif is_integer(v):
out[k] = int(v)
elif is_float(v):
            out[k] = float(v)
else:
assert isinstance(v, str)
out[k] = v
return out, is_debug, pref, ext
def load_topk__publication(
setting: Dict, agg_setting: Dict, basedir: str = ".", top_k_method: str = "csi",
load_max_model_number: bool = False
) -> pd.DataFrame:
"""
Load the Top-k accuracies in the "publications" folder. These are the results for the SSVM (exp_ver >= 3)
:param setting: dictionary, specifying the parameters of the experiments for which the results should be loaded.
:param agg_setting: dictionary, specifying the parameter of the margin score aggregation, that means how are the
molecular candidates identified when choosing the margin score.
:param basedir: string, directory containing the results
:param top_k_method: string, specifies which top-k accuracy calculation method should be used. We always use the one
used in the original SIRIUS publication [1].
:param load_max_model_number: boolean, indicating whether only the results of the averaged marginals for the maximum
number of available SSVM models should be loaded.
:return: dataframe, containing all results (row-wise concatenation) that match the "setting" and "agg_setting"
:references:
[1] Dührkop, Kai / Shen, Huibin / Meusel, Marvin / Rousu, Juho / Böcker, Sebastian
Searching molecular structure databases with tandem mass spectra using CSI:FingerID
2015
"""
assert top_k_method == "csi", "We always use the top-k accuracy calculated as in the original SIRIUS publication."
# Collect result data-frames. There might be several, e.g. if the "setting" dictionary contains wildcards (*) for
# some parameters, such as the dataset.
df = []
# Prefix of the result file depending on whether only the results for the maximum number of SSVM models should be
# loaded.
_top_k_fn = "top_k__max_models" if load_max_model_number else "top_k"
# Iterate over all result files matching the parameters
for ifn in sorted(glob.glob(
os.path.join(
basedir, dict2fn(setting), dict2fn(agg_setting, pref="combined"), os.extsep.join([_top_k_fn, "tsv"])
)
)):
# Parse the actual parameters from the basename (the setting might contain wildcards)
params, _, _, _ = fn2dict(ifn.split(os.sep)[-3]) # /path/to/PARAMS/not/file.tsv --> PARAMS
# Read the top-k performance results
_df = pd.read_csv(ifn, sep="\t", dtype={"scoring_method": str, "top_k_method": str})
# Restrict the results to the one with the specified top-k accuracy method
_df = _df[_df["top_k_method"] == top_k_method]
assert _df["top_k_method"].nunique() == 1, "There should be only two different top-k accuracy method."
# Add the parameters to the dataframe
for k, v in params.items():
if k not in _df.columns:
_df[k] = v
df.append(_df)
# All loaded results are concatenated into a single dataframe
df = pd.concat(df, ignore_index=True)
return df
def load_topk__comparison(setting: Dict, agg_setting: Dict, basedir: str = ".", top_k_method: str = "csi") \
-> pd.DataFrame:
"""
Load the Top-k accuracies in the "comparison" folder. These are the results for the comparison methods, i.e.
RT filtering, LogP scoring and RO score integration approaches.
:param setting: dictionary, specifying the parameters of the experiments for which the results should be loaded.
:param agg_setting: dictionary, specifying the parameter of the margin score aggregation, that means how are the
molecular candidates identified when choosing the margin score.
:param basedir: string, directory containing the results
:param top_k_method: string, specifies which top-k accuracy calculation method should be used. We always use the one
used in the original SIRIUS publication [1].
:return: dataframe, containing all results (row-wise concatenation) that match the "setting" and "agg_setting"
:references:
[1] <NAME> / <NAME> / Meusel, Marvin / <NAME> / Böcker, Sebastian
Searching molecular structure databases with tandem mass spectra using CSI:FingerID
2015
"""
spl_pattern = re.compile(r"spl=([0-9]+)")
assert top_k_method == "csi", "We always use the top-k accuracy calculated as in the original SIRIUS publication."
# Collect result data-frames. There might be several, e.g. if the "setting" dictionary contains wildcards (*) for
# some parameters, such as the dataset.
df = []
# Input directory
idir = os.path.join(basedir, dict2fn(setting))
for ifn in sorted(glob.glob(
os.path.join(idir, dict2fn({"spl": "*", "cand_agg_id": agg_setting["cand_agg_id"]}, pref="top_k", ext="tsv"))
)):
# Parse the actual parameters from the basename (the setting might contain wildcards)
params, _, _, _ = fn2dict(ifn.split(os.sep)[-2]) # /path/to/PARAMS/file.tsv --> PARAMS
# Read the top-k performance results
_df = pd.read_csv(ifn, sep="\t", converters={"scoring_method": str})
# Restrict the specified top-k method
_df = _df[_df["top_k_method"] == top_k_method]
assert _df["top_k_method"].nunique() == 1
# Add the parameters to the dataframe
for k, v in params.items():
if k not in _df.columns:
_df[k] = v
# The scoring method label should include the "score_int_app" which distinguishes the different filtering
# approaches using the predicted retention times
_df.loc[_df["scoring_method"] == "MS + RT", "scoring_method"] = "MS + RT (%s)" % params["score_int_app"]
# Add the evaluation split index
eval_indx = spl_pattern.findall(os.path.basename(ifn).removesuffix(os.extsep + "tsv"))
assert len(eval_indx) == 1
_df["eval_indx"] = int(eval_indx[0])
df.append(_df)
if len(df) > 0:
df = pd.concat(df, ignore_index=True).rename({"ds": "dataset"}, axis=1)
# Compute the accuracy and add it as column
df["top_k_acc"] = (df["correct_leq_k"] / df["seq_length"]) * 100
else:
# Empty dataframe
df = pd.DataFrame(df)
return df
def load_topk__cand_set_info(setting: Dict, basedir: str = ".") -> pd.DataFrame:
"""
    Load the candidate set information (one 'cand_set_info' TSV per evaluation split) for the specified
    experiment setting.
"""
df = []
for ifn in sorted(glob.glob(
os.path.join(basedir, dict2fn(setting), dict2fn({"spl": "*"}, pref="cand_set_info", ext="tsv"))
)):
# Parse the actual parameters from the basename (the setting might contain wildcards)
params, _, _, _ = fn2dict(ifn.split(os.sep)[-2]) # /path/to/PARAMS/file.tsv --> PARAMS
# Read the top-k performance results
_df = | pd.read_csv(ifn, sep="\t") | pandas.read_csv |
# Machine Learning Project 1 - House Price Prediction
import pandas as pd
df1 = pd.read_csv('bengaluru_house_prices.csv')
df1.head()
df1.info()
df1.shape
df2 = df1.drop(['area_type', 'society', 'balcony'], axis=1)
df2.head()
df2.isnull().sum()
df3 = df2.dropna()
df3.isnull().sum()
df3.head()
df3['availability'].unique()
df3.groupby("availability")["availability"].count()
df3['availability'] = df3['availability'].apply(lambda x: x if x == 'Ready To Move' else 'Future Possession')
df3.groupby("availability")["availability"].count()
df3['location'].unique()
df3.groupby("location")["location"].count().sort_values(ascending=False)
locations = df3.groupby('location')['location'].count().sort_values()
locations
locations_20cnt = locations[locations <= 20]
locations_20cnt
df3['location'] = df3['location'].apply(lambda x: 'Others' if x in locations_20cnt else x)
df3.groupby("location")["location"].count().sort_values(ascending=False)
df3.head()
df3['size'].unique()
import re
df3['bhks'] = df3['size'].apply(lambda x: int(re.findall('\d+', x)[0].strip()))
df3.head()
df3['total_sqft'].unique()
def get_mean(x):
if re.findall('-', x):
ss = x.strip().split('-')
        return ((float(ss[0]) + float(ss[1])) / 2)
try:
return float(x.strip())
except:
return None
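# Illustrative: get_mean('2100 - 2850') -> 2475.0; get_mean('34.46Sq. Meter') -> None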
df3['total_sqft_new'] = df3['total_sqft'].apply(get_mean)
df3.head()
df3.isnull().sum()
df4 = df3.dropna()
df4.isnull().sum()
df4['bath'].unique()
df4.groupby('bath')['bath'].count().sort_values()
df5 = df4[df4['bath'] <= 10]
df5.head()
df6 = df5.drop(['size', 'total_sqft'], axis=1)
df6.head()
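# inspect, then drop, listings with less than ~400 sqft per bedroom (likely data-entry errors)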
df6[df6['total_sqft_new'] / df6['bhks'] < 400]
df7 = df6[df6['total_sqft_new'] / df6['bhks'] > 400]
df7.head()
df7['price_per_sqft'] = df7['price'] * 100000 / df7['total_sqft_new']
df7
df7['price_per_sqft'].describe()
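# remove per-location outliers: keep only rows whose price_per_sqft lies within one standard deviation of the location mean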
def rmv_price_outlier(df):
df_new = pd.DataFrame()
for key, sdf in df.groupby('location'):
m = sdf['price_per_sqft'].mean()
s = sdf['price_per_sqft'].std()
# print (sdf['location'])
rdf = sdf[(sdf['price_per_sqft'] <= m + s) & (sdf['price_per_sqft'] > m - s)]
# print(rdf)
df_new = pd.concat([df_new, rdf], ignore_index=True)
return df_new
df8 = rmv_price_outlier(df7)
df8.head()
df8.shape
availability_dummy = pd.get_dummies(df8['availability'], drop_first=True)
availability_dummy
location_dummy = pd.get_dummies(df8['location'], drop_first=True)
df9 = | pd.concat([df8, availability_dummy, location_dummy], axis=1) | pandas.concat |
from __future__ import division
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import locale
import os
import re
from shutil import rmtree
import string
import subprocess
import sys
import tempfile
import traceback
import warnings
import numpy as np
from numpy.random import rand, randn
from pandas._libs import testing as _testing
import pandas.compat as compat
from pandas.compat import (
PY2, PY3, Counter, StringIO, callable, filter, httplib, lmap, lrange, lzip,
map, raise_with_traceback, range, string_types, u, unichr, zip)
from pandas.core.dtypes.common import (
is_bool, is_categorical_dtype, is_datetime64_dtype, is_datetime64tz_dtype,
is_datetimelike_v_numeric, is_datetimelike_v_object,
is_extension_array_dtype, is_interval_dtype, is_list_like, is_number,
is_period_dtype, is_sequence, is_timedelta64_dtype, needs_i8_conversion)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Index,
IntervalIndex, MultiIndex, Panel, PeriodIndex, RangeIndex, Series,
bdate_range)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArrayMixin as DatetimeArray, ExtensionArray, IntervalArray,
PeriodArray, TimedeltaArrayMixin as TimedeltaArray, period_array)
import pandas.core.common as com
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, compat.ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('always', _testing_mode_warnings)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get('PANDAS_TESTING_MODE', 'None')
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', _testing_mode_warnings)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option('^display.', silent=True)
def round_trip_pickle(obj, path=None):
"""
Pickle an object and then read it again.
Parameters
----------
obj : pandas object
The object to pickle and then re-read.
path : str, default None
The path where the pickled object is written and then read.
Returns
-------
round_trip_pickled_object : pandas object
The original object that was pickled and then re-read.
"""
if path is None:
path = u('__{random_bytes}__.pickle'.format(random_bytes=rands(10)))
with ensure_clean(path) as path:
pd.to_pickle(obj, path)
return pd.read_pickle(path)
def round_trip_pathlib(writer, reader, path=None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip('pathlib').Path
if path is None:
path = '___pathlib___'
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path=None):
"""
Write an object to file specified by a py.path LocalPath and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
round_trip_object : pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip('py.path').local
if path is None:
path = '___localpath___'
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object
Parameters
----------
path : str
The path where the file is read from
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
f : file object
"""
if compression is None:
f = open(path, 'rb')
elif compression == 'gzip':
import gzip
f = gzip.open(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
elif compression == 'xz':
lzma = compat.import_lzma()
f = lzma.LZMAFile(path, 'rb')
elif compression == 'zip':
import zipfile
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
f = zip_file.open(zip_names.pop())
else:
raise ValueError('ZIP file {} error. Only one file per ZIP.'
.format(path))
else:
msg = 'Unrecognized compression type: {}'.format(compression)
raise ValueError(msg)
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
def assert_almost_equal(left, right, check_dtype="equiv",
check_less_precise=False, **kwargs):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool / string {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
"""
if isinstance(left, pd.Index):
return assert_index_equal(left, right,
check_exact=False,
exact=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.Series):
return assert_series_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
elif isinstance(left, pd.DataFrame):
return assert_frame_equal(left, right,
check_exact=False,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if (isinstance(left, np.ndarray) or
isinstance(right, np.ndarray)):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
return _testing.assert_almost_equal(
left, right,
check_dtype=check_dtype,
check_less_precise=check_less_precise,
**kwargs)
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
err_msg = "{name} Expected type {exp_type}, found {act_type} instead"
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(left)))
if not isinstance(right, cls):
raise AssertionError(err_msg.format(name=cls_name, exp_type=cls,
act_type=type(right)))
def assert_dict_equal(left, right, compare_keys=True):
_check_isinstance(left, right, dict)
return _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p=0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits),
dtype=(np.str_, 1))
RANDU_CHARS = np.array(list(u("").join(map(unichr, lrange(1488, 1488 + 26))) +
string.digits), dtype=(np.unicode_, 1))
def rands_array(nchars, size, dtype='O'):
"""Generate an array of byte strings."""
retval = (np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def randu_array(nchars, size, dtype='O'):
"""Generate an array of unicode strings."""
retval = (np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars)).reshape(size))
if dtype is None:
return retval
else:
return retval.astype(dtype)
def rands(nchars):
"""
Generate one random byte string.
See `rands_array` if you want to create an array of random strings.
"""
return ''.join(np.random.choice(RANDS_CHARS, nchars))
def randu(nchars):
"""
Generate one random unicode string.
See `randu_array` if you want to create an array of random unicode strings.
"""
return ''.join(np.random.choice(RANDU_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import get_fignums, close as _close
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# locale utilities
def check_output(*popenargs, **kwargs):
# shamelessly taken from Python 2.7 source
r"""Run command with arguments and return its output as a byte string.
If the exit code was non-zero it raises a CalledProcessError. The
CalledProcessError object will have the return code in the returncode
attribute and output in the output attribute.
The arguments are the same as for the Popen constructor. Example:
>>> check_output(["ls", "-l", "/dev/null"])
'crw-rw-rw- 1 root root 1, 3 Oct 18 2007 /dev/null\n'
The stdout argument is not allowed as it is used internally.
To capture standard error in the result, use stderr=STDOUT.
>>> check_output(["/bin/sh", "-c",
... "ls -l non_existent_file ; exit 0"],
... stderr=STDOUT)
'ls: non_existent_file: No such file or directory\n'
"""
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, stderr=subprocess.PIPE,
*popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd, output=output)
return output
def _default_locale_getter():
try:
raw_locales = check_output(['locale -a'], shell=True)
except subprocess.CalledProcessError as e:
raise type(e)("{exception}, the 'locale -a' command cannot be found "
"on your system".format(exception=e))
return raw_locales
def get_locales(prefix=None, normalize=True,
locale_getter=_default_locale_getter):
"""Get all the locales that are available on the system.
Parameters
----------
prefix : str
If not ``None`` then return only those locales with the prefix
provided. For example to get all English language locales (those that
start with ``"en"``), pass ``prefix="en"``.
normalize : bool
Call ``locale.normalize`` on the resulting list of available locales.
If ``True``, only locales that can be set without throwing an
``Exception`` are returned.
locale_getter : callable
The function to use to retrieve the current locales. This should return
a string with each locale separated by a newline character.
Returns
-------
locales : list of strings
A list of locale strings that can be set with ``locale.setlocale()``.
For example::
locale.setlocale(locale.LC_ALL, locale_string)
On error will return None (no locale available, e.g. Windows)
"""
try:
raw_locales = locale_getter()
except Exception:
return None
try:
# raw_locales is "\n" separated list of locales
# it may contain non-decodable parts, so split
# extract what we can and then rejoin.
raw_locales = raw_locales.split(b'\n')
out_locales = []
for x in raw_locales:
if PY3:
out_locales.append(str(
x, encoding=pd.options.display.encoding))
else:
out_locales.append(str(x))
except TypeError:
pass
if prefix is None:
return _valid_locales(out_locales, normalize)
pattern = re.compile('{prefix}.*'.format(prefix=prefix))
found = pattern.findall('\n'.join(out_locales))
return _valid_locales(found, normalize)
@contextmanager
def set_locale(new_locale, lc_var=locale.LC_ALL):
"""Context manager for temporarily setting a locale.
Parameters
----------
new_locale : str or tuple
A string of the form <language_country>.<encoding>. For example to set
the current locale to US English with a UTF8 encoding, you would pass
"en_US.UTF-8".
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Notes
-----
This is useful when you want to run a particular block of code under a
particular locale, without globally setting the locale. This probably isn't
thread-safe.
"""
current_locale = locale.getlocale()
try:
locale.setlocale(lc_var, new_locale)
normalized_locale = locale.getlocale()
if com._all_not_none(*normalized_locale):
yield '.'.join(normalized_locale)
else:
yield new_locale
finally:
locale.setlocale(lc_var, current_locale)
def can_set_locale(lc, lc_var=locale.LC_ALL):
"""
Check to see if we can set a locale, and subsequently get the locale,
without raising an Exception.
Parameters
----------
lc : str
The locale to attempt to set.
lc_var : int, default `locale.LC_ALL`
The category of the locale being set.
Returns
-------
is_valid : bool
Whether the passed locale can be set
"""
try:
with set_locale(lc, lc_var=lc_var):
pass
except (ValueError,
            locale.Error): # horrible name for an Exception subclass
return False
else:
return True
def _valid_locales(locales, normalize):
"""Return a list of normalized locales that do not throw an ``Exception``
when set.
Parameters
----------
locales : str
A string where each locale is separated by a newline.
normalize : bool
Whether to call ``locale.normalize`` on each locale.
Returns
-------
valid_locales : list
A list of valid locales.
"""
if normalize:
normalizer = lambda x: locale.normalize(x.strip())
else:
normalizer = lambda x: x.strip()
return list(filter(can_set_locale, map(normalizer, locales)))
# -----------------------------------------------------------------------------
# Stdout / stderr decorators
@contextmanager
def set_defaultencoding(encoding):
"""
Set default encoding (as given by sys.getdefaultencoding()) to the given
encoding; restore on exit.
Parameters
----------
encoding : str
"""
if not PY2:
raise ValueError("set_defaultencoding context is only available "
"in Python 2.")
orig = sys.getdefaultencoding()
reload(sys) # noqa:F821
sys.setdefaultencoding(encoding)
try:
yield
finally:
sys.setdefaultencoding(orig)
def capture_stdout(f):
r"""
Decorator to capture stdout in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stdout.
Returns
-------
f : callable
The decorated test ``f``, which captures stdout.
Examples
--------
>>> from pandas.util.testing import capture_stdout
>>> import sys
>>>
>>> @capture_stdout
... def test_print_pass():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stdout
... def test_print_fail():
... print("foo")
... out = sys.stdout.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stdout = StringIO()
f(*args, **kwargs)
finally:
sys.stdout = sys.__stdout__
return wrapper
def capture_stderr(f):
r"""
Decorator to capture stderr in a buffer so that it can be checked
(or suppressed) during testing.
Parameters
----------
f : callable
The test that is capturing stderr.
Returns
-------
f : callable
The decorated test ``f``, which captures stderr.
Examples
--------
>>> from pandas.util.testing import capture_stderr
>>> import sys
>>>
>>> @capture_stderr
... def test_stderr_pass():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "foo\n"
>>>
>>> @capture_stderr
... def test_stderr_fail():
... sys.stderr.write("foo")
... out = sys.stderr.getvalue()
... assert out == "bar\n"
...
AssertionError: assert 'foo\n' == 'bar\n'
"""
@compat.wraps(f)
def wrapper(*args, **kwargs):
try:
sys.stderr = StringIO()
f(*args, **kwargs)
finally:
sys.stderr = sys.__stderr__
return wrapper
# -----------------------------------------------------------------------------
# Console debugging tools
def debug(f, *args, **kwargs):
from pdb import Pdb as OldPdb
try:
from IPython.core.debugger import Pdb
kw = dict(color_scheme='Linux')
except ImportError:
Pdb = OldPdb
kw = {}
pdb = Pdb(**kw)
return pdb.runcall(f, *args, **kwargs)
def pudebug(f, *args, **kwargs):
import pudb
return pudb.runcall(f, *args, **kwargs)
def set_trace():
from IPython.core.debugger import Pdb
try:
Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
except Exception:
from pdb import Pdb as OldPdb
OldPdb().set_trace(sys._getframe().f_back)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
"""Gets a temporary path and agrees to remove on close.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
"""
filename = filename or ''
fd = None
if return_filelike:
f = tempfile.TemporaryFile(suffix=filename)
try:
yield f
finally:
f.close()
else:
# don't generate tempfile if using a path with directory specified
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(suffix=filename)
except UnicodeEncodeError:
import pytest
pytest.skip('no unicode file names on this system')
try:
yield filename
finally:
try:
os.close(fd)
except Exception:
print("Couldn't close file descriptor: {fdesc} (file: {fname})"
.format(fdesc=fd, fname=filename))
try:
if os.path.exists(filename):
os.remove(filename)
except Exception as e:
print("Exception on removing file: {error}".format(error=e))
@contextmanager
def ensure_clean_dir():
"""
Get a temporary directory path and agrees to remove on close.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix='')
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except Exception:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2):
"""Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(left, right, exact='equiv', check_names=True,
check_less_precise=False, check_exact=True,
check_categorical=True, obj='Index'):
"""Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
def _check_types(l, r, obj='Index'):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal('dtype', l, r, obj=obj)
# allow string-like to have different inferred_types
if l.inferred_type in ('string', 'unicode'):
assert r.inferred_type in ('string', 'unicode')
else:
assert_attr_equal('inferred_type', l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
labels = index.codes[level]
filled = take_1d(unique.values, labels, fill_value=unique._na_value)
values = unique._shallow_copy(filled, name=index.names[level])
return values
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = '{obj} levels are different'.format(obj=obj)
msg2 = '{nlevels}, {left}'.format(nlevels=left.nlevels, left=left)
msg3 = '{nlevels}, {right}'.format(nlevels=right.nlevels, right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = '{obj} length are different'.format(obj=obj)
msg2 = '{length}, {left}'.format(length=len(left), left=left)
msg3 = '{length}, {right}'.format(length=len(right), right=right)
raise_assert_detail(obj, msg1, msg2, msg3)
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = 'MultiIndex level [{level}]'.format(level=level)
assert_index_equal(llevel, rlevel,
exact=exact, check_names=check_names,
check_less_precise=check_less_precise,
check_exact=check_exact, obj=lobj)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values)
.astype(int)) * 100.0 / len(left)
msg = '{obj} values are different ({pct} %)'.format(
obj=obj, pct=np.round(diff, 5))
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(left.values, right.values,
check_less_precise=check_less_precise,
check_dtype=exact,
obj=obj, lobj=left, robj=right)
# metadata comparison
if check_names:
assert_attr_equal('names', left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal('freq', left, right, obj=obj)
if (isinstance(left, pd.IntervalIndex) or
isinstance(right, pd.IntervalIndex)):
assert_interval_array_equal(left.values, right.values)
if check_categorical:
if is_categorical_dtype(left) or is_categorical_dtype(right):
assert_categorical_equal(left.values, right.values,
obj='{obj} category'.format(obj=obj))
def assert_class_equal(left, right, exact=True, obj='Input'):
"""checks classes are equal."""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
try:
return x.__class__.__name__
except AttributeError:
return repr(type(x))
if exact == 'equiv':
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {'Int64Index', 'RangeIndex'}):
msg = '{obj} classes are not equivalent'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
elif exact:
if type(left) != type(right):
msg = '{obj} classes are different'.format(obj=obj)
raise_assert_detail(obj, msg, repr_class(left),
repr_class(right))
def assert_attr_equal(attr, left, right, obj='Attributes'):
"""checks attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (is_number(left_attr) and np.isnan(left_attr) and
is_number(right_attr) and np.isnan(right_attr)):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = 'Attribute "{attr}" are different'.format(attr=attr)
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = ("one of 'objs' is not a matplotlib Axes instance, type "
"encountered {name!r}").format(name=el.__class__.__name__)
assert isinstance(el, (plt.Axes, dict)), msg
else:
assert isinstance(objs, (plt.Artist, tuple, dict)), (
'objs is neither an ndarray of Artist instances nor a '
'single Artist instance, tuple, or dict, "objs" is a {name!r}'
.format(name=objs.__class__.__name__))
def isiterable(obj):
return hasattr(obj, '__iter__')
def is_sorted(seq):
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
return assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(left, right, check_dtype=True,
check_category_order=True, obj='Categorical'):
"""Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories,
obj='{obj}.categories'.format(obj=obj))
assert_numpy_array_equal(left.codes, right.codes,
check_dtype=check_dtype,
obj='{obj}.codes'.format(obj=obj))
else:
assert_index_equal(left.categories.sort_values(),
right.categories.sort_values(),
obj='{obj}.categories'.format(obj=obj))
assert_index_equal(left.categories.take(left.codes),
right.categories.take(right.codes),
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('ordered', left, right, obj=obj)
def assert_interval_array_equal(left, right, exact='equiv',
obj='IntervalArray'):
"""Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool / string {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
assert_index_equal(left.left, right.left, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_index_equal(left.right, right.right, exact=exact,
obj='{obj}.left'.format(obj=obj))
assert_attr_equal('closed', left, right, obj=obj)
def assert_period_array_equal(left, right, obj='PeriodArray'):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}.values'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj='DatetimeArray'):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
assert_attr_equal('tz', left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj='TimedeltaArray'):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data,
obj='{obj}._data'.format(obj=obj))
assert_attr_equal('freq', left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = | pprint_thing(left) | pandas.io.formats.printing.pprint_thing |
import pandas as pd
import umap
import umap.plot
import numpy as np
import matplotlib.pyplot as plt
import os
from matplotlib.patches import Patch
seed = 42 # random seed
x = 'meredig_fingerprints_42349.csv' # X encodings of QMOF-42362
csd_14482 = os.path.join(
'..', 'opt-bandgaps.csv') # .csv of QMOF-14482 data
#---------------------------------------
# Encoding
X = | pd.read_csv(x, delimiter=',', header=0, index_col=0) | pandas.read_csv |
# Created by <NAME> on 11/18/2019, 2:03 PM
# Designed for easy addition of more algorithms: just follow the directions at the HERE comments
# Prerequisite: Install pyod
from __future__ import division
from __future__ import print_function
from time import time
import sys
import os
# temporary solution for relative imports in case pyod is not installed
# if pyod is installed, no need to use the following line
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname("__file__"), '..')))
# suppress warnings for clean output
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from scipy.io import loadmat
# HERE: Import the other models you want to test
from pyod.models.iforest import IForest
from pyod.utils.utility import standardizer
from pyod.utils.utility import precision_n_scores
from sklearn.metrics import roc_auc_score
# Define data file and read X and y
mat_file_list = ['arrhythmia.mat',
'cardio.mat',
'glass.mat',
'ionosphere.mat',
'letter.mat',
'lympho.mat',
'mnist.mat',
'musk.mat',
'optdigits.mat',
'pendigits.mat',
'pima.mat',
'satellite.mat',
'satimage-2.mat',
'shuttle.mat',
'vertebral.mat',
'vowels.mat',
'wbc.mat']
# define the number of iterations
# HERE: Change n_classifiers to the number of classifiers you want to benchmark
n_ite = 10
n_classifiers = 1
# HERE: Add additional classifiers as parameters in df_columns
df_columns = ['Data', '#Samples', '#Dimensions', 'Outlier Perc',
'IForest']
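# Example (illustrative sketch, not part of the original benchmark): to add a second
# detector such as pyod's KNN, import it in the HERE import block above, set
# n_classifiers = 2, and extend df_columns, e.g.
#   from pyod.models.knn import KNN
#   df_columns = ['Data', '#Samples', '#Dimensions', 'Outlier Perc', 'IForest', 'KNN']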
# initialize the container for saving the results
roc_df = pd.DataFrame(columns=df_columns)
prn_df = | pd.DataFrame(columns=df_columns) | pandas.DataFrame |
"""
Contains the Ligand class of the pipeline.
"""
from enum import Enum # for creating enumeration classes
from pathlib import Path
import pandas as pd # for creating dataframes and handling data
from rdkit.Chem import PandasTools # for displaying structures inside Pandas DataFrame
from .helpers import pubchem, rdkit
class Ligand:
"""
Ligand object with properties as attributes and methods to visualize and work with ligands.
Take a ligand identifier type and corresponding value,
and create a Ligand object, while assigning some properties as attributes.
Attributes
----------
dataframe : Pandas DataFrame
Dataframe containing the most important data available for the ligand.
Each time a process is performed on the ligand (e.g. docking),
the important results are added to the dataframe.
rdkit_obj : rdkit.Chem.rdchem.Mol
RDKit molecule object for the ligand with its own set of attributes.
TODO see `Consts` class. More attributes?
"""
class Consts:
"""
Available properties that are assigned as instance attributes upon instantiation.
"""
class IdentifierTypes(Enum):
NAME = "name"
IUPAC_NAME = "iupac_name"
SMILES = "smiles"
CID = "cid"
INCHI = "inchi"
INCHIKEY = "inchikey"
def __init__(self, identifier_type, identifier_value, ligand_output_path):
"""
Parameters
----------
        identifier_type : enum 'IdentifierTypes' from the 'Ligand.Consts' class
            Type of the ligand identifier, e.g. IdentifierTypes.SMILES.
indentifier_value : str
Value of the ligand identifier, e.g. its SMILES.
ligand_output_path : str or pathlib.Path
Output path of the project for ligand data.
"""
self.dataframe = pd.DataFrame(columns=["Value"])
self.dataframe.index.name = "Property"
setattr(self, identifier_type.name.lower(), identifier_value)
for identifier in self.Consts.IdentifierTypes:
new_id = pubchem.convert_compound_identifier(
identifier_type.value, identifier_value, identifier.value
)
setattr(self, identifier.value, new_id)
self.dataframe.loc[identifier.value] = new_id
self.rdkit_obj = rdkit.create_molecule_object("smiles", self.smiles)
dict_of_properties = rdkit.calculate_druglikeness(self.rdkit_obj)
for property_ in dict_of_properties:
setattr(self, property_, dict_of_properties[property_])
self.dataframe.loc[property_] = dict_of_properties[property_]
ligand_output_path = Path(ligand_output_path)
self.save_as_image(ligand_output_path / f"CID_{self.cid}")
self.dataframe.to_csv(ligand_output_path / f"CID_{self.cid}.csv")
def __repr__(self):
return f"<Ligand CID: {self.cid}>"
def __call__(self):
df = pd.DataFrame(columns=["smiles"])
df.loc[1] = self.smiles
PandasTools.AddMoleculeColumnToFrame(df, smilesCol="smiles")
romol = df.loc[1, "ROMol"]
return | pd.concat({romol: self.dataframe}, names=["Structure"]) | pandas.concat |
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.metrics import (confusion_matrix,f1_score,classification_report)
from sklearn.model_selection import (train_test_split, GridSearchCV)
from joblib import dump
from sklearn.neighbors import (NeighborhoodComponentsAnalysis, KNeighborsClassifier)
from sklearn.preprocessing import StandardScaler
from tempfile import mkdtemp
from shutil import rmtree
from joblib import Memory
################################################################################
################################################################################
def KNN(df, *args, **kwargs):
unique_test_name = 'StandardScaler KNN GridSearchCV Optimised with SMOTE ENN'
# Create a temporary folder to store the transformers of the pipeline
cachedir = mkdtemp()
memory = Memory(location=cachedir, verbose=10)
y = df['QuoteConversion_Flag'].values
IDs = df.Quote_ID
X = df.drop(['QuoteConversion_Flag', 'Quote_ID'], axis=1).values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
param_grid = {
        'knn__n_neighbors': np.arange(3, 12),
'knn__algorithm': ['ball_tree','kd_tree', 'brute'],
'knn__leaf_size': np.arange(20, 30),
'knn__p': [1, 2, 3, 4, 5],
'nca__n_components': np.arange(2,12),
'nca__max_iter': np.arange(1000, 2000),
'nca__tol': 10.0 ** -np.arange(1, 8),
}
# model classes
nca = NeighborhoodComponentsAnalysis(random_state=42, warm_start=False)
knn = KNeighborsClassifier(n_jobs=-1)
    # named steps so that the 'nca__*' and 'knn__*' keys in param_grid resolve correctly
    model = Pipeline([('scaler', StandardScaler()), ('nca', nca), ('knn', knn)], memory=memory)
grid = GridSearchCV(model, param_grid, cv=1000, iid=False, n_jobs=-1)
grid.fit(X_train, y_train)
print("-----------------Best Param Overview--------------------")
print("Best score: %0.4f" % grid.best_score_)
print("Using the following parameters:")
print(grid.best_params_)
results = pd.DataFrame(grid.cv_results_)
results.to_csv(unique_test_name+'_cv_results.csv', index=False)
prediction = grid.predict(X_test)
print("-----------------Scoring Model--------------------")
print(classification_report(prediction, y_test))
print(confusion_matrix(prediction, y_test), "\n")
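    # Optional cleanup (sketch, not in the original): remove the transformer cache created above
    #   memory.clear(warn=False)
    #   rmtree(cachedir)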
prediction = pd.DataFrame(data=prediction, columns=['QuoteConversion_Flag'])
results = | pd.concat([IDs, prediction], axis=1) | pandas.concat |
# Column index legend (three columns of "index  field name"):
#  1 DR Number            11 Victim Age           22 Crime Description
#  2 Date Reported        12 Victim Sex           23 Crime Code 1
#  3 Date Occurred        13 Victim Descent       24 Crime Code 2
#  4 Time Occurred        14 Premise Code         25 Crime Code 3
#  5 Area ID              15 Premise Description  26 Crime Code 4
#  6 Area Name            16 Weapon Used Code     27 Address
#  7 Reporting District   17 Weapon Description   28 Cross Street
#  8 Crime Code           18 Status Code          29 Location
#  9 Crime Description    20 Status Description
# 10 MO Codes             21 Crime Code
import pandas as pd
from app.settings import setting
source = pd.read_csv(setting['file'],skiprows=1,header=None,nrows=setting['limit']);
def field(x, y):
src = source[x]
data = src.rename(columns = setting['vars'])
data = data.groupby(y).size().to_frame(name = 'count').reset_index()
items = []
for i in range(len(data)):
obj = {}
for name in y:
obj.update({name:data[name][i]})
items.append(obj)
return items
def db():
src = source[setting['source']]
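    # the location column holds strings like "(34.0522, -118.2437)"; strip the parentheses
    # and split on ", " to get separate lat/lng columns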
src['lat'] = pd.DataFrame(src[25].str.replace('(','').str.replace(')','').str.split(', ').str[0])
src['lng'] = pd.DataFrame(src[25].str.replace('(','').str.replace(')','').str.split(', ').str[1])
data = src.rename(columns = setting['vars'])
data[['lat','lng']] = data[['lat','lng']].apply(pd.to_numeric)
data['date_occured'] = pd.to_datetime(source[2])
data['date_reported'] = | pd.to_datetime(source[1]) | pandas.to_datetime |
import os
import pandas as pd
import numpy as np
import random
from human_ISH_config import *
import h5py
import time
from shutil import copyfile
import operator
import matplotlib.pyplot as plt
import math
import json
random.seed(1)
def get_stats(images_info_df):
"""
Uses the images_info_df and calculates some stats.
    :param images_info_df: pandas dataframe that has the information of all images
:return: a dictionary containing stats.
"""
stats_dict = {'image_count':None, 'donor_count':None, 'female_donor_count':None, 'male_donor_count':None,
'unique_genes_count': None, 'unique_entrez_id_count' : None}
image_id_list = images_info_df['image_id']
gene_symbol_list = images_info_df['gene_symbol']
entrez_id_list = images_info_df['entrez_id']
experiment_id_list = images_info_df['experiment_id']
specimen_id_list = images_info_df['specimen_id']
donor_id_list = images_info_df['donor_id']
donor_sex_list = images_info_df['donor_sex']
female_donors = images_info_df[images_info_df['donor_sex'] == 'F']
male_donors = images_info_df[images_info_df['donor_sex'] == 'M']
# -----------
# How many donors does this study have? How many are female and how many are male?
donors_count = len(set(images_info_df['donor_id']))
print ("Total number of donors: {}".format(donors_count))
female_donors_count = len(set(female_donors['donor_id']))
print("Number of female donors: {}".format(female_donors_count))
male_donors_count = len(set(male_donors['donor_id']))
print("Number of male donors: {}".format(male_donors_count))
if female_donors_count + male_donors_count != donors_count:
print ("something is not right about the number of female and male donors ...")
# -----------
# How many unique genes does this study include?
gene_count = len(set(gene_symbol_list))
print ("Number of unique genes: {}".format(gene_count))
entrez_id_count = len(set(entrez_id_list))
print("Number of unique entrez IDs: {}".format(entrez_id_count))
if entrez_id_count != gene_count:
print ("something is not right. The number of unique genes should be equal to the number of unique entrez IDs")
# -----------
# How many genes have been tested from each donor.
# How many images do we have from each donor.
group_by_donor = images_info_df.groupby('donor_id')
unique_gene_count_per_donor_list = []
unique_image_count_per_donor_list = []
for key, item in group_by_donor:
this_group_genes = group_by_donor.get_group(key)['gene_symbol']
this_group_images = group_by_donor.get_group(key)['image_id']
unique_gene_count_per_donor_list.append(len(set(this_group_genes)))
unique_image_count_per_donor_list.append(len(set(this_group_images)))
print("Minimum number of unique genes from a donor: {}".format(min(unique_gene_count_per_donor_list)))
print("Maximum number of unique genes from a donor: {}".format(max(unique_gene_count_per_donor_list)))
print("Average number of unique genes from a donor: {}".format(np.mean(unique_gene_count_per_donor_list)))
print("Minimum number of images from a donor: {}".format(min(unique_image_count_per_donor_list)))
print("Maximum number of images from a donor: {}".format(max(unique_image_count_per_donor_list)))
print("Average number of images from a donor: {}".format(np.mean(unique_image_count_per_donor_list)))
# -----------
# How many images do we have from each gene.
# How many donors do we have from each gene.
group_by_gene = images_info_df.groupby('gene_symbol')
unique_donor_count_per_gene_list = []
unique_image_count_per_gene_list = []
for key, item in group_by_gene:
this_group_donors = group_by_gene.get_group(key)['donor_id']
this_group_images = group_by_gene.get_group(key)['image_id']
unique_donor_count_per_gene_list.append(len(set(this_group_donors)))
unique_image_count_per_gene_list.append(len(set(this_group_images)))
print("Minimum number of unique donors from a gene: {}".format(min(unique_donor_count_per_gene_list)))
print("Maximum number of unique donors from a gene: {}".format(max(unique_donor_count_per_gene_list)))
print("Average number of unique donors from a gene: {}".format(np.mean(unique_donor_count_per_gene_list)))
print("Minimum number of images from a gene: {}".format(min(unique_image_count_per_gene_list)))
print("Maximum number of images from a gene: {}".format(max(unique_image_count_per_gene_list)))
print("Average number of images from a gene: {}".format(np.mean(unique_image_count_per_gene_list)))
gene_on_all_donors_count = 0
gene_on_only_one_donor_count = 0
for item in unique_donor_count_per_gene_list:
if item == donors_count:
gene_on_all_donors_count +=1
if item == 1:
gene_on_only_one_donor_count += 1
print ("There are {} genes that have been sampled from all the {} donors.".format(gene_on_all_donors_count, donors_count))
print ("There are {} genes that have been sampled from only 1 donor.".format(gene_on_only_one_donor_count))
# -----------
stats_dict['image_count'] = len(image_id_list)
stats_dict['donor_count'] = donors_count
stats_dict['female_donor_count'] = female_donors_count
stats_dict['male_donor_count'] = male_donors_count
stats_dict['unique_genes_count'] = gene_count
stats_dict['unique_entrez_id_count'] = entrez_id_count
# -------------------
# I want to group by donor, in each donor, see on average, how many images there are per gene
# and then average over all the donors
group_by_donor = images_info_df.groupby('donor_id')
avg_num_of_imaes_per_gene_list = []
for key, item in group_by_donor:
# for each donor
this_group_genes = list(group_by_donor.get_group(key)['gene_symbol']) # get a list of its genes (has duplicates)
# for each unique genes, see how many times it appears in the list (== how many images we have of it in this donor)
this_group_genes_count_list = [[x,this_group_genes.count(x)] for x in set(this_group_genes)]
sum = 0
for item in this_group_genes_count_list:
sum += item[1]
# in this donor, on average, we have 'avg' number of images per each gene.
avg = sum / len(this_group_genes_count_list)
# append it to the list
avg_num_of_imaes_per_gene_list.append(avg)
avg_num_of_images_per_gene_in_each_donor_over_all = np.mean(avg_num_of_imaes_per_gene_list)
print ("Average number of images per each gene in each donor, Over all donors: ",avg_num_of_images_per_gene_in_each_donor_over_all)
return stats_dict
def define_sets_with_no_shared_genes(images_info_df):
"""
We want to create training, validation, and test set.
The condition is that the sets should not have any genes in common.
If INCLUDE_SZ_DATA flag is set to false, we want to make sure there are no schizophrenia genes in the training set.
if TRAIN_ON_ALL flag is set to True, then all the genes will be considered as training. We won't have a validation or test.
:param images_info_df: pandas dataframe that has the information of all image
:return: 3 pandas dataframes: training, validation, test
"""
unique_genes = list(np.unique(images_info_df['gene_symbol']))
total_unique_gene_count = len(unique_genes)
print(total_unique_gene_count)
if TRAIN_ON_ALL == False:
test_genes_count = int((TEST_SPLIT / 100.0) * total_unique_gene_count)
validation_gene_count = int((VALIDATION_SPLIT / 100.0) * total_unique_gene_count)
test_genes = random.sample(unique_genes, test_genes_count)
remaining_genes = [x for x in unique_genes if x not in test_genes]
validation_genes = random.sample(remaining_genes, validation_gene_count)
training_genes = [x for x in remaining_genes if x not in validation_genes]
# ------- filter SZ genes if necessary -------
if INCLUDE_SZ_DATA == False:
path_to_SZ_info = os.path.join(DATA_DIR, "schizophrenia", "human_ISH_info.csv")
sz_info_df = pd.read_csv(path_to_SZ_info)
sz_unique_genes = list(set(list(sz_info_df['gene_symbol'])))
print(
"There are {} genes in the training set. {} schizophrenia-associated genes will be removed"
.format(len(training_genes), len(sz_unique_genes)))
training_genes = [x for x in training_genes if x not in sz_unique_genes]
print ("Number of remaining genes: {}".format(len(training_genes)))
# --------------------------------------------
training_df = images_info_df[images_info_df['gene_symbol'].isin(training_genes)]
validation_df = images_info_df[images_info_df['gene_symbol'].isin(validation_genes)]
test_df = images_info_df[images_info_df['gene_symbol'].isin(test_genes)]
training_df = training_df.sort_values(by=['image_id'])
validation_df = validation_df.sort_values(by=['image_id'])
test_df = test_df.sort_values(by=['image_id'])
train_val_df = pd.concat([training_df, validation_df], ignore_index=True)
train_val_df = train_val_df.sort_values(by=['image_id'])
sets_path = os.path.join(DATA_DIR, STUDY, "sets_"+str(PATCH_COUNT_PER_IMAGE)+"_patches_"+str(SEGMENTATION_TRAINING_SAMPLES)+"_seg")
if (not os.path.exists(sets_path)):
os.mkdir(sets_path)
if INCLUDE_SZ_DATA == True:
training_df.to_csv(os.path.join(sets_path, "training.csv"), index=None)
train_val_df.to_csv(os.path.join(sets_path, "training_validation.csv"), index=None)
else:
training_df.to_csv(os.path.join(sets_path, "no_sz_training.csv"), index=None)
train_val_df.to_csv(os.path.join(sets_path, "no_sz_training_validation.csv"), index=None)
validation_df.to_csv(os.path.join(sets_path, "validation.csv"), index=None)
test_df.to_csv(os.path.join(sets_path, "test.csv"), index=None)
else:
print ("-" * 50)
print ('TRAINING ON ALL')
print("-" * 50)
training_genes = [x for x in unique_genes]
# ------- filter SZ genes if necessary -------
if INCLUDE_SZ_DATA == False:
path_to_SZ_info = os.path.join(DATA_DIR, "schizophrenia", "human_ISH_info.csv")
sz_info_df = | pd.read_csv(path_to_SZ_info) | pandas.read_csv |
from __future__ import division
from functools import wraps
import pandas as pd
import numpy as np
import time
import csv, sys
import os.path
import logging
from .ted_functions import TedFunctions
from .ted_aggregate_methods import TedAggregateMethods
from base.uber_model import UberModel, ModelSharedInputs
class TedSpeciesProperties(object):
"""
Listing of species properties that will eventually be read in from a SQL db
"""
def __init__(self):
"""Class representing Species properties"""
super(TedSpeciesProperties, self).__init__()
self.sci_name = pd.Series([], dtype='object')
self.com_name = pd.Series([], dtype='object')
self.taxa = pd.Series([], dtype='object')
self.order = pd.Series([], dtype='object')
self.usfws_id = pd.Series([], dtype='object')
self.body_wgt = pd.Series([], dtype='object')
self.diet_item = pd.Series([], dtype='object')
self.h2o_cont = pd.Series([], dtype='float')
def read_species_properties(self):
# this is a temporary method to initiate the species/diet food items lists (this will be replaced with
# a method to access a SQL database containing the properties
#filename = './ted/tests/TEDSpeciesProperties.csv'
filename = os.path.join(os.path.dirname(__file__),'tests/TEDSpeciesProperties.csv')
try:
with open(filename,'rt') as csvfile:
                # pandas.read_csv uses the first line in the file for column headings by default
dr = pd.read_csv(csvfile) # comma is default delimiter
except csv.Error as e:
            sys.exit('file: %s, %s' % (filename, e))
print(dr)
self.sci_name = dr.ix[:,'Scientific Name']
self.com_name = dr.ix[:,'Common Name']
self.taxa = dr.ix[:,'Taxa']
self.order = dr.ix[:,'Order']
self.usfws_id = dr.ix[:,'USFWS Species ID (ENTITY_ID)']
self.body_wgt= dr.ix[:,'BW (g)']
self.diet_item = dr.ix[:,'Food item']
self.h2o_cont = dr.ix[:,'Water content of diet']
class TedInputs(ModelSharedInputs):
"""
Required inputs class for Ted.
"""
def __init__(self):
"""Class representing the inputs for Ted"""
super(TedInputs, self).__init__()
# Inputs: Assign object attribute variables from the input Pandas DataFrame
self.chemical_name = pd.Series([], dtype="object", name="chemical_name")
# application parameters for min/max application scenarios
self.crop_min = pd.Series([], dtype="object", name="crop")
self.app_method_min = pd.Series([], dtype="object", name="app_method_min")
self.app_rate_min = pd.Series([], dtype="float", name="app_rate_min")
self.num_apps_min = pd.Series([], dtype="int", name="num_apps_min")
self.app_interval_min = pd.Series([], dtype="int", name="app_interval_min")
self.droplet_spec_min = pd.Series([], dtype="object", name="droplet_spec_min")
        self.boom_hgt_min = pd.Series([], dtype="object", name="boom_hgt_min")
        self.pest_incorp_depth_min = pd.Series([], dtype="object", name="pest_incorp_depth_min")
self.crop_max = pd.Series([], dtype="object", name="crop")
self.app_method_max = pd.Series([], dtype="object", name="app_method_max")
self.app_rate_max = pd.Series([], dtype="float", name="app_rate_max")
        self.num_apps_max = pd.Series([], dtype="int", name="num_apps_max")
self.app_interval_max = pd.Series([], dtype="int", name="app_interval_max")
self.droplet_spec_max = pd.Series([], dtype="object", name="droplet_spec_max")
        self.boom_hgt_max = pd.Series([], dtype="object", name="boom_hgt_max")
        self.pest_incorp_depth_max = pd.Series([], dtype="object", name="pest_incorp_depth_max")
# physical, chemical, and fate properties of pesticide
self.foliar_diss_hlife = pd.Series([], dtype="float", name="foliar_diss_hlife")
self.aerobic_soil_meta_hlife = pd.Series([], dtype="float", name="aerobic_soil_meta_hlife")
self.frac_retained_mamm = pd.Series([], dtype="float", name="frac_retained_mamm")
self.frac_retained_birds = pd.Series([], dtype="float", name="frac_retained_birds")
self.log_kow = pd.Series([], dtype="float", name="log_kow")
self.koc = pd.Series([], dtype="float", name="koc")
self.solubility = pd.Series([], dtype="float", name="solubility")
self.henry_law_const = pd.Series([], dtype="float", name="henry_law_const")
# bio concentration factors (ug active ing/kg-ww) / (ug active ing/liter)
self.aq_plant_algae_bcf_mean = pd.Series([], dtype="float", name="aq_plant_algae_bcf_mean")
self.aq_plant_algae_bcf_upper = pd.Series([], dtype="float", name="aq_plant_algae_bcf_upper")
self.inv_bcf_mean = pd.Series([], dtype="float", name="inv_bcf_mean")
self.inv_bcf_upper = pd.Series([], dtype="float", name="inv_bcf_upper")
self.fish_bcf_mean = pd.Series([], dtype="float", name="fish_bcf_mean")
self.fish_bcf_upper = pd.Series([], dtype="float", name="fish_bcf_upper")
# bounding water concentrations (ug active ing/liter)
self.water_conc_1 = pd.Series([], dtype="float", name="water_conc_1") # lower bound
self.water_conc_2 = pd.Series([], dtype="float", name="water_conc_2") # upper bound
# health value inputs
# naming convention (based on listing from OPP TED Excel spreadsheet 'inputs' worksheet):
# dbt: dose based toxicity
# cbt: concentration-based toxicity
# arbt: application rate-based toxicity
# 1inmill_mort: 1/million mortality (note initial character is numeral 1, not letter l)
# 1inten_mort: 10% mortality (note initial character is numeral 1, not letter l)
# others are self explanatory
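        # e.g. 'dbt_mamm_1inmill_mort' reads as: dose-based toxicity, mammals, dose giving 1-in-a-million mortality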
# dose based toxicity(dbt): mammals (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort")
self.dbt_mamm_1inten_mort = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort")
self.dbt_mamm_low_ld50 = pd.Series([], dtype="float", name="dbt_mamm_low_ld50")
        self.dbt_mamm_rat_oral_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50")
self.dbt_mamm_rat_derm_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50")
self.dbt_mamm_rat_inhal_ld50 = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50")
self.dbt_mamm_sub_direct = pd.Series([], dtype="float", name="dbt_mamm_sub_direct")
self.dbt_mamm_sub_indirect = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect")
self.dbt_mamm_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inmill_mort_wgt")
self.dbt_mamm_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_mamm_1inten_mort_wgt")
self.dbt_mamm_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_low_ld50_wgt")
        self.dbt_mamm_rat_oral_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_oral_ld50_wgt")
self.dbt_mamm_rat_derm_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_derm_ld50_wgt")
self.dbt_mamm_rat_inhal_ld50_wgt = pd.Series([], dtype="float", name="dbt_mamm_rat_inhal_ld50_wgt")
self.dbt_mamm_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_direct_wgt")
self.dbt_mamm_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_mamm_sub_indirect_wgt")
# dose based toxicity(dbt): birds (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_bird_1inmill_mort = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort")
self.dbt_bird_1inten_mort = pd.Series([], dtype="float", name="dbt_bird_1inten_mort")
self.dbt_bird_low_ld50 = pd.Series([], dtype="float", name="dbt_bird_low_ld50")
self.dbt_bird_hc05 = pd.Series([], dtype="float", name="dbt_bird_hc05")
self.dbt_bird_hc50 = pd.Series([], dtype="float", name="dbt_bird_hc50")
self.dbt_bird_hc95 = pd.Series([], dtype="float", name="dbt_bird_hc95")
self.dbt_bird_sub_direct = pd.Series([], dtype="float", name="dbt_bird_sub_direct")
self.dbt_bird_sub_indirect = pd.Series([], dtype="float", name="dbt_bird_sub_indirect")
self.mineau_sca_fact = pd.Series([], dtype="float", name="mineau_sca_fact")
self.dbt_bird_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inmill_mort_wgt")
self.dbt_bird_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_bird_1inten_mort_wgt")
self.dbt_bird_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_bird_low_ld50_wgt")
self.dbt_bird_hc05_wgt = pd.Series([], dtype="float", name="dbt_bird_hc05_wgt")
self.dbt_bird_hc50_wgt = pd.Series([], dtype="float", name="dbt_bird_hc50_wgt")
self.dbt_bird_hc95_wgt = pd.Series([], dtype="float", name="dbt_bird_hc95_wgt")
self.dbt_bird_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_direct_wgt")
self.dbt_bird_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_bird_sub_indirect_wgt")
self.mineau_sca_fact_wgt = pd.Series([], dtype="float", name="mineau_sca_fact_wgt")
# dose based toxicity(dbt): reptiles, terrestrial-phase amphibians (mg-pest/kg-bw) & weight of test animal (grams)
self.dbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort")
self.dbt_reptile_1inten_mort = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort")
self.dbt_reptile_low_ld50 = pd.Series([], dtype="float", name="dbt_reptile_low_ld50")
self.dbt_reptile_sub_direct = pd.Series([], dtype="float", name="dbt_reptile_sub_direct")
self.dbt_reptile_sub_indirect = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect")
self.dbt_reptile_1inmill_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inmill_mort_wgt")
self.dbt_reptile_1inten_mort_wgt = pd.Series([], dtype="float", name="dbt_reptile_1inten_mort_wgt")
self.dbt_reptile_low_ld50_wgt = pd.Series([], dtype="float", name="dbt_reptile_low_ld50_wgt")
self.dbt_reptile_sub_direct_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_direct_wgt")
self.dbt_reptile_sub_indirect_wgt = pd.Series([], dtype="float", name="dbt_reptile_sub_indirect_wgt")
# concentration-based toxicity (cbt) : mammals (mg-pest/kg-diet food)
self.cbt_mamm_1inmill_mort = pd.Series([], dtype="float", name="cbt_mamm_1inmill_mort")
self.cbt_mamm_1inten_mort = pd.Series([], dtype="float", name="cbt_mamm_1inten_mort")
self.cbt_mamm_low_lc50 = pd.Series([], dtype="float", name="cbt_mamm_low_lc50")
self.cbt_mamm_sub_direct = pd.Series([], dtype="float", name="cbt_mamm_sub_direct")
self.cbt_mamm_grow_noec = pd.Series([], dtype="float", name="cbt_mamm_grow_noec")
self.cbt_mamm_grow_loec = pd.Series([], dtype="float", name="cbt_mamm_grow_loec")
self.cbt_mamm_repro_noec = pd.Series([], dtype="float", name="cbt_mamm_repro_noec")
self.cbt_mamm_repro_loec = pd.Series([], dtype="float", name="cbt_mamm_repro_loec")
self.cbt_mamm_behav_noec = pd.Series([], dtype="float", name="cbt_mamm_behav_noec")
self.cbt_mamm_behav_loec = pd.Series([], dtype="float", name="cbt_mamm_behav_loec")
self.cbt_mamm_sensory_noec = pd.Series([], dtype="float", name="cbt_mamm_sensory_noec")
self.cbt_mamm_sensory_loec = pd.Series([], dtype="float", name="cbt_mamm_sensory_loec")
self.cbt_mamm_sub_indirect = pd.Series([], dtype="float", name="cbt_mamm_sub_indirect")
# concentration-based toxicity (cbt) : birds (mg-pest/kg-diet food)
self.cbt_bird_1inmill_mort = pd.Series([], dtype="float", name="cbt_bird_1inmill_mort")
self.cbt_bird_1inten_mort = pd.Series([], dtype="float", name="cbt_bird_1inten_mort")
self.cbt_bird_low_lc50 = pd.Series([], dtype="float", name="cbt_bird_low_lc50")
self.cbt_bird_sub_direct = pd.Series([], dtype="float", name="cbt_bird_sub_direct")
self.cbt_bird_grow_noec = pd.Series([], dtype="float", name="cbt_bird_grow_noec")
self.cbt_bird_grow_loec = pd.Series([], dtype="float", name="cbt_bird_grow_loec")
self.cbt_bird_repro_noec = pd.Series([], dtype="float", name="cbt_bird_repro_noec")
self.cbt_bird_repro_loec = pd.Series([], dtype="float", name="cbt_bird_repro_loec")
self.cbt_bird_behav_noec = pd.Series([], dtype="float", name="cbt_bird_behav_noec")
self.cbt_bird_behav_loec = pd.Series([], dtype="float", name="cbt_bird_behav_loec")
self.cbt_bird_sensory_noec = pd.Series([], dtype="float", name="cbt_bird_sensory_noec")
self.cbt_bird_sensory_loec = pd.Series([], dtype="float", name="cbt_bird_sensory_loec")
self.cbt_bird_sub_indirect = pd.Series([], dtype="float", name="cbt_bird_sub_indirect")
# concentration-based toxicity (cbt) : reptiles, terrestrial-phase amphibians (mg-pest/kg-diet food)
self.cbt_reptile_1inmill_mort = pd.Series([], dtype="float", name="cbt_reptile_1inmill_mort")
self.cbt_reptile_1inten_mort = pd.Series([], dtype="float", name="cbt_reptile_1inten_mort")
self.cbt_reptile_low_lc50 = pd.Series([], dtype="float", name="cbt_reptile_low_lc50")
self.cbt_reptile_sub_direct = pd.Series([], dtype="float", name="cbt_reptile_sub_direct")
self.cbt_reptile_grow_noec = pd.Series([], dtype="float", name="cbt_reptile_grow_noec")
self.cbt_reptile_grow_loec = pd.Series([], dtype="float", name="cbt_reptile_grow_loec")
self.cbt_reptile_repro_noec = pd.Series([], dtype="float", name="cbt_reptile_repro_noec")
self.cbt_reptile_repro_loec = pd.Series([], dtype="float", name="cbt_reptile_repro_loec")
self.cbt_reptile_behav_noec = pd.Series([], dtype="float", name="cbt_reptile_behav_noec")
self.cbt_reptile_behav_loec = pd.Series([], dtype="float", name="cbt_reptile_behav_loec")
self.cbt_reptile_sensory_noec = pd.Series([], dtype="float", name="cbt_reptile_sensory_noec")
self.cbt_reptile_sensory_loec = pd.Series([], dtype="float", name="cbt_reptile_sensory_loec")
self.cbt_reptile_sub_indirect = pd.Series([], dtype="float", name="cbt_reptile_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body weight (mg-pest/kg-bw(ww))
self.cbt_inv_bw_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inmill_mort")
self.cbt_inv_bw_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_bw_1inten_mort")
self.cbt_inv_bw_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_bw_low_lc50")
self.cbt_inv_bw_sub_direct = pd.Series([], dtype="float", name="cbt_inv_bw_sub_direct")
self.cbt_inv_bw_grow_noec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_noec")
self.cbt_inv_bw_grow_loec = pd.Series([], dtype="float", name="cbt_inv_bw_grow_loec")
self.cbt_inv_bw_repro_noec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_noec")
self.cbt_inv_bw_repro_loec = pd.Series([], dtype="float", name="cbt_inv_bw_repro_loec")
self.cbt_inv_bw_behav_noec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_noec")
self.cbt_inv_bw_behav_loec = pd.Series([], dtype="float", name="cbt_inv_bw_behav_loec")
self.cbt_inv_bw_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_noec")
self.cbt_inv_bw_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_bw_sensory_loec")
self.cbt_inv_bw_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_bw_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates body diet (mg-pest/kg-food(ww))
self.cbt_inv_food_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inmill_mort")
self.cbt_inv_food_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_food_1inten_mort")
self.cbt_inv_food_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_food_low_lc50")
self.cbt_inv_food_sub_direct = pd.Series([], dtype="float", name="cbt_inv_food_sub_direct")
self.cbt_inv_food_grow_noec = pd.Series([], dtype="float", name="cbt_inv_food_grow_noec")
self.cbt_inv_food_grow_loec = pd.Series([], dtype="float", name="cbt_inv_food_grow_loec")
self.cbt_inv_food_repro_noec = pd.Series([], dtype="float", name="cbt_inv_food_repro_noec")
self.cbt_inv_food_repro_loec = pd.Series([], dtype="float", name="cbt_inv_food_repro_loec")
self.cbt_inv_food_behav_noec = pd.Series([], dtype="float", name="cbt_inv_food_behav_noec")
self.cbt_inv_food_behav_loec = pd.Series([], dtype="float", name="cbt_inv_food_behav_loec")
self.cbt_inv_food_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_noec")
self.cbt_inv_food_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_food_sensory_loec")
self.cbt_inv_food_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_food_sub_indirect")
# concentration-based toxicity (cbt) : invertebrates soil (mg-pest/kg-soil(dw))
self.cbt_inv_soil_1inmill_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inmill_mort")
self.cbt_inv_soil_1inten_mort = pd.Series([], dtype="float", name="cbt_inv_soil_1inten_mort")
self.cbt_inv_soil_low_lc50 = pd.Series([], dtype="float", name="cbt_inv_soil_low_lc50")
self.cbt_inv_soil_sub_direct = pd.Series([], dtype="float", name="cbt_inv_soil_sub_direct")
self.cbt_inv_soil_grow_noec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_noec")
self.cbt_inv_soil_grow_loec = pd.Series([], dtype="float", name="cbt_inv_soil_grow_loec")
self.cbt_inv_soil_repro_noec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_noec")
self.cbt_inv_soil_repro_loec = pd.Series([], dtype="float", name="cbt_inv_soil_repro_loec")
self.cbt_inv_soil_behav_noec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_noec")
self.cbt_inv_soil_behav_loec = pd.Series([], dtype="float", name="cbt_inv_soil_behav_loec")
self.cbt_inv_soil_sensory_noec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_noec")
self.cbt_inv_soil_sensory_loec = pd.Series([], dtype="float", name="cbt_inv_soil_sensory_loec")
self.cbt_inv_soil_sub_indirect = pd.Series([], dtype="float", name="cbt_inv_soil_sub_indirect")
# application rate-based toxicity (arbt) : mammals (lbs active ingredient/Acre)
self.arbt_mamm_mort = pd.Series([], dtype="float", name="arbt_mamm_mort")
self.arbt_mamm_growth = pd.Series([], dtype="float", name="arbt_mamm_growth")
self.arbt_mamm_repro = pd.Series([], dtype="float", name="arbt_mamm_repro")
self.arbt_mamm_behav = | pd.Series([], dtype="float", name="arbt_mamm_behav") | pandas.Series |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, MaxPool2D, Flatten, GlobalAveragePooling2D, add, average, \
maximum
import tensorflow_addons as tfa
from tensorflow_docs import modeling
import tensorflow_datasets as tfds
import tensorflow_hub as hub
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import pathlib
import shutil
tfds.disable_progress_bar() # disable tqdm progress bar
print("TensorFlow Version: ", tf.__version__)
print("Number of GPU available: ", len(tf.config.experimental.list_physical_devices("GPU")))
def read_and_label(file_path):
img = tf.io.read_file(file_path)
img = decode_img(img)
label = get_label(file_path)
return img, label
def decode_img(img):
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
return tf.reshape(tf.where(parts[-4] == CLASS_NAMES), [])
def augment(image, label):
image = tf.image.random_hue(image, max_delta=0.05, seed=5)
image = tf.image.random_contrast(image, 0.95, 1.05, seed=5) # tissue quality
image = tf.image.random_saturation(image, 0.95, 1.05, seed=5) # stain quality
image = tf.image.random_brightness(image, max_delta=0.05) # tissue thickness, glass transparency (clean)
image = tf.image.random_flip_left_right(image, seed=5) # cell orientation
image = tf.image.random_flip_up_down(image, seed=5) # cell orientation
image = tf.image.rot90(image, tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)) # cell orientation
return image, label
IMG_HEIGHT = 100
IMG_WIDTH = 100
BATCH_SIZE = 32
val_fraction = 10
# list location of all training images
data_dir = r'C:\Users\kuki\Desktop\Research\Skin\RCNN data\train'
data_dir = pathlib.Path(data_dir)
train_image_count = len(list(data_dir.glob('*\*\image\*.jpg')))
CLASS_NAMES = np.array(
[item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt" and item.name != ".DS_store"])
list_ds = tf.data.Dataset.list_files(str(data_dir / '*/*/image/*'))
AUTOTUNE = tf.data.experimental.AUTOTUNE
labeled_ds = list_ds.map(read_and_label, num_parallel_calls=AUTOTUNE)
# plt.figure(figsize=(10,10))
# for idx,elem in enumerate(labeled_ds.take(25)):
# img = elem[0]
# label = elem[1]
# ax = plt.subplot(5,5,idx+1)
# plt.imshow(img)
# plt.title(CLASS_NAMES[label].title())
# plt.axis('off')
test_data_dir = r'C:\Users\kuki\Desktop\Research\Skin\RCNN data\test'
test_data_dir = pathlib.Path(test_data_dir)
test_image_count = len(list(test_data_dir.glob('*\*\image\*.jpg')))
test_list_ds = tf.data.Dataset.list_files(str(test_data_dir / '*\*\image\*'))
test_labeled_ds = test_list_ds.map(read_and_label, num_parallel_calls=AUTOTUNE)
val_image_count = test_image_count // 100 * val_fraction  # // is floor division, e.g. 15 // 2 == 7
STEPS_PER_EPOCH = train_image_count // BATCH_SIZE
TEST_STEPS = test_image_count // BATCH_SIZE
VALIDATION_STEPS = val_image_count // BATCH_SIZE
shuffle_buffer_size = 3000  # fill a 3000-element buffer and sample uniformly from it when shuffling
train_ds = (labeled_ds
# .skip(val_image_count)
.cache("./cache/fibro_train.tfcache")
.shuffle(buffer_size=shuffle_buffer_size)
.repeat()
.batch(BATCH_SIZE)
.map(augment, num_parallel_calls=AUTOTUNE) # always batch before mapping
.prefetch(buffer_size=AUTOTUNE)
)
# no shuffle, augment for validation and test dataset
val_ds = (test_labeled_ds
.shuffle(buffer_size=shuffle_buffer_size)
.take(val_image_count)
.cache("./cache/fibro_val.tfcache")
.repeat()
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE))
test_ds = (test_labeled_ds
.cache("./cache/fibro_test.tfcache")
.repeat()
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE) # time it takes to produce next element
)
checkpoint_dir = "training_1"
shutil.rmtree(checkpoint_dir, ignore_errors=True)
def get_callbacks(name):
return [
modeling.EpochDots(),
tf.keras.callbacks.EarlyStopping(monitor='val_sparse_categorical_crossentropy',
patience=100, restore_best_weights=True),
# tf.keras.callbacks.TensorBoard(log_dir/name, histogram_freq=1),
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir + "/{}/cp.ckpt".format(name),
verbose=0,
monitor='val_sparse_categorical_crossentropy',
save_weights_only=True,
save_best_only=True),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_sparse_categorical_crossentropy',
factor=0.1, patience=50, verbose=0, mode='auto',
min_delta=0.0001, cooldown=0, min_lr=0),
]
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
1e-3,
decay_steps=STEPS_PER_EPOCH * 100,
decay_rate=1,
staircase=False)
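# InverseTimeDecay with staircase=False: lr(step) = 1e-3 / (1 + decay_rate * step / decay_steps),
# so the learning rate halves after roughly 100 epochs' worth of steps. Note this schedule only
# takes effect if it is what gets passed as `lr` to compilefit below (sketch of the intent).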
def compilefit(model, name, lr, max_epochs=1000):
    # tfa's AdamW takes weight_decay as its first argument; the 1e-4 value here is an assumption
    optimizer = tfa.optimizers.AdamW(weight_decay=1e-4, learning_rate=lr)
model.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'accuracy'])
model_history = model.fit(train_ds,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=max_epochs,
verbose=0,
validation_data=val_ds,
callbacks=get_callbacks(name),
validation_steps=VALIDATION_STEPS,
use_multiprocessing=True
)
namename = os.path.dirname(name)
if not os.path.isdir(os.path.abspath(namename)):
os.mkdir(os.path.abspath(namename))
if not os.path.isdir(os.path.abspath(name)):
os.mkdir(os.path.abspath(name))
model.save(pathlib.Path(name) / 'full_model.h5')
return model_history
def plotdf(dfobj, condition, lr=None):
dfobj.pop('loss')
dfobj.pop('val_loss')
dfobj1 = dfobj.copy()
dfobj2 = dfobj.copy()
dfobj.pop('lr')
dfobj.pop('sparse_categorical_crossentropy')
dfobj.pop('val_sparse_categorical_crossentropy')
pd.DataFrame(dfobj).plot(title=condition)
dfobj1.pop('lr')
dfobj1.pop('accuracy')
dfobj1.pop('val_accuracy')
pd.DataFrame(dfobj1).plot(title=condition)
    if lr != 'decay':
dfobj2.pop('sparse_categorical_crossentropy')
dfobj2.pop('val_sparse_categorical_crossentropy')
dfobj2.pop('accuracy')
dfobj2.pop('val_accuracy')
| pd.DataFrame(dfobj2) | pandas.DataFrame |
import pandas
data=pandas.read_csv("Day25/Squirrel_census_DA/2018_Central_Park_Squirrel_Census_Data.csv")
# print(data.head(5))
GraySquirel_count=len(data[data["Primary Fur Color"]=="Gray"])
CinnamonSquirel_count=len(data[data["Primary Fur Color"]=="Cinnamon"])
BlackSquirel_count=len(data[data["Primary Fur Color"]=="Black"])
print(GraySquirel_count)
print(CinnamonSquirel_count)
print(BlackSquirel_count)
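# Equivalent one-liner (sketch): data["Primary Fur Color"].value_counts() yields the same counts per color.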
#data frame
data_dict={
"Fur Color":["Gray","Cinnamon","Black"],
"Count":[GraySquirel_count,CinnamonSquirel_count,BlackSquirel_count]
}
# print(data_dict)
df= | pandas.DataFrame(data_dict) | pandas.DataFrame |
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import DatetimeIndex, Period, PeriodIndex, Series, notna, period_range
import pandas._testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx[0]
assert result == pd.Period("2011-01-01", freq="D")
result = idx[-1]
assert result == pd.Period("2011-01-31", freq="D")
result = idx[0:5]
expected = pd.period_range("2011-01-01", "2011-01-05", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[0:10:2]
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-07", "2011-01-09"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[-20:-5:3]
expected = pd.PeriodIndex(
["2011-01-12", "2011-01-15", "2011-01-18", "2011-01-21", "2011-01-24"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx[4::-1]
expected = PeriodIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="D",
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_getitem_index(self):
idx = period_range("2007-01", periods=10, freq="M", name="x")
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(["2007-02", "2007-04", "2007-06"], freq="M", name="x")
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False, True, True, False, False, False]]
exp = pd.PeriodIndex(
["2007-01", "2007-02", "2007-06", "2007-07"], freq="M", name="x"
)
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range("2007-01", periods=50, freq="M")
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts["2006"]
result = ts["2008"]
assert (result.index.year == 2008).all()
result = ts["2008":"2009"]
assert len(result) == 24
result = ts["2008-1":"2009-12"]
assert len(result) == 24
result = ts["2008Q1":"2009Q4"]
assert len(result) == 24
result = ts[:"2009"]
assert len(result) == 36
result = ts["2009":]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice("2008", "2009")]
def test_getitem_datetime(self):
rng = period_range(start="2012-01-01", periods=10, freq="W-MON")
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(["2011-01", "NaT", "2011-02"], freq="M")
assert idx[0] == pd.Period("2011-01", freq="M")
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert s[pd.Period("2011-01", freq="M")] == pd.Period("2011-01", freq="M")
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start="2012-01-01", periods=10, freq="D")
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period("2012-01-02", freq="D")]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
pidx = period_range(start="2013/01/01 09:00:00", freq="S", periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01/01 10:00"], s[3600:3660])
tm.assert_series_equal(s["2013/01/01 9H"], s[:3600])
for d in ["2013/01/01", "2013/01", "2013"]:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start="2013/01/01", freq="D", periods=400)
pidx = period_range(start="2013/01/01", freq="D", periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = [
"2014",
"2013/02",
"2013/01/02",
"2013/02/01 9H",
"2013/02/01 09:00",
]
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s["2013/01"], s[0:31])
tm.assert_series_equal(s["2013/02"], s[31:59])
tm.assert_series_equal(s["2014"], s[365:])
invalid = ["2013/02/01 9H", "2013/02/01 09:00"]
for v in invalid:
with pytest.raises(KeyError, match=v):
s[v]
class TestWhere:
@pytest.mark.parametrize("klass", [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range("20130101", periods=5, freq="D")
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq="D")
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range("20130101", periods=5, freq="D")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(), freq="D")
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
pi = period_range("20130101", periods=5, freq="D")
i2 = pi.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + pi[2:].tolist(), freq="D")
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
pi.where(notna(i2), i2.to_timestamp("S"))
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range("2011-01-01", "2011-01-31", freq="D", name="idx")
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period("2011-01-01", freq="D")
result = idx.take([5])
assert result == pd.Period("2011-01-06", freq="D")
result = idx.take([0, 1, 2])
expected = pd.period_range("2011-01-01", "2011-01-03", freq="D", name="idx")
tm.assert_index_equal(result, expected)
assert result.freq == "D"
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(
["2011-01-01", "2011-01-03", "2011-01-05"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(
["2011-01-08", "2011-01-05", "2011-01-02"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([3, 2, 5])
expected = PeriodIndex(
["2011-01-04", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
result = idx.take([-3, 2, 5])
expected = PeriodIndex(
["2011-01-29", "2011-01-03", "2011-01-06"], freq="D", name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == "D"
def test_take_misc(self):
index = period_range(start="1/1/10", end="12/31/12", freq="D", name="idx")
expected = PeriodIndex(
[
datetime(2010, 1, 6),
datetime(2010, 1, 7),
datetime(2010, 1, 9),
datetime(2010, 1, 13),
],
freq="D",
name="idx",
)
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", freq="D"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.PeriodIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", freq="D"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for( axis 0 with)? size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range("2000-1-1", freq="A", periods=10)
bad_period = Period("2012", "A")
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-03"])
pidx = PeriodIndex(["2011-01-01", "NaT", "2011-01-03"], freq="M")
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float("nan")) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc("foo")
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (
r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key"
)
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert p in idx0
assert str(p) in idx0
assert "2017-09-01 00:00:01" in idx0
assert "2017-09" in idx0
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period("2017-09-01")
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
p1 = pd.Period("2017-09-01")
p2 = pd.Period("2017-09-04")
p3 = pd.Period("2017-09-07")
tp0 = pd.Period("2017-08-31")
tp1 = pd.Period("2017-09-02")
tp2 = pd.Period("2017-09-05")
tp3 = pd.Period("2017-09-09")
idx = pd.PeriodIndex([p1, p2, p3])
tm.assert_numpy_array_equal(
idx.get_indexer(idx), np.array([0, 1, 2], dtype=np.intp)
)
target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2, -1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 0, 1, 2], dtype=np.intp)
)
res = idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 day"))
tm.assert_numpy_array_equal(res, np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_mismatched_dtype(self):
# Check that we return all -1s and do not raise or cast incorrectly
dti = pd.date_range("2016-01-01", periods=3)
pi = dti.to_period("D")
pi2 = dti.to_period("W")
expected = np.array([-1, -1, -1], dtype=np.intp)
result = pi.get_indexer(dti)
tm.assert_numpy_array_equal(result, expected)
# This should work in both directions
result = dti.get_indexer(pi)
tm.assert_numpy_array_equal(result, expected)
result = pi.get_indexer(pi2)
tm.assert_numpy_array_equal(result, expected)
# We expect the same from get_indexer_non_unique
result = pi.get_indexer_non_unique(dti)[0]
tm.assert_numpy_array_equal(result, expected)
result = dti.get_indexer_non_unique(pi)[0]
tm.assert_numpy_array_equal(result, expected)
result = pi.get_indexer_non_unique(pi2)[0]
tm.assert_numpy_array_equal(result, expected)
def test_get_indexer_non_unique(self):
# GH 17717
p1 = pd.Period("2017-09-02")
p2 = pd.Period("2017-09-03")
p3 = pd.Period("2017-09-04")
p4 = pd.Period("2017-09-05")
idx1 = pd.PeriodIndex([p1, p2, p1])
idx2 = pd.PeriodIndex([p2, p1, p3, p4])
result = idx1.get_indexer_non_unique(idx2)
expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
expected_missing = np.array([2, 3], dtype=np.int64)
        tm.assert_numpy_array_equal(result[0], expected_indexer)
        tm.assert_numpy_array_equal(result[1], expected_missing)
import unittest
import dolphindb as ddb
import pandas as pd
import numpy as np
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal, assert_series_equal
from setup import HOST, PORT, WORK_DIR, DATA_DIR
from datetime import datetime
class TestUploadObject(unittest.TestCase):
@classmethod
def setUp(cls):
cls.s = ddb.session()
cls.s.connect(HOST, PORT, "admin", "123456")
@classmethod
def tearDownClass(cls):
pass
def test_upload_int_scalar(self):
a = 1
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, 1)"), True)
re = self.s.run("a")
self.assertEqual(re, 1)
def test_upload_bool_scalar(self):
a = True
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, true)"), True)
re = self.s.run("a")
self.assertEqual(re, True)
def test_upload_float_scalar(self):
a = 5.5
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, 5.5, 1)"), True)
re = self.s.run("a")
self.assertEqual(re, 5.5)
def test_upload_complex_scalar(self):
pass
def test_upload_string_scalar(self):
a = 'Runoob'
self.s.upload({'a': a})
self.assertEqual(self.s.run("eqObj(a, 'Runoob')"), True)
re = self.s.run("a")
self.assertEqual(re, 'Runoob')
def test_upload_mix_list(self):
list = ['abcd', 786, 2.23, 'runoob', 70.2]
self.s.upload({'list': list})
self.assertEqual(self.s.run("eqObj(list, ['abcd', 786, 2.23, 'runoob', 70.2])"), True)
re = self.s.run("list")
self.assertEqual(re == list, True)
def test_upload_int_list(self):
a = [4, 5, 7, -3]
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [4, 5, 7, -3])"), True)
re = self.s.run("a")
self.assertEqual((re == a).all(), True)
def test_upload_string_list(self):
a = ['aaa', 'bbb', 'ccc']
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, ['aaa', 'bbb', 'ccc'])"), True)
re = self.s.run("a")
self.assertEqual((re == a).all(), True)
def test_upload_bool_list(self):
a = [True, False, False, True]
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [true, false, false, true])"), True)
re = self.s.run("a")
self.assertEqual((re == a).all(), True)
def test_upload_list_list(self):
a = [[1, 2, 3], [4, 5, 6]]
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a[0], [1, 2, 3])"), True)
self.assertEqual(self.s.run("eqObj(a[1], [4, 5, 6])"), True)
re = self.s.run("a")
assert_array_equal(re[0], np.array([1, 2, 3]))
assert_array_equal(re[1], np.array([4, 5, 6]))
def test_upload_list_list(self):
a = [[1, 2, 3], [4, 5, 6]]
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a[0], [1, 2, 3])"), True)
self.assertEqual(self.s.run("eqObj(a[1], [4, 5, 6])"), True)
re = self.s.run("a")
assert_array_equal(re, [[1, 2, 3], [4, 5, 6]])
def test_upload_tuple(self):
tuple = ('abcd', 786 , 2.23, 'runoob', 70.2)
self.s.upload({"tuple": tuple})
self.assertEqual(self.s.run("eqObj(tuple, ['abcd', 786, 2.23, 'runoob', 70.2])"), True)
re = self.s.run("tuple")
self.assertEqual(re, ['abcd', 786, 2.23, 'runoob', 70.2])
def test_upload_set(self):
a = set('abracadabra')
self.s.upload({'a': a})
self.assertEqual(self.s.run("eqObj(sort(a.keys()), `a`b`c`d`r)"), True)
re = self.s.run("a")
self.assertSetEqual(re, a)
def test_upload_pandas_series_without_index(self):
a = pd.Series([4, 7, -5, 3])
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [4,7,-5,3])"), True)
re = self.s.run("a")
assert_array_equal(re, [4, 7, -5, 3])
def test_upload_pandas_series_dtype_object(self):
a = pd.Series(['a', 'b', 'c', 'd'], dtype = "object")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, `a`b`c`d)"), True)
re = self.s.run("a")
assert_array_equal(re, ['a', 'b', 'c', 'd'])
def test_upload_pandas_series_dtype_int32(self):
a = pd.Series([1, 2, 3], dtype="int32")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1, 2, 3])"), True)
re = self.s.run("a")
assert_array_equal(re, [1, 2, 3])
def test_upload_pandas_series_dtype_int64(self):
a = pd.Series([1, 2, 3], dtype="int64")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1, 2, 3])"), True)
re = self.s.run("a")
assert_array_equal(re, [1, 2, 3])
def test_upload_pandas_series_dtype_float32(self):
a = pd.Series([1, 2, np.nan], dtype="float32")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1.0, 2.0, NULL])"), True)
re = self.s.run("a")
assert_array_equal(re, [1, 2, np.nan])
def test_upload_pandas_series_dtype_float64(self):
a = pd.Series([1, 2, np.nan], dtype="float64")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1.0, 2.0, NULL])"), True)
re = self.s.run("a")
assert_array_equal(re, [1, 2, np.nan])
def test_upload_pandas_series_dtype_datetime64(self):
a = pd.Series(['2018-07-01', '2019-07-01', '2019-10-01'], dtype="datetime64[ns]")
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [2018.07.01T00:00:00.000000000, 2019.07.01T00:00:00.000000000, 2019.10.01T00:00:00.000000000])"), True)
re = self.s.run("a")
assert_array_equal(re, np.array(['2018-07-01T00:00:00.000000000','2019-07-01T00:00:00.000000000','2019-10-01T00:00:00.000000000'], dtype="datetime64[ns]"))
def test_upload_pandas_series_with_index(self):
a = pd.Series([4, 7, -5, 3], index=['a', 'b', 'c', 'd'])
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [4,7,-5,3])"), True)
re = self.s.run("a")
assert_array_equal(re, [4, 7, -5, 3]) # index aborted
def test_upload_nan(self):
a = np.nan
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, int())"), True)
re = self.s.run("a")
self.assertEqual(pd.isnull(re), True)
def test_upload_array_with_nan(self):
a = [np.nan, 1, 2, 3]
self.s.upload({'a': a})
self.assertEqual(self.s.run("eqObj(a, [,1,2,3])"), True)
re = self.s.run("a")
assert_array_equal(re, [np.nan, 1, 2, 3])
def test_upload_dataframe(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [1.5, 1.7, 3.6, 2.4, 2.9]}
df = pd.DataFrame(data)
self.s.upload({"t1": df})
self.assertEqual(self.s.run("all(each(eqObj,t1.values(),"
"table(['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'] as state, "
"[2000, 2001, 2002, 2001, 2002] as year, "
"[1.5, 1.7, 3.6, 2.4, 2.9] as pop).values()))"), True)
re = self.s.run("t1")
assert_frame_equal(re, df)
def test_upload_dict(self):
data = {'state': ['Ohio', 'Ohio', 'Ohio', 'Nevada', 'Nevada'],
'year': [2000, 2001, 2002, 2001, 2002],
'pop': [5, 7, 6, 4, 9]}
self.s.upload({"d": data})
self.assertEqual(self.s.run("eqObj(d[`state].sort(), `Nevada`Nevada`Ohio`Ohio`Ohio)"), True)
self.assertEqual(self.s.run("eqObj(d[`year].sort(), [2000, 2001, 2001, 2002, 2002])"), True)
self.assertEqual(self.s.run("eqObj(d[`pop].sort(), [4, 5, 6, 7, 9])"), True)
re = self.s.run("d")
self.assertEqual((data['state'] == re['state']).all(), True)
self.assertEqual((data['year'] == re['year']).all(), True)
self.assertEqual((data['pop'] == re['pop']).all(), True)
def test_upload_numpy_one_dimension_array(self):
a = np.array(range(10))
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, 0..9)"), True)
re =self.s.run("a")
assert_array_equal(re, [0,1,2,3,4,5,6,7,8,9])
def test_upload_numpy_two_dimension_array(self):
a = np.array([[1, 2, 3], [4, 5, 6]])
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, 1 4 2 5 3 6$2:3)"), True)
re = self.s.run("a")
# TODO:BUG
# assert_array_equal(re, a)
assert_array_equal(re[0], a)
def test_upload_matrix(self):
a = self.s.run("cross(+, 1..5, 1..5)")
b = self.s.run("1..25$5:5")
self.s.upload({'a': a[0], 'b': b[0]})
self.assertEqual(self.s.run("eqObj(a, cross(+, 1..5, 1..5))"), True)
self.assertEqual(self.s.run("eqObj(b, 1..25$5:5)"), True)
re = self.s.run('a+b')
self.assertEqual((re[0][0] == [3, 9, 15, 21, 27]).all(), True)
self.assertEqual((re[0][1] == [5, 11, 17, 23, 29]).all(), True)
self.assertEqual((re[0][2] == [7, 13, 19, 25, 31]).all(), True)
self.assertEqual((re[0][3] == [9, 15, 21, 27, 33]).all(), True)
self.assertEqual((re[0][4] == [11, 17, 23, 29, 35]).all(), True)
def test_upload_numpy_eye_matrix(self):
a = np.eye(4)
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1,0,0,0,0,1,0,0,0,0,1,0,0,0,0,1]*1.0$4:4)"), True)
re = self.s.run("a")
assert_array_equal(re[0], [[1., 0., 0., 0.],[0., 1., 0., 0.],[0., 0., 1., 0.],[0., 0., 0., 1.]])
def test_upload_numpy_matrix(self):
a = np.matrix('1 2; 3 4')
self.s.upload({"a": a})
self.assertEqual(self.s.run("eqObj(a, [1,3,2,4]$2:2)"), True)
re = self.s.run("a")
assert_array_equal(re[0], [[1, 2],[3, 4]])
def test_upload_float32_dataframe(self):
pdf = pd.DataFrame({'tfloat': np.arange(1, 10, 1, dtype='float32')})
pdf.loc[1,:]=np.nan
self.s.upload({'t':pdf})
re=self.s.run("t")
assert_frame_equal(pdf, re, check_dtype=False)
def test_upload_numpy_scalar_dtype_datetime64_day(self):
a = np.datetime64('2012-06-08', 'D')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, 2012.06.08)"))
re = self.s.run('a')
self.assertEqual(a, re)
# TODO:
# a = np.datetime64('NaT', 'D')
# self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, date())"))
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_scalar_dtype_datetime64_month(self):
a = np.datetime64('2012-06', 'M')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, 2012.06M)"))
re = self.s.run('a')
self.assertEqual(a, re)
# TODO:
# a = np.datetime64('NaT', 'M')
# self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, month())"))
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_scalar_dtype_year(self):
pass
# a = np.datetime64('2012', 'Y')
# self.s.upload({'a': a})
def test_upload_numpy_scalar_dtype_datetime64_minute(self):
a = np.datetime64('2005-02-25T03:30', 'm')
self.s.upload({'a': a})
re = self.s.run('a')
self.assertEqual(a, re)
# TODO:
# a = np.datetime64('NaT', 'm')
# self.s.upload({'a': a})
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_scalar_dtype_datetime64_second(self):
a = np.datetime64('2005-02-25T03:30:25', 's')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, 2005.02.25T03:30:25)"))
re = self.s.run('a')
self.assertEqual(a, re)
# TODO:
# a = np.datetime64('NaT', 's')
# self.s.upload({'a': a})
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_scalar_dtype_datetime64_millisecond(self):
a = np.datetime64('2005-02-25T03:30:25.008', 'ms')
self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, 2005.02.05T03:30:25.008)"))
re = self.s.run('a')
self.assertEqual(re, a)
# TODO:
# a = np.datetime64('NaT', 'ms')
# self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, timestamp())"))
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_scalar_dtype_datetime64_nanosecond(self):
a = np.datetime64('2005-02-25T03:30:25.008007006', 'ns')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, 2005.02.25T03:30:25.008007006)"))
re = self.s.run('a')
self.assertEqual(re, a)
# TODO:
# a = np.datetime64('NaT', 'ns')
# self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, nanotimestamp())"))
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_array_dtype_datetime64_D(self):
a = np.array(['2012-06-12', '1968-12-05', '2003-09-28'], dtype='datetime64[D]')
self.s.upload({'aa': a})
self.assertTrue(self.s.run("eqObj(aa, [2012.06.12, 1968.12.05, 2003.09.28])"))
re = self.s.run("aa")
assert_array_equal(a, re)
def test_upload_dataframe_np_datetime64(self):
df = pd.DataFrame({'col1': np.array(['2012-06', '2012-07', '', '2024-12'], dtype = 'datetime64[M]'),
'col2': np.array(['2012-06-01', '', '2012-07-05', '2013-09-08'], dtype = 'datetime64[D]'),
'col3': np.array(['2012-06-01T12:30:00', '2012-06-01T12:30:01', '', ''], dtype = 'datetime64'),
'col4': np.array(['2012-06-08T12:30:00.000','','','2012-06-08T12:30:00.001'], dtype='datetime64'),
'col5': np.array(['2012-06-08T12:30:00.000001', '', '2012-06-08T12:30:00.000002', ''], dtype = 'datetime64')})
self.s.upload({'t': df})
script = '''
expected = table(nanotimestamp([2012.06.01, 2012.07.01, NULL, 2024.12.01]) as col1, nanotimestamp([2012.06.01, NULL, 2012.07.05, 2013.09.08]) as col2, nanotimestamp([2012.06.01T12:30:00, 2012.06.01T12:30:01, NULL, NULL]) as col3, nanotimestamp([2012.06.08T12:30:00.000, NULL, NULL, 2012.06.08T12:30:00.001]) as col4, [2012.06.08T12:30:00.000001000, NULL, 2012.06.08T12:30:00.000002000, NULL] as col5)
loop(eqObj, expected.values(), t.values())
'''
re = self.s.run(script)
assert_array_equal(re, [True, True, True, True, True])
def test_upload_dataframe_chinese_column_name(self):
df = pd.DataFrame({'编号':[1, 2, 3, 4, 5], '序号':['壹','贰','叁','肆','伍']})
self.s.upload({'t': df})
re = self.s.run("select * from t")
assert_array_equal(re['编号'], [1, 2, 3, 4, 5])
assert_array_equal(re['序号'], ['壹','贰','叁','肆','伍'])
def test_upload_numpy_scalar_dtype_datetime64_h(self):
a =np.datetime64("2020-01-01T01",'h')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, datehour(2020.01.01T01:00:00))"))
re = self.s.run('a')
self.assertEqual(a, re)
# a = np.datetime64('NaT', 'h')
# self.s.upload({'a': a})
# self.assertTrue(self.s.run("eqObj(a, datehour())"))
# re = self.s.run('a')
# self.assertEqual(a, re)
def test_upload_numpy_array_dtype_datetime64_h(self):
a = np.array(['2012-06-12T01', '1968-12-05T01', '2003-09-28T01'], dtype='datetime64[h]')
self.s.upload({'a': a})
self.assertTrue(self.s.run("eqObj(a, datehour([2012.06.12T01:00:00,1968.12.05T01:00:00,2003.09.28T01:00:00]))"))
re = self.s.run('a')
assert_array_equal(a, re)
b = np.repeat(np.datetime64("2020-01-01T01",'h'),500000)
self.s.upload({'b': b})
self.assertTrue(self.s.run("eqObj(b, take(datehour(2020.01.01T01:00:00),500000))"))
re = self.s.run('b')
assert_array_equal(b, re)
c = np.repeat(np.datetime64("Nat",'h'),500000)
self.s.upload({'c': c})
self.assertTrue(self.s.run("eqObj(c, take(datehour(),500000))"))
re = self.s.run('c')
assert_array_equal(c,re)
def test_upload_dict_twice(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22.2, 3.5, 21.4, 26.5]}
self.s.upload({"t1": data})
self.s.upload({"t1": data})
re=self.s.run("t1")
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_upload_dict_repeatedly(self):
data = {'id': [1, 2, 2, 3],
'date': np.array(['2019-02-04', '2019-02-05', '2019-02-09', '2019-02-13'], dtype='datetime64[D]'),
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22.2, 3.5, 21.4, 26.5]}
for i in range(1,100): {
self.s.upload({"t1": data})
}
re=self.s.run("t1")
assert_array_equal(data['id'], re['id'])
assert_array_equal(data['date'], re['date'])
assert_array_equal(data['ticker'], re['ticker'])
assert_array_equal(data['price'], re['price'])
def test_upload_list_twice(self):
data = [1,2,3]
self.s.upload({"t1": data})
self.s.upload({"t1": data})
re=self.s.run("t1")
assert_array_equal(data, re)
def test_upload_list_repeatedly(self):
data = [1,2,3]
for i in range(1,100): {
self.s.upload({"t1": data})
}
re=self.s.run("t1")
assert_array_equal(data, re)
def test_upload_array_twice(self):
data = np.array([1,2,3.0],dtype=np.double)
self.s.upload({'arr':data})
self.s.upload({'arr':data})
re = self.s.run("arr")
assert_array_equal(data, re)
def test_upload_array_repeatedly(self):
data = np.array([1,2,3.0],dtype=np.double)
for i in range(1,100):{
self.s.upload({'arr':data})
}
re = self.s.run("arr")
assert_array_equal(data, re)
def test_upload_DataFrame_twice(self):
df = pd.DataFrame({'id': np.int32([1, 2, 3, 6, 8]), 'x': np.int32([5, 4, 3, 2, 1])})
self.s.upload({'t1': df})
self.s.upload({'t1': df})
re = self.s.run("t1.x.avg()")
assert_array_equal(3.0, re)
def test_upload_DataFrame_repeatedly(self):
df = pd.DataFrame({'id': np.int32([1, 2, 3, 6, 8]), 'x': np.int32([5, 4, 3, 2, 1])})
for i in range(1,1000):{
self.s.upload({'t1': df})
}
re = self.s.run("t1.x.avg()")
assert_array_equal(3.0, re)
def test_upload_paramete(self):
df = pd.DataFrame({'id': np.int32([1, 2, 3, 6, 8]), 'x': np.int32([5, 4, 3, 2, 1])})
with self.assertRaises(TypeError):
self.s.upload(nameObjectDict_ERROR={'t1': df})
self.s.upload(nameObjectDict={'t1': df})
def test_upload_table_and_update(self):
tb=pd.DataFrame({'id': [1, 2, 2, 3],
'ticker': ['AAPL', 'AMZN', 'AMZN', 'A'],
'price': [22, 3.5, 21, 26]})
memtab="test_upload"
tt = self.s.table(data=tb.to_dict(), tableAliasName=memtab)
self.s.run("update " + memtab + " set wd_time=price")
cols = self.s.run("test_upload.colNames()")
self.assertEqual(len(cols), 4)
def test_upload_DataFrame_None(self):
        df = pd.DataFrame({'organization_code': [None, None, None]})
"""
Open AI Gym LunarLander-v2
<NAME>
2021
"""
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from stable_baselines3.common.callbacks import BaseCallback
from tqdm import tqdm
from os import listdir
from tensorflow.python.summary.summary_iterator import summary_iterator
class LogStepsCallback(BaseCallback):
def __init__(self, log_dir, verbose=0):
self.log_dir = log_dir
super(LogStepsCallback, self).__init__(verbose)
def _on_training_start(self) -> None:
self.results = pd.DataFrame(columns=['Reward', 'Done'])
        print("Training starts!")
def _on_step(self) -> bool:
if 'reward' in self.locals:
keys = ['reward', 'done']
else:
keys = ['rewards', 'dones']
self.results.loc[len(self.results)] = [self.locals[keys[0]][0], self.locals[keys[1]][0]]
return True
def _on_training_end(self) -> None:
self.results.to_csv(self.log_dir + 'training_data.csv', index=False)
        print("Training ends!")
class TqdmCallback(BaseCallback):
def __init__(self):
super().__init__()
self.progress_bar = None
def _on_training_start(self):
self.progress_bar = tqdm(total=self.locals['total_timesteps'])
def _on_step(self):
self.progress_bar.update(1)
return True
def _on_training_end(self):
self.progress_bar.close()
self.progress_bar = None
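# A minimal usage sketch (an assumption, not part of the original script): both callbacks
# can be passed together to a Stable-Baselines3 model's learn() call. The algorithm,
# environment id and log_dir below are illustrative placeholders.
#
#   from stable_baselines3 import PPO
#   log_dir = "./logs/"
#   model = PPO("MlpPolicy", "LunarLander-v2", verbose=0)
#   model.learn(total_timesteps=100_000,
#               callback=[LogStepsCallback(log_dir=log_dir), TqdmCallback()])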
def save_dict_to_file(dict, path, txt_name='hyperparameter_dict'):
f = open(path + '/' + txt_name + '.txt', 'w')
f.write(str(dict))
f.close()
def calc_episode_rewards(training_data):
# Calculate the rewards for each training episode
episode_rewards = []
temp_reward_sum = 0
for step in range(training_data.shape[0]):
reward, done = training_data.iloc[step, :]
temp_reward_sum += reward
if done:
episode_rewards.append(temp_reward_sum)
temp_reward_sum = 0
    result = pd.DataFrame(columns=['Reward'])
    result['Reward'] = episode_rewards
    return result  # assumed: the per-episode rewards are returned as a single-column frame
import pandas as pd
import numpy as np
import math
from statistics import mean
from sklearn.decomposition import PCA
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error, mean_squared_log_error
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
# root_path = os.path.abspath(os.path.join(root_path,os.path.pardir))
from metrics_ import PPTS,mean_absolute_percentage_error
def read_two_stage(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(predictions))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
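# Hypothetical usage sketch (the station name below is an illustrative assumption; the
# decomposer and predict_pattern values mirror ones referenced later in this module):
#
#   records, preds, r2, nrmse, mae, mape, ppts, cost = read_two_stage(
#       station="Huaxian", decomposer="dwt",
#       predict_pattern="one_step_1_month_forecast")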
def read_two_stage_traindev_test(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
test_predss = pd.DataFrame()
dev_predss = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
test_y = data['test_y'][0:120]
dev_y = data['dev_y'][0:120]
dev_pred=data['dev_pred'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
dev_pred=dev_pred.reset_index(drop=True)
test_pred=test_pred.reset_index(drop=True)
test_predss = pd.concat([test_predss,test_pred],axis=1)
dev_predss = pd.concat([dev_predss,dev_pred],axis=1)
test_predss = test_predss.mean(axis=1)
dev_predss = dev_predss.mean(axis=1)
test_y = test_y.values.flatten()
dev_y = dev_y.values.flatten()
test_predss = test_predss.values.flatten()
dev_predss = dev_predss.values.flatten()
test_nse=r2_score(y_true=test_y,y_pred=test_predss)
test_nrmse=math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_predss))/(sum(test_y)/len(test_predss))
test_mae=mean_absolute_error(y_true=test_y,y_pred=test_predss)
test_mape=mean_absolute_percentage_error(y_true=test_y,y_pred=test_predss)
test_ppts=PPTS(y_true=test_y,y_pred=test_predss,gamma=5)
dev_nse=r2_score(y_true=dev_y,y_pred=dev_predss)
dev_nrmse=math.sqrt(mean_squared_error(y_true=dev_y,y_pred=dev_predss))/(sum(dev_y)/len(dev_predss))
dev_mae=mean_absolute_error(y_true=dev_y,y_pred=dev_predss)
dev_mape=mean_absolute_percentage_error(y_true=dev_y,y_pred=dev_predss)
dev_ppts=PPTS(y_true=dev_y,y_pred=dev_predss,gamma=5)
metrics_dict={
"dev_nse":dev_nse,
"dev_nrmse":dev_nrmse,
"dev_mae":dev_mae,
"dev_mape":dev_mape,
"dev_ppts":dev_ppts,
"test_nse":test_nse,
"test_nrmse":test_nrmse,
"test_mae":test_mae,
"test_mape":test_mape,
"test_ppts":test_ppts,
"time_cost":time_cost,
}
time_cost=mean(time_cost)
return dev_y,dev_predss,test_y,test_predss,metrics_dict
def read_two_stage_max(station,decomposer,predict_pattern,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"+predict_pattern+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+predict_pattern+"\\"
predictions = pd.DataFrame()
time_cost=[]
r2list=[]
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
r2list.append(data['test_r2'][0])
print("one-month NSE LIST:{}".format(r2list))
max_id = r2list.index(max(r2list))
print("one-month max id:{}".format(max_id))
model_name = station+"_"+decomposer+"_esvr_"+predict_pattern+"_seed"+str(max_id+1)+".csv"
data = pd.read_csv(model_path+model_name)
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
records = records.values.flatten()
predictions = test_pred.values.flatten()
r2=data['test_r2'][0]
nrmse=data['test_nrmse'][0]
mae=data['test_mae'][0]
mape=data['test_mape'][0]
ppts=data['test_ppts'][0]
time_cost=data['time_cost'][0]
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pure_esvr(station):
model_path = root_path+"\\"+station+"\\projects\\esvr\\"
predictions = pd.DataFrame()
time_cost=[]
for j in range(1,11):
model_name = station+"_esvr_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
if j==1:
records = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
time_cost.append(data['time_cost'][0])
test_pred=test_pred.reset_index(drop=True)
predictions = pd.concat([predictions,test_pred],axis=1)
predictions = predictions.mean(axis=1)
records = records.values.flatten()
predictions = predictions.values.flatten()
r2=r2_score(y_true=records,y_pred=predictions)
nrmse=math.sqrt(mean_squared_error(y_true=records,y_pred=predictions))/(sum(records)/len(records))
mae=mean_absolute_error(y_true=records,y_pred=predictions)
mape=mean_absolute_percentage_error(y_true=records,y_pred=predictions)
ppts=PPTS(y_true=records,y_pred=predictions,gamma=5)
time_cost=mean(time_cost)
return records,predictions,r2,nrmse,mae,mape,ppts,time_cost
def read_pca_metrics(station,decomposer,start_component,stop_component,wavelet_level="db10-2"):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\"+wavelet_level+"\\one_step_1_month_forecast\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\data\\one_step_1_month_forecast\\"
train = pd.read_csv(model_path+"minmax_unsample_train.csv")
dev = pd.read_csv(model_path+"minmax_unsample_dev.csv")
test = pd.read_csv(model_path+"minmax_unsample_test.csv")
norm_id=pd.read_csv(model_path+"norm_unsample_id.csv")
sMax = (norm_id['series_max']).values
sMin = (norm_id['series_min']).values
    # Concatenate the training, development and testing samples
samples = pd.concat([train,dev,test],axis=0)
samples = samples.reset_index(drop=True)
# Renormalized the entire samples
samples = np.multiply(samples + 1,sMax - sMin) / 2 + sMin
y = samples['Y']
X = samples.drop('Y',axis=1)
pca = PCA(n_components='mle')
pca.fit(X)
n_components_pca_mle = pca.n_components_
print("n_components_pca_mle:{}".format(n_components_pca_mle))
mle = X.shape[1]-n_components_pca_mle
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
for i in range(start_component,stop_component+1):
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\one_step_1_month_forecast_with_pca_"+str(i)+"\\"
# averaging the trained svr with different seed
test_pred_df = pd.DataFrame()
for j in range(1,11):
model_name = station+"_"+decomposer+"_esvr_one_step_1_month_forecast_with_pca_"+str(i)+"_seed"+str(j)+".csv"
data = pd.read_csv(model_path+model_name)
test_y = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
test_pred_df = pd.concat([test_pred_df,test_pred],axis=1)
test_pred = test_pred_df.mean(axis=1)
test_y = test_y.values
test_pred = test_pred.values
print(type(test_y))
print(type(test_pred))
r2.append(r2_score(y_true=test_y,y_pred=test_pred))
nrmse.append(math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_pred))/(sum(test_y)/len(test_y)))
mae.append(mean_absolute_error(y_true=test_y,y_pred=test_pred))
mape.append(mean_absolute_percentage_error(y_true=test_y,y_pred=test_pred))
ppts.append(PPTS(y_true=test_y,y_pred=test_pred,gamma=5))
pc0_records,pc0_predictions,pc0_r2,pc0_nrmse,pc0_mae,pc0_mape,pc0_ppts,pc0_time_cost=read_two_stage(station=station,decomposer=decomposer,predict_pattern="one_step_1_month_forecast",)
r2.append(pc0_r2)
nrmse.append(pc0_nrmse)
mae.append(pc0_mae)
mape.append(pc0_mape)
ppts.append(pc0_ppts)
r2.reverse()
nrmse.reverse()
mae.reverse()
mape.reverse()
ppts.reverse()
return mle,r2,nrmse,mae,mape,ppts
def read_long_leading_time(station,decomposer,mode='new',wavelet_level="db10-2"):
records=[]
predictions=[]
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"
m1_records,m1_predictions,m1_r2,m1_nrmse,m1_mae,m1_mape,m1_ppts,m1_time_cost=read_two_stage(station=station,decomposer=decomposer,predict_pattern="one_step_1_month_forecast",)
records.append(m1_records)
predictions.append(m1_predictions)
r2.append(m1_r2)
nrmse.append(m1_nrmse)
mae.append(m1_mae)
mape.append(m1_mape)
ppts.append(m1_ppts)
# averaging the trained svr with different seed
test_pred_df = pd.DataFrame()
leading_times=[3,5,7,9]
for leading_time in leading_times:
print("Reading mode:{}".format(mode))
if mode==None:
file_path = model_path+"one_step_"+str(leading_time)+"_month_forecast//"
else:
file_path = model_path+"one_step_"+str(leading_time)+"_month_forecast_"+mode+"//"
for j in range(1,11):
if mode == None:
model_name = station+"_"+decomposer+"_esvr_one_step_"+str(leading_time)+"_month_forecast_seed"+str(j)+".csv"
else:
model_name = station+"_"+decomposer+"_esvr_one_step_"+str(leading_time)+"_month_forecast_"+mode+"_seed"+str(j)+".csv"
data = pd.read_csv(file_path+model_name)
test_y = data['test_y'][0:120]
test_pred=data['test_pred'][0:120]
test_pred_df = pd.concat([test_pred_df,test_pred],axis=1)
test_pred = test_pred_df.mean(axis=1)
test_y = test_y.values
test_pred = test_pred.values
print(type(test_y))
print(type(test_pred))
records.append(test_y)
predictions.append(test_pred)
r2.append(r2_score(y_true=test_y,y_pred=test_pred))
nrmse.append(math.sqrt(mean_squared_error(y_true=test_y,y_pred=test_pred))/(sum(test_y)/len(test_y)))
mae.append(mean_absolute_error(y_true=test_y,y_pred=test_pred))
mape.append(mean_absolute_percentage_error(y_true=test_y,y_pred=test_pred))
ppts.append(PPTS(y_true=test_y,y_pred=test_pred,gamma=5))
return records,predictions,r2,nrmse,mae,mape,ppts
def read_long_leading_time_max(station,decomposer,model='new',wavelet_level="db10-2"):
records=[]
predictions=[]
nrmse=[]
r2=[]
mae=[]
mape=[]
ppts=[]
if decomposer=="dwt":
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"+wavelet_level+"\\"
else:
model_path = root_path+"\\"+station+"_"+decomposer+"\\projects\\esvr\\"
m1_records,m1_predictions,m1_r2,m1_nrmse,m1_mae,m1_mape,m1_ppts,m1_time_cost=read_two_stage_max(station=station,decomposer=decomposer,predict_pattern="one_step_1_month_forecast",)
records.append(m1_records)
predictions.append(m1_predictions)
r2.append(m1_r2)
nrmse.append(m1_nrmse)
mae.append(m1_mae)
mape.append(m1_mape)
ppts.append(m1_ppts)
# averaging the trained svr with different seed
    test_pred_df = pd.DataFrame()
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
# standard libraries
import pandas as pd
# Binance wrapper libraries
from binance.client import Client
from binance.websockets import BinanceSocketManager
# In[ ]:
def web_socket_modularized():
"""
Signature: web_socket() -> 'BinanceSocketManager'
Docstring:
Deals with real-time data.
Also takes care of plotting.
It makes use of Binance' API ('https://api.binance.com/api/v3/')
Returns
-------
BinanceSocketManager
Example
-------
>>> web_socket()
"""
# real-time data and chart
# have to be global for real-time interaction
global rtdata
global rtchart
# initialize client without API keys, as they are not needed for now
client = Client("", "")
# this function runs every time the socket receives new data
def process_message(x):
global rtdata
global rtchart
# get the last minute from the existing data
t1 = pd.to_datetime(rtdata.tail(1).index.values[0])
# get the last minute from the new data
t2 = pd.to_datetime(x['k']['t'], unit='ms')
# convert the new data (kline tipe) into a dataframe
new_df = pd.DataFrame([x['k']])
# change the data type for t
new_df['t'] = pd.to_datetime(new_df['t'], unit='ms')
# change the data type for T
new_df['T'] = pd.to_datetime(new_df['T'], unit='ms')
# change to index into datetime with frequency = minutes
new_df.index = pd.DatetimeIndex(new_df.t, freq='min')
# drop the t column as it is now the index
new_df = new_df.drop('t', axis=1)
# reindex the dataframe using the existing data as a reference
new_df.reindex(columns=rtdata.columns)
# if the timestamps are different then append new values
if t1 != t2:
rtdata = pd.concat([rtdata, new_df], axis=0)
#if it's still the same minute then update the value
#this way we can see every change even before the candle is over
else:
rtdata.loc[rtdata.index[-1]] = new_df.loc[new_df.index[-1]]
# update the chart
rtchart.data[0].x=rtdata.index
rtchart.data[0].open=rtdata.o
rtchart.data[0].high=rtdata.h
rtchart.data[0].low=rtdata.l
rtchart.data[0].close=rtdata.c
# recenter the plot leaving some space for predictions
        rtchart.update_xaxes(range=[rtdata.index[-16], rtdata.index[-1] + pd.Timedelta(minutes=5)])
import matplotlib.pyplot as plt
import matplotlib.lines
import numpy as np
import pandas as pd
from sklearn.metrics import roc_auc_score, average_precision_score
from oolearning.converters.TwoClassConverterBase import TwoClassConverterBase
from oolearning.converters.TwoClassPrecisionRecallOptimizerConverter import \
TwoClassPrecisionRecallOptimizerConverter
from oolearning.converters.TwoClassRocOptimizerConverter import TwoClassRocOptimizerConverter
from oolearning.evaluators.TwoClassEvaluator import TwoClassEvaluator
class TwoClassProbabilityEvaluator(TwoClassEvaluator):
"""
Evaluates 2-class classification problems, where "probabilities" are supplied as well as a Converter (i.e.
    an object that encapsulates the logic to convert the probabilities to classes).
"""
def __init__(self,
converter: TwoClassConverterBase):
super().__init__(positive_class=converter.positive_class)
self._converter = converter
self._actual_classes = None
self._predicted_probabilities = None
self._auc_roc = None
self._auc_precision_recall = None
self._fpr = None
self._tpr = None
self._ideal_threshold_roc = None
self._ppv = None
self._ideal_threshold_ppv_tpr = None
@property
def auc_roc(self):
return self._auc_roc
@property
def auc_precision_recall(self):
return self._auc_precision_recall
def evaluate(self,
actual_values: np.ndarray, predicted_values: pd.DataFrame):
self._actual_classes = actual_values
self._predicted_probabilities = predicted_values
self._auc_roc = roc_auc_score(y_true=[1 if x == self._positive_class else 0 for x in actual_values],
y_score=predicted_values[self._positive_class])
# according to this (), average precision is same as auc of pr curve
self._auc_precision_recall = average_precision_score(y_true=[1 if x == self._positive_class else 0
for x in actual_values],
y_score=predicted_values[self._positive_class])
predicted_classes = self._converter.convert(values=predicted_values)
super().evaluate(actual_values=actual_values, predicted_values=predicted_classes)
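    # Hypothetical usage sketch (the converter instance and data below are illustrative
    # assumptions; any TwoClassConverterBase subclass supplies the probability-to-class rule):
    #
    #   evaluator = TwoClassProbabilityEvaluator(converter=my_converter)
    #   evaluator.evaluate(actual_values=y_actual, predicted_values=predicted_probability_df)
    #   evaluator.plot_roc_curve()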
def plot_roc_curve(self):
"""
:return: an ROC curve, indicating the point (threshold) that has the minimum distance to the
upper left corner (i.e. a perfect predictor). If a threshold is specified in the
class constructor, then that threshold is also annotated on the graph.
"""
if self._fpr is None or self._tpr is None or self._ideal_threshold_roc is None:
converter = TwoClassRocOptimizerConverter(actual_classes=self._actual_classes,
positive_class=self._converter.positive_class)
converter.convert(values=self._predicted_probabilities)
self._fpr = converter.false_positive_rates
self._tpr = converter.true_positive_rates
self._ideal_threshold_roc = converter.ideal_threshold
self._create_curve(x_coordinates=self._fpr,
y_coordinates=self._tpr,
threshold=0.5,
ideal_threshold=self._ideal_threshold_roc,
title='ROC (AUC={0})'.format(round(self.auc_roc, 3)),
x_label='False Positive Rate (1 - True Negative Rate)',
y_label='True Positive Rate',
corner='Left')
plt.tight_layout()
def plot_precision_recall_curve(self):
"""
# TODO document
"""
self.plot_ppv_tpr_curve()
def plot_ppv_tpr_curve(self):
"""
# TODO document
"""
if self._ppv is None or self._tpr is None or self._ideal_threshold_ppv_tpr is None:
converter = TwoClassPrecisionRecallOptimizerConverter(actual_classes=self._actual_classes,
positive_class=self._converter.positive_class) # noqa
converter.convert(values=self._predicted_probabilities)
self._ppv = converter.positive_predictive_values
self._tpr = converter.true_positive_rates
self._ideal_threshold_ppv_tpr = converter.ideal_threshold
self._create_curve(x_coordinates=self._tpr,
y_coordinates=self._ppv,
threshold=0.5,
ideal_threshold=self._ideal_threshold_ppv_tpr,
title='Positive Predictive Value vs. True Positive Rate',
x_label='True Positive Rate',
y_label='Positive Predictive Value',
corner='Right')
plt.tight_layout()
def plot_calibration(self):
"""
:return: calibration plot. Predicted probabilities are matched with the actual class and binned by
the prediction in intervals of 0.1. i.e. all probabilities/classes that have a prediction between
0 to 0.1 are grouped together, > 0.1 <= 0.2 are grouped together, and so on. For each group, the
percent of positive classes found is calculated. For example, in the group that has predicted
probabilities between 0 and 0.1, we would expect the average probability to be 0.05, and therefore
we would expect about 0.05 (i.e. 5%) of the group to be a positive class. The percentage of
positive classes for each bin is plotted. If the points fall along a 45 degree line, the model
has produced well-calibrated probabilities.
"""
calibration_data = pd.concat([self._predicted_probabilities[self._positive_class],
self._actual_classes], axis=1)
calibration_data.columns = ['probabilities', 'actual_classes']
bin_labels = ['[0, 0.1]', '(0.1, 0.2]', '(0.2, 0.3]', '(0.3, 0.4]', '(0.4, 0.5]', '(0.5, 0.6]',
'(0.6, 0.7]', '(0.7, 0.8]', '(0.8, 0.9]', '(0.9, 1.0]']
# .cut maintains distribution shape
bins = pd.cut(calibration_data.probabilities,
bins=np.arange(0.0, 1.1, 0.1),
include_lowest=True,
labels=bin_labels)
calibration_data['bins'] = bins
# calibration_data.bins.value_counts(ascending=True)
# calibration_data.head()
# calibration_data.sort_values(['bins', 'actual_classes'])
def calibration_grouping(x):
# noinspection PyTypeChecker
number_positive_events = sum(x.actual_classes == self._positive_class)
total_observations = len(x.actual_classes)
d = {'Positive Events Found': number_positive_events,
'Total Observations': total_observations,
'Actual Calibration': 0 if total_observations == 0
else number_positive_events / total_observations}
            return pd.Series(d, index=['Positive Events Found', 'Total Observations', 'Actual Calibration'])
import torch
from models.mobilenetv2 import MobileNetV2
import sklearn.metrics
from tqdm import tqdm
import argparse
import logging
from utils.dataset import LoadImagesAndLabels, preprocess
from utils.torch_utils import select_device
import yaml
import pandas as pd
import os
import numpy as np
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
logging.basicConfig()
def evaluate(opt):
if isinstance(opt.val_csv,str):
opt.val_csv = [opt.val_csv]
df_val = []
for df in opt.val_csv:
df = pd.read_csv(df)
df_val.append(df)
    df_val = pd.concat(df_val, axis=0)
"""genes_from_edge_table.py: This script pulls a single gene from the Coexpression edge table to identify adjacent nodes and prints the gene with neighbors to the screen."""
__author__ = "<NAME>"
import pandas as pd
from sys import argv, exit
if len(argv) != 3:
print("\npython {0} edge.table gene_symbol\n".format(argv[0]))
exit(1)
df = pd.read_csv(argv[1])
# run all tests:
# pytest -sv --cov-report term-missing --cov=workflow_array_ephys -p no:warnings tests/
# run one test, debug:
# pytest [above options] --pdb tests/tests_name.py -k function_name
import os
import sys
import pytest
import pandas as pd
import pathlib
import datajoint as dj
from workflow_array_ephys.paths import get_ephys_root_data_dir
from element_interface.utils import find_full_path
# ------------------- SOME CONSTANTS -------------------
_tear_down = False
verbose = False
test_user_data_dir = pathlib.Path('./tests/user_data')
test_user_data_dir.mkdir(exist_ok=True)
sessions_dirs = ['subject1/session1',
'subject2/session1',
'subject2/session2',
'subject3/session1',
'subject4/experiment1',
'subject5/session1',
'subject6/session1']
# -------------------- HELPER CLASS --------------------
class QuietStdOut:
"""If verbose set to false, used to quiet tear_down table.delete prints"""
def __enter__(self):
self._original_stdout = sys.stdout
sys.stdout = open(os.devnull, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout.close()
sys.stdout = self._original_stdout
# ---------------------- FIXTURES ----------------------
@pytest.fixture(autouse=True)
def dj_config():
""" If dj_local_config exists, load"""
if pathlib.Path('./dj_local_conf.json').exists():
dj.config.load('./dj_local_conf.json')
dj.config['safemode'] = False
dj.config['custom'] = {
'database.prefix': (os.environ.get('DATABASE_PREFIX')
or dj.config['custom']['database.prefix']),
'ephys_root_data_dir': (os.environ.get('EPHYS_ROOT_DATA_DIR').split(',') if os.environ.get('EPHYS_ROOT_DATA_DIR') else dj.config['custom']['ephys_root_data_dir'])
}
return
@pytest.fixture(autouse=True)
def test_data(dj_config):
"""If data does not exist or partial data is present,
attempt download with DJArchive to the first listed root directory"""
test_data_exists = True
for p in sessions_dirs:
try:
find_full_path(get_ephys_root_data_dir(), p)
except FileNotFoundError:
test_data_exists = False # If data not found
if not test_data_exists: # attempt to djArchive dowload
try:
dj.config['custom'].update({
'djarchive.client.endpoint':
os.environ['DJARCHIVE_CLIENT_ENDPOINT'],
'djarchive.client.bucket':
os.environ['DJARCHIVE_CLIENT_BUCKET'],
'djarchive.client.access_key':
os.environ['DJARCHIVE_CLIENT_ACCESSKEY'],
'djarchive.client.secret_key':
os.environ['DJARCHIVE_CLIENT_SECRETKEY']
})
except KeyError as e:
raise FileNotFoundError(
f' Full test data not available.'
f'\nAttempting to download from DJArchive,'
f' but no credentials found in environment variables.'
f'\nError: {str(e)}')
import djarchive_client
client = djarchive_client.client()
test_data_dir = get_ephys_root_data_dir()
if isinstance(test_data_dir, list): # if multiple root dirs, first
test_data_dir = test_data_dir[0]
client.download('workflow-array-ephys-benchmark',
'v2',
str(test_data_dir), create_target=False)
return
@pytest.fixture
def pipeline():
from workflow_array_ephys import pipeline
yield {'subject': pipeline.subject,
'lab': pipeline.lab,
'ephys': pipeline.ephys,
'probe': pipeline.probe,
'session': pipeline.session,
'get_ephys_root_data_dir': pipeline.get_ephys_root_data_dir}
if verbose and _tear_down:
pipeline.subject.Subject.delete()
elif not verbose and _tear_down:
with QuietStdOut():
pipeline.subject.Subject.delete()
@pytest.fixture
def subjects_csv():
""" Create a 'subjects.csv' file"""
input_subjects = pd.DataFrame(columns=['subject', 'sex',
'subject_birth_date',
'subject_description'])
input_subjects.subject = ['subject1', 'subject2',
'subject3', 'subject4',
'subject5', 'subject6']
input_subjects.sex = ['F', 'M', 'M', 'M', 'F', 'F']
input_subjects.subject_birth_date = ['2020-01-01 00:00:01',
'2020-01-01 00:00:01',
'2020-01-01 00:00:01',
'2020-01-01 00:00:01',
'2020-01-01 00:00:01',
'2020-01-01 00:00:01']
input_subjects.subject_description = ['dl56', 'SC035', 'SC038',
'oe_talab', 'rich', 'manuel']
input_subjects = input_subjects.set_index('subject')
subjects_csv_path = pathlib.Path('./tests/user_data/subjects.csv')
input_subjects.to_csv(subjects_csv_path) # write csv file
yield input_subjects, subjects_csv_path
subjects_csv_path.unlink() # delete csv file after use
@pytest.fixture
def ingest_subjects(pipeline, subjects_csv):
from workflow_array_ephys.ingest import ingest_subjects
_, subjects_csv_path = subjects_csv
ingest_subjects(subjects_csv_path, verbose=verbose)
return
@pytest.fixture
def sessions_csv(test_data):
""" Create a 'sessions.csv' file"""
    input_sessions = pd.DataFrame(columns=['subject', 'session_dir'])
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, OneHotEncoder
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression, Lasso, LassoCV, Ridge, RidgeCV
from sklearn.metrics import r2_score, mean_squared_error
"""
1. get_Xy(df): Separate features and target variable
2. get_score(X_train,X_val,y_train,y_val)
3. categorical(X_train,X_val,X_test,cat_variable)
"""
def get_Xy(df):
df = df.dropna()
target = 'opening_weekend_usa'
all_column = df.columns.values.tolist()
all_column.remove(target)
y = df[target]
X = df[all_column]
return X, y
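# Hypothetical usage sketch (df_movies is an assumed dataframe containing the
# 'opening_weekend_usa' target expected by get_Xy):
#
#   X, y = get_Xy(df_movies)
#   X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
#   fig, lr_model, y_pred = get_score(X_train, X_val, y_train, y_val)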
def get_score(X_train,X_val,y_train,y_val):
# fit linear regression to training data
lr_model = LinearRegression()
lr_model.fit(X_train, y_train)
y_pred = lr_model.predict(X_val)
# score fit model on validation data
train_score = lr_model.score(X_train, y_train)
val_score = lr_model.score(X_val, y_val)
rmse = np.sqrt(mean_squared_error(y_val, y_pred))
# report results
print('\nTrain R^2 score was:', train_score)
print('Validation R^2 score was:', val_score)
print(f'RMSE: {rmse:.2f} \n')
# print('Feature coefficient results:')
# for feature, coef in zip(X.columns, lr_model.coef_):
# print(feature, ':', f'{coef:.2f}')
# Visualization
fig, ax = plt.subplots(1, 1)
plt.scatter(y_val, y_pred, alpha=0.4)
ax.set_xlabel('Opening weekend revenue ($ in millions)',fontsize=20)
ax.set_ylabel('Prediction ($ in millions)',fontsize=20)
ax.set_title('R$^2$: %0.2f' % val_score, fontsize=20)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
x=np.linspace(0,0.7e2,50)
# x=np.linspace(4,9,50)
y=x
plt.plot(x,y,color='firebrick',linewidth=3,alpha=0.6)
plt.ylim(0,)
plt.xlim(0,)
return fig, lr_model, y_pred
def categorical_multilabel(X_train,X_val,X_test,cat_variable):
"""
Input: X_train,X_val,X_test,categorical_variable
Processing: preprocessing the three sets separately:
1. Separate continuous and categorical variable
2. Scaling + polynomial fit the conitnuous variables and get_dummies on the categorical variable
3. Combine back the continuous and categorical data
    Return: transformed X_train, X_val, X_test
"""
scaler = StandardScaler()
poly = PolynomialFeatures(degree=2,interaction_only = False)
# Train set
# Convert genre to dummies
X_train_genre = X_train[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
known_columns = X_train_genre.columns
# Scaling continuous variables
X_train_con = X_train[con_feature]
X_train_con_scaled = scaler.fit_transform(X_train_con)
X_train_con_scaled_df = pd.DataFrame(X_train_con_scaled, columns=X_train_con.columns, index=X_train_con.index)
X_train_poly = poly.fit_transform(X_train_con_scaled)
X_train_poly_df = pd.DataFrame(X_train_poly, columns=poly.get_feature_names(X_train_con.columns), index=X_train_con.index)
#Combine
# X_train = pd.concat([X_train_genre,X_train_con_scaled_df],axis=1)
X_train = pd.concat([X_train_genre,X_train_poly_df],axis=1)
# Val set
# Convert genre to dummies
X_val_genre = X_val[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
val_columns = X_val_genre.columns
X_val_genre = X_val_genre[[x for x in val_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in val_columns] }
X_val_genre = X_val_genre.assign(**fill_dict)
# Scaling continuous variables
X_val_con = X_val[con_feature]
X_val_con_scaled = scaler.transform(X_val_con)
X_val_con_scaled_df = pd.DataFrame(X_val_con_scaled, columns=X_val_con.columns, index=X_val_con.index)
X_val_poly = poly.transform(X_val_con_scaled)
X_val_poly_df = pd.DataFrame(X_val_poly, columns=poly.get_feature_names(X_val_con.columns), index=X_val_con.index)
#Combine
# X_val = pd.concat([X_val_genre,X_val_con_scaled_df],axis=1)
X_val = pd.concat([X_val_genre,X_val_poly_df],axis=1)
# Test set
# Convert genre to dummies
X_test_genre = X_test[cat_variable].str.join(sep='*').str.get_dummies(sep='*')
    test_columns = X_test_genre.columns
X_test_genre = X_test_genre[[x for x in test_columns if x in known_columns]]
fill_dict = { c : 0 for c in [x for x in known_columns if x not in test_columns] }
X_test_genre = X_test_genre.assign(**fill_dict)
# Scaling continuous variables
X_test_con = X_test[con_feature]
X_test_con_scaled = scaler.transform(X_test_con)
X_test_con_scaled_df = pd.DataFrame(X_test_con_scaled, columns=X_test_con.columns, index=X_test_con.index)
X_test_poly = poly.transform(X_test_con_scaled)
X_test_poly_df = pd.DataFrame(X_test_poly, columns=poly.get_feature_names(X_test_con.columns), index=X_test_con.index)
#Combine
# X_test = pd.concat([X_test_genre,X_test_con_scaled_df],axis=1)
    X_test = pd.concat([X_test_genre, X_test_poly_df], axis=1)
    return X_train, X_val, X_test
import datetime
import numpy
import pandas
__all__ = ['get_arrays']
def get_timescale(t, unit):
    ''' Take a timedelta or a numerical type, return two things:
1. A unit
2. A function that converts it to numerical form
'''
def get_timedelta_converter(t_factor):
return lambda td: td.total_seconds() * t_factor
    if not isinstance(t, (datetime.timedelta, pandas.Timedelta)):
# Assume numeric type
return None, lambda x: float(x)
for u, f in [('years', 365.25*24*60*60), ('days', 24*60*60),
('hours', 60*60), ('minutes', 60), ('seconds', 1)]:
if u == unit or (unit is None and t >= datetime.timedelta(seconds=f)):
return u, get_timedelta_converter(1./f)
raise Exception('Could not find unit for %f and %s' % (t, unit))
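# Illustrative behaviour sketch (assumed, based on the logic above):
#
#   unit, converter = get_timescale(pandas.Timedelta(days=90), None)
#   # unit == 'days'; converter(pandas.Timedelta(days=1)) == 1.0
#   unit, converter = get_timescale(7.5, None)
#   # unit is None; converter(7.5) == 7.5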
def get_groups(data, group_min_size, max_groups):
''' Picks the top groups out of a dataset
1. Remove groups with too few data points
2. Pick the top groups
3. Sort groups lexicographically
'''
group2count = {}
for group in data:
group2count[group] = group2count.get(group, 0) + 1
groups = [group for group, count in group2count.items() if count >= group_min_size]
if max_groups >= 0:
groups = sorted(groups, key=group2count.get, reverse=True)[:max_groups]
return sorted(groups, key=lambda g: (g is None, g)) # Put Nones last
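# Illustrative behaviour sketch (assumed, based on the logic above):
#
#   get_groups(['us', 'us', 'uk', 'de'], group_min_size=2, max_groups=-1)
#   # -> ['us']   (groups with fewer than 2 rows are dropped; None sorts last)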
def _sub(a, b):
# Computes a - b for a bunch of different cases
if isinstance(a, datetime.datetime) and a.tzinfo is not None:
return a.astimezone(b.tzinfo) - b
else:
# Either naive timestamps or numerical type
return a - b
def get_arrays(data, features=None, groups=None, created=None,
converted=None, now=None, unit=None,
group_min_size=0, max_groups=-1):
''' Converts a dataframe to a list of numpy arrays.
Generates either feature data, or group data.
:param data: Pandas dataframe
:param features: string (optional), refers to a column in the dataframe
containing features, each being a 1d-vector or list of features.
If not provided, then it it will look for a column in the dataframe
named "features". This argument can also be a list of columns.
:param groups: string (optional), refers to a column in the dataframe
containing the groups for each row. If not provided, then it will
look for a column in the dataframe named "groups".
:param created: string (optional), refers to a column in the dataframe
containing timestamps of when each item was "created". If not
provided, then it will look for a column in the dataframe named
"created".
:param converted: string, refers to a column in the dataframe
containing timestamps of when each item converted. If there is no
column containing creation values, then the converted values should
be timedeltas denoting time until conversion. If this argument is
not provided, then it will look for a column in the dataframe named
"created".
:param now: string (optional), refers to a column in the dataframe
containing the point in time up until which we have observed
non-conversion. If there is no column containing creation value,
then these values should be timedeltas. If this argument is not
provided, the current timestamp will be used.
:param unit: string (optional), time unit to use when converting to
numerical values. Has to be one of "years", "days", "hours",
"minutes", or "seconds". If not provided, then a choice will be
made based on the largest time interval in the inputs.
:param group_min_size: integer (optional), only include groups that
has at least this many observations
:param max_groups: integer (optional), only include the `n` largest
groups
:returns: tuple (unit, groups, arrays)
`unit` is the unit chosen. Will be one of "years", "days", "hours",
"minutes", or "seconds". If the `unit` parameter is passed, this
will be the same.
`groups` is a list of strings containing the groups. Will be `None`
if `groups` is not set.
`arrays` is a tuple of numpy arrays `(G, B, T)` or `(X, B, T)`
containing the transformed input in numerical format. `G`, `B`, `T`
will all be 1D numpy arrays. `X` will be a 2D numpy array.
'''
res = []
# First, construct either the `X` or the `G` array
if features is None and groups is None:
if 'group' in data.columns:
groups = 'group'
elif 'features' in data.columns:
features = 'features'
else:
raise Exception('Neither of the `features` or `group` parameters'
' was provided, and there was no `features` or'
' `groups` dataframe column')
if groups is not None:
groups_list = get_groups(data[groups], group_min_size, max_groups)
group2j = dict((group, j) for j, group in enumerate(groups_list))
# Remove rows for rare groups
data = data[data[groups].isin(group2j.keys())]
G = data[groups].apply(lambda g: group2j.get(g, -1)).values
res.append(G)
else:
groups_list = None
if type(features) == tuple:
features = list(features) # Otherwise sad Panda
X = numpy.array([numpy.array(z) for z in data[features].values])
res.append(X)
# Next, construct the `B` and `T` arrays
if converted is None:
if 'converted' in data.columns:
converted = 'converted'
else:
raise Exception('The `converted` parameter was not provided'
' and there was no `converted` dataframe column')
if now is None and 'now' in data.columns:
now = 'now'
if created is None and 'created' in data.columns:
created = 'created'
    B = ~pandas.isnull(data[converted])
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from code import visualize
# Iteration 13
def split_3(df, test_size=0.1, oversampling_ratio=1):
print('\nSplit - Train&Dev Size = ', 1-test_size, ' , Test Size = ', test_size, '.', sep='')
vcs_one = df['city_id'].value_counts()
vcs_one = np.array(vcs_one[vcs_one==1].index)
df['city_id'][df['city_id'].isin(vcs_one)]=-1
df_train_dev, df_test = train_test_split(df, test_size=test_size, shuffle=True, stratify=df['city_id'], random_state=0)
n = round(oversampling_ratio*len(df_train_dev))
df_sampled = df_train_dev.sample(n=n, replace=True, weights=1+np.log1p(df_train_dev['n_clicks']), random_state=0)
df_train_dev = pd.concat([df_train_dev, df_sampled])
X_train_dev = np.array(df_train_dev.drop(columns=['n_clicks']), dtype=float)
y_train_dev = np.array(df_train_dev['n_clicks'], dtype=float)
X_test = np.array(df_test.drop(columns=['n_clicks']), dtype=float)
y_test = np.array(df_test['n_clicks'], dtype=float)
    print('n_clicks summaries in train and test sets:', pd.DataFrame(y_train_dev).describe())  # .describe() is an assumed completion of the truncated call
import builtins
from io import StringIO
import numpy as np
import pytest
from pandas.errors import UnsupportedFunctionCall
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range, isna
import pandas._testing as tm
import pandas.core.nanops as nanops
from pandas.util import _test_decorators as td
@pytest.fixture(
params=[np.int32, np.int64, np.float32, np.float64],
ids=["np.int32", "np.int64", "np.float32", "np.float64"],
)
def numpy_dtypes_for_minmax(request):
"""
Fixture of numpy dtypes with min and max values used for testing
cummin and cummax
"""
dtype = request.param
min_val = (
np.iinfo(dtype).min if np.dtype(dtype).kind == "i" else np.finfo(dtype).min
)
max_val = (
np.iinfo(dtype).max if np.dtype(dtype).kind == "i" else np.finfo(dtype).max
)
return (dtype, min_val, max_val)
@pytest.mark.parametrize("agg_func", ["any", "all"])
@pytest.mark.parametrize("skipna", [True, False])
@pytest.mark.parametrize(
"vals",
[
["foo", "bar", "baz"],
["foo", "", ""],
["", "", ""],
[1, 2, 3],
[1, 0, 0],
[0, 0, 0],
[1.0, 2.0, 3.0],
[1.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[True, True, True],
[True, False, False],
[False, False, False],
[np.nan, np.nan, np.nan],
],
)
def test_groupby_bool_aggs(agg_func, skipna, vals):
df = DataFrame({"key": ["a"] * 3 + ["b"] * 3, "val": vals * 2})
# Figure out expectation using Python builtin
exp = getattr(builtins, agg_func)(vals)
# edge case for missing data with skipna and 'any'
if skipna and all(isna(vals)) and agg_func == "any":
exp = False
exp_df = DataFrame([exp] * 2, columns=["val"], index=Index(["a", "b"], name="key"))
result = getattr(df.groupby("key"), agg_func)(skipna=skipna)
tm.assert_frame_equal(result, exp_df)
def test_max_min_non_numeric():
# #2700
aa = DataFrame({"nn": [11, 11, 22, 22], "ii": [1, 2, 3, 4], "ss": 4 * ["mama"]})
result = aa.groupby("nn").max()
assert "ss" in result
result = aa.groupby("nn").max(numeric_only=False)
assert "ss" in result
result = aa.groupby("nn").min()
assert "ss" in result
result = aa.groupby("nn").min(numeric_only=False)
assert "ss" in result
def test_min_date_with_nans():
# GH26321
dates = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09", "2019-05-09"]), format="%Y-%m-%d"
).dt.date
df = pd.DataFrame({"a": [np.nan, "1", np.nan], "b": [0, 1, 1], "c": dates})
result = df.groupby("b", as_index=False)["c"].min()["c"]
expected = pd.to_datetime(
pd.Series(["2019-05-09", "2019-05-09"], name="c"), format="%Y-%m-%d"
).dt.date
tm.assert_series_equal(result, expected)
result = df.groupby("b")["c"].min()
expected.index.name = "b"
tm.assert_series_equal(result, expected)
def test_intercept_builtin_sum():
s = Series([1.0, 2.0, np.nan, 3.0])
grouped = s.groupby([0, 1, 2, 2])
result = grouped.agg(builtins.sum)
result2 = grouped.apply(builtins.sum)
expected = grouped.sum()
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
@pytest.mark.parametrize("f", [max, min, sum])
@pytest.mark.parametrize("keys", ["jim", ["jim", "joe"]]) # Single key # Multi-key
def test_builtins_apply(keys, f):
# see gh-8155
df = pd.DataFrame(np.random.randint(1, 50, (1000, 2)), columns=["jim", "joe"])
df["jolie"] = np.random.randn(1000)
fname = f.__name__
result = df.groupby(keys).apply(f)
ngroups = len(df.drop_duplicates(subset=keys))
assert_msg = f"invalid frame shape: {result.shape} (expected ({ngroups}, 3))"
assert result.shape == (ngroups, 3), assert_msg
tm.assert_frame_equal(
result, # numpy's equivalent function
df.groupby(keys).apply(getattr(np, fname)),
)
if f != sum:
expected = df.groupby(keys).agg(fname).reset_index()
expected.set_index(keys, inplace=True, drop=False)
tm.assert_frame_equal(result, expected, check_dtype=False)
tm.assert_series_equal(getattr(result, fname)(), getattr(df, fname)())
def test_arg_passthru():
# make sure that we are passing thru kwargs
# to our agg functions
# GH3668
# GH5724
df = pd.DataFrame(
{
"group": [1, 1, 2],
"int": [1, 2, 3],
"float": [4.0, 5.0, 6.0],
"string": list("abc"),
"category_string": pd.Series(list("abc")).astype("category"),
"category_int": [7, 8, 9],
"datetime": pd.date_range("20130101", periods=3),
"datetimetz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"timedelta": pd.timedelta_range("1 s", periods=3, freq="s"),
},
columns=[
"group",
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
],
)
expected_columns_numeric = Index(["int", "float", "category_int"])
# mean / median
expected = pd.DataFrame(
{
"category_int": [7.5, 9],
"float": [4.5, 6.0],
"timedelta": [pd.Timedelta("1.5s"), pd.Timedelta("3s")],
"int": [1.5, 3],
"datetime": [
pd.Timestamp("2013-01-01 12:00:00"),
pd.Timestamp("2013-01-03 00:00:00"),
],
"datetimetz": [
pd.Timestamp("2013-01-01 12:00:00", tz="US/Eastern"),
pd.Timestamp("2013-01-03 00:00:00", tz="US/Eastern"),
],
},
index=Index([1, 2], name="group"),
columns=["int", "float", "category_int", "datetime", "datetimetz", "timedelta"],
)
for attr in ["mean", "median"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_frame_equal(result.reindex_like(expected), expected)
# TODO: min, max *should* handle
# categorical (ordered) dtype
expected_columns = Index(
[
"int",
"float",
"string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["min", "max"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(
[
"int",
"float",
"string",
"category_string",
"category_int",
"datetime",
"datetimetz",
"timedelta",
]
)
for attr in ["first", "last"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "string", "category_int", "timedelta"])
result = df.groupby("group").sum()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = df.groupby("group").sum(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int"])
for attr in ["prod", "cumprod"]:
result = getattr(df.groupby("group"), attr)()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
# like min, max, but don't include strings
expected_columns = Index(
["int", "float", "category_int", "datetime", "datetimetz", "timedelta"]
)
for attr in ["cummin", "cummax"]:
result = getattr(df.groupby("group"), attr)()
# GH 15561: numeric_only=False set by default like min/max
tm.assert_index_equal(result.columns, expected_columns)
result = getattr(df.groupby("group"), attr)(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
expected_columns = Index(["int", "float", "category_int", "timedelta"])
result = getattr(df.groupby("group"), "cumsum")()
tm.assert_index_equal(result.columns, expected_columns_numeric)
result = getattr(df.groupby("group"), "cumsum")(numeric_only=False)
tm.assert_index_equal(result.columns, expected_columns)
def test_non_cython_api():
# GH5610
# non-cython calls should not include the grouper
df = DataFrame(
[[1, 2, "foo"], [1, np.nan, "bar"], [3, np.nan, "baz"]], columns=["A", "B", "C"]
)
g = df.groupby("A")
gni = df.groupby("A", as_index=False)
# mad
expected = DataFrame([[0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.mad()
tm.assert_frame_equal(result, expected)
expected = DataFrame([[1, 0.0], [3, np.nan]], columns=["A", "B"], index=[0, 1])
result = gni.mad()
tm.assert_frame_equal(result, expected)
# describe
expected_index = pd.Index([1, 3], name="A")
expected_col = pd.MultiIndex(
levels=[["B"], ["count", "mean", "std", "min", "25%", "50%", "75%", "max"]],
codes=[[0] * 8, list(range(8))],
)
expected = pd.DataFrame(
[
[1.0, 2.0, np.nan, 2.0, 2.0, 2.0, 2.0, 2.0],
[0.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
],
index=expected_index,
columns=expected_col,
)
result = g.describe()
tm.assert_frame_equal(result, expected)
expected = pd.concat(
[
df[df.A == 1].describe().unstack().to_frame().T,
df[df.A == 3].describe().unstack().to_frame().T,
]
)
expected.index = pd.Index([0, 1])
result = gni.describe()
tm.assert_frame_equal(result, expected)
# any
expected = DataFrame(
[[True, True], [False, True]], columns=["B", "C"], index=[1, 3]
)
expected.index.name = "A"
result = g.any()
tm.assert_frame_equal(result, expected)
# idxmax
expected = DataFrame([[0.0], [np.nan]], columns=["B"], index=[1, 3])
expected.index.name = "A"
result = g.idxmax()
tm.assert_frame_equal(result, expected)
def test_cython_api2():
# this takes the fast apply path
# cumsum (GH5614)
df = DataFrame([[1, 2, np.nan], [1, np.nan, 9], [3, 4, 9]], columns=["A", "B", "C"])
expected = DataFrame([[2, np.nan], [np.nan, 9], [4, 9]], columns=["B", "C"])
result = df.groupby("A").cumsum()
tm.assert_frame_equal(result, expected)
# GH 5755 - cumsum is a transformer and should ignore as_index
result = df.groupby("A", as_index=False).cumsum()
tm.assert_frame_equal(result, expected)
# GH 13994
result = df.groupby("A").cumsum(axis=1)
expected = df.cumsum(axis=1)
tm.assert_frame_equal(result, expected)
result = df.groupby("A").cumprod(axis=1)
expected = df.cumprod(axis=1)
tm.assert_frame_equal(result, expected)
def test_cython_median():
df = DataFrame(np.random.randn(1000))
df.values[::2] = np.nan
labels = np.random.randint(0, 50, size=1000).astype(float)
labels[::17] = np.nan
result = df.groupby(labels).median()
exp = df.groupby(labels).agg(nanops.nanmedian)
tm.assert_frame_equal(result, exp)
df = DataFrame(np.random.randn(1000, 5))
rs = df.groupby(labels).agg(np.median)
xp = df.groupby(labels).median()
tm.assert_frame_equal(rs, xp)
def test_median_empty_bins(observed):
df = pd.DataFrame(np.random.randint(0, 44, 500))
grps = range(0, 55, 5)
bins = pd.cut(df[0], grps)
result = df.groupby(bins, observed=observed).median()
expected = df.groupby(bins, observed=observed).agg(lambda x: x.median())
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", ["int8", "int16", "int32", "int64", "float32", "float64", "uint64"]
)
@pytest.mark.parametrize(
"method,data",
[
("first", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("last", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("min", {"df": [{"a": 1, "b": 1}, {"a": 2, "b": 3}]}),
("max", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}]}),
("nth", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 4}], "args": [1]}),
("count", {"df": [{"a": 1, "b": 2}, {"a": 2, "b": 2}], "out_type": "int64"}),
],
)
def test_groupby_non_arithmetic_agg_types(dtype, method, data):
# GH9311, GH6620
df = pd.DataFrame(
[{"a": 1, "b": 1}, {"a": 1, "b": 2}, {"a": 2, "b": 3}, {"a": 2, "b": 4}]
)
df["b"] = df.b.astype(dtype)
if "args" not in data:
data["args"] = []
if "out_type" in data:
out_type = data["out_type"]
else:
out_type = dtype
exp = data["df"]
df_out = pd.DataFrame(exp)
df_out["b"] = df_out.b.astype(out_type)
df_out.set_index("a", inplace=True)
grpd = df.groupby("a")
t = getattr(grpd, method)(*data["args"])
tm.assert_frame_equal(t, df_out)
@pytest.mark.parametrize(
"i",
[
(
Timestamp("2011-01-15 12:50:28.502376"),
Timestamp("2011-01-20 12:50:28.593448"),
),
(24650000000000001, 24650000000000002),
],
)
def test_groupby_non_arithmetic_agg_int_like_precision(i):
# see gh-6620, gh-9311
df = pd.DataFrame([{"a": 1, "b": i[0]}, {"a": 1, "b": i[1]}])
grp_exp = {
"first": {"expected": i[0]},
"last": {"expected": i[1]},
"min": {"expected": i[0]},
"max": {"expected": i[1]},
"nth": {"expected": i[1], "args": [1]},
"count": {"expected": 2},
}
for method, data in grp_exp.items():
if "args" not in data:
data["args"] = []
grouped = df.groupby("a")
res = getattr(grouped, method)(*data["args"])
assert res.iloc[0].b == data["expected"]
@pytest.mark.parametrize(
"func, values",
[
("idxmin", {"c_int": [0, 2], "c_float": [1, 3], "c_date": [1, 2]}),
("idxmax", {"c_int": [1, 3], "c_float": [0, 2], "c_date": [0, 3]}),
],
)
def test_idxmin_idxmax_returns_int_types(func, values):
# GH 25444
df = pd.DataFrame(
{
"name": ["A", "A", "B", "B"],
"c_int": [1, 2, 3, 4],
"c_float": [4.02, 3.03, 2.04, 1.05],
"c_date": ["2019", "2018", "2016", "2017"],
}
)
df["c_date"] = pd.to_datetime(df["c_date"])
result = getattr(df.groupby("name"), func)()
expected = pd.DataFrame(values, index=Index(["A", "B"], name="name"))
tm.assert_frame_equal(result, expected)
def test_fill_consistency():
# GH9221
# pass thru keyword arguments to the generated wrapper
# are set if the passed kw is None (only)
df = DataFrame(
index=pd.MultiIndex.from_product(
[["value1", "value2"], date_range("2014-01-01", "2014-01-06")]
),
columns=Index(["1", "2"], name="id"),
)
df["1"] = [
np.nan,
1,
np.nan,
np.nan,
11,
np.nan,
np.nan,
2,
np.nan,
np.nan,
22,
np.nan,
]
df["2"] = [
np.nan,
3,
np.nan,
np.nan,
33,
np.nan,
np.nan,
4,
np.nan,
np.nan,
44,
np.nan,
]
expected = df.groupby(level=0, axis=0).fillna(method="ffill")
result = df.T.groupby(level=0, axis=1).fillna(method="ffill").T
tm.assert_frame_equal(result, expected)
def test_groupby_cumprod():
# GH 4095
df = pd.DataFrame({"key": ["b"] * 10, "value": 2})
actual = df.groupby("key")["value"].cumprod()
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
df = pd.DataFrame({"key": ["b"] * 100, "value": 2})
actual = df.groupby("key")["value"].cumprod()
# if overflows, groupby product casts to float
# while numpy passes back invalid values
df["value"] = df["value"].astype(float)
expected = df.groupby("key")["value"].apply(lambda x: x.cumprod())
expected.name = "value"
tm.assert_series_equal(actual, expected)
def scipy_sem(*args, **kwargs):
from scipy.stats import sem
return sem(*args, ddof=1, **kwargs)
@pytest.mark.parametrize(
"op,targop",
[
("mean", np.mean),
("median", np.median),
("std", np.std),
("var", np.var),
("sum", np.sum),
("prod", np.prod),
("min", np.min),
("max", np.max),
("first", lambda x: x.iloc[0]),
("last", lambda x: x.iloc[-1]),
("count", np.size),
pytest.param("sem", scipy_sem, marks=td.skip_if_no_scipy),
],
)
def test_ops_general(op, targop):
df = DataFrame(np.random.randn(1000))
labels = np.random.randint(0, 50, size=1000).astype(float)
result = getattr(df.groupby(labels), op)().astype(float)
expected = df.groupby(labels).agg(targop)
tm.assert_frame_equal(result, expected)
def test_max_nan_bug():
raw = """,Date,app,File
-04-23,2013-04-23 00:00:00,,log080001.log
-05-06,2013-05-06 00:00:00,,log.log
-05-07,2013-05-07 00:00:00,OE,xlsx"""
df = pd.read_csv(StringIO(raw), parse_dates=[0])
gb = df.groupby("Date")
r = gb[["File"]].max()
e = gb["File"].max().to_frame()
tm.assert_frame_equal(r, e)
assert not r["File"].isna().any()
def test_nlargest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nlargest(3)
e = Series(
[7, 5, 3, 10, 9, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [3, 2, 1, 9, 5, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[3, 2, 1, 3, 3, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [2, 3, 1, 6, 5, 7]]),
)
tm.assert_series_equal(gb.nlargest(3, keep="last"), e)
def test_nlargest_mi_grouper():
# see gh-21411
npr = np.random.RandomState(123456789)
dts = date_range("20180101", periods=10)
iterables = [dts, ["one", "two"]]
idx = MultiIndex.from_product(iterables, names=["first", "second"])
s = Series(npr.randn(20), index=idx)
result = s.groupby("first").nlargest(1)
exp_idx = MultiIndex.from_tuples(
[
(dts[0], dts[0], "one"),
(dts[1], dts[1], "one"),
(dts[2], dts[2], "one"),
(dts[3], dts[3], "two"),
(dts[4], dts[4], "one"),
(dts[5], dts[5], "one"),
(dts[6], dts[6], "one"),
(dts[7], dts[7], "one"),
(dts[8], dts[8], "two"),
(dts[9], dts[9], "one"),
],
names=["first", "first", "second"],
)
exp_values = [
2.2129019979039612,
1.8417114045748335,
0.858963679564603,
1.3759151378258088,
0.9430284594687134,
0.5296914208183142,
0.8318045593815487,
-0.8476703342910327,
0.3804446884133735,
-0.8028845810770998,
]
expected = Series(exp_values, index=exp_idx)
tm.assert_series_equal(result, expected, check_exact=False, rtol=1e-3)
def test_nsmallest():
a = Series([1, 3, 5, 7, 2, 9, 0, 4, 6, 10])
b = Series(list("a" * 5 + "b" * 5))
gb = a.groupby(b)
r = gb.nsmallest(3)
e = Series(
[1, 2, 3, 0, 4, 6],
index=MultiIndex.from_arrays([list("aaabbb"), [0, 4, 1, 6, 7, 8]]),
)
tm.assert_series_equal(r, e)
a = Series([1, 1, 3, 2, 0, 3, 3, 2, 1, 0])
gb = a.groupby(b)
e = Series(
[0, 1, 1, 0, 1, 2],
index=MultiIndex.from_arrays([list("aaabbb"), [4, 1, 0, 9, 8, 7]]),
)
tm.assert_series_equal(gb.nsmallest(3, keep="last"), e)
@pytest.mark.parametrize("func", ["cumprod", "cumsum"])
def test_numpy_compat(func):
# see gh-12811
df = pd.DataFrame({"A": [1, 2, 1], "B": [1, 2, 3]})
g = df.groupby("A")
msg = "numpy operations are not valid with groupby"
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(1, 2, 3)
with pytest.raises(UnsupportedFunctionCall, match=msg):
getattr(g, func)(foo=1)
def test_cummin(numpy_dtypes_for_minmax):
dtype = numpy_dtypes_for_minmax[0]
min_val = numpy_dtypes_for_minmax[1]
# GH 15048
base_df = pd.DataFrame(
{"A": [1, 1, 1, 1, 2, 2, 2, 2], "B": [3, 4, 3, 2, 2, 3, 2, 1]}
)
expected_mins = [3, 3, 3, 2, 2, 2, 2, 1]
df = base_df.astype(dtype)
expected = pd.DataFrame({"B": expected_mins}).astype(dtype)
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
result = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test w/ min value for dtype
df.loc[[2, 6], "B"] = min_val
expected.loc[[2, 3, 6, 7], "B"] = min_val
result = df.groupby("A").cummin()
tm.assert_frame_equal(result, expected)
expected = df.groupby("A").B.apply(lambda x: x.cummin()).to_frame()
tm.assert_frame_equal(result, expected)
# Test nan in some values
base_df.loc[[0, 2, 4, 6], "B"] = np.nan
expected = pd.DataFrame({"B": [np.nan, 4, np.nan, 2, np.nan, 3, np.nan, 1]})
# %%
import os
import pandas as pd
import numpy as np
import datetime
# %% LOAD DATA 217065
MB1 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 1.xlsx', engine='openpyxl') # 99955
MB2 = pd.read_excel(r'D:\Basededatos\Origen\BBDD AUTOMÓVILES 9 MILLONES\BBDD AUTOMÓVILES MERCEDES BENZ Y B.M.W. 2.xlsx', engine='openpyxl')
from nanobt.backtesting import Backtesting
from nanobt.trades import TradeHistory, SideOrder
import talib
import pandas as pd
INIT_PORTFOLIO = 1000
class EMAStrategy(Backtesting):
def __init__(self, value_rapid_ema, value_slow_ema):
super().__init__()
self.rapid_ema = None
self.slow_ema = None
self.vr_ema = value_rapid_ema
self.vs_ema = value_slow_ema
def updateIndicators(self):
self.rapid_ema = talib.EMA(self.candles['close'].values, timeperiod=self.vr_ema)
self.slow_ema = talib.EMA(self.candles['close'].values, timeperiod=self.vs_ema)
def next(self):
self.updateIndicators()
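# when flat, open a position in the direction of the fast/slow relationship;
# when in a position, close it once the relationship flips against it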
if not self.position:
if self.rapid_ema[-1] > self.slow_ema[-1]:
self.entry(SideOrder.BUY)
elif self.rapid_ema[-1] < self.slow_ema[-1]:
self.entry(SideOrder.SELL)
else:
if self.rapid_ema[-1] > self.slow_ema[-1]:
if self.position.side == SideOrder.SELL:
self.exit()
elif self.rapid_ema[-1] < self.slow_ema[-1]:
if self.position.side == SideOrder.BUY:
self.exit()
data = pd.read_csv('./data/binance_BTCUSDT_5m.csv')
'''
ML-Based Trading Strategy
'''
import cbpro
import zmq
import sys
import json
import time
import os
import pickle
import pandas as pd
import numpy as np
import datetime as dt
# the following libraries are to update the persisted ML model
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# overload the on_message behavior of cbpro.WebsocketClient
class MyWebsocketClient(cbpro.WebsocketClient):
def on_open(self):
self.url = "wss://ws-feed.pro.coinbase.com/"
self.products = symbol
self.channels = ['ticker']
self.should_print = False
def on_message(self, msg):
self.data = msg
def on_close(self):
print("-- Goodbye! --")
def logger_monitor(message, time=True, sep=True):
# logger and monitor function
with open(log_file, 'a') as f:
t = str(dt.datetime.now())
msg = ''
if time:
msg += ',' + t + ','
if sep:
msg += 3 * '='
msg += ',' + message + ','
# sends the message via the socket
socket.send_string(msg)
# writes the message to the log file
f.write(msg)
return
def report_positions(pos):
'''Logs and sends position data'''
out = ''
out += ',Going {},'.format(pos) + ','
time.sleep(0.033) # waits for the order to be executed
# get orders (will possibly make multiple HTTP requests)
#get_orders_gen = auth_client.get_orders()
get_fills = list(fills_gen)
out += ',' + str(get_fills) + ','
logger_monitor(out)
return
# callback function - algo trading minimal working example
# https://en.wikipedia.org/wiki/Minimal_working_example
def trading_mwe(symbol, amount, position, bar, min_bars, twentyfour, df_accounts, df_fills):
# Welcome message
print('')
print('*'*50)
print('*** Welcome to Tenzin II Crypto Trader ***')
print('*'*50)
print('')
print('Trading: ', symbol)
print('Amount per trade: ', amount)
print('')
print('Last 24 hrs:')
print('')
print('Open: .........', twentyfour['open'])
print('Last: .........', twentyfour['last'])
print('High: .........', twentyfour['high'])
print('Low: .......', twentyfour['low'])
print('Volume: ......', twentyfour['volume'])
print('30 day Volume: ', twentyfour['volume_30day'])
print('')
print('Recent orders: ')
print(df_fills[['product_id', 'fee', 'side', 'settled', 'usd_volume']].tail(3))
print('')
print('Account Positions: ')
print(df_accounts[['currency', 'balance']])
print('')
# global variables
global wsClient, df, dataframe, algorithm, log_file
# intialize variables
trading = 'n' # default == not trading
# ask to start trading
trading = input('Start trading? [Y]/[n]:')
while trading == 'Y':
while wsClient.data:
start = time.process_time() # reference for start of trading
end = start + 10.0 # when to end the trading session (seconds of processor time)
tick = wsClient.data
dataframe = dataframe.append(tick, ignore_index=True)
dataframe.index = pd.to_datetime(dataframe['time'], infer_datetime_format=True)
# resampling of the tick data
df = dataframe.resample(bar, label='right').last().ffill()
if len(df) > min_bars:
min_bars = len(df)
logger_monitor('NUMBER OF TICKS: {} |'.format(len(dataframe))+\
'NUMBER OF BARS: {}'.format(min_bars))
# data processing and feature preparation
df['Mid'] = df[['price']].mean(axis=1)
df['Returns'] = np.log(df['Mid']/df['Mid'].shift(1))
df['Direction'] = np.where(df['Returns'] > 0, 1, -1)
# picks relevant points
features = df['Direction'].iloc[-(lags + 1): -1]
# necessary reshaping
features = features.values.reshape(1, -1)
# generates the signal (+1 or -1)
signal = algorithm.predict(features)[0]
# logs and sends major financial information
logger_monitor('MOST RECENT DATA\n'+\
str(df[['Mid', 'Returns', 'Direction']].tail()),False)
logger_monitor('\n' + 'features: ' + str(features) + ',' +\
'position: ' + str(position) + ',' +\
'signal: ' + str(signal) + ',', False)
# trading logic
if position in [0, -1] and signal == 1:
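# buy enough to cover any existing short (position * amount) and end up long by `amount`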
auth_client.place_market_order(product_id = symbol,
side = 'buy', \
funds = amount - position * amount)
position = 1
report_positions('LONG')
elif position in [0, 1] and signal == -1:
auth_client.place_market_order(product_id = symbol,\
side = 'sell', funds = amount + position * amount)
position = -1
report_positions('SHORT')
else: # no trade
logger_monitor('no trade placed')
logger_monitor(',****END OF CYCLE****,', False, False)
#time.sleep(15.0)
if len(df) > 100:
# ends the trading session
# long positions are held, open orders are closed
logger_monitor(',ending trading session, max # ticks received,',\
False, False)
# cancel orders
report_positions(',CANCEL ORDERS,')
auth_client.cancel_all(product_id=symbol)
logger_monitor(',***CANCELING UNFILLED ORDERS***,')
# save data
df.to_csv('tick_history.csv')
return
if time.process_time() > end:
# ends the trading session based on max. time defined by end
# long positions are held, open orders are closed
logger_monitor(',ending trading session, time-out end reached,',\
False, False)
# cancel orders
report_positions(',CANCEL ORDERS,')
auth_client.cancel_all(product_id=symbol)
logger_monitor(',***CANCELING UNFILLED ORDERS***,')
# save data
df.to_csv('tick_history.csv')
return
# Ask to continue trading
trading = input('Restart trading? [Y]/[n]:')
if trading == 'n':
return
if __name__ == '__main__':
# File path to save data to
path = os.getcwd() # for .ipynb implementation
#path = os.path.dirname(__file__) # for .py implementation
# log file to record trading
log_file = 'online_trading.log'
# loads the persisted trading algorithm object
algorithm = pickle.load(open('algorithm_dailyBTC.pkl', 'rb'))
# sets up the socket communication via ZeroMQ (here: "publisher")
context = zmq.Context()
socket = context.socket(zmq.PUB)
# this binds the socket communication to all IP addresses of the machine
# socket.bind('tcp://0.0.0.0:5555')
# socket.bind('tcp://*:5555')
socket.bind('tcp://*:5555')
# Authentication credentials
api_key = os.environ.get('CBPRO_SANDBOX_KEY')
api_secret = os.environ.get('CBPRO_SANDBOX_SECRET')
passphrase = os.environ.get('CBPRO_SANDBOX_PASSPHRASE')
# sandbox authenticated client
auth_client = cbpro.AuthenticatedClient(api_key, api_secret, passphrase, \
api_url='https://api-public.sandbox.pro.coinbase.com')
# live account authenticated client
# uses a different set of API access credentials (api_key, api_secret, passphrase)
# auth_client = cbpro.AuthenticatedClient(api_key, api_secret, passphrase)
# parameters for the trading algorithm
# the trading algorithm runs silently for 500 ticks
# use stratMonitoring.ipynb to monitor trading activity
symbol = 'BTC-USD'
bar = '15s' # 15s is for testing; reset to trading frequency
amount = 225 # amount to be traded in $USD
position = 0 # beginning, neutral, position
lags = 5 # number of lags for features data
# minimum number of resampled bars required for the first predicted value (& first trade)
min_bars = lags + 1
# orders & fills generators to report positions:
orders_gen = auth_client.get_orders()
fills_gen = auth_client.get_fills(product_id=symbol)
# Get stats for the last 24 hrs
twentyfour = auth_client.get_product_24hr_stats(symbol)
# Get filled orders
all_fills = list(fills_gen)
df_fills = pd.DataFrame(all_fills)
#filepath = os.path.join(path, 'fills-{}.csv'.format(now))
#df_fills.to_csv(filepath)
# Get account positions
accounts = auth_client.get_accounts()
df_accounts = pd.DataFrame(accounts)
#filepath = os.path.join(path, 'accounts-{}.csv'.format(now))
#df_accounts.to_csv(filepath)
# the main asynchronous loop using the callback function
# Coinbase Pro web socket connection is rate-limited to 4 seconds per request per IP.
wsClient = MyWebsocketClient()
dataframe = pd.DataFrame() # dataframe for storing wsClient feed
df = pd.DataFrame()
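# The original script is truncated here; presumably it next starts the websocket feed and
# enters the trading loop, e.g. (assumed continuation, not part of the source):
# wsClient.start()
# trading_mwe(symbol, amount, position, bar, min_bars, twentyfour, df_accounts, df_fills)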
import pandas as pd
import numpy as np
from itertools import product
from matplotlib import pyplot as plt
from sklearn import tree
from sklearn.datasets import make_moons, make_circles, make_classification
import graphviz
from mlxtend.plotting import plot_decision_regions
def _get_scikit_datasets(n_points=100):
rng = np.random.RandomState(2)
X, y = make_classification(n_samples=n_points, n_features=2,
n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(n_samples=n_points, noise=0.3, random_state=0),
make_circles(n_samples=n_points, noise=0.2, factor=0.5,
random_state=1),
linearly_separable
]
return datasets
def get_importances(model, feature_names):
importances = dict(zip(feature_names, model.feature_importances_))
return pd.Series(importances)
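# Usage sketch (illustrative only; assumes a fitted tree-based model):
# X, y = _get_scikit_datasets(n_points=100)[2]
# clf = tree.DecisionTreeClassifier(max_depth=3).fit(X, y)
# print(get_importances(clf, ['x0', 'x1']))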
from unittest import TestCase, skip
from unittest.mock import patch, Mock
from typer.testing import CliRunner
from mls.__main__ import app, _filter_and_create_rich_table
from pandas import DataFrame
from tests.mls_data import supporters, player, conference, txn
class TestApp(TestCase):
@classmethod
def setUpClass(cls):
cls.runner = CliRunner()
def test_game(self):
# TODO
pass
def test_filter_table(self):
df = DataFrame(conference)
tbl = _filter_and_create_rich_table(df, "Test title")
self.assertEqual(tbl.title, "Test title")
self.assertGreater(tbl.row_count, 10)
@patch("mls.__main__.read_html", return_value=[DataFrame(supporters)])
def test_standings_supporters_shield(self, mock_read_html: Mock):
result = self.runner.invoke(app, ["standings"])
self.assertEqual(0, result.exit_code)
self.assertIn("MLS Standings", result.stdout)
@patch("mls.__main__.read_html", return_value=[DataFrame(conference)])
def test_standings_eastern_conference(self, mock_read_html: Mock):
east_result = self.runner.invoke(app, ["standings", "east"])
self.assertEqual(0, east_result.exit_code)
self.assertIn("Eastern", east_result.stdout)
@patch("mls.__main__.read_html", return_value=[DataFrame(conference)])
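# The decorated test body is missing from the truncated source; a minimal stub is sketched
# here so the decorator above has a target (the "west" argument is an assumption):
def test_standings_western_conference(self, mock_read_html: Mock):
west_result = self.runner.invoke(app, ["standings", "west"])
self.assertEqual(0, west_result.exit_code)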
import pandas as pd
error_value = -100
missing_value = -999
missing_label = 'missing'
class processing():
def __init__(self):
pass
def fetch_risk_bucket(self,df_agg,col_name):
df_agg['cls'] = df_agg['Fraud'] / (df_agg['Non-Fraud'] + 0.1)
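# the +0.1 in the denominator guards against division by zero when a group has no non-fraud cases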
df_agg['times'] = pd.cut(df_agg['cls'],
bins=[-1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 100000],
labels=['no-risk', 'risk<0.1', 'risk<0.2', 'risk<0.3', 'risk<0.4', 'risk<0.5', 'risk<0.6',
'risk<0.7', 'risk<0.8', 'risk<0.9', 'risk<1', 'high-risk'])
import pandas as pd
import re
from nltk.tokenize import word_tokenize
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from collections import OrderedDict
from datetime import datetime, timedelta
# # Get Relevant Reviews from both aapl_public.tsv and aapl_amazon.csv
# 1. aapl_public.tsv is the AWS public dataset. The wireless-products dataset was picked after reviewing other relevant datasets; only the wireless-products dataset contains Apple products
# 2. aapl_amazon.csv contains Amazon reviews scraped with Scrapy from Amazon's Apple product pages
# 3. The main goal here is to keep only reviews relevant to Apple and dated 2010-2019
# 4. Apple Products Dates is a dataset of Apple products and their release dates, compiled from (https://en.wikipedia.org/wiki/Timeline_of_Apple_Inc._products) & (https://941am.com/) as reference
# 5. This dataset helps filter and retrieve the relevant products in both the aapl_public and aapl_amazon datasets
df_wireless= pd.read_csv('../../Raw Data/Amazon Product AAPL/aapl_public.tsv', sep='\t',
usecols=['marketplace', 'customer_id', 'review_id',
'product_id', 'product_parent', 'product_title',
'product_category', 'star_rating', 'helpful_votes',
'total_votes', 'vine', 'verified_purchase', 'review_headline',
'review_body', 'review_date'])
#get only apple related products by looking at the product title
#get products where the review dates are 2010 and above
df_wireless['product_title'] = df_wireless['product_title'].str.lower()
df_wireless= df_wireless[df_wireless['product_title'].notna()]
df_wireless_apple = df_wireless[df_wireless['product_title'].str.contains("apple")]
df = df_wireless_apple[df_wireless_apple['review_date']> '2010-01-01']
df = pd.read_excel('aapl_public_filtered.xlsx')
#get relevant variables
df = df[['product_title','review_id', 'product_id','product_category', 'star_rating', 'review_headline', 'review_body', 'review_date']]
#remove numbers and special characters
df["review_body"] = df['review_body'].replace("[^a-zA-Z]+", " ", regex = True)
df["review_headline"] = df['review_headline'].replace("[^a-zA-Z]+", " ", regex = True)
#remove white spaces
df["review_body"] = df['review_body'].str.strip()
df["review_headline"] = df['review_headline'].str.strip()
#fill review body na with empty string
df['review_body'].fillna('', inplace= True)
df = df[df['review_body'].notnull()]
#APPLE PRODUCTS AND ITS RELEASE DATES. Taken from https://941am.com/
#https://en.wikipedia.org/wiki/Timeline_of_Apple_Inc._products
products_dates = pd.read_excel('../../Raw Data/Amazon Product AAPL/aapl_products_dates.xlsx')
#Clean the product names and products where the release date is 2015 and below
products_dates["new"] = products_dates["Product Name"].replace("[^a-zA-Z0-9]+", " ", regex = True)
products_dates['new'] = products_dates['new'].str.lower()
products_dates['new'] = products_dates['new'].str.replace('gen', 'generation')
products_2015 = products_dates[products_dates['Date'] < " 2016-01-01"]
#create product dates dictionary
pd_dates = pd.Series(products_2015['Date'].values,index=products_2015['new']).to_dict()
pd_dates = OrderedDict(sorted(pd_dates.items(), key=lambda x:x[1], reverse=True))
#creates a list of product names
p_list =list(products_dates['new'].str.strip())
pattern = '|'.join(p_list)
#take products only if product title contains the product names
df_new = df[df.product_title.str.contains(pattern)]
#a list of words that should not be included in the product titles
#remove those rows where product titles contains np_list
np_list = ['earphones','earphone', 'earbuds','bluetooth', 'case', 'cable','speaker', 'portable', 'headphones', 'headset',
'bluetooth', 'protector', 'samsung', 'android', 'adapter', 'usb', 'charger', 'earbud', 'cover', 'hdmi',
'stand','leather','replacement', 'mount', 'holder', 'battery', 'mounting', 'sticker', 'replaceable',
'bumper','len', 'packaging', 'package', '/', 'armband', 'frame', 'stylus', 'band', 'digitizer', 'charging','cleaner',
'display', 'skin','kit','handset', 'set', 'strap','headphone', 'accessory', 'decal', 'wallet', 'bag', 'pouch',
'mp3', 'mp3s', 'adaptor','plug', 'shell', 'cellet','cloths', 'cloth', '3102mss','selfiepod','tool', 'shield',
'shock', 'armor', 'film', 'protection', 'sim', 'plastic','tripod', 'car','cradle','tempered','design','invisibleshield',
]
remove = '|'.join(np_list)
df_new_wl = df_new[~df_new.product_title.str.contains(remove)]
df_new_wl['product_title'] = df_new_wl['product_title'].replace("[^a-zA-Z0-9]+", " ", regex = True)
def like_function(x):
'''
return the release date of the product if the product_title
of the dataset contains the product in pd_dates dict
'''
date = ""
for key in pd_dates: #key = product_title
if key in x:
date = pd_dates[key]
break
return date
def u_like_function(x):
'''
return the product name of the product if the product_title
of the dataset contains the product in pd_dates dict
'''
product = ""
for key in pd_dates:
if key in x:
product = key
break
return product
# Create new variables release date (of product) and unique products (that comes from Apple Product Dates)
df_new_wl['release_date'] = df_new_wl.product_title.apply(like_function)
df_new_wl['unique_products'] = df_new_wl.product_title.apply(u_like_function)
#SCRAPED AMAZON REVIEWS
amazon_scrape = pd.read_csv("../../Raw Data/Amazon Product AAPL/aapl_amazon.csv",encoding = "ISO-8859-1")
amazon_scrape['product name'] = amazon_scrape['product name'].str.lower()
#Keep products where product names only contain the product names in the Apple Products Dates
df_am_scr = amazon_scrape[amazon_scrape['product name'].str.contains(pattern)]
#a list of words that should not be included in the product titles
#remove those rows where product titles contains anp_list
anp_list = ['/', 'mount', 'cider', 'case', 'onion', 'delivery','cable','water', 'adapter','smart', 'earphones', 'guide',
'remote', 'protector']
remove = '|'.join(anp_list)
df_am_scr = df_am_scr[~df_am_scr['product name'].str.contains(remove)]
#clean the product name
df_am_scr['product name'] = df_am_scr['product name'].replace("[^a-zA-Z0-9]+", " ", regex = True)
df_am_scr.reset_index(inplace = True, drop=True)
#Get products and dates as dictionary
pd_dates_2 = pd.Series(products_dates['Date'].values,index=products_dates['new']).to_dict()
pd_dates_2 = OrderedDict(sorted(pd_dates_2.items(), key=lambda x:x[1], reverse=False))
def like_function(x):
'''
return the release date of the product if the product_title
of the dataset contains the product in pd_dates_2 dict
'''
date = ""
for key in pd_dates_2:
if key in x:
date = pd_dates_2[key]
break
return date
def u_like_function(x):
'''
return the product name of the product if the product_title
of the dataset contains the product in pd_dates_2 dict
'''
product = ""
for key in pd_dates_2:
if key in x:
product = key
break
return product
df_am_scr['release_date'] = df_am_scr['product name'].apply(like_function)
df_am_scr['unique_products'] = df_am_scr['product name'].apply(u_like_function)
#remove numbers and special characters
df_am_scr['comment'] = df_am_scr['comment'].replace("[^a-zA-Z]+", " ", regex = True)
#df["review_headline"] = df['review_headline'].replace("[^a-zA-Z]+", " ", regex = True)
#remove white spaces
df_am_scr['comment'] = df_am_scr['comment'].str.strip()
#df["review_headline"] = df['review_headline'].str.strip()
df_am_scr['comment'].fillna('', inplace= True)
df_new_wl_2 = df_new_wl[['product_title',
'star_rating', 'review_body', 'review_date',
'release_date', 'unique_products']]
df_apple = pd.concat([df_new_wl_2,df_am_scr.rename(columns={'product name':'product_title',
'comment': 'review_body',
'stars' : 'star_rating',
'date': 'review_date'})], ignore_index=True)
#Clean datetime format
#change all datetime to the same string format
for index, row in df_apple.iterrows():
date = row['review_date']
print(date)
if len(date) > 10:
new = datetime.strptime(date, '%B %d, %Y')
df_apple.at[index,'review_date'] = str(new.year)+ '-'+ str(new.month)+ '-' + str(new.day)
print(row['review_date'])
df_apple.sort_values('review_date')
# index 4509 date is wrong, drop that row
df_apple.loc[4509]
df_apple.drop([4509], inplace = True)
#string date to datetime object
df_apple['review_date'] = df_apple['review_date'].apply(lambda x: datetime.strptime(x, '%Y-%m-%d'))
df_apple
# ## Map Products Release Dates and take Top 5 latest products based on review dates
# 1. Make sure that the products mapped from the Apple Products Dates and release dates are up to date with the products in the Amazon reviews dataset
# 2. Keep a review only if its product is among the top 5 latest products based on review dates and release dates. This ensures that the review is not too outdated.
#
# For example, if a review is for the iPhone 4, which was released in 2010, but the review date is 2015, it will not accurately reflect Apple's sales/performance in 2015, since the phone was released 5 years earlier. To account for that, we only keep the top 5 latest products based on their release dates, map them to the review dates, and drop the other, outdated reviews
products_dates["new"] = products_dates["Product Name"].replace("[^a-zA-Z0-9]+", " ", regex = True)
products_dates['new'] = products_dates['new'].str.lower()
products_dates['new'] = products_dates['new'].str.replace('gen', 'generation')
##Get products and dates as dictionary
pd_dates = pd.Series(products_dates['Date'].values, index=products_dates['new']).to_dict()
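# Hypothetical continuation sketch (the notebook's actual top-5 filtering is not shown):
# keep a review only if its product is among the 5 most recently released products as of the
# review date, e.g.:
# def in_top5(row):
#     released = {p: d for p, d in pd_dates.items() if pd.to_datetime(d) <= row['review_date']}
#     top5 = sorted(released, key=released.get, reverse=True)[:5]
#     return row['unique_products'] in top5
# df_apple = df_apple[df_apple.apply(in_top5, axis=1)]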
from __future__ import division
import copy
import bt
from bt.core import Node, StrategyBase, SecurityBase, AlgoStack, Strategy
from bt.core import FixedIncomeStrategy, HedgeSecurity, FixedIncomeSecurity
from bt.core import CouponPayingSecurity, CouponPayingHedgeSecurity
from bt.core import is_zero
import pandas as pd
import numpy as np
from nose.tools import assert_almost_equal as aae
import sys
if sys.version_info < (3, 3):
import mock
else:
from unittest import mock
def test_node_tree1():
# Create a regular strategy
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c1
assert p['c1'] != c2
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
# Create a new parent strategy with a child sub-strategy
m = Node('m', children=[p, c1])
p = m['p']
mc1 = m['c1']
c1 = p['c1']
c2 = p['c2']
assert len(m.children) == 2
assert 'p' in m.children
assert 'c1' in m.children
assert mc1 != c1
assert p.parent == m
assert len(p.children) == 2
assert 'c1' in p.children
assert 'c2' in p.children
assert p == c1.parent
assert p == c2.parent
assert m == p.root
assert m == c1.root
assert m == c2.root
# Add a new node into the strategy
c0 = Node('c0', parent=p)
c0 = p['c0']
assert 'c0' in p.children
assert p == c0.parent
assert m == c0.root
assert len(p.children) == 3
# Add a new sub-strategy into the parent strategy
p2 = Node( 'p2', children = [c0, c1], parent=m )
p2 = m['p2']
c0 = p2['c0']
c1 = p2['c1']
assert 'p2' in m.children
assert p2.parent == m
assert len(p2.children) == 2
assert 'c0' in p2.children
assert 'c1' in p2.children
assert c0 != p['c0']
assert c1 != p['c1']
assert p2 == c0.parent
assert p2 == c1.parent
assert m == p2.root
assert m == c0.root
assert m == c1.root
def test_node_tree2():
# Just like test_node_tree1, but using the dictionary constructor
c = Node('template')
p = Node('p', children={'c1':c, 'c2':c, 'c3':'', 'c4':''})
assert 'c1' in p.children
assert 'c2' in p.children
assert p['c1'] != c
assert p['c1'] != c
c1 = p['c1']
c2 = p['c2']
assert len(p.children) == 2
assert c1.name == 'c1'
assert c2.name == 'c2'
assert p == c1.parent
assert p == c2.parent
assert p == c1.root
assert p == c2.root
def test_node_tree3():
c1 = Node('c1')
c2 = Node('c1') # Same name!
raised = False
try:
p = Node('p', children=[c1, c2, 'c3', 'c4'])
except ValueError:
raised = True
assert raised
raised = False
try:
p = Node('p', children=['c1', 'c1'])
except ValueError:
raised = True
assert raised
c1 = Node('c1')
c2 = Node('c2')
p = Node('p', children=[c1, c2, 'c3', 'c4'])
raised = False
try:
Node('c1', parent = p )
except ValueError:
raised = True
assert raised
# This does not raise, as it's just providing an implementation of 'c3',
# which had been declared earlier
c3 = Node('c3', parent = p )
assert 'c3' in p.children
def test_integer_positions():
c1 = Node('c1')
c2 = Node('c2')
c1.integer_positions = False
p = Node('p', children=[c1, c2])
c1 = p['c1']
c2 = p['c2']
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
p.use_integer_positions(False)
assert not p.integer_positions
assert not c1.integer_positions
assert not c2.integer_positions
c3 = Node('c3', parent=p)
c3 = p['c3']
assert not c3.integer_positions
p2 = Node( 'p2', children = [p] )
p = p2['p']
c1 = p['c1']
c2 = p['c2']
assert p2.integer_positions
assert p.integer_positions
assert c1.integer_positions
assert c2.integer_positions
def test_strategybase_tree():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
assert len(s.children) == 2
assert 's1' in s.children
assert 's2' in s.children
assert s == s1.parent
assert s == s2.parent
def test_node_members():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
s1 = s['s1']
s2 = s['s2']
actual = s.members
assert len(actual) == 3
assert s1 in actual
assert s2 in actual
assert s in actual
actual = s1.members
assert len(actual) == 1
assert s1 in actual
actual = s2.members
assert len(actual) == 1
assert s2 in actual
def test_node_full_name():
s1 = SecurityBase('s1')
s2 = SecurityBase('s2')
s = StrategyBase('p', [s1, s2])
# we cannot access s1 and s2 directly since they are copied
# we must therefore access through s
assert s.full_name == 'p'
assert s['s1'].full_name == 'p>s1'
assert s['s2'].full_name == 'p>s2'
def test_security_setup_prices():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
# now with setup
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert len(c1.prices) == 1
assert c1.prices[0] == 105
assert c2.price == 95
assert len(c2.prices) == 1
assert c2.prices[0] == 95
def test_strategybase_tree_setup():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
assert len(s.data) == 3
assert len(c1.data) == 3
assert len(c2.data) == 3
assert len(s._prices) == 3
assert len(c1._prices) == 3
assert len(c2._prices) == 3
assert len(s._values) == 3
assert len(c1._values) == 3
assert len(c2._values) == 3
def test_strategybase_tree_adjust():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
s.adjust(1000)
assert s.capital == 1000
assert s.value == 1000
assert c1.value == 0
assert c2.value == 0
assert c1.weight == 0
assert c2.weight == 0
s.update(dts[0])
assert s.flows[ dts[0] ] == 1000
def test_strategybase_tree_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 105
assert c2.price == 95
i = 2
s.update(dts[i], data.loc[dts[i]])
assert c1.price == 100
assert c2.price == 100
def test_update_fails_if_price_is_nan_and_position_open():
c1 = SecurityBase('c1')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1'], data=100)
data['c1'][dts[1]] = np.nan
c1.setup(data)
i = 0
# mock in position
c1._position = 100
c1.update(dts[i], data.loc[dts[i]])
# test normal case - position & non-nan price
assert c1._value == 100 * 100
i = 1
# this should fail, because we have non-zero position, and price is nan, so
# bt has no way of updating the _value
try:
c1.update(dts[i], data.loc[dts[i]])
assert False
except Exception as e:
assert str(e).startswith('Position is open')
# on the other hand, if position was 0, this should be fine, and update
# value to 0
c1._position = 0
c1.update(dts[i], data.loc[dts[i]])
assert c1._value == 0
def test_strategybase_tree_allocate():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_child_from_strategy():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
# since children have w == 0 this should stay in s
s.allocate(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now allocate to c1
s.allocate(500, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
def test_strategybase_tree_allocate_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now allocate directly to child
s1.allocate(500)
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
c1.allocate(200)
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
def test_strategybase_tree_allocate_long_short():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
c1.allocate(-200)
assert c1.position == 3
assert c1.value == 300
assert c1.weight == 300.0 / 1000
assert s.capital == 1000 - 500 + 200
assert s.value == 1000
c1.allocate(-400)
assert c1.position == -1
assert c1.value == -100
assert c1.weight == -100.0 / 1000
assert s.capital == 1000 - 500 + 200 + 400
assert s.value == 1000
# close up
c1.allocate(-c1.value)
assert c1.position == 0
assert c1.value == 0
assert c1.weight == 0
assert s.capital == 1000 - 500 + 200 + 400 - 100
assert s.value == 1000
def test_strategybase_tree_allocate_update():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
assert s.price == 100
s.adjust(1000)
assert s.price == 100
assert s.value == 1000
assert s._value == 1000
c1.allocate(500)
assert c1.position == 5
assert c1.value == 500
assert c1.weight == 500.0 / 1000
assert s.capital == 1000 - 500
assert s.value == 1000
assert s.price == 100
i = 1
s.update(dts[i], data.loc[dts[i]])
assert c1.position == 5
assert c1.value == 525
assert c1.weight == 525.0 / 1025
assert s.capital == 1000 - 500
assert s.value == 1025
assert np.allclose(s.price, 102.5)
def test_strategybase_universe():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
def test_strategybase_allocate():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
def test_strategybase_lazy():
# A mix of test_strategybase_universe and test_strategybase_allocate
# to make sure that assets with lazy_add work correctly.
c1 = SecurityBase('c1', multiplier=2, lazy_add=True, )
c2 = FixedIncomeSecurity('c2', lazy_add=True)
s = StrategyBase('s', [c1, c2])
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i])
assert len(s.universe) == 1
assert 'c1' in s.universe
assert 'c2' in s.universe
assert s.universe['c1'][dts[i]] == 105
assert s.universe['c2'][dts[i]] == 95
# should not have children unless allocated
assert len(s.children) == 0
s.adjust(1000)
s.allocate(100, 'c1')
s.allocate(100, 'c2')
c1 = s['c1']
c2 = s['c2']
assert c1.multiplier == 2
assert isinstance( c2, FixedIncomeSecurity)
def test_strategybase_close():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
assert c1.position == 1
assert c1.value == 100
assert s.value == 1000
s.close('c1')
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_flatten():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
s.allocate(100, 'c1')
c1 = s['c1']
s.allocate(100, 'c2')
c2 = s['c2']
assert c1.position == 1
assert c1.value == 100
assert c2.position == 1
assert c2.value == 100
assert s.value == 1000
s.flatten()
assert c1.position == 0
assert c1.value == 0
assert s.value == 1000
def test_strategybase_multiple_calls():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
c2 = s['c2']
assert len(s.children) == 1
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1 == s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_preset_secs():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('s', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update out t0
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1000
assert s.capital == 50
assert c2.value == 950
assert c2.weight == 950.0 / 1000
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1050
assert s.capital == 50
assert len(s.children) == 2
assert c2.value == 1000
assert c2.weight == 1000.0 / 1050.
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
# update out t1
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1050
assert s.capital == 5
assert c1.value == 1045
assert c1.weight == 1045.0 / 1050
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 5
assert c1.value == 1100
assert c1.weight == 1100.0 / 1105
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
# update out t4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1105
assert s.capital == 60
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1105
assert c2.price == 95
def test_strategybase_multiple_calls_no_post_update():
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
dts = pd.date_range('2010-01-01', periods=5)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data.c2[dts[0]] = 95
data.c1[dts[1]] = 95
data.c2[dts[2]] = 95
data.c2[dts[3]] = 95
data.c2[dts[4]] = 95
data.c1[dts[4]] = 105
s.setup(data)
# define strategy logic
def algo(target):
# close out any open positions
target.flatten()
# get stock w/ lowest price
c = target.universe.loc[target.now].idxmin()
# allocate all capital to that stock
target.allocate(target.value, c)
# replace run logic
s.run = algo
# start w/ 1000
s.adjust(1000)
# loop through dates manually
i = 0
# update t0
s.update(dts[i])
assert len(s.children) == 0
assert s.value == 1000
# run t0
s.run(s)
assert len(s.children) == 1
assert s.value == 999
assert s.capital == 49
c2 = s['c2']
assert c2.value == 950
assert c2.weight == 950.0 / 999
assert c2.price == 95
# update t1
i = 1
s.update(dts[i])
assert s.value == 1049
assert s.capital == 49
assert len(s.children) == 1
assert 'c2' in s.children
c2 = s['c2']
assert c2.value == 1000
assert c2.weight == 1000.0 / 1049.0
assert c2.price == 100
# run t1 - close out c2, open c1
s.run(s)
assert len(s.children) == 2
assert s.value == 1047
assert s.capital == 2
c1 = s['c1']
assert c1.value == 1045
assert c1.weight == 1045.0 / 1047
assert c1.price == 95
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 100
# update t2
i = 2
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1102
assert s.capital == 2
assert c1.value == 1100
assert c1.weight == 1100.0 / 1102
assert c1.price == 100
assert c2.value == 0
assert c2.weight == 0
assert c2.price == 95
# run t2
s.run(s)
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# update t3
i = 3
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1100
assert s.capital == 55
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1100
assert c2.price == 95
# run t3
s.run(s)
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 100
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# update t4
i = 4
s.update(dts[i])
assert len(s.children) == 2
assert s.value == 1098
assert s.capital == 53
assert c1.value == 0
assert c1.weight == 0
# accessing price should refresh - this child has been idle for a while -
    # must make sure we can still get a fresh price
assert c1.price == 105
assert len(c1.prices) == 5
assert c2.value == 1045
assert c2.weight == 1045.0 / 1098
assert c2.price == 95
# run t4
s.run(s)
assert len(s.children) == 2
assert s.value == 1096
assert s.capital == 51
assert c1.value == 0
assert c1.weight == 0
assert c1.price == 105
assert c2.value == 1045
assert c2.weight == 1045.0 / 1096
assert c2.price == 95
def test_strategybase_prices():
dts = pd.date_range('2010-01-01', periods=21)
rawd = [13.555, 13.75, 14.16, 13.915, 13.655,
13.765, 14.02, 13.465, 13.32, 14.65,
14.59, 14.175, 13.865, 13.865, 13.89,
13.85, 13.565, 13.47, 13.225, 13.385,
12.89]
data = pd.DataFrame(index=dts, data=rawd, columns=['a'])
s = StrategyBase('s')
s.set_commissions(lambda q, p: 1)
s.setup(data)
# buy 100 shares on day 1 - hold until end
# just enough to buy 100 shares + 1$ commission
s.adjust(1356.50)
s.update(dts[0])
# allocate all capital to child a
# a should be dynamically created and should have
# 100 shares allocated. s.capital should be 0
s.allocate(s.value, 'a')
assert s.capital == 0
assert s.value == 1355.50
assert len(s.children) == 1
aae(s.price, 99.92628, 5)
a = s['a']
assert a.position == 100
assert a.value == 1355.50
assert a.weight == 1
assert a.price == 13.555
assert len(a.prices) == 1
# update through all dates and make sure price is ok
s.update(dts[1])
aae(s.price, 101.3638, 4)
s.update(dts[2])
aae(s.price, 104.3863, 4)
s.update(dts[3])
aae(s.price, 102.5802, 4)
# finish updates and make sure ok at end
for i in range(4, 21):
s.update(dts[i])
assert len(s.prices) == 21
aae(s.prices[-1], 95.02396, 5)
aae(s.prices[-2], 98.67306, 5)
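# Rough arithmetic behind the price assertions above (an assumption about how the
# strategy price index is normalised, not taken from bt's docs): with a single
# 100-share position bought out of 1356.50 of starting capital, price is roughly
# 100 * value / 1356.50, e.g. 100 * 1375 / 1356.50 ~= 101.3638 on dts[1] and
# 100 * 1289 / 1356.50 ~= 95.024 on the final day.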
def test_fail_if_root_value_negative():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
s.setup(data)
s.adjust(-100)
# trigger update
s.update(dts[0])
assert s.bankrupt
# make sure only triggered if root negative
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(-100)
s.update(dts[0])
# now make it trigger
c1.adjust(-1000)
# trigger update
s.update(dts[0])
assert s.bankrupt
def test_fail_if_0_base_in_return_calc():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 100
data['c2'][dts[0]] = 95
# must setup tree because if not negative root error pops up first
c1 = StrategyBase('c1')
s = StrategyBase('s', children=[c1])
c1 = s['c1']
s.setup(data)
s.adjust(1000)
c1.adjust(100)
s.update(dts[0])
c1.adjust(-100)
s.update(dts[1])
try:
c1.adjust(-100)
s.update(dts[1])
assert False
except ZeroDivisionError as e:
if 'Could not update' not in str(e):
assert False
def test_strategybase_tree_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1', update=True)
assert s.root.stale == True
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
# Check that rebalance with update=False
# does not mark the node as stale
s.rebalance(0.6, 'c1', update=False)
assert s.root.stale == False
def test_strategybase_tree_decimal_position_rebalance():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.use_integer_positions(False)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000.2)
s.rebalance(0.42, 'c1')
s.rebalance(0.58, 'c2')
aae(c1.value, 420.084)
aae(c2.value, 580.116)
aae(c1.value + c2.value, 1000.2)
def test_rebalance_child_not_in_tree():
s = StrategyBase('p')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1000)
# rebalance to 0 w/ child that is not present - should ignore
s.rebalance(0, 'c2')
assert s.value == 1000
assert s.capital == 1000
assert len(s.children) == 0
def test_strategybase_tree_rebalance_to_0():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance c1
s.rebalance(0.5, 'c1')
assert c1.position == 5
assert c1.value == 500
assert s.capital == 1000 - 500
assert s.value == 1000
assert c1.weight == 500.0 / 1000
assert c2.weight == 0
# now rebalance c1
s.rebalance(0, 'c1')
assert c1.position == 0
assert c1.value == 0
assert s.capital == 1000
assert s.value == 1000
assert c1.weight == 0
assert c2.weight == 0
def test_strategybase_tree_rebalance_level2():
c1 = SecurityBase('c1')
c12 = copy.deepcopy(c1)
c2 = SecurityBase('c2')
c22 = copy.deepcopy(c2)
s1 = StrategyBase('s1', [c1, c2])
s2 = StrategyBase('s2', [c12, c22])
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
c1 = s1['c1']
c2 = s1['c2']
c12 = s2['c1']
c22 = s2['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
assert m.value == 1000
assert m.capital == 1000
assert s1.value == 0
assert s2.value == 0
assert c1.value == 0
assert c2.value == 0
# now rebalance child s1 - since its children are 0, no waterfall alloc
m.rebalance(0.5, 's1')
assert s1.value == 500
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
# now allocate directly to child of child
s1.rebalance(0.4, 'c1')
assert s1.value == 500
assert s1.capital == 500 - 200
assert c1.value == 200
assert c1.weight == 200.0 / 500
assert c1.position == 2
assert m.capital == 1000 - 500
assert m.value == 1000
assert s1.weight == 500.0 / 1000
assert s2.weight == 0
assert c12.value == 0
# now rebalance child s1 again and make sure c1 also gets proportional
# increase
m.rebalance(0.8, 's1')
assert s1.value == 800
aae(m.capital, 200, 1)
assert m.value == 1000
assert s1.weight == 800 / 1000
assert s2.weight == 0
assert c1.value == 300.0
assert c1.weight == 300.0 / 800
assert c1.position == 3
# now rebalance child s1 to 0 - should close out s1 and c1 as well
m.rebalance(0, 's1')
assert s1.value == 0
assert m.capital == 1000
assert m.value == 1000
assert s1.weight == 0
assert s2.weight == 0
assert c1.weight == 0
def test_strategybase_tree_rebalance_base():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[1]] = 105
data['c2'][dts[1]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
s.adjust(1000)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# check that 2 rebalances of equal weight lead to two different allocs
# since value changes after first call
s.rebalance(0.5, 'c1')
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2')
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
# close out everything
s.flatten()
# adjust to get back to 1000
s.adjust(4)
assert s.value == 1000
assert s.capital == 1000
assert c1.value == 0
assert c2.value == 0
# now rebalance but set fixed base
base = s.value
s.rebalance(0.5, 'c1', base=base)
assert c1.position == 4
assert c1.value == 400
assert s.capital == 1000 - 401
assert s.value == 999
assert c1.weight == 400.0 / 999
assert c2.weight == 0
s.rebalance(0.5, 'c2', base=base)
assert c2.position == 4
assert c2.value == 400
assert s.capital == 1000 - 401 - 401
assert s.value == 998
assert c2.weight == 400.0 / 998
assert c1.weight == 400.0 / 998
def test_algo_stack():
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# no run_always for now
del a1.run_always
del a2.run_always
del a3.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert not a3.called
# now test that run_always marked are run
a1 = mock.MagicMock(return_value=True)
a2 = mock.MagicMock(return_value=False)
a3 = mock.MagicMock(return_value=True)
# a3 will have run_always
del a1.run_always
del a2.run_always
stack = AlgoStack(a1, a2, a3)
target = mock.MagicMock()
assert not stack(target)
assert a1.called
assert a2.called
assert a3.called
def test_set_commissions():
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.set_commissions(lambda x, y: 1.0)
s.setup(data)
s.update(dts[0])
s.adjust(1000)
s.allocate(500, 'c1')
assert s.capital == 599
s.set_commissions(lambda x, y: 0.0)
s.allocate(-400, 'c1')
assert s.capital == 999
def test_strategy_tree_proper_return_calcs():
s1 = StrategyBase('s1')
s2 = StrategyBase('s2')
m = StrategyBase('m', [s1, s2])
s1 = m['s1']
s2 = m['s2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
    data.loc[dts[1], 'c1'] = 105
    data.loc[dts[1], 'c2'] = 95
m.setup(data)
i = 0
m.update(dts[i], data.loc[dts[i]])
m.adjust(1000)
# since children have w == 0 this should stay in s
m.allocate(1000)
assert m.value == 1000
assert m.capital == 1000
assert m.price == 100
assert s1.value == 0
assert s2.value == 0
# now allocate directly to child
s1.allocate(500)
assert m.capital == 500
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.weight == 0
# allocate to child2 via parent method
m.allocate(500, 's2')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000
assert s2.price == 100
# now allocate and incur commission fee
s1.allocate(500, 'c1')
assert m.capital == 0
assert m.value == 1000
assert m.price == 100
assert s1.value == 500
assert s1.weight == 500.0 / 1000
assert s1.price == 100
assert s2.value == 500
assert s2.weight == 500.0 / 1000.0
assert s2.price == 100
def test_strategy_tree_proper_universes():
def do_nothing(x):
return True
child1 = Strategy('c1', [do_nothing], ['b', 'c'])
parent = Strategy('m', [do_nothing], [child1, 'a'])
child1 = parent['c1']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(
{'a': pd.Series(data=1, index=dts, name='a'),
'b': pd.Series(data=2, index=dts, name='b'),
'c': pd.Series(data=3, index=dts, name='c')})
parent.setup(data, test_data1 = 'test1')
assert len(parent.children) == 1
assert 'c1' in parent.children
assert len(parent._universe.columns) == 2
assert 'c1' in parent._universe.columns
assert 'a' in parent._universe.columns
assert len(child1._universe.columns) == 2
assert 'b' in child1._universe.columns
assert 'c' in child1._universe.columns
assert parent._has_strat_children
assert len(parent._strat_children) == 1
assert parent.get_data( 'test_data1' ) == 'test1'
    # New child strategy with parent (and using dictionary notation)
child2 = Strategy('c2', [do_nothing], {'a' : SecurityBase(''), 'b' : ''}, parent=parent)
# Setup the child from the parent, but pass in some additional data
child2.setup_from_parent(test_data2 = 'test2')
assert 'a' in child2._universe.columns
assert 'b' in child2._universe.columns
assert 'c2' in parent._universe.columns
# Make sure child has data from the parent and the additional data
assert child2.get_data('test_data1') == 'test1'
assert child2.get_data('test_data2') == 'test2'
assert len(parent._strat_children) == 2
def test_strategy_tree_paper():
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['a'], data=100.)
data['a'].loc[dts[1]] = 101
data['a'].loc[dts[2]] = 102
s = Strategy('s',
[bt.algos.SelectWhere(data > 100),
bt.algos.WeighEqually(),
bt.algos.Rebalance()])
m = Strategy('m', [], [s])
s = m['s']
m.setup(data)
m.update(dts[0])
m.run()
assert m.price == 100
assert s.price == 100
assert s._paper_trade
assert s._paper.price == 100
s.update(dts[1])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert s.price == 100
s.update(dts[2])
m.run()
assert m.price == 100
assert m.value == 0
assert s.value == 0
assert np.allclose(s.price, 100. * (102 / 101.))
def test_dynamic_strategy():
def do_nothing(x):
return True
# Start with an empty parent
parent = Strategy('p', [do_nothing], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
parent.setup( data )
# NOTE: Price of the sub-strategy won't be correct in this example because
# we are not using the algo stack to impact weights, and so the paper
# trading strategy does not see the same actions as we are doing.
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
trade = Strategy('c1_vs_c2', [], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Go long 'c1' and short 'c2'
trade.rebalance( 1., 'c1')
trade.rebalance( -1., 'c2')
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
# On this step, we close the trade, and allocate capital back to the parent
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
def test_dynamic_strategy2():
# Start with an empty parent
parent = Strategy('p', [], [])
dts = pd.date_range('2010-01-01', periods=4)
data = pd.DataFrame(index=dts, columns=['c1', 'c2', 'c3'], data=100.)
data['c1'][dts[2]] = 105.
data['c2'][dts[2]] = 95.
data['c1'][dts[3]] = 101.
data['c2'][dts[3]] = 99.
parent.setup( data )
i = 0
parent.adjust( 1e6 )
parent.update( dts[i] )
assert parent.price == 100.
assert parent.value == 1e6
i = 1
parent.update( dts[i] )
# On this step, we decide to put a trade on c1 vs c2 and track it as a strategy
def trade_c1_vs_c2( strategy ):
if strategy.now == dts[1]:
strategy.rebalance( 1., 'c1')
strategy.rebalance( -1., 'c2')
trade = Strategy('c1_vs_c2', [trade_c1_vs_c2], children = ['c1', 'c2'], parent = parent )
trade.setup_from_parent()
trade.update( parent.now )
assert trade.price == 100.
assert trade.value == 0
# Allocate capital to the trade
parent.allocate( 1e5, trade.name )
assert trade.value == 1e5
assert trade.price == 100.
# Run the strategy for the timestep
parent.run()
assert parent.universe[ trade.name ][ dts[i] ] == 100.
assert np.isnan( parent.universe[ trade.name ][ dts[0] ] )
assert parent.positions['c1'][ dts[i] ] == 1e3
assert parent.positions['c2'][ dts[i] ] == -1e3
i = 2
parent.update( dts[i] )
trade = parent[ trade.name ]
assert trade.value == 1e5 + 10 * 1e3
assert parent.value == 1e6 + 10 * 1e3
aae( trade.price, 110.)
# Next we close the trade by flattening positions
trade.flatten()
trade.update( trade.now ) # Need to update after flattening (for now)
aae( trade.price, 110.)
# Finally we allocate capital back to the parent to be re-deployed
parent.allocate( -trade.capital, trade.name )
assert trade.value == 0
assert trade.capital == 0
aae( trade.price, 110.) # Price stays the same even after capital de-allocated
assert parent.value == 1e6 + 10 * 1e3
assert parent.capital == parent.value
assert parent.positions['c1'][ dts[i] ] == 0.
assert parent.positions['c2'][ dts[i] ] == 0.
i = 3
parent.update( dts[i] )
# Just make sure we can update one step beyond closing
assert parent.value == 1e6 + 10 * 1e3
# Note that "trade" is still a child of parent, and it also has children,
# so it will keep getting updated (and paper trading will still happen).
assert trade.value == 0
assert trade.capital == 0
assert trade.values[ dts[i] ] == 0.
# Paper trading price, as asset prices have moved, paper trading price
# keeps updating. Note that if the flattening of the position was part
# of the definition of trade_c1_vs_c2, then the paper trading price
# would be fixed after flattening, as it would apply to both real and paper.
aae( trade.price, 102.)
aae( parent.universe[ trade.name ][ dts[i] ], 102. )
def test_outlays():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
data['c1'][dts[0]] = 105
data['c2'][dts[0]] = 95
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
    # calling outlays should automatically update the strategy, since it is stale
assert c1.outlays[dts[0]] == (4 * 105)
assert c2.outlays[dts[0]] == (5 * 95)
assert c1.data['outlay'][dts[0]] == (4 * 105)
assert c2.data['outlay'][dts[0]] == (5 * 95)
i = 1
s.update(dts[i], data.loc[dts[i]])
c1.allocate(-400)
c2.allocate(100)
# out update
assert c1.outlays[dts[1]] == (-4 * 100)
assert c2.outlays[dts[1]] == 100
assert c1.data['outlay'][dts[1]] == (-4 * 100)
assert c2.data['outlay'][dts[1]] == 100
def test_child_weight_above_1():
# check for child weights not exceeding 1
s = StrategyBase('s')
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(np.random.randn(3, 2) + 100,
index=dts, columns=['c1', 'c2'])
s.setup(data)
i = 0
s.update(dts[i])
s.adjust(1e6)
s.allocate(1e6, 'c1')
c1 = s['c1']
assert c1.weight <= 1
def test_fixed_commissions():
c1 = SecurityBase('c1')
c2 = SecurityBase('c2')
s = StrategyBase('p', [c1, c2])
# fixed $1 commission per transaction
s.set_commissions(lambda q, p: 1)
c1 = s['c1']
c2 = s['c2']
dts = pd.date_range('2010-01-01', periods=3)
data = pd.DataFrame(index=dts, columns=['c1', 'c2'], data=100)
s.setup(data)
i = 0
s.update(dts[i], data.loc[dts[i]])
# allocate 1000 to strategy
s.adjust(1000)
# now let's see what happens when we allocate 500 to each child
c1.allocate(500)
c2.allocate(500)
# out update
s.update(dts[i])
assert c1.value == 400
assert c2.value == 400
assert s.capital == 198
# de-alloc 100 from c1. This should force c1 to sell 2 units to raise at
# least 100 (because of commissions)
c1.allocate(-100)
s.update(dts[i])
assert c1.value == 200
assert s.capital == 198 + 199
    # allocate 100 to c2. This should leave things unchanged, since c2 cannot
# buy one unit since the commission will cause total outlay to exceed
# allocation
c2.allocate(100)
s.update(dts[i])
assert c2.value == 400
assert s.capital == 198 + 199
# ok try again w/ 101 allocation. This time, it should work
c2.allocate(101)
s.update(dts[i])
assert c2.value == 500
assert s.capital == 198 + 199 - 101
# ok now let's close the whole position. Since we are closing, we expect
# the allocation to go through, even though the outlay > amount
c2.allocate(-500)
s.update(dts[i])
assert c2.value == 0
assert s.capital == 198 + 199 - 101 + 499
# now we are going to go short c2
    # we want to 'raise' 100 dollars. Since we need at least 100, and we
# also have commissions, we will actually short 2 units in order to raise
# at least 100
c2.allocate(-100)
s.update(dts[i])
assert c2.value == -200
assert s.capital == 198 + 199 - 101 + 499 + 199
def test_degenerate_shorting():
    # can have a situation where you short infinitely if commission/share > share
# price
c1 = SecurityBase('c1')
s = StrategyBase('p', [c1])
# $1/share commission
s.set_commissions(lambda q, p: abs(q) * 1)
c1 = s['c1']
    dts = pd.date_range('2010-01-01', periods=3)
#!/usr/bin/python
# coding: utf-8
import json
import pickle
import re
import jieba
import numpy as np
import pandas as pd
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import QuantileTransformer
def max_min_scaler(x):
return (x - np.min(x)) / (np.max(x) - np.min(x))
def trans(text):
text = list(jieba.cut(text))
text = " ".join([x for x in text if not x.isdigit()])
return text
def get_sub_col(col):
sub1_col, sub2_col = [sub for sub in col.split(" \n \n ")]
return pd.Series([sub1_col, sub2_col])
def get_sub_col_raw(col):
sub1_col, sub2_col = [sub for sub in col.split("\n\n")]
return pd.Series([sub1_col, sub2_col])
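# Note on the two helpers above: each document is assumed to consist of exactly two
# sections separated by a blank line. get_sub_col_raw splits the raw text on "\n\n",
# while get_sub_col splits the jieba-tokenised text and therefore expects the
# separator to appear with spaces around each newline (" \n \n ").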
def get_length_features(df, stopwords, name):
def words_count(text, stopwords):
wordlist = [word for word in str(text).split() if word not in stopwords]
return len(wordlist)
# Word
df[f"col_len_{name}"] = df["col"].apply(lambda s: words_count(s, stopwords))
df[f"sub1_col_len_{name}"] = df["sub1_col"].apply(
lambda s: words_count(s, stopwords)
)
df[f"sub2_col_len_{name}"] = df["sub2_col"].apply(
lambda s: words_count(s, stopwords)
)
df[f"col_len_ratio_{name}"] = (
df[f"sub1_col_len_{name}"] / df[f"sub2_col_len_{name}"]
)
df[f"col_len_ratio2_{name}"] = (
df[f"sub2_col_len_{name}"] / df[f"col_len_{name}"]
)
df[f"col_len_c_{name}"] = df["col"].apply(len)
df[f"sub1_col_len_c_{name}"] = df["sub1_col"].apply(len)
df[f"sub2_col_len_c_{name}"] = df["sub2_col"].apply(len)
df[f"col_len_c_ratio_{name}"] = (
df[f"sub1_col_len_c_{name}"] / df[f"sub2_col_len_c_{name}"]
)
df[f"col_len_c_ratio2_{name}"] = (
df[f"sub2_col_len_c_{name}"] / df[f"col_len_c_{name}"]
)
df[f"sub1_col_len_{name}"] = df[[f"sub1_col_len_{name}"]].apply(
max_min_scaler
)
df[f"sub2_col_len_{name}"] = df[[f"sub2_col_len_{name}"]].apply(
max_min_scaler
)
df[f"sub1_col_len_c_{name}"] = df[[f"sub1_col_len_c_{name}"]].apply(
max_min_scaler
)
df[f"sub2_col_len_c_{name}"] = df[[f"sub2_col_len_c_{name}"]].apply(
max_min_scaler
)
useful_cols = [
f"sub1_col_len_{name}",
f"sub2_col_len_{name}",
f"col_len_ratio_{name}",
f"col_len_ratio2_{name}",
#
f"sub1_col_len_c_{name}",
f"sub2_col_len_c_{name}",
f"col_len_c_ratio_{name}",
f"col_len_c_ratio2_{name}",
]
return df[useful_cols]
def get_plantiff_features(df, name):
def f_plantiff_is_company(x):
r = re.search(r"原告(.*?)被告", x)
s = 0
if r:
plantiff = r.group(1)
if "法定代表人" in plantiff:
return 1
return s
reg = re.compile(r"原告")
df[f"sub1_col_num_plantiff_{name}"] = df["sub1_col_raw"].apply(
lambda s: len(reg.findall(s))
)
df[f"sub1_col_bool_plantiff_{name}"] = df["sub1_col_raw"].apply(
lambda s: f_plantiff_is_company(s)
)
useful_cols = [
f"sub1_col_num_plantiff_{name}",
f"sub1_col_bool_plantiff_{name}",
]
return df[useful_cols]
def get_defendant_features(df, name):
def f_defandent_noreply(text):
if any(
ss in text
for ss in ["未答辩", "拒不到庭", "未到庭", "未做答辩", "未应诉答辩", "未作出答辩", "未出庭"]
):
return 1
return 0
reg = re.compile(r"被告.*?法定代表人.*?。")
df[f"sub1_col_bool_defendant_{name}"] = df["sub1_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_bool_defendant_noreply_{name}"] = df["sub2_col_raw"].apply(
lambda s: f_defandent_noreply(s)
)
reg = re.compile(r"被告")
df[f"sub1_col_num_defendant_{name}"] = df["sub1_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub1_col_bool_defendant_{name}",
f"sub1_col_num_defendant_{name}",
]
return df[useful_cols]
def get_guarantor_features(df, name):
reg = re.compile(r"担保")
df[f"sub2_col_bool_guarantor_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_guarantor_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_guarantor_{name}",
f"sub2_col_num_guarantor_{name}",
]
return df[useful_cols]
def get_guaranty_features(df, name):
reg = re.compile(r"抵押")
df[f"sub2_col_bool_guaranty_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_guaranty_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_guarantor_{name}",
f"sub2_col_num_guarantor_{name}",
]
return df[useful_cols]
def get_interest_features(df, name):
    # do_lixi extracts an annualised interest rate (%) from free text: monthly
    # rates (月...) are multiplied by 12, "毛" counts as tenths (x10), annual
    # rates (年...) are returned as-is; 0 means no rate was found.
    def do_lixi(text):
m_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)(%|分)")
mm = m_reg.search(text)
m2_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)毛")
mm2 = m2_reg.search(text)
m3_reg = re.compile(r"月(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
mm3 = m3_reg.search(text)
y_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)(%|分)")
ym = y_reg.search(text)
y2_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)毛")
ym2 = y2_reg.search(text)
y3_reg = re.compile(r"年(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
ym3 = y3_reg.search(text)
if mm:
return round(float(mm.group(2)) * 12, 2)
elif mm2:
return round(float(mm2.group(2)) * 10 * 12, 2)
elif mm3:
return round(float(mm3.group(1)) * 12, 2)
elif ym:
return float(ym.group(2))
elif ym2:
return round(float(ym2.group(2)) * 10, 2)
elif ym3:
return float(ym3.group(1))
else:
return 0
    # do_lixi_c repeats the same parse and buckets the annualised rate:
    # 0 = no rate found, 1 = below 24%, 2 = 24-36%, 3 = 36% and above.
    def do_lixi_c(text):
m_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)(%|分)")
mm = m_reg.search(text)
m2_reg = re.compile(r"(月利息|月息|月利率|月利息率)按?(\d+(\.\d{1,2})?)毛")
mm2 = m2_reg.search(text)
m3_reg = re.compile(r"月(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
mm3 = m3_reg.search(text)
y_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)(%|分)")
ym = y_reg.search(text)
y2_reg = re.compile(r"(年利息|年息|年利率|年利息率)按?(\d+(\.\d{1,2})?)毛")
ym2 = y2_reg.search(text)
y3_reg = re.compile(r"年(\d+(\.\d{1,2})?)%(利息|息|利率|利息率)")
ym3 = y3_reg.search(text)
count = 0
if mm:
count = round(float(mm.group(2)) * 12, 2)
elif mm2:
count = round(float(mm2.group(2)) * 10 * 12, 2)
elif mm3:
count = round(float(mm3.group(1)) * 12, 2)
elif ym:
count = float(ym.group(2))
elif ym2:
count = round(float(ym2.group(2)) * 10, 2)
elif ym3:
count = float(ym3.group(1))
else:
count = 0
if count == 0:
return 0
elif count < 24:
return 1
elif count < 36:
return 2
else:
return 3
reg = re.compile(r"约定利息|约定月利息|年息|月息|利息|利率")
df[f"sub2_col_bool_interest_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_interest_{name}"] = df["sub2_col_raw"].apply(
lambda s: do_lixi(s)
)
df[f"sub2_col_num_interest_c_{name}"] = df["sub2_col_raw"].apply(
lambda s: do_lixi_c(s)
)
useful_cols = [
f"sub2_col_bool_interest_{name}",
f"sub2_col_num_interest_{name}",
f"sub2_col_num_interest_c_{name}",
]
return df[useful_cols]
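# Illustrative behaviour of the nested do_lixi / do_lixi_c helpers above (hypothetical
# inputs, shown as comments only because the helpers are defined inside the function):
#   "月利率2%"   -> do_lixi ~ 24.0 (monthly 2% annualised by *12), do_lixi_c -> 2
#   "年利率24%"  -> do_lixi ~ 24.0 (annual rate kept as-is),        do_lixi_c -> 2
#   text with no rate mentioned -> 0 for both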
def get_couple_features(df, name):
reg = re.compile(r"夫妻")
df[f"sub2_col_bool_couple_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_couple_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [
f"sub2_col_bool_couple_{name}",
f"sub2_col_num_couple_{name}",
]
return df[useful_cols]
def get_death_features(df, name):
reg = re.compile(r"死亡")
df[f"sub2_col_bool_death_{name}"] = df["sub2_col_raw"].apply(
lambda s: int(len(reg.findall(s)) > 0)
)
df[f"sub2_col_num_death_{name}"] = df["sub2_col_raw"].apply(
lambda s: len(reg.findall(s))
)
useful_cols = [f"sub2_col_bool_death_{name}", f"sub2_col_num_death_{name}"]
return df[useful_cols]
def do_basic_feature(df, stopwords, name):
feature_list = []
feature = get_length_features(df, stopwords, name)
feature_list.append(feature)
feature = get_plantiff_features(df, name)
feature_list.append(feature)
feature = get_defendant_features(df, name)
feature_list.append(feature)
feature = get_guarantor_features(df, name)
feature_list.append(feature)
feature = get_guaranty_features(df, name)
feature_list.append(feature)
feature = get_interest_features(df, name)
feature_list.append(feature)
feature = get_couple_features(df, name)
feature_list.append(feature)
index = feature_list[0].index
for feature_dataset in feature_list[1:]:
pd.testing.assert_index_equal(index, feature_dataset.index)
df = pd.concat(feature_list, axis=1)
return df
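# Hypothetical call, assuming `df` already carries the derived columns used above
# ("col", "sub1_col", "sub2_col", "sub1_col_raw", "sub2_col_raw", e.g. built with
# trans / get_sub_col / get_sub_col_raw) and `stopwords` is an iterable of words:
#   basic_features = do_basic_feature(df, stopwords, name="fact")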
def do_tfidf_feature(df, tfidf):
n_components = 30
svd = TruncatedSVD(
n_components=n_components, algorithm="arpack", random_state=2019
)
col_tfidf = tfidf.transform(df["col"])
feature_names = tfidf.get_feature_names()
ret_df = pd.DataFrame(col_tfidf.toarray(), columns=feature_names)
    return ret_df
    # NOTE: the SVD code below is unreachable because of the early return above;
    # it is kept as-is for reference.
    col_svd = svd.fit_transform(col_tfidf)
    best_features = [
        feature_names[i] + "i" for i in svd.components_[0].argsort()[::-1]
    ]
    ret_df = pd.DataFrame(col_svd, columns=best_features[:n_components])
return ret_df
def get_length_related_features_col2(df):
df_copy = df.copy()
df_copy["col2_len"] = df_copy["col2"].apply(len)
df_copy["col2_len_relative"] = (
df_copy["col2_len"] / df_copy["col2_len"].max()
)
df_copy["col2_title_len"] = df_copy["col2"].apply(
lambda s: len(s.split("\n\n")[0])
)
df_copy["col2_title_relative"] = (
df_copy["col2_title_len"] / df_copy["col2_title_len"].max()
)
df_copy["col2_content_len"] = (
df_copy["col2_len"] - df_copy["col2_title_len"]
)
df_copy["col2_content_len_relative"] = (
df_copy["col2_content_len"] / df_copy["col2_content_len"].max()
)
df_copy["col2_title_ratio"] = (
df_copy["col2_title_len"] / df_copy["col2_len"]
)
useful_cols = [
"col2_len_relative",
"col2_title_relative",
"col2_content_len_relative",
"col2_title_ratio",
]
return df_copy[useful_cols]
def get_col2_re_features(df):
old_cols = set(df.columns)
    # number of plaintiffs (and plaintiff count difference)
reg = re.compile(r"原告")
df["col2_num_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
    # number and ratio of male plaintiffs
reg = re.compile(r"原告.*?男.*?被告")
df["col2_num_male_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_male_accuser_rate"] = (
df["col2_num_male_accuser"] / df["col2_num_accuser"]
)
    # number and ratio of plaintiffs represented by an entrusted litigation agent
reg = re.compile(r"原告.*?委托诉讼代理人.*?被告")
df["col2_num_company_accuser"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_company_accuser_rate"] = (
df["col2_num_company_accuser"] / df["col2_num_accuser"]
)
    # number of defendants, and difference vs. the plaintiff count
reg = re.compile(r"被告")
df["col2_num_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
    # number and ratio of male defendants
reg = re.compile(r"被告.*?男.*?。")
df["col2_num_male_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_male_defendant_rate"] = (
df["col2_num_male_defendant"] / df["col2_num_defendant"]
)
df["col2_defendant_minus_num_accuser"] = (
df["col2_num_defendant"] - df["col2_num_accuser"]
).astype(int)
    # number and ratio of defendants with a legal representative (i.e. companies)
reg = re.compile(r"被告.*?法定代表人.*?。")
df["col2_num_company_defendant"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[0]))
)
df["col2_num_company_accuser_rate"] = (
df["col2_num_company_defendant"] / df["col2_num_defendant"]
)
    # guarantee mentions
reg = re.compile(r"担保")
df["col2_num_danbao"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[1]))
)
    # number of punctuation marks
reg = re.compile(r"[。:,]")
df["col2_num_punctuation"] = df["col2"].apply(
lambda s: len(reg.findall(s.split("\n\n")[1]))
)
    # word count
df["col2_num_word"] = df["col2"].apply(
lambda s: len(list(jieba.cut(s.split("\n\n")[1])))
)
df["col2_num_word_ratio"] = df["col2_num_word"] / df["col2_num_word"].max()
df["col2_num_word_divide_length"] = df["col2_num_word"] / df["col2"].apply(
len
)
useful_cols = list(set(df.columns).difference(old_cols))
return df[useful_cols]
def do_feature_engineering(list_text):
df = pd.DataFrame(list_text, columns=["col2"])
feature_list = []
feature = get_length_related_features_col2(df)
feature_list.append(feature)
feature = get_col2_re_features(df)
feature_list.append(feature)
index = feature_list[0].index
for feature_dataset in feature_list[1:]:
pd.testing.assert_index_equal(index, feature_dataset.index)
    data = pd.concat(feature_list, axis=1)
import pandas as pd
import numpy as np
from datetime import datetime
###############
# SELECT DATA #
###############
print("Selecting attributes...")
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/raw/GIT_COMMITS.csv")
attributes = ['projectID', 'commitHash', 'author', 'committer', 'committerDate']
gitCommits = gitCommits[attributes]
gitCommits.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/raw/GIT_COMMITS_CHANGES.csv")
attributes = ['projectID', 'commitHash', 'changeType', 'linesAdded', 'linesRemoved']
gitCommitsChanges = gitCommitsChanges[attributes]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv', header=True)
# JIRA_ISSUES
jiraIssues = pd.read_csv("../../data/raw/JIRA_ISSUES.csv")
attributes = ['projectID', 'key', 'creationDate', 'resolutionDate', 'type', 'priority', 'assignee', 'reporter']
jiraIssues = jiraIssues[attributes]
jiraIssues.to_csv('../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv', header=True)
# REFACTORING_MINER
refactoringMiner = pd.read_csv("../../data/raw/REFACTORING_MINER.csv")
attributes = ['projectID', 'commitHash', 'refactoringType']
refactoringMiner = refactoringMiner[attributes]
refactoringMiner.to_csv('../../data/interim/DataPreparation/SelectData/REFACTORING_MINER_select.csv', header=True)
# SONAR_ISSUES
sonarIssues = pd.read_csv("../../data/raw/SONAR_ISSUES.csv")
attributes = ['projectID', 'creationDate', 'closeDate', 'creationCommitHash', 'closeCommitHash', 'type', 'severity',
'debt', 'author']
sonarIssues = sonarIssues[attributes]
sonarIssues.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_ISSUES_select.csv', header=True)
# SONAR_MEASURES
sonarMeasures = pd.read_csv("../../data/raw/SONAR_MEASURES.csv")
attributes = ['commitHash', 'projectID', 'functions', 'commentLinesDensity', 'complexity', 'functionComplexity', 'duplicatedLinesDensity',
'violations', 'blockerViolations', 'criticalViolations', 'infoViolations', 'majorViolations', 'minorViolations', 'codeSmells',
'bugs', 'vulnerabilities', 'cognitiveComplexity', 'ncloc', 'sqaleIndex', 'sqaleDebtRatio', 'reliabilityRemediationEffort', 'securityRemediationEffort']
sonarMeasures = sonarMeasures[attributes]
sonarMeasures.to_csv('../../data/interim/DataPreparation/SelectData/SONAR_MEASURES_select.csv', header=True)
# SZZ_FAULT_INDUCING_COMMITS
szzFaultInducingCommits = pd.read_csv("../../data/raw/SZZ_FAULT_INDUCING_COMMITS.csv")
attributes = ['projectID', 'faultFixingCommitHash', 'faultInducingCommitHash', 'key']
szzFaultInducingCommits = szzFaultInducingCommits[attributes]
szzFaultInducingCommits.to_csv('../../data/interim/DataPreparation/SelectData/SZZ_FAULT_INDUCING_COMMITS_select.csv', header=True)
print("Attributes selected.")
##############
# CLEAN DATA #
##############
print("Cleaning data...")
def intersection(l1, l2):
temp = set(l2)
l3 = [value for value in l1 if value in temp]
return l3
def difference(li1, li2):
return (list(list(set(li1)-set(li2)) + list(set(li2)-set(li1))))
# GIT_COMMITS
gitCommits = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_select.csv")
authorNan = list(np.where(gitCommits.author.isna()))[0]
committerNan = list(np.where(gitCommits.committer.isna()))[0]
inters = intersection(authorNan, committerNan)
gitCommits = gitCommits.drop(inters)
gitCommits.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_clean.csv', header=True)
# GIT_COMMITS_CHANGES
gitCommitsChanges = pd.read_csv("../../data/interim/DataPreparation/SelectData/GIT_COMMITS_CHANGES_select.csv").iloc[:,1:]
gitCommitsChanges.to_csv('../../data/interim/DataPreparation/CleanData/GIT_COMMITS_CHANGES_clean.csv', header=True)
# JIRA_ISSUES
jiraIssues = | pd.read_csv("../../data/interim/DataPreparation/SelectData/JIRA_ISSUES_select.csv") | pandas.read_csv |
from transformers import BertModel, BertTokenizer
import torch
import numpy as np
import pandas as pd
from torch import nn
from torch.utils.data import Dataset, DataLoader
from utils import cleaning
RANDOM_SEED = 42
np.random.seed(RANDOM_SEED)
torch.manual_seed(RANDOM_SEED)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class OMPDataset(Dataset):
def __init__(self, text, targets, tokenizer, max_len):
self.text = text
self.targets = targets
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.text)
def __getitem__(self, item):
text = str(self.text[item])
target = self.targets[item]
encoding = self.tokenizer.encode_plus(
text,
add_special_tokens=True,
max_length=self.max_len,
return_token_type_ids=False,
padding = 'max_length',
return_attention_mask=True,
return_tensors='pt',
)
return {
'text': text,
'input_ids': encoding['input_ids'].flatten(),
'attention_mask': encoding['attention_mask'].flatten(),
'targets': torch.tensor(target, dtype=torch.float)
}
def create_data_loader(df:pd.DataFrame, label:str, tokenizer:BertTokenizer, max_len:int, batch_size:int) -> DataLoader:
ds = OMPDataset(
text=df.text.to_numpy(),
targets=df[label].to_numpy(),
tokenizer=tokenizer,
max_len=max_len
)
return DataLoader(ds, batch_size=batch_size, num_workers=0, shuffle=False)
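# Minimal usage sketch (assumptions: `df` has a "text" column and a binary label
# column called "label"; max_len and batch_size values are illustrative):
#   tokenizer = BertTokenizer.from_pretrained("deepset/gbert-base")
#   loader = create_data_loader(df, "label", tokenizer, max_len=264, batch_size=16)
#   batch = next(iter(loader))  # dict with text, input_ids, attention_mask, targets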
class BinaryClassifier(nn.Module):
def __init__(self):
super(BinaryClassifier, self).__init__()
self.bert = BertModel.from_pretrained("deepset/gbert-base")
self.dropout = nn.Dropout(p=0.3)
self.out = nn.Linear(self.bert.config.hidden_size, 1)
self.tokenizer = BertTokenizer.from_pretrained("deepset/gbert-base")
def forward(self, input_ids, attention_mask):
_, pooled_output = self.bert(
input_ids=input_ids,
attention_mask=attention_mask
).values()
output = self.dropout(pooled_output)
output = self.out(output)
return output
def get_model(state_dict_file: str) -> BinaryClassifier:
model = BinaryClassifier().to(device)
model.load_state_dict(torch.load(f"{state_dict_file}"))
return model
def get_prediction(text_list: list, model: BinaryClassifier) -> list:
BATCH_SIZE = 1
MAX_LEN = 264
model.eval()
# ## Data loading
normalize = lambda x: cleaning.normalize(x, url_emoji_dummy=False, pure_words=False)
    X = pd.Series(text_list)
import pytest
import numpy as np
import pandas as pd
from systrade.trading.brokers import PaperBroker
T_START = pd.to_datetime('2019/07/10-09:30:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
T_END = pd.to_datetime('2019/07/10-10:00:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
TIMEINDEX = pd.date_range(start=T_START,end=T_END,freq='1min')
DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX)) ,
'tick1':np.arange(len(TIMEINDEX)-1,-1,-1)},
index=TIMEINDEX)
# DATA_DF = pd.DataFrame(data={'tick0':np.arange(len(TIMEINDEX))},
# index=TIMEINDEX)
class TestPaperBroker:
def test_init(self):
testseries = pd.Series(np.arange(10))
with pytest.raises(TypeError):
broker = PaperBroker(testseries)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,slippage_time=1.0)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,transaction_cost=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,transaction_cost=-0.5)
with pytest.raises(TypeError):
broker = PaperBroker(DATA_DF,spread_pct=lambda x: x**2)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=-0.5)
with pytest.raises(ValueError):
broker = PaperBroker(DATA_DF,spread_pct=200)
def test_next_extant_time(self):
broker = PaperBroker(DATA_DF)
t_get = pd.to_datetime('2019/07/10-09:35:05:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t_out = broker.next_extant_time(t_get)
t_expect = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
assert t_out==t_expect
t_get = pd.to_datetime('2019/07/10-11:35:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
t_out = broker.next_extant_time(t_get)
def test_get_timeindex_subset(self):
broker = PaperBroker(DATA_DF)
t0 = pd.to_datetime('2019/07/10-09:29:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
t0 = pd.to_datetime('2019/07/10-09:34:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
t1 = pd.to_datetime('2019/07/10-11:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
with pytest.raises(ValueError):
tind = broker.get_timeindex_subset(t0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(0,t1)
with pytest.raises(TypeError):
tind = broker.get_timeindex_subset(t0,1)
t1 = pd.to_datetime('2019/07/10-09:36:00:000000', format='%Y/%m/%d-%H:%M:%S:%f')
tind = broker.get_timeindex_subset(t0,t1)
print(tind)
print(pd.date_range(t0,t1,freq='1min'))
assert np.array_equal(tind.values,pd.date_range(t0,t1,freq='1min').values)
def test_get_firstlast_times(self):
broker = PaperBroker(DATA_DF)
t0,t1 = broker.get_firstlast_times()
assert t0==T_START
assert t1==T_END
def test_get_tick_list(self):
broker = PaperBroker(DATA_DF)
ticks = broker.get_tick_list()
assert ticks == ['tick0','tick1']
def test_get_price_list(self):
broker = PaperBroker(DATA_DF)
t0 = T_START
t1 = T_START + pd.DateOffset(minutes=5)
with pytest.raises(ValueError):
prices = broker.get_price_list('badtick',t0,t1)
with pytest.raises(ValueError):
prices = broker.get_price_list(['badtick'],t0,t1)
prices = broker.get_price_list('tick0',t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
prices = broker.get_price_list(['tick0','tick1'],t0,t1)
assert np.array_equal(prices['tick0'].values , np.arange(6) )
assert np.array_equal(prices['tick1'].values ,
np.arange(len(TIMEINDEX)-1,len(TIMEINDEX)-7,-1) )
def test_get_unslipped_price(self):
broker = PaperBroker(DATA_DF)
t_get = T_START+pd.DateOffset(minutes=5)
with pytest.raises(ValueError):
pp = broker.get_unslipped_price('badtick',t_get)
price = broker.get_unslipped_price('tick0',t_get)
assert price == 5
def test_get_price(self):
broker = PaperBroker(DATA_DF,
slippage_time=pd.DateOffset(seconds=30),
transaction_cost = 2.0)
        t_get = T_START + pd.DateOffset(minutes=5)
#!/usr/bin/env python
"""Tests for `qnorm` package."""
import unittest
import numpy as np
import pandas as pd
import qnorm
import tracemalloc
tracemalloc.start()
df1 = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
df1.to_csv("test.csv")
df1.to_hdf("test.hdf", key="qnorm", format="table", data_columns=True, mode="w")
df1.to_parquet("test.parquet")
class TestQnorm(unittest.TestCase):
def test_000_numpy(self):
"""
test numpy support
"""
arr = np.random.normal(size=(20, 2))
qnorm.quantile_normalize(arr)
def test_001_pandas(self):
"""
test pandas support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
qnorm.quantile_normalize(df)
def test_002_wiki(self):
"""
test the wiki example
https://en.wikipedia.org/wiki/Quantile_normalization
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df).values, result
)
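    # Worked arithmetic for the wiki example above: sorting each column gives
    # C1=[2,3,4,5], C2=[1,2,4,4], C3=[3,4,6,8]; the rank means are (2+1+3)/3=2,
    # (3+2+4)/3=3, (4+4+6)/3~=4.667 and (5+4+8)/3~=5.667, and the tied 4.0s in C2
    # (ranks 3 and 4) receive the average (4.667+5.667)/2~=5.167.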
def test_003_no_change(self):
"""
no sorting should happen here
"""
arr = np.empty(shape=(20, 3))
for col in range(arr.shape[1]):
vals = np.arange(arr.shape[0])
np.random.shuffle(vals)
arr[:, col] = vals
qnorm_arr = qnorm.quantile_normalize(arr)
np.testing.assert_array_almost_equal(arr, qnorm_arr)
def test_004_double(self):
"""
if dtype is double, return double
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float64)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float64
def test_005_single(self):
"""
if dtype is single, return single
"""
arr = np.random.normal(0, 1, size=(20, 3))
arr = arr.astype(np.float32)
qnorm_arr = qnorm.quantile_normalize(arr)
assert qnorm_arr.dtype == np.float32
def test_006_target(self):
"""
test if the target is used instead of the qnorm values
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
target = np.arange(10, 20)
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_007_target_notsorted(self):
"""
make sure an unsorted target gets sorted first
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
np.random.shuffle(arr)
# take the reverse, which should be sorted by qnorm
target = np.arange(10, 20)[::-1]
qnorm_arr = qnorm.quantile_normalize(arr, target=target)
for val in target:
assert (
val in qnorm_arr[:, 0] and val in qnorm_arr[:, 1]
), f"value {val} not in qnorm array"
def test_008_short_target(self):
"""
        test if an error is raised with an invalid-sized target
"""
arr = np.array([np.arange(0, 10), np.arange(0, 10)]).T
target = np.arange(10, 15)
self.assertRaises(ValueError, qnorm.quantile_normalize, arr, target)
def test_009_wiki_ncpus(self):
"""
        test the wiki example with multiple cpus (ncpus=10)
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
result = np.array(
[
[5.66666667, 5.16666667, 2.0],
[2.0, 2.0, 3.0],
[3.0, 5.16666667, 4.66666667],
[4.66666667, 3.0, 5.66666667],
]
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, ncpus=10).values, result
)
def test_010_axis_numpy(self):
"""
test numpy axis support
"""
arr = np.random.normal(size=(50, 4))
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr.T, axis=0).T,
qnorm.quantile_normalize(arr, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(arr, axis=1),
qnorm.quantile_normalize(arr.T, axis=0).T,
)
def test_011_axis_pandas(self):
"""
        test pandas axis support
"""
df = pd.DataFrame(
{
"C1": {"A": 5.0, "B": 2.0, "C": 3.0, "D": 4.0},
"C2": {"A": 4.0, "B": 1.0, "C": 4.0, "D": 2.0},
"C3": {"A": 3.0, "B": 4.0, "C": 6.0, "D": 8.0},
}
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df.T, axis=0).T,
qnorm.quantile_normalize(df, axis=1),
)
np.testing.assert_array_almost_equal(
qnorm.quantile_normalize(df, axis=1),
qnorm.quantile_normalize(df.T, axis=0).T,
)
def test_012_from_csv(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.csv", "test_out.csv")
df1 = pd.read_csv("test.csv", index_col=0, header=0)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_013_from_csv_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", rowchunksize=rowchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_014_from_csv_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv", "test_out.csv", colchunksize=colchunksize
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_015_from_csv_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_csv("test.csv", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.csv",
"test_out.csv",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_csv("test_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_016_from_csv_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(index=range(5000), columns=range(100))
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_csv("test_large.csv")
qnorm.incremental_quantile_normalize(
"test_large.csv",
"test_large_out.csv",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_csv("test_large_out.csv", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_017_from_hdf(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.hdf", "test_out.hdf")
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_018_from_hdf_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", rowchunksize=rowchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_019_from_hdf_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf", "test_out.hdf", colchunksize=colchunksize
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_020_from_hdf_colrowchunk(self):
"""
test the incremental_quantile_normalize with both row and colchunks
"""
df1 = pd.read_hdf("test.hdf", index_col=0, header=0)
for colchunksize in range(1, 10):
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.hdf",
"test_out.hdf",
rowchunksize=rowchunksize,
colchunksize=colchunksize,
)
df2 = pd.read_hdf("test_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_021_from_hdf_largefile(self):
"""
test whether or not incremental_quantile_normalize works with a larger
random file
"""
np.random.seed(42)
df1 = pd.DataFrame(
index=range(5000),
columns=["sample" + str(col) for col in range(100)],
dtype=int,
)
df1[:] = np.random.randint(0, 100, size=df1.shape)
df1.to_hdf(
"test_large.hdf", key="qnorm", format="table", data_columns=True
)
qnorm.incremental_quantile_normalize(
"test_large.hdf",
"test_large_out.hdf",
rowchunksize=11,
colchunksize=11,
)
df2 = pd.read_hdf("test_large_out.hdf", index_col=0, header=0)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=4
)
def test_022(self):
"""
Test another array, not just wiki example.
"""
df = pd.DataFrame(
{
"C1": {
"A": 2.0,
"B": 2.0,
"C": 2.0,
"D": 2.0,
"E": 6.0,
"F": 1.0,
},
"C2": {
"A": 2.0,
"B": 2.0,
"C": 1.0,
"D": 3.5,
"E": 5.0,
"F": 1.0,
},
}
)
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df).values,
np.array(
[
[2.0625, 2.0],
[2.0625, 2.0],
[2.0625, 1.25],
[2.0625, 2.75],
[5.5, 5.5],
[1.0, 1.25],
]
),
)
def test_023_from_parquet(self):
"""
test the basic incremental_quantile_normalize functionality
"""
qnorm.incremental_quantile_normalize("test.parquet", "test_out.parquet")
df1 = pd.read_parquet("test.parquet")
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_024_from_parquet_rowchunk(self):
"""
test the incremental_quantile_normalize with rowchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for rowchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", rowchunksize=rowchunksize
)
df2 = pd.read_parquet("test_out.parquet")
np.testing.assert_almost_equal(
qnorm.quantile_normalize(df1), df2.values, decimal=5
)
def test_025_from_parquet_colchunk(self):
"""
test the incremental_quantile_normalize with colchunks functionality
"""
df1 = pd.read_parquet("test.parquet")
for colchunksize in range(1, 10):
qnorm.incremental_quantile_normalize(
"test.parquet", "test_out.parquet", colchunksize=colchunksize
)
            df2 = pd.read_parquet("test_out.parquet")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2022/2/23 21:22
Desc: Eastmoney Data Center - Annual/Quarterly Reports - Dividend and Share Allotment (分红送配)
http://data.eastmoney.com/yjfp/
"""
import pandas as pd
import requests
from tqdm import tqdm
def stock_fhps_em(date: str = "20210630") -> pd.DataFrame:
"""
    Eastmoney Data Center - Annual/Quarterly Reports - Dividend and Share Allotment (分红送配)
    http://data.eastmoney.com/yjfp/
    :param date: reporting period of the dividend and share allotment
    :type date: str
    :return: dividend and share allotment data
:rtype: pandas.DataFrame
"""
url = 'https://datacenter-web.eastmoney.com/api/data/v1/get'
params = {
'sortColumns': 'PLAN_NOTICE_DATE',
'sortTypes': '-1',
'pageSize': '500',
'pageNumber': '1',
'reportName': 'RPT_SHAREBONUS_DET',
'columns': 'ALL',
'quoteColumns':'',
'js': '{"data":(x),"pages":(tp)}',
'source': 'WEB',
'client': 'WEB',
'filter': f"""(REPORT_DATE='{"-".join([date[:4], date[4:6], date[6:]])}')"""
}
r = requests.get(url, params=params)
data_json = r.json()
total_pages = int(data_json['result']['pages'])
big_df = pd.DataFrame()
for page in tqdm(range(1, total_pages + 1), leave=False):
params.update({'pageNumber': page})
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json['result']['data'])
big_df = pd.concat([big_df, temp_df], ignore_index=True)
big_df.columns = [
'_',
'名称',
'_',
'_',
'代码',
'送转股份-送转总比例',
'送转股份-送转比例',
'送转股份-转股比例',
'现金分红-现金分红比例',
'预案公告日',
'股权登记日',
'除权除息日',
'_',
'方案进度',
'_',
'最新公告日期',
'_',
'_',
'_',
'每股收益',
'每股净资产',
'每股公积金',
'每股未分配利润',
'净利润同比增长',
'总股本',
'_',
'现金分红-股息率',
'-',
'-',
'-',
]
big_df = big_df[[
'代码',
'名称',
'送转股份-送转总比例',
'送转股份-送转比例',
'送转股份-转股比例',
'现金分红-现金分红比例',
'现金分红-股息率',
'每股收益',
'每股净资产',
'每股公积金',
'每股未分配利润',
'净利润同比增长',
'总股本',
'预案公告日',
'股权登记日',
'除权除息日',
'方案进度',
'最新公告日期',
]]
big_df['送转股份-送转总比例'] = pd.to_numeric(big_df['送转股份-送转总比例'])
big_df['送转股份-送转比例'] = pd.to_numeric(big_df['送转股份-送转比例'])
big_df['送转股份-转股比例'] = pd.to_numeric(big_df['送转股份-转股比例'])
big_df['现金分红-现金分红比例'] = pd.to_numeric(big_df['现金分红-现金分红比例'])
big_df['现金分红-股息率'] = pd.to_numeric(big_df['现金分红-股息率'])
big_df['每股收益'] = pd.to_numeric(big_df['每股收益'])
big_df['每股净资产'] = pd.to_numeric(big_df['每股净资产'])
big_df['每股公积金'] = pd.to_numeric(big_df['每股公积金'])
big_df['每股未分配利润'] = pd.to_numeric(big_df['每股未分配利润'])
big_df['净利润同比增长'] = pd.to_numeric(big_df['净利润同比增长'])
    big_df['总股本'] = pd.to_numeric(big_df['总股本'])
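if __name__ == "__main__":
    # Illustrative usage only: it needs network access to the Eastmoney endpoint
    # above and assumes the function ends by returning the assembled big_df.
    stock_fhps_em_df = stock_fhps_em(date="20210630")
    print(stock_fhps_em_df)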
from __future__ import print_function, division
import pandas as pd
from datetime import timedelta
from nilmtk.tests.testingtools import data_dir
from os.path import join
import itertools
from collections import OrderedDict
import numpy as np
from nilmtk.consts import JOULES_PER_KWH
from nilmtk.measurement import measurement_columns, AC_TYPES
from nilmtk.utils import flatten_2d_list
MAX_SAMPLE_PERIOD = 15
def power_data(simple=True):
"""
Returns
-------
DataFrame
"""
if simple:
STEP = 10
data = [0, 0, 0, 100, 100, 100, 150,
150, 200, 0, 0, 100, 5000, 0]
secs = np.arange(start=0, stop=len(data) * STEP, step=STEP)
else:
data = [0, 0, 0, 100, 100, 100, 150,
150, 200, 0, 0, 100, 5000, 0]
secs = [0, 10, 20, 30, 200, 210, 220,
230, 240, 249, 260, 270, 290, 1000]
data = np.array(data, dtype=np.float32)
active = data
reactive = data * 0.9
apparent = data * 1.1
index = [pd.Timestamp('2010-01-01') + timedelta(seconds=sec)
for sec in secs]
column_tuples = [('power', ac_type)
for ac_type in ['active', 'reactive', 'apparent']]
df = pd.DataFrame(np.array([active, reactive, apparent]).transpose(),
index=index, dtype=np.float32,
columns=measurement_columns(column_tuples))
# calculate energy
# this is not cumulative energy
timedelta_secs = np.diff(secs).clip(
0, MAX_SAMPLE_PERIOD).astype(np.float32)
for ac_type in AC_TYPES:
joules = timedelta_secs * df['power', ac_type].values[:-1]
joules = np.concatenate([joules, [0]])
kwh = joules / JOULES_PER_KWH
if ac_type == 'reactive':
df['energy', ac_type] = kwh
elif ac_type == 'apparent':
df['cumulative energy', ac_type] = kwh.cumsum()
return df
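def _energy_from_power_sketch():
    """
    Not used by the tests: a minimal sketch (made-up numbers) of the energy
    calculation in power_data() above. Power is integrated over the gap to the
    next sample, and gaps are clipped to MAX_SAMPLE_PERIOD so long outages
    contribute no extra energy.
    """
    secs = np.array([0.0, 10.0, 20.0, 50.0])
    power = np.array([100.0, 100.0, 200.0, 0.0])
    dt = np.diff(secs).clip(0, MAX_SAMPLE_PERIOD)      # [10, 10, 15] seconds
    joules = np.concatenate([dt * power[:-1], [0.0]])  # last sample has no interval
    return joules / JOULES_PER_KWH                     # kWh per interval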
def create_random_df_hierarchical_column_index():
N_PERIODS = 1E4
N_METERS = 5
N_MEASUREMENTS_PER_METER = 3
meters = ['meter{:d}'.format(i) for i in range(1, N_METERS + 1)]
meters = [[m] * N_MEASUREMENTS_PER_METER for m in meters]
meters = flatten_2d_list(meters)
level2 = ['power', 'power', 'voltage'][
:N_MEASUREMENTS_PER_METER] * N_METERS
level3 = ['active', 'reactive', ''][:N_MEASUREMENTS_PER_METER] * N_METERS
columns = [meters, level2, level3]
columns = pd.MultiIndex.from_arrays(columns)
rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
data = np.random.randint(low=0, high=1000,
size=(N_PERIODS,
N_METERS * N_MEASUREMENTS_PER_METER))
return pd.DataFrame(data=data, index=rng, columns=columns, dtype=np.float32)
MEASUREMENTS = [('power', 'active'), ('energy', 'reactive'), ('voltage', '')]
def create_random_df():
N_PERIODS = 1E4
rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
data = np.random.randint(
low=0, high=1000, size=(N_PERIODS, len(MEASUREMENTS)))
return pd.DataFrame(data=data, index=rng, dtype=np.float32,
columns=measurement_columns(MEASUREMENTS))
TEST_METER = {'manufacturer': 'Test Manufacturer',
'model': 'Random Meter',
'sample_period': 10,
'max_sample_period': MAX_SAMPLE_PERIOD,
'measurements': []}
for col in MEASUREMENTS:
TEST_METER['measurements'].append({
'physical_quantity': col[0], 'type': col[1],
'lower_limit': 0, 'upper_limit': 6000})
def add_building_metadata(store, elec_meters, key='building1', appliances=[]):
node = store.get_node(key)
md = {
'instance': 1,
'elec_meters': elec_meters,
'appliances': appliances
}
node._f_setattr('metadata', md)
def create_co_test_hdf5():
FILENAME = join(data_dir(), 'co_test.h5')
N_METERS = 3
chunk = 1000
N_PERIODS = 4 * chunk
    rng = pd.date_range('2012-01-01', freq='S', periods=N_PERIODS)
import pandas as pd
import numpy as np
from scipy import stats
import warnings
from qualipy.util import get_column
from qualipy.reflect.function import function
# numeric
@function(return_format=float, input_format=float)
def mean(data, column):
return data[column].mean()
@function(return_format=float, input_format=float)
def count(data, column):
return data.shape[0]
@function(return_format=float, input_format=float)
def std(data, column):
return data[column].std()
@function(return_format=float, input_format=float)
def max(data, column):
return data[column].max()
@function(return_format=float, input_format=float)
def min(data, column):
return data[column].min()
@function(allowed_arguments=["quantile"], return_format=float, input_format=float)
def quantile(data, column, quantile=0.5):
return data[column].quantile(quantile)
@function(return_format=int)
def number_of_duplicates(data, column):
return data.shape[0] - data.drop_duplicates().shape[0]
@function(return_format=float)
def percentage_missing(data, column):
missing_data = data[(data[column].isnull()) | (data[column] == "")]
try:
return missing_data.shape[0] / data.shape[0]
except ZeroDivisionError:
return 1
@function(
return_format=int,
display_name="Number of Unique Elements",
description="This is a raw count of the total number of unique elements in the column",
)
def number_of_unique(data, column):
return data[column].nunique()
@function()
def get_top(data, column):
return data[column].describe()["top"]
@function()
def freq(data, column):
return data[column].describe()["freq"]
@function(return_format=bool)
def is_unique(data, column):
if column == "index":
return data.index.unique().shape[0] == data.shape[0]
return data[column].unique().shape[0] == data.shape[0]
@function(allowed_arguments=["column_two"], return_format=float)
def correlation_two_columns(data, column, column_two):
return data[column].corr(data[column_two])
@function(allowed_arguments=["std_away"], return_format=int)
def number_of_outliers(data, column, std_away):
data = data[data[column].notnull()]
return data[np.abs(stats.zscore(data[column])) > std_away].shape[0]
# non numeric
@function(return_format=dict)
def value_counts(data, column):
if data[column].nunique() > 100:
warnings.warn(
f"Too many unique columns for column {column}. Ignoring value counts"
)
return np.NaN
return (
data[data[column] != "nan"][column]
.value_counts()
.sort_values(ascending=False)
.head(100)
.to_dict()
)
@function(allowed_arguments=["include_nan", "column_two"], return_format=dict)
def heatmap(data, column, column_two=None, include_nan=True):
if include_nan:
data = data[(data[column] != "nan") & (data[column_two] != "nan")]
cross = pd.crosstab(data[column], data[column_two])
cross_data = {
"z": cross.values.tolist(),
"y": cross.index.values.tolist(),
"x": cross.columns.tolist(),
}
return cross_data
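def _heatmap_payload_sketch():
    """
    Not registered as a qualipy function: a tiny sketch (invented column names)
    of the crosstab payload that heatmap() above returns.
    """
    data = pd.DataFrame({"a": ["x", "x", "y"], "b": ["p", "q", "p"]})
    cross = pd.crosstab(data["a"], data["b"])
    return {
        "z": cross.values.tolist(),       # co-occurrence counts per (a, b) pair
        "y": cross.index.values.tolist(),
        "x": cross.columns.tolist(),
    }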
@function(return_format=dict, allowed_arguments=["column_two"])
def correlation(data, column, column_two):
corrs = pd.DataFrame(
{column: data[column].values, "other_col": data[column_two].values}
)
corrs_data = {
"z": corrs.values.tolist(),
"y": corrs.index.values.tolist(),
"x": corrs.columns.tolist(),
}
return corrs_data
@function(return_format=dict, allowed_arguments=["time_freq", "epoch_datetime"])
def events_per_time_period(data, column, epoch_datetime="max", time_freq="1D"):
d = data.copy()
d[column] = pd.to_datetime(d[column])
d = d[d[column].notnull()]
counts = d.groupby(pd.Grouper(key=column, freq=time_freq)).apply(
lambda g: g.shape[0]
)
if epoch_datetime == "max":
        epoch_datetime = pd.to_datetime(d[column])
import asyncio
import os
import uuid
import pandas as pd
import pytest
from storey import build_flow, CSVSource, CSVTarget, SyncEmitSource, Reduce, Map, FlatMap, AsyncEmitSource, ParquetTarget
from .integration_test_utils import _generate_table_name
has_azure_credentials = os.getenv("AZURE_ACCOUNT_NAME") and os.getenv("AZURE_ACCOUNT_KEY") and os.getenv("AZURE_BLOB_STORE")
if has_azure_credentials:
storage_options = {"account_name": os.getenv("AZURE_ACCOUNT_NAME"), "account_key": os.getenv("AZURE_ACCOUNT_KEY")}
from adlfs import AzureBlobFileSystem
@pytest.fixture()
def azure_create_csv():
# Setup
azure_blob = os.getenv("AZURE_BLOB_STORE")
file_path = _generate_table_name(f'{azure_blob}/az_storey')
_write_test_csv(file_path)
# Test runs
yield file_path
# Teardown
_delete_file(file_path)
@pytest.fixture()
def azure_teardown_file():
# Setup
azure_blob = os.getenv("AZURE_BLOB_STORE")
file_path = _generate_table_name(f'{azure_blob}/az_storey')
# Test runs
yield file_path
# Teardown
_delete_file(file_path)
@pytest.fixture()
def azure_setup_teardown_test():
# Setup
table_name = _generate_table_name(f'{os.getenv("AZURE_BLOB_STORE")}/test')
# Test runs
yield table_name
# Teardown
azure_recursive_delete(table_name)
def _write_test_csv(file_path):
az_fs = AzureBlobFileSystem(**storage_options)
data = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
with az_fs.open(file_path, 'w') as f:
f.write(data)
def _delete_file(path):
az_fs = AzureBlobFileSystem(**storage_options)
az_fs.delete(path)
def azure_recursive_delete(path):
az_fs = AzureBlobFileSystem(**storage_options)
az_fs.rm(path, True)
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure(azure_create_csv):
controller = build_flow([
CSVSource(f'az:///{azure_create_csv}', header=True, storage_options=storage_options),
FlatMap(lambda x: x),
Map(lambda x: int(x)),
Reduce(0, lambda acc, x: acc + x),
]).run()
termination_result = controller.await_termination()
assert termination_result == 495
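def _local_flow_sketch():
    """
    Not a test: the same source -> map -> reduce idea as above, fed from memory
    through SyncEmitSource so it can run without any Azure credentials.
    """
    controller = build_flow([
        SyncEmitSource(),
        Map(lambda x: int(x)),
        Reduce(0, lambda acc, x: acc + x),
    ]).run()
    for i in range(10):
        controller.emit(i)
    controller.terminate()
    return controller.await_termination()  # 45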
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_csv_reader_from_azure_error_on_file_not_found():
controller = build_flow([
CSVSource(f'az:///{os.getenv("AZURE_BLOB_STORE")}/idontexist.csv', header=True, storage_options=storage_options),
]).run()
try:
controller.await_termination()
assert False
except FileNotFoundError:
pass
async def async_test_write_csv_to_azure(azure_teardown_csv):
controller = build_flow([
AsyncEmitSource(),
CSVTarget(f'az:///{azure_teardown_csv}', columns=['n', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
await controller.emit([i, 10 * i])
await controller.terminate()
await controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_csv).read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_to_azure(azure_teardown_file):
asyncio.run(async_test_write_csv_to_azure(azure_teardown_file))
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_with_dict_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['n', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "n,n*10\n0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_infer_columns_without_header_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i})
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "0,0\n1,10\n2,20\n3,30\n4,40\n5,50\n6,60\n7,70\n8,80\n9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_csv_from_lists_with_metadata_and_column_pruning_to_azure(azure_teardown_file):
file_path = f'az:///{azure_teardown_file}'
controller = build_flow([
SyncEmitSource(),
CSVTarget(file_path, columns=['event_key=$key', 'n*10'], header=True, storage_options=storage_options)
]).run()
for i in range(10):
controller.emit({'n': i, 'n*10': 10 * i}, key=f'key{i}')
controller.terminate()
controller.await_termination()
actual = AzureBlobFileSystem(**storage_options).open(azure_teardown_file).read()
expected = "event_key,n*10\nkey0,0\nkey1,10\nkey2,20\nkey3,30\nkey4,40\nkey5,50\nkey6,60\nkey7,70\nkey8,80\nkey9,90\n"
assert actual.decode("utf-8") == expected
@pytest.mark.skipif(not has_azure_credentials, reason='No azure credentials found')
def test_write_to_parquet_to_azure(azure_setup_teardown_test):
out_dir = f'az:///{azure_setup_teardown_test}'
columns = ['my_int', 'my_string']
controller = build_flow([
SyncEmitSource(),
ParquetTarget(out_dir, partition_cols='my_int', columns=columns, max_events=1, storage_options=storage_options)
]).run()
expected = []
for i in range(10):
controller.emit([i, f'this is {i}'])
expected.append([i, f'this is {i}'])
    expected = pd.DataFrame(expected, columns=columns, dtype='int32')
import doctest
import math
import os
import random
import sklearn
import pandas as pd
import numpy as np
from datetime import timedelta
from sklearn.utils.estimator_checks import check_transformer_general, check_transformers_unfitted
from unittest2 import TestSuite, TextTestRunner, TestCase # or `from unittest import ...` if on Python 3.4+
import category_encoders as encoders
__author__ = 'willmcginnis'
# subroutines
def create_array(n_rows=1000, extras=False, has_none=True):
"""
Creates a numpy dataset with some categorical variables
:return:
"""
ds = [[
random.random(),
random.random(),
random.choice(['A', 'B', 'C']),
random.choice(['A', 'B', 'C', 'D']) if extras else random.choice(['A', 'B', 'C']),
random.choice(['A', 'B', 'C', None, np.nan]) if has_none else random.choice(['A', 'B', 'C']),
random.choice(['A'])
] for _ in range(n_rows)]
return np.array(ds)
def create_dataset(n_rows=1000, extras=False, has_none=True):
"""
Creates a dataset with some categorical variables
"""
ds = [[
random.random(), # Floats
random.choice([float('nan'), float('inf'), float('-inf'), -0, 0, 1, -1, math.pi]), # Floats with edge scenarios
row, # Unique integers
str(row), # Unique strings
random.choice(['A']), # Invariant
random.choice(['A', 'B_b', 'C_c_c']), # Strings with underscores to test reverse_dummies()
random.choice(['A', 'B', 'C', None]) if has_none else random.choice(['A', 'B', 'C']), # None
random.choice(['A', 'B', 'C', 'D']) if extras else random.choice(['A', 'B', 'C']), # With a new string value
random.choice([12, 43, -32]) # Number in the column name
] for row in range(n_rows)]
df = pd.DataFrame(ds, columns=['float', 'float_edge', 'unique_int', 'unique_str', 'invariant', 'underscore', 'none', 'extra', 321])
return df
def verify_numeric(X_test):
for dt in X_test.dtypes:
numeric = False
if np.issubdtype(dt, np.dtype(int)) or np.issubdtype(dt, np.dtype(float)):
numeric = True
assert numeric
def verify_inverse_transform(x, x_inv):
"""
Verify x is equal to x_inv. The test returns true for NaN.equals(NaN) as it should.
"""
assert x.equals(x_inv)
# data definitions
np_X = create_array(n_rows=100)
np_X_t = create_array(n_rows=50, extras=True)
np_y = np.random.randn(np_X.shape[0]) > 0.5
np_y_t = np.random.randn(np_X_t.shape[0]) > 0.5
X = create_dataset(n_rows=100)
X_t = create_dataset(n_rows=50, extras=True)
y = pd.DataFrame(np_y)
y_t = pd.DataFrame(np_y_t)
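# A minimal standalone sketch, separate from the test matrix below: fit one
# encoder on the training frame, then transform the held-out frame that contains
# unseen category levels. How unseen levels are encoded depends on the installed
# category_encoders version.
def _encoder_usage_sketch():
    enc = encoders.OrdinalEncoder(cols=['underscore', 'extra'])
    enc.fit(X, np_y)
    out = enc.transform(X_t)
    verify_numeric(out[['underscore', 'extra']])
    return out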
# this class utilises parametrised tests where we loop over different encoders
# tests that are applicable to only one encoder are the end of the class
class TestEncoders(TestCase):
def test_np(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# Encode a numpy array
enc = getattr(encoders, encoder_name)()
enc.fit(np_X, np_y)
verify_numeric(enc.transform(np_X_t))
def test_classification(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
cols = ['unique_str', 'underscore', 'extra', 'none', 'invariant', 321]
enc = getattr(encoders, encoder_name)(cols=cols)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(verbose=1)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(drop_invariant=True)
enc.fit(X, np_y)
verify_numeric(enc.transform(X_t))
enc = getattr(encoders, encoder_name)(return_df=False)
enc.fit(X, np_y)
self.assertTrue(isinstance(enc.transform(X_t), np.ndarray))
self.assertEqual(enc.transform(X_t).shape[0], X_t.shape[0], 'Row count must not change')
# documented in issue #122
# when we use the same encoder on two different datasets, it should not explode
# X_a = pd.DataFrame(data=['1', '2', '2', '2', '2', '2'], columns=['col_a'])
# X_b = pd.DataFrame(data=['1', '1', '1', '2', '2', '2'], columns=['col_b']) # different values and name
# y_dummy = [True, False, True, False, True, False]
# enc = getattr(encoders, encoder_name)()
# enc.fit(X_a, y_dummy)
# enc.fit(X_b, y_dummy)
# verify_numeric(enc.transform(X_b))
def test_impact_encoders(self):
for encoder_name in ['LeaveOneOutEncoder', 'TargetEncoder', 'WOEEncoder']:
with self.subTest(encoder_name=encoder_name):
# encode a numpy array and transform with the help of the target
enc = getattr(encoders, encoder_name)()
enc.fit(np_X, np_y)
verify_numeric(enc.transform(np_X_t, np_y_t))
# target is a DataFrame
enc = getattr(encoders, encoder_name)()
enc.fit(X, y)
verify_numeric(enc.transform(X_t, y_t))
# when we run transform(X, y) and there is a new value in X, something is wrong and we raise an error
enc = getattr(encoders, encoder_name)(impute_missing=True, handle_unknown='error', cols=['extra'])
enc.fit(X, y)
self.assertRaises(ValueError, enc.transform, (X_t, y_t))
def test_error_handling(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# we exclude some columns
X = create_dataset(n_rows=100)
X = X.drop(['unique_str', 'none'], axis=1)
X_t = create_dataset(n_rows=50, extras=True)
X_t = X_t.drop(['unique_str', 'none'], axis=1)
# illegal state, we have to first train the encoder...
enc = getattr(encoders, encoder_name)()
with self.assertRaises(ValueError):
enc.transform(X)
# wrong count of attributes
enc = getattr(encoders, encoder_name)()
enc.fit(X, y)
with self.assertRaises(ValueError):
enc.transform(X_t.iloc[:, 0:3])
# no cols
enc = getattr(encoders, encoder_name)(cols=[])
enc.fit(X, y)
self.assertTrue(enc.transform(X_t).equals(X_t))
def test_handle_unknown_error(self):
# BaseN has problems with None -> ignore None
X = create_dataset(n_rows=100, has_none=False)
X_t = create_dataset(n_rows=50, extras=True, has_none=False)
for encoder_name in (set(encoders.__all__) - {'HashingEncoder'}): # HashingEncoder supports new values by design -> excluded
with self.subTest(encoder_name=encoder_name):
# new value during scoring
enc = getattr(encoders, encoder_name)(handle_unknown='error')
enc.fit(X, y)
with self.assertRaises(ValueError):
_ = enc.transform(X_t)
def test_sklearn_compliance(self):
for encoder_name in encoders.__all__:
with self.subTest(encoder_name=encoder_name):
# in sklearn < 0.19.0, these methods require classes,
# in sklearn >= 0.19.0, these methods require instances
if sklearn.__version__ < '0.19.0':
encoder = getattr(encoders, encoder_name)
else:
encoder = getattr(encoders, encoder_name)()
check_transformer_general(encoder_name, encoder)
check_transformers_unfitted(encoder_name, encoder)
def test_inverse_transform(self):
# we do not allow None in these data (but "none" column without any None is ok)
X = create_dataset(n_rows=100, has_none=False)
X_t = create_dataset(n_rows=50, has_none=False)
X_t_extra = create_dataset(n_rows=50, extras=True, has_none=False)
cols = ['underscore', 'none', 'extra', 321]
for encoder_name in ['BaseNEncoder', 'BinaryEncoder', 'OneHotEncoder', 'OrdinalEncoder']:
with self.subTest(encoder_name=encoder_name):
# simple run
enc = getattr(encoders, encoder_name)(verbose=1, cols=cols)
enc.fit(X)
verify_inverse_transform(X_t, enc.inverse_transform(enc.transform(X_t)))
# when a new value is encountered, do not raise an exception
enc = getattr(encoders, encoder_name)(verbose=1, cols=cols)
enc.fit(X, y)
_ = enc.inverse_transform(enc.transform(X_t_extra))
def test_types(self):
X = pd.DataFrame({
'Int': [1, 2, 1, 2],
'Float': [1.1, 2.2, 3.3, 4.4],
'Complex': [3.45J, 3.45J, 3.45J, 3.45J],
'None': [None, None, None, None],
'Str': ['a', 'c', 'c', 'd'],
            'PdTimestamp': [pd.Timestamp('2012-05-01'), pd.Timestamp('2012-05-02'), pd.Timestamp('2012-05-03'), pd.Timestamp('2012-05-06')
r"""
Auxiliary functions for :class:`anndata.AnnData` objects
that are not covered in :mod:`scanpy`.
"""
import os
from itertools import chain
from typing import Callable, List, Mapping, Optional
import anndata
import numpy as np
import pandas as pd
import scipy.sparse
import sklearn.decomposition
import sklearn.feature_extraction.text
import sklearn.preprocessing
import sklearn.neighbors
import sklearn.utils.extmath
from . import genomics, num
from .utils import logged, smart_tqdm
def lsi(
adata: anndata.AnnData, n_components: int = 20,
use_highly_variable: Optional[bool] = None, **kwargs
) -> None:
r"""
LSI analysis (following the Seurat v3 approach)
Parameters
----------
adata
Input dataset
n_components
Number of dimensions to use
use_highly_variable
Whether to use highly variable features only, stored in
``adata.var['highly_variable']``. By default uses them if they
have been determined beforehand.
**kwargs
Additional keyword arguments are passed to
:func:`sklearn.utils.extmath.randomized_svd`
"""
if use_highly_variable is None:
use_highly_variable = "highly_variable" in adata.var
adata_use = adata[:, adata.var["highly_variable"]] if use_highly_variable else adata
X = num.tfidf(adata_use.X)
X_norm = sklearn.preprocessing.Normalizer(norm="l1").fit_transform(X)
X_norm = np.log1p(X_norm * 1e4)
X_lsi = sklearn.utils.extmath.randomized_svd(X_norm, n_components, **kwargs)[0]
X_lsi -= X_lsi.mean(axis=1, keepdims=True)
X_lsi /= X_lsi.std(axis=1, ddof=1, keepdims=True)
adata.obsm["X_lsi"] = X_lsi
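def lsi_reference_sketch(X: np.ndarray, n_components: int = 20) -> np.ndarray:
    r"""
    Reference-only sketch of the steps inside :func:`lsi` for a dense count
    matrix, without the AnnData bookkeeping. It assumes the usual TF-IDF form
    for :func:`num.tfidf` and a matrix with no all-zero rows or columns.
    """
    tf = X / X.sum(axis=1, keepdims=True)
    idf = X.shape[0] / X.sum(axis=0, keepdims=True)
    X_norm = sklearn.preprocessing.Normalizer(norm="l1").fit_transform(tf * idf)
    X_norm = np.log1p(X_norm * 1e4)
    X_lsi = sklearn.utils.extmath.randomized_svd(X_norm, n_components)[0]
    X_lsi -= X_lsi.mean(axis=1, keepdims=True)
    X_lsi /= X_lsi.std(axis=1, ddof=1, keepdims=True)
    return X_lsi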
@logged
def get_gene_annotation(
adata: anndata.AnnData, var_by: str = None,
gtf: os.PathLike = None, gtf_by: str = None,
by_func: Optional[Callable] = None
) -> None:
r"""
Get genomic annotation of genes by joining with a GTF file.
Parameters
----------
adata
Input dataset
var_by
Specify a column in ``adata.var`` used to merge with GTF attributes,
otherwise ``adata.var_names`` is used by default.
gtf
Path to the GTF file
gtf_by
Specify a field in the GTF attributes used to merge with ``adata.var``,
e.g. "gene_id", "gene_name".
by_func
Specify an element-wise function used to transform merging fields,
e.g. removing suffix in gene IDs.
Note
----
The genomic locations are converted to 0-based as specified
in bed format rather than 1-based as specified in GTF format.
"""
if gtf is None or gtf_by is None:
raise ValueError("Arguments `gtf` and `gtf_by` must be specified!")
var_by = adata.var_names if var_by is None else adata.var[var_by]
gtf = genomics.read_gtf(gtf).query("feature == 'gene'").split_attribute()
if by_func:
by_func = np.vectorize(by_func)
var_by = by_func(var_by)
gtf[gtf_by] = by_func(gtf[gtf_by]) # Safe inplace modification
gtf = gtf.sort_values("seqname").drop_duplicates(
subset=[gtf_by], keep="last"
) # Typically, scaffolds come first, chromosomes come last
merge_df = pd.concat([
pd.DataFrame(gtf.to_bed(name=gtf_by)),
pd.DataFrame(gtf).drop(columns=genomics.Gtf.COLUMNS) # Only use the splitted attributes
], axis=1).set_index(gtf_by).reindex(var_by).set_index(adata.var.index)
adata.var = pd.concat([adata.var, merge_df], axis=1)
def aggregate_obs(
adata: anndata.AnnData, by: str, X_agg: Optional[str] = "sum",
obs_agg: Optional[Mapping[str, str]] = None,
obsm_agg: Optional[Mapping[str, str]] = None,
layers_agg: Optional[Mapping[str, str]] = None
) -> anndata.AnnData:
r"""
Aggregate obs in a given dataset by certain categories
Parameters
----------
adata
Dataset to be aggregated
by
Specify a column in ``adata.obs`` used for aggregation,
must be discrete.
X_agg
Aggregation function for ``adata.X``, must be one of
``{"sum", "mean", ``None``}``. Setting to ``None`` discards
the ``adata.X`` matrix.
obs_agg
Aggregation methods for ``adata.obs``, indexed by obs columns,
must be one of ``{"sum", "mean", "majority"}``, where ``"sum"``
and ``"mean"`` are for continuous data, and ``"majority"`` is for
discrete data. Fields not specified will be discarded.
obsm_agg
Aggregation methods for ``adata.obsm``, indexed by obsm keys,
must be one of ``{"sum", "mean"}``. Fields not specified will be
discarded.
layers_agg
Aggregation methods for ``adata.layers``, indexed by layer keys,
must be one of ``{"sum", "mean"}``. Fields not specified will be
discarded.
Returns
-------
aggregated
Aggregated dataset
"""
obs_agg = obs_agg or {}
obsm_agg = obsm_agg or {}
layers_agg = layers_agg or {}
by = adata.obs[by]
agg_idx = pd.Index(by.cat.categories) \
if pd.api.types.is_categorical_dtype(by) \
else pd.Index(np.unique(by))
agg_sum = scipy.sparse.coo_matrix((
np.ones(adata.shape[0]), (
agg_idx.get_indexer(by),
np.arange(adata.shape[0])
)
)).tocsr()
agg_mean = agg_sum.multiply(1 / agg_sum.sum(axis=1))
agg_method = {
"sum": lambda x: agg_sum @ x,
"mean": lambda x: agg_mean @ x,
"majority": lambda x: pd.crosstab(by, x).idxmax(axis=1).loc[agg_idx].to_numpy()
}
X = agg_method[X_agg](adata.X) if X_agg and adata.X is not None else None
obs = pd.DataFrame({
k: agg_method[v](adata.obs[k])
for k, v in obs_agg.items()
}, index=agg_idx.astype(str))
obsm = {
k: agg_method[v](adata.obsm[k])
for k, v in obsm_agg.items()
}
layers = {
k: agg_method[v](adata.layers[k])
for k, v in layers_agg.items()
}
for c in obs:
if pd.api.types.is_categorical_dtype(adata.obs[c]):
obs[c] = pd.Categorical(obs[c], categories=adata.obs[c].cat.categories)
return anndata.AnnData(
X=X, obs=obs, var=adata.var,
obsm=obsm, varm=adata.varm, layers=layers
)
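def _aggregate_obs_example() -> anndata.AnnData:
    r"""
    Tiny self-contained example of :func:`aggregate_obs` (field names are
    invented): sum counts per cell type and keep the majority batch per group.
    """
    adata = anndata.AnnData(
        X=np.array([[1.0, 0.0], [2.0, 1.0], [0.0, 3.0]]),
        obs=pd.DataFrame(
            {"cell_type": ["T", "T", "B"], "batch": ["b1", "b2", "b2"]},
            index=["c1", "c2", "c3"],
        ),
    )
    return aggregate_obs(
        adata, by="cell_type", X_agg="sum", obs_agg={"batch": "majority"}
    )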
def transfer_labels(
ref: anndata.AnnData, query: anndata.AnnData, field: str,
n_neighbors: int = 5, use_rep: Optional[str] = None,
key_added: Optional[str] = None, **kwargs
) -> None:
r"""
Transfer discrete labels from reference dataset to query dataset
Parameters
----------
ref
Reference dataset
query
Query dataset
field
Field to be transferred in ``ref.obs`` (must be discrete)
n_neighbors
Number of nearest neighbors used for label transfer
use_rep
Data representation based on which to find nearest neighbors,
by default uses ``{ref, query}.X``.
key_added
New ``query.obs`` key added for the transfered labels,
by default the same as ``field``.
**kwargs
Additional keyword arguments are passed to
:class:`sklearn.neighbors.NearestNeighbors`
"""
ref_mat = ref.obsm[use_rep] if use_rep else ref.X
query_mat = query.obsm[use_rep] if use_rep else query.X
nn = sklearn.neighbors.NearestNeighbors(
n_neighbors=n_neighbors, **kwargs
).fit(ref_mat)
nni = nn.kneighbors(query_mat, return_distance=False)
hits = ref.obs[field].to_numpy()[nni]
pred = pd.crosstab(
np.repeat(query.obs_names, n_neighbors), hits.ravel()
).idxmax(axis=1).loc[query.obs_names]
if pd.api.types.is_categorical_dtype(ref.obs[field]):
pred = pd.Categorical(pred, categories=ref.obs[field].cat.categories)
query.obs[key_added or field] = pred
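def _transfer_labels_example() -> None:
    r"""
    Tiny self-contained example of :func:`transfer_labels` (field names are
    invented): the labels of the nearest reference cells decide each query cell.
    """
    ref = anndata.AnnData(
        X=np.array([[0.0], [0.1], [5.0], [5.1]]),
        obs=pd.DataFrame({"cell_type": ["A", "A", "B", "B"]},
                         index=["r1", "r2", "r3", "r4"]),
    )
    query = anndata.AnnData(
        X=np.array([[0.05], [5.05]]),
        obs=pd.DataFrame(index=["q1", "q2"]),
    )
    transfer_labels(ref, query, "cell_type", n_neighbors=2)
    # query.obs["cell_type"] is now ["A", "B"]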
def extract_rank_genes_groups(
adata: anndata.AnnData, groups: Optional[List[str]] = None,
filter_by: str = "pvals_adj < 0.01", sort_by: str = "scores",
ascending: str = False
) -> pd.DataFrame:
r"""
Extract result of :func:`scanpy.tl.rank_genes_groups` in the form of
marker gene data frame for specific cell groups
Parameters
----------
adata
Input dataset
groups
Target groups for which markers should be extracted,
by default extract all groups.
filter_by
Marker filtering criteria (passed to :meth:`pandas.DataFrame.query`)
sort_by
Column used for sorting markers
ascending
Whether to sort in ascending order
Returns
-------
marker_df
Extracted marker data frame
Note
----
Markers shared by multiple groups will be assign to the group
with highest score.
"""
if "rank_genes_groups" not in adata.uns:
raise ValueError("Please call `sc.tl.rank_genes_groups` first!")
if groups is None:
groups = adata.uns["rank_genes_groups"][sort_by].dtype.names
df = pd.concat([
pd.DataFrame({
k: np.asarray(v[g])
for k, v in adata.uns["rank_genes_groups"].items()
if k != "params"
}).assign(group=g)
for g in groups
])
df["group"] = pd.Categorical(df["group"], categories=groups)
df = df.sort_values(
sort_by, ascending=ascending
).drop_duplicates(
subset=["names"], keep="first"
).sort_values(
["group", sort_by], ascending=[True, ascending]
).query(filter_by)
df = df.reset_index(drop=True)
return df
def bedmap2anndata(
bedmap: os.PathLike, var_col: int = 3, obs_col: int = 6
) -> anndata.AnnData:
r"""
Convert bedmap result to :class:`anndata.AnnData` object
Parameters
----------
bedmap
Path to bedmap result
var_col
Variable column (0-based)
obs_col
Observation column (0-based)
Returns
-------
adata
Converted :class:`anndata.AnnData` object
Note
----
Similar to ``rliger::makeFeatureMatrix``,
but more automated and memory efficient.
"""
bedmap = pd.read_table(
bedmap, sep="\t", header=None, usecols=[var_col, obs_col]
).dropna()
obs_pool = bedmap[obs_col].str.split(";")
var_pool = bedmap[var_col]
obs_names = pd.Index(set(chain.from_iterable(obs_pool)))
var_names = pd.Index(set(var_pool))
X = scipy.sparse.lil_matrix((var_names.size, obs_names.size)) # Transposed
for obs, var in smart_tqdm(zip(obs_pool, var_pool), total=bedmap.shape[0]):
row = obs_names.get_indexer(obs)
col = var_names.get_loc(var)
X.rows[col] += row.tolist()
X.data[col] += [1] * row.size
X = X.tocsc().T # Transpose back
X.sum_duplicates()
return anndata.AnnData(
X=X, obs=pd.DataFrame(index=obs_names),
        var=pd.DataFrame(index=var_names)
import logging
from typing import NamedTuple, Dict, List, Set, Union
import d3m
import d3m.metadata.base as mbase
import numpy as np
import pandas as pd
from common_primitives import utils
from d3m.container import DataFrame as d3m_DataFrame
from d3m.metadata import hyperparams as metadata_hyperparams
from d3m.metadata import hyperparams, params
from d3m.metadata.hyperparams import Enumeration, UniformInt, UniformBool
from d3m.primitive_interfaces.base import CallResult
from d3m.primitive_interfaces.unsupervised_learning import UnsupervisedLearnerPrimitiveBase
from . import config
_logger = logging.getLogger(__name__)
Input = d3m.container.DataFrame
Output = d3m.container.DataFrame
class EncParams(params.Params):
mapping: Dict
cat_columns: List[str]
empty_columns: List[int]
class EncHyperparameter(hyperparams.Hyperparams):
n_limit = UniformInt(lower=5, upper=100, default=12,
description='Limits the maximum number of columns generated from a single categorical column',
semantic_types=['http://schema.org/Integer',
'https://metadata.datadrivendiscovery.org/types/TuningParameter'])
use_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to force primitive to operate on. If any specified column cannot be parsed, it is skipped.",
)
exclude_columns = hyperparams.Set(
elements=hyperparams.Hyperparameter[int](-1),
default=(),
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="A set of column indices to not operate on. Applicable only if \"use_columns\" is not provided.",
)
return_result = hyperparams.Enumeration(
values=['append', 'replace', 'new'],
default='replace',
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Should parsed columns be appended, should they replace original columns, or should only parsed columns be returned? This hyperparam is ignored if use_semantic_types is set to false.",
)
use_semantic_types = hyperparams.UniformBool(
default=False,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Controls whether semantic_types metadata will be used for filtering columns in input dataframe. Setting this to false makes the code ignore return_result and will produce only the output dataframe"
)
add_index_columns = hyperparams.UniformBool(
default=True,
semantic_types=['https://metadata.datadrivendiscovery.org/types/ControlParameter'],
description="Also include primary index columns if input data has them. Applicable only if \"return_result\" is set to \"new\".",
)
class Encoder(UnsupervisedLearnerPrimitiveBase[Input, Output, EncParams, EncHyperparameter]):
"""
An one-hot encoder, which
1. n_limit: max number of distinct values to one-hot encode,
remaining values with fewer occurence are put in [colname]_other_ column.
2. feed in data by set_training_data, then apply fit() function to tune the encoder.
3. produce(): input data would be encoded and return.
"""
metadata = hyperparams.base.PrimitiveMetadata({
"id": "18f0bb42-6350-3753-8f2d-d1c3da70f279",
"version": config.VERSION,
"name": "ISI DSBox Data Encoder",
"description": "Encode data, such as one-hot encoding for categorical data",
"python_path": "d3m.primitives.data_preprocessing.Encoder.DSBOX",
"primitive_family": "DATA_PREPROCESSING",
"algorithm_types": ["ENCODE_ONE_HOT"],
"source": {
"name": config.D3M_PERFORMER_TEAM,
"contact": config.D3M_CONTACT,
"uris": [config.REPOSITORY]
},
"keywords": ["preprocessing", "encoding"],
"installation": [config.INSTALLATION],
})
def __repr__(self):
return "%s(%r)" % ('Encoder', self.__dict__)
def __init__(self, *, hyperparams: EncHyperparameter) -> None:
super().__init__(hyperparams=hyperparams)
self.hyperparams = hyperparams
self._mapping: Dict = {}
self._input_data: Input = None
self._input_data_copy = None
self._fitted = False
self._cat_columns = []
self._col_index = None
self._empty_columns = []
def set_training_data(self, *, inputs: Input) -> None:
self._input_data = inputs
self._fitted = False
def _trim_features(self, feature, n_limit):
topn = feature.dropna().unique()
if n_limit:
if feature.dropna().nunique() > n_limit:
topn = list(feature.value_counts().head(n_limit).index)
topn.append('other_')
topn = [x for x in topn if x]
return feature.name, topn
def fit(self, *, timeout: float = None, iterations: int = None) -> CallResult[None]:
if self._fitted:
return
if self._input_data is None:
raise ValueError('Missing training(fitting) data.')
# Look at attribute columns only
# print('fit in', self._input_data.columns)
data = self._input_data.copy()
all_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata, semantic_types=[
"https://metadata.datadrivendiscovery.org/types/Attribute"])
# Remove columns with all empty values, structural type str
numeric = utils.list_columns_with_semantic_types(
data.metadata, ['http://schema.org/Integer', 'http://schema.org/Float'])
numeric = [x for x in numeric if x in all_attributes]
self._empty_columns = []
_logger.debug(f'Numeric columns: {numeric}')
for element in numeric:
if data.metadata.query((mbase.ALL_ELEMENTS, element)).get('structural_type', ()) == str:
if pd.isnull(pd.to_numeric(data.iloc[:, element])).sum() == data.shape[0]:
_logger.debug(f'Empty numeric str column: {element}')
self._empty_columns.append(element)
# Remove columns with all empty values, structural numeric
is_empty = pd.isnull(data).sum(axis=0) == data.shape[0]
for i in all_attributes:
if is_empty.iloc[i] and i not in self._empty_columns:
                _logger.debug(f'Empty column: {i}')
self._empty_columns.append(i)
_logger.debug('Removing entirely empty columns: {}'.format(data.columns[self._empty_columns]))
data = utils.remove_columns(data, self._empty_columns)
categorical_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata,
semantic_types=[
"https://metadata.datadrivendiscovery.org/types/OrdinalData",
"https://metadata.datadrivendiscovery.org/types/CategoricalData"])
all_attributes = utils.list_columns_with_semantic_types(metadata=data.metadata, semantic_types=[
"https://metadata.datadrivendiscovery.org/types/Attribute"])
self._cat_col_index = list(set(all_attributes).intersection(categorical_attributes))
self._cat_columns = data.columns[self._cat_col_index].tolist()
_logger.debug('Encoding columns: {}'.format(self._cat_columns))
mapping = {}
for column_name in self._cat_columns:
col = data[column_name]
temp = self._trim_features(col, self.hyperparams['n_limit'])
if temp:
mapping[temp[0]] = temp[1]
self._mapping = mapping
self._fitted = True
return CallResult(None, has_finished=True)
def produce(self, *, inputs: Input, timeout: float = None, iterations: int = None) -> CallResult[Output]:
"""
Convert and output the input data into encoded format,
using the trained (fitted) encoder.
Notice that [colname]_other_ and [colname]_nan columns
are always kept for one-hot encoded columns.
"""
self._input_data_copy = inputs.copy()
# Remove columns with all empty values
_logger.debug('Removing entirely empty columns: {}'.format(self._input_data_copy.columns[self._empty_columns]))
self._input_data_copy = utils.remove_columns(self._input_data_copy, self._empty_columns)
# Return if there is nothing to encode
if len(self._cat_columns) == 0:
return CallResult(self._input_data_copy, True, 1)
_logger.debug('Encoding columns: {}'.format(self._cat_columns))
data_encode = self._input_data_copy[list(self._mapping.keys())]
# Get rid of false SettingWithCopyWarning
data_encode.is_copy = None
res = []
for column_name in self._cat_columns:
feature = data_encode[column_name].copy()
            other_ = lambda x: 'other_' if (x and x not in self._mapping[column_name]) else x
nan_ = lambda x: x if x else np.nan
feature.loc[feature.notnull()] = feature[feature.notnull()].apply(other_)
feature = feature.apply(nan_)
new_column_names = ['{}_{}'.format(column_name, i) for i in self._mapping[column_name] + ['nan']]
encoded = pd.get_dummies(feature, dummy_na=True, prefix=column_name)
missed = [name for name in new_column_names if name not in list(encoded.columns)]
for m in missed:
# print('missing', m)
encoded[m] = 0
encoded = encoded[new_column_names]
res.append(encoded)
# data_encode.loc[:,column_name] = feature
# Drop columns that will be encoded
# data_rest = self._input_data_copy.drop(self._mapping.keys(), axis=1)
columns_names = self._input_data_copy.columns.tolist()
drop_indices = [columns_names.index(col) for col in self._mapping.keys()]
drop_indices = sorted(drop_indices)
all_categorical = False
try:
self._input_data_copy = utils.remove_columns(self._input_data_copy, drop_indices)
except ValueError:
_logger.warning("[warn] All the attributes are categorical!")
all_categorical = True
# metadata for columns that are not one hot encoded
# self._col_index = [self._input_data_copy.columns.get_loc(c) for c in data_rest.columns]
# data_rest.metadata = utils.select_columns_metadata(self._input_data_copy.metadata, self._col_index)
# encode data
# encoded = d3m_DataFrame(pd.get_dummies(data_encode, dummy_na=True, prefix=self._cat_columns, prefix_sep='_',
# columns=self._cat_columns))
        encoded = d3m_DataFrame(pd.concat(res, axis=1))
# -*- coding: UTF-8 -*-
from __future__ import division
import pandas as pd
# For the next set of questions, we will be using census data from the United States Census Bureau.
# Counties are political and geographic subdivisions of states in the United States.
# This dataset contains population data for counties and states in the US from 2010 to 2015.
# See this document for a description of the variable names.
# The census dataset (census.csv) should be loaded as census_df. Answer questions using this as appropriate.
census_df = pd.read_csv('../data/census.csv')
print(census_df.head(1))
# SUMLEV REGION DIVISION STATE COUNTY STNAME CTYNAME CENSUS2010POP \
# 0 40 3 6 1 0 Alabama Alabama 4779736
# ESTIMATESBASE2010 POPESTIMATE2010 ... RDOMESTICMIG2011 \
# 0 4780127 4785161 ... 0.002295
# RDOMESTICMIG2012 RDOMESTICMIG2013 RDOMESTICMIG2014 RDOMESTICMIG2015 \
# 0 -0.193196 0.381066 0.582002 -0.467369
# RNETMIG2011 RNETMIG2012 RNETMIG2013 RNETMIG2014 RNETMIG2015
# 0 1.030015 0.826644 1.383282 1.724718 0.712594
# Question 5
# ----------------------------------------
# Quiz Question: Which state has the most counties in it? (hint: consider the sumlevel key carefully!
# You'll need this for future questions too...)
# This function should return a single string value.
def answer_five():
stnames = census_df.groupby(census_df['STNAME'])
# print(stnames) # pandas.core.groupby.DataFrameGroupBy object at 0x0000000006BE4A90>
return stnames['COUNTY'].count().argmax()
def answer_five2():
# 'STNAME' becomes the index name
county_grp = census_df['COUNTY'].groupby(census_df['STNAME'])
# print(county_grp) # <pandas.core.groupby.SeriesGroupBy object at 0x0000000006BE4A90>
return county_grp.count().argmax()
print('\nQuestion 5')
print(answer_five()) # Texas
print(answer_five2()) # Texas
# Question 6
# ----------------------------------------
# Quiz Question: Only looking at the three most populous counties for each state, what are the three
# most populous states (in order of highest population to lowest population)?
# This function should return a list of string values.
def answer_six():
# 1. Remove sub level = 40 (state level)
df = census_df[census_df['SUMLEV'] == 50]
    # 2. Get the top 3 most populous counties for each state
group_by_state = df.groupby(census_df['STNAME'])
top3_country = group_by_state['POPESTIMATE2015'].nlargest(3)
# print(top3_grp)
# STNAME
# Alabama 0 4779736
# 37 658466
# 49 412992
# make index as list with column names. "top3" variable here is same as "top3_country".
top3 = top3_country.reset_index()
# print(top3)
# STNAME level_1 CENSUS2010POP
# 0 Alabama 0 4779736
# 1 Alabama 37 658466
# 2 Alabama 49 412992
# 3. Sum the value by state and get the top 3 most populous states
group_by_state_in_top3 = top3.groupby(top3['STNAME'])
county = group_by_state_in_top3['POPESTIMATE2015'].sum()
# print(county)
# STNAME
# Alabama 5851194
# Alaska 1099638
# Arizona 11189397
return county.nlargest(3).index.values.tolist()
print('\nQuestion 6')
print(type(answer_six())) # <class 'list'>
print(answer_six()) # ['California', 'Texas', 'Illinois']
# Question 7
# ----------------------------------------
# Quiz Question: Which county has had the largest absolute change in population within the period 2010-2015?
# (Hint: population values are stored in columns POPESTIMATE2010 through POPESTIMATE2015,
# you need to consider all six columns.)
# e.g. If County Population in the 5 year period is 100, 120, 80, 105, 100, 130,
# then its largest change in the period would be |130-80| = 50.
# This function should return a single string value.
def answer_seven():
# remove sub level = 40 (state level)
df = census_df[census_df['SUMLEV'] == 50]
# largest change would be the max population during the 5-year period minus the min population
df2 = pd.DataFrame()
years = ['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012', 'POPESTIMATE2013', 'POPESTIMATE2014',
'POPESTIMATE2015']
    # Reassemble into a new data frame
df2['CTYNAME'] = df['CTYNAME']
df2['Y_MIN'] = df[years].min(axis=1)
df2['Y_MAX'] = df[years].max(axis=1)
df2['Y_CHANGE'] = abs(df2['Y_MAX'] - df2['Y_MIN'])
# print(df2)
# CTYNAME Y_MIN Y_MAX Y_CHANGE
# 1 Autauga County 54660 55347 687
# 2 Baldwin County 183193 203709 20516
# 3 Barbour County 26489 27341 852
# print(df2['Y_CHANGE'].nlargest(5))
    # Get the index of the max Y_CHANGE value, then locate that row via dataframe.loc.
    # dataframe.iloc selects by position, while dataframe.loc selects by index label.
index = df2['Y_CHANGE'].argmax()
# print(index) # 2667
return df2.loc[index, 'CTYNAME']
print('\nQuestion 7')
print(answer_seven()) # Harris County
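# Quick sanity check of the |130 - 80| = 50 example from the question text,
# using the same max-minus-min idea as answer_seven (toy numbers, not census data).
toy = pd.DataFrame([[100, 120, 80, 105, 100, 130]],
                   columns=['POPESTIMATE2010', 'POPESTIMATE2011', 'POPESTIMATE2012',
                            'POPESTIMATE2013', 'POPESTIMATE2014', 'POPESTIMATE2015'])
print((toy.max(axis=1) - toy.min(axis=1)).iloc[0])  # 50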
def answer_seven_2():
    df = pd.DataFrame(census_df[census_df['SUMLEV'] == 50])
#======================================================
# General Utility Functions
#======================================================
'''
Info: Utility functions for general applications.
Version: 2.0
Author: <NAME>
Created: Saturday, 13 April 2019
'''
# Import modules
import os
import numpy as np
import pandas as pd
import sys
#import dill as pickle
import pickle
from datetime import datetime
#------------------------------
# Utility Functions
#------------------------------
# Set title
def set_title(string):
# Check if string is too long
string_size = len(string)
max_length = 57
if string_size > max_length:
print('TITLE TOO LONG')
else:
lr_buffer_len = int((max_length - string_size) / 2)
full_buffer_len = lr_buffer_len * 2 + string_size
print('\n')
print(full_buffer_len * '=')
print(full_buffer_len * ' ')
print(lr_buffer_len * ' ' + string + lr_buffer_len * ' ')
print(full_buffer_len * ' ')
print(full_buffer_len * '='+'\n\n')
# Set section
def set_section(string):
# Check if string is too long
string_size = len(string)
max_length = 100
if string_size > max_length:
print('TITLE TOO LONG')
else:
full_buffer_len = string_size
print('\n')
print(full_buffer_len * '-')
print(string)
print(full_buffer_len * '-'+'\n')
# Print time taken
def print_dur(string, st):
print(string, datetime.now() - st)
# Date conversion
def pdf_cast_date(df, date_field):
#df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%m/%d/%Y %H:%M'), list(df.loc[:, date_field])))
#df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%Y/%m/%d %H:%M:%S'), list(df.loc[:, date_field])))
    df.loc[:, date_field] = list(map(lambda x: pd.to_datetime(x, format='%d/%m/%Y %H:%M'), list(df.loc[:, date_field])))
# ETL:
# Extract: selecting the right data and obtaining it
# Transform: data cleansing is applied to that data while it sits in a staging area
# Loading: loading of the transformed data into the data store or a data warehouse
import numpy as np
import pandas as pd
import re
from scripts.utils_date_features import *
from scripts.utils_religion_features import *
from scripts.utils_nlp_features import *
############################## Attendance dataframe building ################################
def main():
# read data from main CSVs
effectifEcoles = pd.read_csv('../data/effectifs_ecolesnantes.csv', header=0, sep=';')
effectifEcoles.drop(['Début année scolaire'], axis=1, inplace=True)
appariement = pd.read_csv('../data/appariement_ecoles_cantines.csv', header=0, sep=',')
appariement.rename(columns={"ecole": "Ecole"}, inplace=True)
freqJ = pd.read_csv('../data/frequentation_cantines_v2.csv', header=0, sep=',', low_memory=False)
freqJ["date"] = pd.to_datetime(freqJ["date"])
# drop useless duplicate columns
freqJ.drop(['site_nom_sal', 'site_id', 'prevision_s', 'reel_s'], axis=1, inplace=True)
freqJ.rename(columns={'site_nom':'cantine_nom'}, inplace=True)
freqJ.sort_values(by='date', inplace=True, ascending=True)
freqJ.reset_index(inplace=True, drop=True)
# get aggregated headcounts by canteen
    effectifEcoles = pd.merge(effectifEcoles, appariement[['cantine_nom','Ecole']], on='Ecole')
import abc
import base64
import contextlib
from functools import partial
import gc
import os
import random
import signal
import time
from typing import Iterable, Tuple, Union
from attrdict import AttrDict
from dataclasses import dataclass
import IPython
from IPython import get_ipython
from IPython.core.interactiveshell import InteractiveShell
from IPython.display import *
from more_itertools import first
import numpy as np
import pandas as pd
import prompt_toolkit
import potoo.pandas
from potoo.pandas import cat_to_str
from potoo.util import AttrContext, or_else, deep_round_sig, singleton
def displayed(x, f=lambda x: x):
display(f(x))
return x
def ipy_format(*xs: any, mimetype='text/plain', join='\n') -> str:
"""
Format like IPython.display.display
- Spec: print(ipy_format(*xs)) ~ display(*xs)
- Manually line-join multiple args like display(x, y) does
"""
return join.join(
formats.get(mimetype, formats['text/plain'])
for x in xs
for formats in [ipy_all_formats(x)]
)
def ipy_all_formats(x: any) -> str:
formats, _metadata = get_ipython().display_formatter.format(x)
return formats
def ipy_text(*xs: any) -> str:
return ipy_format(*xs, mimetype='text/plain', join='\n')
def ipy_html(*xs: any) -> str:
return ipy_format(*xs, mimetype='text/html', join='<br/>')
def ipy_print(*xs: any, **kwargs) -> str:
"""
Print like IPython.display.display, but also allow control over print kwargs like flush, file, etc.
- Spec: ipy_print(*xs) ~ display(*xs)
"""
print(ipy_format(*xs), **kwargs)
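def _ipy_format_example() -> str:
    """
    Illustrative only (needs a live IPython session, since ipy_format goes
    through get_ipython().display_formatter): multiple args are joined with
    newlines, mirroring how display(1, 'a') shows two lines.
    """
    return ipy_format(1, 'a')  # "1\n'a'"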
def is_ipython_console():
return or_else(None, lambda: get_ipython().__class__.__name__) == 'TerminalInteractiveShell'
def is_ipython_notebook():
return or_else(None, lambda: get_ipython().__class__.__name__) == 'ZMQInteractiveShell'
def ipy_load_ext_no_warnings(module_str: str) -> str:
"""Like %load_ext, except silence warnings (e.g. when the extension is already loaded)"""
# The warnings are emitted from ExtensionMagics.load_ext (see its source), so we just call what it calls
ipy = get_ipython()
return ipy.extension_manager.load_extension(module_str)
def disable_special_control_backslash_handler():
"""
Replace special ipy binding for C-\ with normal os SIGQUIT handler
- Since https://github.com/ipython/ipython/pull/9820/commits/37863a8
"""
ipy = get_ipython()
if hasattr(ipy, 'pt_cli'):
ipy.pt_cli.application.key_bindings_registry.add_binding(prompt_toolkit.keys.Keys.ControlBackslash)(
lambda ev: os.kill(os.getpid(), signal.SIGQUIT)
)
def set_display_on_ipython_prompt():
"""
set_display on each ipython prompt: workaround bug where user SIGWINCH handlers are ignored while readline is active
- https://bugs.python.org/issue23735
- Doesn't happen in python readline, just ipython readline
- Happens with python-3.6.0 and python-3.6.2 (latest as of 2017-08-26)
"""
if is_ipython_console():
ipy = get_ipython()
ipy.set_hook(
'pre_run_code_hook',
_warn_deprecated=False,
hook=lambda *args: potoo.pandas.set_display(),
)
def gc_on_ipy_post_run_cell():
"""
Force gc after each ipy cell run, since it's _really_ easy to accumulate many uncollected-but-collectable GBs of mem
pressure by re-executing one heavy cell over and over again
- Adds ballpark ~40ms per cell execution
- Use in combination with ipy `c.InteractiveShell.cache_size = 0` (see ~/.ipython/profile_default/ipython_config.py)
"""
ipy = get_ipython()
if ipy: # None if not ipython
# gc.collect() takes ballpark ~40ms, so avoid running it every single time
def pre_run_cell(info):
info.start_s = time.time()
def post_run_cell(result):
if hasattr(result.info, 'start_s'):
elapsed_s = time.time() - result.info.start_s
if elapsed_s > .5 or random.random() < 1/20:
gc.collect()
ipy.events.register('pre_run_cell', pre_run_cell)
ipy.events.register('post_run_cell', post_run_cell)
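# Wiring sketch (an assumed way to hook these up from an IPython startup file;
# nothing in this module requires this exact call order):
# if get_ipython():
# disable_special_control_backslash_handler()
# set_display_on_ipython_prompt()
# gc_on_ipy_post_run_cell()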
@singleton
@dataclass
class ipy_formats(AttrContext):
# A pile of hacks to make values display prettier in ipython/jupyter
# - TODO Un-hack these into something that doesn't assume a ~/.pythonrc hook so that ipynb output is reproducible
deep_round_sig: bool = True # Very useful by default [and hopefully doesn't break anything...]
stack_iters: bool = False # Useful, but maybe not by default
# Internal state (only used as a stack-structured dynamic var)
_fancy_cells: bool = False
@property
def precision(self):
return pd.get_option('display.precision')
# TODO Respect display.max_rows (currently treats it as unlimited)
def set(self):
self.ipy = get_ipython()
if self.ipy:
# TODO These 'text/plain' formatters:
# 1. Do nothing, and I don't know why -- maybe interference from potoo.pretty?
# 2. Are untested, because (1)
# pd.DataFrame
self.ipy.display_formatter.formatters['text/html'].for_type(pd.DataFrame, lambda df: (
self._format_df(df, mimetype='text/html')
))
self.ipy.display_formatter.formatters['text/plain'].for_type(pd.DataFrame, lambda df, p, cycle: (
p.text(self._format_df(df, mimetype='text/plain'))
))
# pd.Series
self.ipy.display_formatter.formatters['text/html'].for_type(pd.Series, lambda s: (
self._format_series(s, mimetype='text/html')
))
self.ipy.display_formatter.formatters['text/plain'].for_type(pd.Series, lambda s, p, cycle: (
p.text(self._format_series(s, mimetype='text/plain'))
))
# Prevent plotnine plots from displaying their repr str (e.g. '<ggplot: (-9223372036537068975)>') after repr
# has already side-effected the plotting of an image
# - Returning '' causes the formatter to be ignored
# - Returning ' ' is a HACK but empirically causes no text output to show up (in atom hydrogen-extras)
# - To undo: self.ipy.display_formatter.formatters['text/html'].pop('plotnine.ggplot.ggplot')
self.ipy.display_formatter.formatters['text/html'].for_type_by_name(
'plotnine.ggplot', 'ggplot',
lambda g: ' ',
)
def _format_df(self, df: pd.DataFrame, mimetype: str, **df_to_kwargs) -> str:
with contextlib.ExitStack() as stack:
# Implicitly trigger _fancy_cells by putting >0 df_cell values in your df (typical usage is whole cols)
stack.enter_context(self.context(_fancy_cells=df.applymap(lambda x: isinstance(x, df_cell)).any().any()))
# Format df of cells to a df of formatted strs
df = df.apply(axis=0, func=lambda col: (col
# cat_to_str to avoid .apply mapping all cat values, which we don't need and could be slow for large cats
.pipe(cat_to_str)
.apply(self._format_df_cell, mimetype=mimetype)
))
# Format df of formatted strs to one formatted str
if self._fancy_cells:
# Disable max_colwidth else pandas will truncate and break our strs (e.g. <img> with long data url)
# - Isolate this to just df.to_* so that self._format_pd_any (above) sees the real max_colwidth, so
# that it correctly renders truncated strs for text/plain cells (via a manual ipy_format)
# - TODO Can we clean this up now that we have df_cell_str...?
stack.enter_context(pd.option_context('display.max_colwidth', -1))
if mimetype == 'text/html':
return (
df.to_html(**df_to_kwargs,
escape=False, # Allow html in cells
)
# HACK Hide '\n' from df.to_html, else it incorrectly renders them as '\\n' (and breaks <script>)
.replace('\a', '\n')
)
else:
return df.to_string(**df_to_kwargs)
def _format_series(self, s: pd.Series, mimetype: str) -> str:
# cat_to_str to avoid .apply mapping all cat values, which we don't need and could be slow for large cats
text = s.pipe(cat_to_str).apply(self._format_pd_any, mimetype=mimetype).to_string()
if mimetype == 'text/html':
# df_cell as <pre> instead of fancy html since Series doesn't properly support .to_html like DataFrames do
# - https://github.com/pandas-dev/pandas/issues/8829
- Use <div style=...> instead of <pre>, since <pre> brings along a lot of style baggage we don't want
return '<div style="white-space: pre">%s</div>' % text
else:
return text
def _format_df_cell(self, x: any, mimetype: str) -> any:
# We exclude dicts/mappings here since silently showing only dict keys (because iter(dict)) would be confusing
# - In most cases it's preferred to apply df_cell_stack locally instead of setting stack_iters=True globally,
# but it's important to keep the global option in cases like %%sql magic, where the result df displays as is
if self.stack_iters and isinstance(x, (list, tuple, np.ndarray)):
x = df_cell_stack(x)
ret = self._format_pd_any(x, mimetype=mimetype)
# HACK An ad-hoc, weird thing to help out atom/jupyter styling
# - TODO Should this be: _has_number(ret)? _has_number(x)? _has_number(col) from one frame above?
if mimetype == 'text/html' and not self._has_number(ret):
ret = '<div class="not-number">%s</div>' % (ret,)
# HACK Hide '\n' from df.to_html, else it incorrectly renders them as '\\n' (and breaks <script>)
if mimetype == 'text/html' and isinstance(ret, str):
ret = ret.replace('\n', '\a')
return ret
def _format_pd_any(self, x: any, mimetype: str) -> any:
# HACK HACK HACK Way too much stuff going on in here that's none of our business...
# If not _fancy_cells, defer formatting to pandas (to respect e.g. 'display.max_colwidth')
# - Principle of least surprise: if I put no df_cell's in my df, everything should be normal
if not self._fancy_cells:
# Apply self.precision to numbers, like numpy but everywhere
# - But only if deep_round_sig
# - And don't touch np.array's since they can be really huge, and numpy already truncates them for us
# - TODO How to achieve self.precision less brutishly?
if self.deep_round_sig and not isinstance(x, np.ndarray):
return deep_round_sig(x, self.precision)
else:
return x
# If _fancy_cells but not a df_cell value, manually emulate pandas formatting
# - This is necessary only because we have to disable 'display.max_colwidth' above to accommodate long
# formatted strs (e.g. <img>) from df_cell values (next condition)
# - This emulation will violate the principle of least surprise if we do something wrong, which is why we take care
# to avoid it if the user put no df_cell's in their df (via _fancy_cells)
# - TODO What are we missing by not reusing pd.io.formats.format.format_array?
elif not isinstance(x, df_cell):
# Do the unfancy conversion (e.g. for self.precision)
with self.context(_fancy_cells=False):
x = self._format_pd_any(x, mimetype=mimetype)
# Truncate str(x) to 'display.max_colwidth' _only if necessary_, else leave x as is so pandas can format it
# - e.g. datetime.date: pandas '2000-01-01' vs. str() 'datetime.date(2000, 1, 1)'
return truncate_like_pd_max_colwidth(x)
# If _fancy_cells and a df_cell value, format to html str like ipython display()
else:
return ipy_formats_to_mimetype(x, mimetype=mimetype)
def _has_number(self, x: any) -> bool:
return (
np.issubdtype(type(x), np.number) or
isinstance(x, list) and any(self._has_number(y) for y in x)
)
# XXX Subsumed by deep_round_sig
#
# def _format_pd_any(self, x: any) -> any:
# if np.issubdtype(type(x), np.complexfloating):
# return self._round_to_precision_complex(x)
# else:
# return x
#
# # HACK Pandas by default displays complex values with precision 16, even if you np.set_printoptions(precision=...)
# # and pd.set_option('display.precision', ...). This is a hack to display like precision=3.
# # - TODO Submit bug to pandas
# # - TODO Make this more sophisticated, e.g. to reuse (or mimic) the magic in numpy.core.arrayprint
# # - Return complex, not str, so that e.g. pd.Series displays 'dtype: complex' and not 'dtype: object'
# def _round_to_precision_complex(self, z: complex) -> complex:
# # Use complex(...) instead of type(z)(...), since complex parses from str but e.g. np.complex128 doesn't.
# # Use z.__format__ instead of '%.3g' % ..., since the latter doesn't support complex numbers (dunno why).
# return complex(z.__format__('.%sg' % self.precision))
def ipy_formats_to_text(x: any) -> str:
"""Format to text str using ipython formatters, to emulate ipython display()"""
return ipy_formats_to_mimetype(x, mimetype='text/plain')
def ipy_formats_to_html(x: any) -> str:
"""Format to html str using ipython formatters, to emulate ipython display()"""
return ipy_formats_to_mimetype(x, mimetype='text/html')
def ipy_formats_to_mimetype(x: any, mimetype: str) -> str:
"""Format to str for mimetype using ipython formatters, to emulate ipython display()"""
# TODO Clean up / modularize dispatch on format
# TODO Handle more formats (svg, ...)
ret = None
formats = ipy_all_formats(x)
if mimetype == 'text/html':
html = formats.get('text/html')
img_types = [k for k in formats.keys() if k.startswith('image/')]
if html:
ret = html.strip()
elif img_types:
img_type = first(img_types)
img = formats[img_type]
if isinstance(img, str):
img_b64 = img.strip()
elif isinstance(img, bytes):
img_b64 = base64.b64encode(img).decode()
ret = f'<img src="data:{img_type};base64,{img_b64}"></img>'
if ret is None:
ret = formats['text/plain']
return ret
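# Usage sketch (assumes an IPython session; the matplotlib figure `fig` is only
# an example of an object whose richest repr is image/*):
# ipy_formats_to_text({'a': 1}) # falls back to the text/plain repr
# ipy_formats_to_html(fig) # -> '<img src="data:image/png;base64,..."></img>'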
@dataclass
class df_cell:
"""
Mark a df cell to be displayed in some special way determined by the subtype
- Motivating use case is ipy_formats._format_df
"""
value: any
@abc.abstractmethod
def _repr_mimebundle_(self, include=None, exclude=None):
...
@classmethod
def many(cls, xs: Iterable[any]) -> Iterable['cls']:
return [cls(x) for x in xs]
# TODO Untested
class df_cell_union(df_cell):
"""
Union the mimebundles of multiple df_cell's
"""
def _repr_mimebundle_(self, include=None, exclude=None):
df_cells = list(self.value) # Materialize iters
df_cells = [x if isinstance(x, df_cell) else df_cell_display(x) for x in df_cells] # Coerce to df_cell
ret = {}
for cell in df_cells:
for k, v in cell._repr_mimebundle_(include=include, exclude=exclude).items():
print(('[kv]', k, v))
ret.setdefault(k, v)
return ret
class df_cell_display(df_cell):
"""
Mark a df cell to be displayed like ipython display()
"""
def _repr_mimebundle_(self, include=None, exclude=None):
# Ref: https://ipython.readthedocs.io/en/stable/config/integrating.html
formats, _metadata = get_ipython().display_formatter.format(self.value)
return formats
class df_cell_str(df_cell):
"""
Mark a df cell to be displayed like str(value)
- Useful e.g. for bypassing pandas 'display.max_colwidth' or ipython str wrapping
"""
def _repr_mimebundle_(self, include=None, exclude=None):
return {
'text/plain': str(self.value),
'text/html': str(self.value),
}
class df_cell_stack(df_cell):
"""
Mark a df cell to be displayed as vertically stacked values (inspired by the bigquery web UI)
- Assumes the cell value is iterable
"""
def _repr_mimebundle_(self, include=None, exclude=None):
self.value = list(self.value) # Materialize iters
return {
'text/plain': '' if len(self.value) == 0 else (
pd.Series(ipy_formats._format_pd_any(x, mimetype='text/plain') for x in self.value)
.to_string(index=False)
),
'text/html': '' if len(self.value) == 0 else (
pd.Series(ipy_formats._format_pd_any(x, mimetype='text/html') for x in self.value)
.to_string(index=False)
.replace('\n', '<br/>')
),
}
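# Usage sketch (column names are hypothetical): putting df_cell values into a
# DataFrame is what switches ipy_formats into its "fancy" per-cell rendering.
# df = pd.DataFrame({'name': ['a', 'b'], 'tags': [['x', 'y'], ['z']]})
# df.assign(tags=lambda d: df_cell_stack.many(d['tags'])) # stack list items per row
# df.assign(name=lambda d: df_cell_str.many(d['name'])) # bypass max_colwidth truncation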
def truncate_like_pd_max_colwidth(x: any) -> str:
"""
Emulate the behavior of pandas 'display.max_colwidth'
- TODO Ugh, how can we avoid doing this ourselves?
"""
max_colwidth = pd.get_option("display.max_colwidth")
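# A minimal sketch of the truncation step, assuming it mirrors pandas' own
# behavior (cut long strings and append '...', leave short values untouched so
# pandas can still format them natively):
# s = str(x)
# if isinstance(max_colwidth, int) and len(s) > max_colwidth:
#     return s[:max(max_colwidth - 3, 0)] + '...'
# return x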
import os
import unittest
import src.openbiolink.utils as utils
import pandas
import numpy as np
from src.openbiolink.edgeType import EdgeType
class TestUtils(unittest.TestCase):
# ----- get_leaf_subclasses -------
def test_get_leaf_subclasses_no_subclasses(self):
# given
class A:
pass
cls = A
# when
result = utils.get_leaf_subclasses(cls)
# then
true_result = {A}
self.assertEqual(true_result, result)
def test_get_leaf_subclasses_one_lvl(self):
# given
class A:
pass
class B (A):
pass
cls = A
# when
result = utils.get_leaf_subclasses(cls)
# then
true_result = {B}
self.assertEqual(true_result, result)
def test_get_leaf_subclasses_two_lvl(self):
# given
class A:
pass
class B (A):
pass
class C (B):
pass
cls = A
# when
result = utils.get_leaf_subclasses(cls)
# then
true_result = {C}
self.assertEqual(true_result, result)
def test_get_leaf_subclasses_diff_lvls(self):
# given
class A:
pass
class B (A):
pass
class C (B):
pass
class D (A):
pass
cls = A
# when
result = utils.get_leaf_subclasses(cls)
# then
true_result = {C, D}
self.assertEqual(true_result, result)
def test_get_leaf_subclasses_None(self):
# given
cls = None
# when
result = utils.get_leaf_subclasses(cls)
# then
true_result = None
self.assertEqual(true_result, result)
# ----- make_undir -------
def test_make_undir_all_undir(self):
# given
df = pandas.DataFrame({'id1': list('abc'), 'id2': list('xyz')})
# when
result = utils.make_undir(df)
# then
true_result = df
np.testing.assert_array_equal(true_result, result)
def test_make_undir_all_dir(self):
# given
df = pandas.DataFrame({'id1': list('abcd'), 'id2': list('badc')})
# when
result = utils.make_undir(df)
#then
true_result = pandas.DataFrame({'id1': list('ac'), 'id2': list('bd')})
np.testing.assert_array_equal(true_result, result)
def test_make_undir_mixed(self):
# given
df = pandas.DataFrame({'id1': list('abc'), 'id2': list('cxa')})
#when
result = utils.make_undir(df)
#then
true_result = pandas.DataFrame({'id1': list('ab'), 'id2': list('cx')})
np.testing.assert_array_equal(true_result, result)
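# Reference sketch (an assumption, not the actual openbiolink implementation):
# the behaviour exercised by the three tests above is equivalent to keeping the
# first occurrence of each unordered {id1, id2} pair, e.g.
# key = df[['id1', 'id2']].apply(lambda r: tuple(sorted(r)), axis=1)
# undirected = df[~key.duplicated()].reset_index(drop=True)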
def test_db_mapping_file_to_dic(self):
#given
#file with content
# x foo a
# y bar a; b
# z baz
# q c
path = os.path.dirname(os.path.abspath(__file__))
file_path = os.path.join(path, 'test_mapping_file.tsv')
#when
result = utils.db_mapping_file_to_dic(file_path, 0, 2, '\t')
#then
true_result = {'x': ['a'], 'y': ['a; b'], 'q': ['c']}
self.assertEqual(true_result, result)
def test_cls_list_to_dic(self):
self.fail()
def test_file_exists(self):
self.fail()
def test_get_diff(self):
self.fail()
# ----- remove_parent_duplicates_and_reverses -----
def test_remove_parent_parent_in_remain(self):
#given
remove = pandas.DataFrame(
{'id1': ['a'],
'edgeType':[EdgeType.GENE_REACTION_GENE],
'id2': ["0"],
'qscore': "100",
'value': [1]})
remain = pandas.DataFrame(
{'id1': ['a', 'b', 'c'],
'edgeType': [EdgeType.GENE_GENE, EdgeType.GENE_GENE, EdgeType.GENE_GENE],
'id2': ["0", "1", "2"],
'qscore': [120.0, 50,20],
'value': [1,1,0]})
#when
result = utils.remove_parent_duplicates_and_reverses(remove_set=remove, remain_set=remain)
#then
true_result = pandas.DataFrame(
{'id1': ['b', 'c'],
'edgeType': [ EdgeType.GENE_GENE, EdgeType.GENE_GENE],
'id2': [ "1", "2"],
'qscore': [50, 20],
'value': [1,0]})
np.testing.assert_array_equal(true_result.values, result.values)
def test_remove_parent_parent_in_remove(self):
#given
remove = pandas.DataFrame(
{'id1': ['a'],
'edgeType':[EdgeType.GENE_GENE],
'id2': ["0"],
'qscore': "100",
'value': list('x')})
remain = pandas.DataFrame(
{'id1': ['a', 'b', 'c'],
'edgeType': [EdgeType.GENE_REACTION_GENE, EdgeType.GENE_REACTION_GENE, EdgeType.GENE_REACTION_GENE],
'id2': ["0", "1", "2"],
'value': list('xxy')})
#when
result = utils.remove_parent_duplicates_and_reverses(remove_set=remove, remain_set=remain)
#then
true_result = pandas.DataFrame(
{'id1': ['a', 'b', 'c'],
'edgeType': [EdgeType.GENE_REACTION_GENE, EdgeType.GENE_REACTION_GENE, EdgeType.GENE_REACTION_GENE],
'id2': ["0", "1", "2"],
'qscore': [100.0, 50, 20],
'value': list('xxy')})
np.testing.assert_array_equal(true_result.values, result.values)
def test_remove_parent_reverse_parent(self):
#given
remove = pandas.DataFrame(
{'id1': ['a'],
'edgeType':[EdgeType.GENE_REACTION_GENE],
'id2': ["0"],
'qscore': ["a"],
'value': list('x')})
remain = pandas.DataFrame(
{'id1': ['0', 'b', 'c'],
'edgeType': [EdgeType.GENE_GENE, EdgeType.GENE_GENE, EdgeType.GENE_GENE],
'id2': ["a", "1", "2"],
'qscore': [100.0, 50, 20],
'value': list('xxy')})
#when
result = utils.remove_parent_duplicates_and_reverses(remove_set=remove, remain_set=remain)
#then
true_result = pandas.DataFrame(
{'id1': ['b', 'c'],
'edgeType': [EdgeType.GENE_GENE, EdgeType.GENE_GENE],
'id2': ["1", "2"],
'qscore': [50, 20],
'value': list('xy')})
np.testing.assert_array_equal(true_result.values, result.values)
def test_remove_parent_child_and_parent_and_sibling_in_remain(self):
# given
remove = pandas.DataFrame(
{'id1': ['a'],
'edgeType': [EdgeType.GENE_REACTION_GENE],
'id2': ["0"],
'qscore': "100",
'value': list('x')})
remain = pandas.DataFrame(
{'id1': ['a', 'a', 'a'],
'edgeType': [EdgeType.GENE_GENE, EdgeType.GENE_EXPRESSION_GENE, EdgeType.GENE_REACTION_GENE],
'id2': ["0", "0", "0"],
'qscore': [100.0, 50, 20],
'value': list('xxx')})
# when
result = utils.remove_parent_duplicates_and_reverses(remove_set=remove, remain_set=remain)
# then
true_result = pandas.DataFrame(
{'id1': ['a', 'a'],
'edgeType': [EdgeType.GENE_EXPRESSION_GENE, EdgeType.GENE_REACTION_GENE],
'id2': ["0", "0"],
'qscore': [50, 20],
'value': list('xx')})
np.testing.assert_array_equal(true_result.values, result.values)
def test_remove_parent_empty_remove(self):
# given
remove = pandas.DataFrame()
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handled the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
msg = "cannot add indices of unequal length"
with pytest.raises(ValueError, match=msg):
tdi + dti[0:1]
with pytest.raises(ValueError, match=msg):
tdi[0:1] + dti
# random indexes
with pytest.raises(NullFrequencyError):
tdi + pd.Int64Index([1, 2, 3])
# this is a union!
# pytest.raises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
assert result == expected
result = td + dt
expected = Timestamp('20130102')
assert result == expected
# TODO: Needs more informative name, probably split up into
# more targeted tests
@pytest.mark.parametrize('freq', ['D', 'B'])
def test_timedelta(self, freq):
index = pd.date_range('1/1/2000', periods=50, freq=freq)
shifted = index + timedelta(1)
back = shifted + timedelta(-1)
tm.assert_index_equal(index, back)
if freq == 'D':
expected = pd.tseries.offsets.Day(1)
assert index.freq == expected
assert shifted.freq == expected
assert back.freq == expected
else: # freq == 'B'
assert index.freq == pd.tseries.offsets.BusinessDay(1)
assert shifted.freq is None
assert back.freq == pd.tseries.offsets.BusinessDay(1)
result = index - timedelta(1)
expected = index + timedelta(-1)
tm.assert_index_equal(result, expected)
# GH#4134, buggy with timedeltas
rng = pd.date_range('2013', '2014')
s = Series(rng)
result1 = rng - pd.offsets.Hour(1)
result2 = DatetimeIndex(s - np.timedelta64(100000000))
result3 = rng - np.timedelta64(100000000)
result4 = DatetimeIndex(s - pd.offsets.Hour(1))
tm.assert_index_equal(result1, result4)
tm.assert_index_equal(result2, result3)
class TestAddSubNaTMasking:
# TODO: parametrize over boxes
def test_tdi_add_timestamp_nat_masking(self):
# GH#17991 checking for overflow-masking with NaT
tdinat = pd.to_timedelta(['24658 days 11:15:00', 'NaT'])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants + ts_pos_variants:
res = tdinat + variant
assert res[1] is pd.NaT
def test_tdi_add_overflow(self):
# See GH#14068
# preliminary test scalar analogue of vectorized tests below
with pytest.raises(OutOfBoundsDatetime):
pd.to_timedelta(106580, 'D') + Timestamp('2000')
with pytest.raises(OutOfBoundsDatetime):
Timestamp('2000') + pd.to_timedelta(106580, 'D')
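# Rough arithmetic behind the overflow (approximate figures, not taken from the
# test itself): Timestamp.max is 2262-04-11, about 95,800 days after 2000-01-01,
# so adding 106,580 days cannot be represented in datetime64[ns].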
_NaT = int(pd.NaT) + 1
msg = "Overflow in int64 addition"
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([106580], 'D') + Timestamp('2000')
with pytest.raises(OverflowError, match=msg):
Timestamp('2000') + pd.to_timedelta([106580], 'D')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta([_NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
pd.to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
with pytest.raises(OverflowError, match=msg):
(pd.to_timedelta([_NaT, '5 days', '1 hours']) -
pd.to_timedelta(['7 seconds', _NaT, '4 hours']))
# These should not overflow!
exp = TimedeltaIndex([pd.NaT])
result = pd.to_timedelta([pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex(['4 days', pd.NaT])
result = pd.to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
tm.assert_index_equal(result, exp)
exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
result = (pd.to_timedelta([pd.NaT, '5 days', '1 hours']) +
pd.to_timedelta(['7 seconds', pd.NaT, '4 hours']))
tm.assert_index_equal(result, exp)
class TestTimedeltaArraylikeAddSubOps:
# Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__
# TODO: moved from frame tests; needs parametrization/de-duplication
def test_td64_df_add_int_frame(self):
# GH#22696 Check that we don't dispatch to numpy implementation,
# which treats int64 as m8[ns]
tdi = pd.timedelta_range('1', periods=3)
df = tdi.to_frame()
other = pd.DataFrame([1, 2, 3], index=tdi) # indexed like `df`
with pytest.raises(TypeError):
df + other
with pytest.raises(TypeError):
other + df
with pytest.raises(TypeError):
df - other
with pytest.raises(TypeError):
other - df
# TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs
# parametrization+de-duplication
def test_timedelta_ops_with_missing_values(self):
# setup
s1 = pd.to_timedelta(Series(['00:00:01']))
s2 = pd.to_timedelta(Series(['00:00:02']))
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
sn = pd.to_timedelta(Series([pd.NaT]))
df1 = pd.DataFrame(['00:00:01']).apply(pd.to_timedelta)
df2 = pd.DataFrame(['00:00:02']).apply(pd.to_timedelta)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
# Passing datetime64-dtype data to TimedeltaIndex is deprecated
dfn = pd.DataFrame([pd.NaT]).apply(pd.to_timedelta)
scalar1 = pd.to_timedelta('00:00:01')
scalar2 = pd.to_timedelta('00:00:02')
timedelta_NaT = pd.to_timedelta('NaT')
actual = scalar1 + scalar1
assert actual == scalar2
actual = scalar2 - scalar1
assert actual == scalar1
actual = s1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - s1
tm.assert_series_equal(actual, s1)
actual = s1 + scalar1
tm.assert_series_equal(actual, s2)
actual = scalar1 + s1
tm.assert_series_equal(actual, s2)
actual = s2 - scalar1
tm.assert_series_equal(actual, s1)
actual = -scalar1 + s2
tm.assert_series_equal(actual, s1)
actual = s1 + timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
actual = s1 - timedelta_NaT
tm.assert_series_equal(actual, sn)
actual = -timedelta_NaT + s1
tm.assert_series_equal(actual, sn)
with pytest.raises(TypeError):
s1 + np.nan
with pytest.raises(TypeError):
np.nan + s1
with pytest.raises(TypeError):
s1 - np.nan
with pytest.raises(TypeError):
-np.nan + s1
actual = s1 + pd.NaT
tm.assert_series_equal(actual, sn)
actual = s2 - pd.NaT
tm.assert_series_equal(actual, sn)
actual = s1 + df1
tm.assert_frame_equal(actual, df2)
actual = s2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + s1
tm.assert_frame_equal(actual, df2)
actual = df2 - s1
tm.assert_frame_equal(actual, df1)
actual = df1 + df1
tm.assert_frame_equal(actual, df2)
actual = df2 - df1
tm.assert_frame_equal(actual, df1)
actual = df1 + scalar1
tm.assert_frame_equal(actual, df2)
actual = df2 - scalar1
tm.assert_frame_equal(actual, df1)
actual = df1 + timedelta_NaT
tm.assert_frame_equal(actual, dfn)
actual = df1 - timedelta_NaT
tm.assert_frame_equal(actual, dfn)
with pytest.raises(TypeError):
df1 + np.nan
with pytest.raises(TypeError):
df1 - np.nan
actual = df1 + pd.NaT # NaT is datetime, not timedelta
tm.assert_frame_equal(actual, dfn)
actual = df1 - pd.NaT
tm.assert_frame_equal(actual, dfn)
# TODO: moved from tests.series.test_operators, needs splitting, cleanup,
# de-duplication, box-parametrization...
def test_operators_timedelta64(self):
# series ops
v1 = pd.date_range('2012-1-1', periods=3, freq='D')
v2 = pd.date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
tm.assert_series_equal(rs, xp)
assert rs.dtype == 'timedelta64[ns]'
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
assert td.dtype == 'timedelta64[ns]'
# series on the rhs
result = df['A'] - df['A'].shift()
assert result.dtype == 'timedelta64[ns]'
result = df['A'] + td
assert result.dtype == 'M8[ns]'
# scalar Timestamp on rhs
maxa = df['A'].max()
assert isinstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
assert resultb.dtype == 'timedelta64[ns]'
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
tm.assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
tm.assert_series_equal(result, expected)
assert result.dtype == 'm8[ns]'
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
assert resulta.dtype == 'm8[ns]'
# roundtrip
resultb = resulta + d
tm.assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(resultb, df['A'])
assert resultb.dtype == 'M8[ns]'
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
tm.assert_series_equal(df['A'], resultb)
assert resultb.dtype == 'M8[ns]'
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
assert rs[2] == value
def test_timedelta64_ops_nat(self):
# GH 11349
timedelta_series = Series([NaT, Timedelta('1s')])
nat_series_dtype_timedelta = Series([NaT, NaT],
dtype='timedelta64[ns]')
single_nat_dtype_timedelta = Series([NaT], dtype='timedelta64[ns]')
# subtraction
tm.assert_series_equal(timedelta_series - NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(-NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series - single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(-single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
# addition
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series + single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta + timedelta_series,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta + NaT,
nat_series_dtype_timedelta)
tm.assert_series_equal(NaT + nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(nat_series_dtype_timedelta +
single_nat_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(single_nat_dtype_timedelta +
nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
# multiplication
tm.assert_series_equal(nat_series_dtype_timedelta * 1.0,
nat_series_dtype_timedelta)
tm.assert_series_equal(1.0 * nat_series_dtype_timedelta,
nat_series_dtype_timedelta)
tm.assert_series_equal(timedelta_series * 1, timedelta_series)
tm.assert_series_equal(1 * timedelta_series, timedelta_series)
tm.assert_series_equal(timedelta_series * 1.5,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(1.5 * timedelta_series,
Series([NaT, Timedelta('1.5s')]))
tm.assert_series_equal(timedelta_series * np.nan,
nat_series_dtype_timedelta)
tm.assert_series_equal(np.nan * timedelta_series,
nat_series_dtype_timedelta)
# division
tm.assert_series_equal(timedelta_series / 2,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / 2.0,
Series([NaT, Timedelta('0.5s')]))
tm.assert_series_equal(timedelta_series / np.nan,
nat_series_dtype_timedelta)
# -------------------------------------------------------------
# Invalid Operations
def test_td64arr_add_str_invalid(self, box_with_array):
# GH#13624
tdi = TimedeltaIndex(['1 day', '2 days'])
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi + 'a'
with pytest.raises(TypeError):
'a' + tdi
@pytest.mark.parametrize('other', [3.14, np.array([2.0, 3.0])])
def test_td64arr_add_sub_float(self, box_with_array, other):
tdi = TimedeltaIndex(['-1 days', '-1 days'])
tdarr = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdarr + other
with pytest.raises(TypeError):
other + tdarr
with pytest.raises(TypeError):
tdarr - other
with pytest.raises(TypeError):
other - tdarr
@pytest.mark.parametrize('freq', [None, 'H'])
def test_td64arr_sub_period(self, box_with_array, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
@pytest.mark.parametrize('pi_freq', ['D', 'W', 'Q', 'H'])
@pytest.mark.parametrize('tdi_freq', [None, 'H'])
def test_td64arr_sub_pi(self, box_with_array, tdi_freq, pi_freq):
# GH#20049 subtracting PeriodIndex should raise TypeError
tdi = TimedeltaIndex(['1 hours', '2 hours'], freq=tdi_freq)
dti = Timestamp('2018-03-07 17:16:40') + tdi
pi = dti.to_period(pi_freq)
# TODO: parametrize over box for pi?
tdi = tm.box_expected(tdi, box_with_array)
with pytest.raises(TypeError):
tdi - pi
# -------------------------------------------------------------
# Binary operations td64 arraylike and datetime-like
def test_td64arr_sub_timestamp_raises(self, box_with_array):
idx = TimedeltaIndex(['1 day', '2 day'])
idx = tm.box_expected(idx, box_with_array)
msg = ("cannot subtract a datelike from|"
"Could not operate|"
"cannot perform operation")
with pytest.raises(TypeError, match=msg):
idx - Timestamp('2011-01-01')
def test_td64arr_add_timestamp(self, box_with_array, tz_naive_fixture):
# GH#23215
# TODO: parametrize over scalar datetime types?
tz = tz_naive_fixture
other = Timestamp('2011-01-01', tz=tz)
idx = TimedeltaIndex(['1 day', '2 day'])
expected = DatetimeIndex(['2011-01-02', '2011-01-03'], tz=tz)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx + other
tm.assert_equal(result, expected)
result = other + idx
tm.assert_equal(result, expected)
def test_td64arr_add_sub_timestamp(self, box_with_array):
# GH#11925
ts = Timestamp('2012-01-01')
# TODO: parametrize over types of datetime scalar?
tdi = timedelta_range('1 day', periods=3)
expected = pd.date_range('2012-01-02', periods=3)
tdarr = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(ts + tdarr, expected)
tm.assert_equal(tdarr + ts, expected)
expected2 = pd.date_range('2011-12-31', periods=3, freq='-1D')
expected2 = tm.box_expected(expected2, box_with_array)
tm.assert_equal(ts - tdarr, expected2)
tm.assert_equal(ts + (-tdarr), expected2)
with pytest.raises(TypeError):
tdarr - ts
def test_tdi_sub_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) - tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
with pytest.raises(TypeError):
tdi - dtarr
# TimedeltaIndex.__rsub__
result = dtarr - tdi
tm.assert_equal(result, expected)
def test_tdi_add_dt64_array(self, box_with_array):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
dtarr = dti.values
expected = pd.DatetimeIndex(dtarr) + tdi
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi + dtarr
tm.assert_equal(result, expected)
result = dtarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_add_datetime64_nat(self, box_with_array):
# GH#23215
other = np.datetime64('NaT')
tdi = timedelta_range('1 day', periods=3)
expected = pd.DatetimeIndex(["NaT", "NaT", "NaT"])
tdser = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
tm.assert_equal(tdser + other, expected)
tm.assert_equal(other + tdser, expected)
# ------------------------------------------------------------------
# Operations with int-like others
def test_td64arr_add_int_series_invalid(self, box):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError if box is not pd.Index else NullFrequencyError
int_ser = Series([2, 3, 4])
with pytest.raises(err):
tdser + int_ser
with pytest.raises(err):
int_ser + tdser
with pytest.raises(err):
tdser - int_ser
with pytest.raises(err):
int_ser - tdser
def test_td64arr_add_intlike(self, box_with_array):
# GH#19123
tdi = TimedeltaIndex(['59 days', '59 days', 'NaT'])
ser = tm.box_expected(tdi, box_with_array)
err = TypeError
if box_with_array in [pd.Index, tm.to_array]:
err = NullFrequencyError
other = Series([20, 30, 40], dtype='uint8')
# TODO: separate/parametrize
with pytest.raises(err):
ser + 1
with pytest.raises(err):
ser - 1
with pytest.raises(err):
ser + other
with pytest.raises(err):
ser - other
with pytest.raises(err):
ser + np.array(other)
with pytest.raises(err):
ser - np.array(other)
with pytest.raises(err):
ser + pd.Index(other)
with pytest.raises(err):
ser - pd.Index(other)
@pytest.mark.parametrize('scalar', [1, 1.5, np.array(2)])
def test_td64arr_add_sub_numeric_scalar_invalid(self, box_with_array,
scalar):
box = box_with_array
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box in [pd.Index, tm.to_array] and not isinstance(scalar, float):
err = NullFrequencyError
with pytest.raises(err):
tdser + scalar
with pytest.raises(err):
scalar + tdser
with pytest.raises(err):
tdser - scalar
with pytest.raises(err):
scalar - tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vec', [
np.array([1, 2, 3]),
pd.Index([1, 2, 3]),
Series([1, 2, 3])
# TODO: Add DataFrame in here?
], ids=lambda x: type(x).__name__)
def test_td64arr_add_sub_numeric_arr_invalid(self, box, vec, dtype):
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
tdser = tm.box_expected(tdser, box)
err = TypeError
if box is pd.Index and not dtype.startswith('float'):
err = NullFrequencyError
vector = vec.astype(dtype)
with pytest.raises(err):
tdser + vector
with pytest.raises(err):
vector + tdser
with pytest.raises(err):
tdser - vector
with pytest.raises(err):
vector - tdser
# ------------------------------------------------------------------
# Operations with timedelta-like others
# TODO: this was taken from tests.series.test_ops; de-duplicate
@pytest.mark.parametrize('scalar_td', [timedelta(minutes=5, seconds=4),
Timedelta(minutes=5, seconds=4),
Timedelta('5m4s').to_timedelta64()])
def test_operators_timedelta64_with_timedelta(self, scalar_td):
# smoke tests
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 + scalar_td
scalar_td + td1
td1 - scalar_td
scalar_td - td1
td1 / scalar_td
scalar_td / td1
# TODO: this was taken from tests.series.test_ops; de-duplicate
def test_timedelta64_operations_with_timedeltas(self):
# td operate with td
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td2 = timedelta(minutes=5, seconds=4)
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
# Now again, using pd.to_timedelta, which should build
# a Series or a scalar, depending on input.
td1 = Series(pd.to_timedelta(['00:05:03'] * 3))
td2 = pd.to_timedelta('00:05:04')
result = td1 - td2
expected = (Series([timedelta(seconds=0)] * 3) -
Series([timedelta(seconds=1)] * 3))
assert result.dtype == 'm8[ns]'
tm.assert_series_equal(result, expected)
result2 = td2 - td1
expected = (Series([timedelta(seconds=1)] * 3) -
Series([timedelta(seconds=0)] * 3))
tm.assert_series_equal(result2, expected)
# roundtrip
tm.assert_series_equal(result + td2, td1)
def test_td64arr_add_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 2 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi + tdarr
tm.assert_equal(result, expected)
result = tdarr + tdi
tm.assert_equal(result, expected)
def test_td64arr_sub_td64_array(self, box):
dti = pd.date_range('2016-01-01', periods=3)
tdi = dti - dti.shift(1)
tdarr = tdi.values
expected = 0 * tdi
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = tdi - tdarr
tm.assert_equal(result, expected)
result = tdarr - tdi
tm.assert_equal(result, expected)
# TODO: parametrize over [add, sub, radd, rsub]?
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_add_sub_tdi(self, box, names):
# GH#17250 make sure result dtype is correct
# GH#19043 make sure names are propagated correctly
if box is pd.DataFrame and names[1] == 'Venkman':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['0 days', '1 day'], name=names[0])
ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[1])
expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)],
name=names[2])
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
result = tdi + ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser + tdi
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
expected = Series([Timedelta(hours=-3), Timedelta(days=1, hours=-4)],
name=names[2])
expected = tm.box_expected(expected, box)
result = tdi - ser
tm.assert_equal(result, expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
result = ser - tdi
tm.assert_equal(result, -expected)
if box is not pd.DataFrame:
assert result.dtype == 'timedelta64[ns]'
else:
assert result.dtypes[0] == 'timedelta64[ns]'
def test_td64arr_add_sub_td64_nat(self, box):
# GH#23320 special handling for timedelta64("NaT")
tdi = pd.TimedeltaIndex([NaT, Timedelta('1s')])
other = np.timedelta64("NaT")
expected = pd.TimedeltaIndex(["NaT"] * 2)
obj = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
result = obj + other
tm.assert_equal(result, expected)
result = other + obj
tm.assert_equal(result, expected)
result = obj - other
tm.assert_equal(result, expected)
result = other - obj
tm.assert_equal(result, expected)
def test_td64arr_sub_NaT(self, box):
# GH#18808
ser = Series([NaT, Timedelta('1s')])
expected = Series([NaT, NaT], dtype='timedelta64[ns]')
ser = tm.box_expected(ser, box)
expected = tm.box_expected(expected, box)
res = ser - pd.NaT
tm.assert_equal(res, expected)
def test_td64arr_add_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as + is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
freq='D')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng + two_hours
tm.assert_equal(result, expected)
def test_td64arr_sub_timedeltalike(self, two_hours, box):
# only test adding/sub offsets as - is now numeric
rng = timedelta_range('1 days', '10 days')
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
rng = tm.box_expected(rng, box)
expected = tm.box_expected(expected, box)
result = rng - two_hours
tm.assert_equal(result, expected)
# ------------------------------------------------------------------
# __add__/__sub__ with DateOffsets and arrays of DateOffsets
# TODO: this was taken from tests.series.test_operators; de-duplicate
def test_timedelta64_operations_with_DateOffset(self):
# GH#10699
td = Series([timedelta(minutes=5, seconds=3)] * 3)
result = td + pd.offsets.Minute(1)
expected = Series([timedelta(minutes=6, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
result = td - pd.offsets.Minute(1)
expected = Series([timedelta(minutes=4, seconds=3)] * 3)
tm.assert_series_equal(result, expected)
with tm.assert_produces_warning(PerformanceWarning):
result = td + Series([pd.offsets.Minute(1), pd.offsets.Second(3),
pd.offsets.Hour(2)])
expected = Series([timedelta(minutes=6, seconds=3),
timedelta(minutes=5, seconds=6),
timedelta(hours=2, minutes=5, seconds=3)])
tm.assert_series_equal(result, expected)
result = td + pd.offsets.Minute(1) + pd.offsets.Second(12)
expected = Series([timedelta(minutes=6, seconds=15)] * 3)
tm.assert_series_equal(result, expected)
# valid DateOffsets
for do in ['Hour', 'Minute', 'Second', 'Day', 'Micro', 'Milli',
'Nano']:
op = getattr(pd.offsets, do)
td + op(5)
op(5) + td
td - op(5)
op(5) - td
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_add_offset_index(self, names, box):
# GH#18849, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
# TODO: combine with test_td64arr_add_offset_index by parametrizing
# over second box?
def test_td64arr_add_offset_array(self, box):
# GH#18849
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] + other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi + other
tm.assert_equal(res, expected)
with tm.assert_produces_warning(warn):
res2 = other + tdi
tm.assert_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_sub_offset_index(self, names, box):
# GH#18824, GH#19744
if box is pd.DataFrame and names[1] == 'bar':
pytest.skip("Name propagation for DataFrame does not behave like "
"it does for Index/Series")
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = pd.Index([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer', name=names[2])
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, box)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = PerformanceWarning if box is not pd.DataFrame else None
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
def test_td64arr_sub_offset_array(self, box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
other = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
expected = TimedeltaIndex([tdi[n] - other[n] for n in range(len(tdi))],
freq='infer')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
# The DataFrame operation is transposed and so operates as separate
# scalar operations, which do not issue a PerformanceWarning
warn = None if box_with_array is pd.DataFrame else PerformanceWarning
with tm.assert_produces_warning(warn):
res = tdi - other
tm.assert_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_td64arr_with_offset_series(self, names, box_df_fail):
# GH#18849
box = box_df_fail
box2 = Series if box in [pd.Index, tm.to_array] else box
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'],
name=names[0])
other = Series([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)],
name=names[1])
expected_add = Series([tdi[n] + other[n] for n in range(len(tdi))],
name=names[2])
tdi = tm.box_expected(tdi, box)
expected_add = tm.box_expected(expected_add, box2)
with tm.assert_produces_warning(PerformanceWarning):
res = tdi + other
tm.assert_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + tdi
tm.assert_equal(res2, expected_add)
# TODO: separate/parametrize add/sub test?
expected_sub = Series([tdi[n] - other[n] for n in range(len(tdi))],
name=names[2])
expected_sub = tm.box_expected(expected_sub, box2)
with tm.assert_produces_warning(PerformanceWarning):
res3 = tdi - other
tm.assert_equal(res3, expected_sub)
@pytest.mark.parametrize('obox', [np.array, pd.Index, pd.Series])
def test_td64arr_addsub_anchored_offset_arraylike(self, obox,
box_with_array):
# GH#18824
tdi = TimedeltaIndex(['1 days 00:00:00', '3 days 04:00:00'])
tdi = tm.box_expected(tdi, box_with_array)
anchored = obox([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
# addition/subtraction ops with anchored offsets should issue
# a PerformanceWarning and _then_ raise a TypeError.
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi + anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored + tdi
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
tdi - anchored
with pytest.raises(TypeError):
with tm.assert_produces_warning(PerformanceWarning):
anchored - tdi
class TestTimedeltaArraylikeMulDivOps:
# Tests for timedelta64[ns]
# __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__
# TODO: Moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize("m", [1, 3, 10])
@pytest.mark.parametrize("unit", ['D', 'h', 'm', 's', 'ms', 'us', 'ns'])
def test_timedelta64_conversions(self, m, unit):
startdate = Series(pd.date_range('2013-01-01', '2013-01-03'))
enddate = Series(pd.date_range('2013-03-01', '2013-03-03'))
ser = enddate - startdate
ser[2] = np.nan
# op
expected = Series([x / np.timedelta64(m, unit) for x in ser])
result = ser / np.timedelta64(m, unit)
tm.assert_series_equal(result, expected)
# reverse op
expected = Series([Timedelta(np.timedelta64(m, unit)) / x
for x in ser])
result = np.timedelta64(m, unit) / ser
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Multiplication
# organized with scalar others first, then array-like
def test_td64arr_mul_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx * 1
tm.assert_equal(result, idx)
result = 1 * idx
tm.assert_equal(result, idx)
def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array):
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError):
rng * two_hours
def test_tdi_mul_int_array_zerodim(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 * 5)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * np.array(5, dtype='int64')
tm.assert_equal(result, expected)
def test_tdi_mul_int_array(self, box_with_array):
rng5 = np.arange(5, dtype='int64')
idx = TimedeltaIndex(rng5)
expected = TimedeltaIndex(rng5 ** 2)
idx = tm.box_expected(idx, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = idx * rng5
tm.assert_equal(result, expected)
def test_tdi_mul_int_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
expected = TimedeltaIndex(np.arange(5, dtype='int64') ** 2)
idx = tm.box_expected(idx, box)
expected = tm.box_expected(expected, xbox)
result = idx * pd.Series(np.arange(5, dtype='int64'))
tm.assert_equal(result, expected)
def test_tdi_mul_float_series(self, box_with_array):
box = box_with_array
xbox = pd.Series if box in [pd.Index, tm.to_array] else box
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box)
rng5f = np.arange(5, dtype='float64')
expected = TimedeltaIndex(rng5f * (rng5f + 1.0))
expected = tm.box_expected(expected, xbox)
result = idx * Series(rng5f + 1.0)
tm.assert_equal(result, expected)
# TODO: Put Series/DataFrame in others?
@pytest.mark.parametrize('other', [
np.arange(1, 11),
pd.Int64Index(range(1, 11)),
pd.UInt64Index(range(1, 11)),
pd.Float64Index(range(1, 11)),
pd.RangeIndex(1, 11)
], ids=lambda x: type(x).__name__)
def test_tdi_rmul_arraylike(self, other, box_with_array):
box = box_with_array
xbox = get_upcast_box(box, other)
tdi = TimedeltaIndex(['1 Day'] * 10)
expected = timedelta_range('1 days', '10 days')
expected._data.freq = None
tdi = tm.box_expected(tdi, box)
expected = tm.box_expected(expected, xbox)
result = other * tdi
tm.assert_equal(result, expected)
commute = tdi * other
tm.assert_equal(commute, expected)
# ------------------------------------------------------------------
# __div__, __rdiv__
def test_td64arr_div_nat_invalid(self, box_with_array):
# don't allow division by NaT (maybe could in the future)
rng = timedelta_range('1 days', '10 days', name='foo')
rng = tm.box_expected(rng, box_with_array)
with pytest.raises(TypeError,
match="'?true_divide'? cannot use operands"):
rng / pd.NaT
with pytest.raises(TypeError, match='Cannot divide NaTType by'):
pd.NaT / rng
def test_td64arr_div_td64nat(self, box_with_array):
# GH#23829
rng = timedelta_range('1 days', '10 days',)
rng = tm.box_expected(rng, box_with_array)
other = np.timedelta64('NaT')
expected = np.array([np.nan] * 10)
expected = tm.box_expected(expected, box_with_array)
result = rng / other
tm.assert_equal(result, expected)
result = other / rng
tm.assert_equal(result, expected)
def test_td64arr_div_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx / 1
tm.assert_equal(result, idx)
with pytest.raises(TypeError, match='Cannot divide'):
# GH#23829
1 / idx
def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array):
# GH#20088, GH#22163 ensure DataFrame returns correct dtype
rng = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Float64Index((np.arange(10) + 1) * 12, name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours,
box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = pd.Float64Index([12, np.nan, 24], name='foo')
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = rng / two_hours
tm.assert_equal(result, expected)
result = two_hours / rng
expected = 1 / expected
tm.assert_equal(result, expected)
def test_td64arr_div_td64_ndarray(self, box_with_array):
# GH#22631
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
expected = pd.Float64Index([12, np.nan, 24])
rng = tm.box_expected(rng, box_with_array)
expected = tm.box_expected(expected, box_with_array)
other = np.array([2, 4, 2], dtype='m8[h]')
result = rng / other
tm.assert_equal(result, expected)
result = rng / tm.box_expected(other, box_with_array)
tm.assert_equal(result, expected)
result = rng / other.astype(object)
tm.assert_equal(result, expected)
result = rng / list(other)
tm.assert_equal(result, expected)
# reversed op
expected = 1 / expected
result = other / rng
tm.assert_equal(result, expected)
result = tm.box_expected(other, box_with_array) / rng
tm.assert_equal(result, expected)
result = other.astype(object) / rng
tm.assert_equal(result, expected)
result = list(other) / rng
tm.assert_equal(result, expected)
def test_tdarr_div_length_mismatch(self, box_with_array):
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'])
mismatched = [1, 2, 3, 4]
rng = tm.box_expected(rng, box_with_array)
for obj in [mismatched, mismatched[:2]]:
# one shorter, one longer
for other in [obj, np.array(obj), pd.Index(obj)]:
with pytest.raises(ValueError):
rng / other
with pytest.raises(ValueError):
other / rng
# ------------------------------------------------------------------
# __floordiv__, __rfloordiv__
def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([0, 0, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = td1 // scalar_td
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar(self, box_with_array, scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
result = scalar_td // td1
tm.assert_equal(result, expected)
def test_td64arr_rfloordiv_tdscalar_explicit(self, box_with_array,
scalar_td):
# GH#18831
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
expected = Series([1, 1, np.nan])
td1 = tm.box_expected(td1, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
# We can test __rfloordiv__ using this syntax,
# see `test_timedelta_rfloordiv`
result = td1.__rfloordiv__(scalar_td)
tm.assert_equal(result, expected)
def test_td64arr_floordiv_int(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
result = idx // 1
tm.assert_equal(result, idx)
pattern = ('floor_divide cannot use operands|'
'Cannot divide int by Timedelta*')
with pytest.raises(TypeError, match=pattern):
1 // idx
def test_td64arr_floordiv_tdlike_scalar(self, two_hours, box_with_array):
tdi = timedelta_range('1 days', '10 days', name='foo')
expected = pd.Int64Index((np.arange(10) + 1) * 12, name='foo')
tdi = tm.box_expected(tdi, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdi // two_hours
tm.assert_equal(result, expected)
# TODO: Is this redundant with test_td64arr_floordiv_tdlike_scalar?
@pytest.mark.parametrize('scalar_td', [
timedelta(minutes=10, seconds=7),
Timedelta('10m7s'),
Timedelta('10m7s').to_timedelta64()
], ids=lambda x: type(x).__name__)
def test_td64arr_rfloordiv_tdlike_scalar(self, scalar_td, box_with_array):
# GH#19125
tdi = TimedeltaIndex(['00:05:03', '00:05:03', pd.NaT], freq=None)
expected = pd.Index([2.0, 2.0, np.nan])
tdi = tm.box_expected(tdi, box_with_array, transpose=False)
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi.__rfloordiv__(scalar_td)
tm.assert_equal(res, expected)
expected = pd.Index([0.0, 0.0, np.nan])
expected = tm.box_expected(expected, box_with_array, transpose=False)
res = tdi // (scalar_td)
tm.assert_equal(res, expected)
# ------------------------------------------------------------------
# mod, divmod
# TODO: operations with timedelta-like arrays, numeric arrays,
# reversed ops
def test_td64arr_mod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 Day', '2 Days', '0 Days'] * 3)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % three_days
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, three_days)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // three_days)
def test_td64arr_mod_int(self, box_with_array):
tdi = timedelta_range('1 ns', '10 ns', periods=10)
tdarr = tm.box_expected(tdi, box_with_array)
expected = TimedeltaIndex(['1 ns', '0 ns'] * 5)
expected = tm.box_expected(expected, box_with_array)
result = tdarr % 2
tm.assert_equal(result, expected)
with pytest.raises(TypeError):
2 % tdarr
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(tdarr, 2)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], tdarr // 2)
def test_td64arr_rmod_tdscalar(self, box_with_array, three_days):
tdi = timedelta_range('1 Day', '9 days')
tdarr = tm.box_expected(tdi, box_with_array)
expected = ['0 Days', '1 Day', '0 Days'] + ['3 Days'] * 6
expected = TimedeltaIndex(expected)
expected = tm.box_expected(expected, box_with_array)
result = three_days % tdarr
tm.assert_equal(result, expected)
if box_with_array is pd.DataFrame:
pytest.xfail("DataFrame does not have __divmod__ or __rdivmod__")
result = divmod(three_days, tdarr)
tm.assert_equal(result[1], expected)
tm.assert_equal(result[0], three_days // tdarr)
# ------------------------------------------------------------------
# Operations with invalid others
def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td):
td1 = Series([timedelta(minutes=5, seconds=3)] * 3)
td1.iloc[2] = np.nan
td1 = tm.box_expected(td1, box_with_array)
# check that we are getting a TypeError
# with 'operate' (from core/ops.py) for the ops that are not
# defined
pattern = 'operate|unsupported|cannot|not supported'
with pytest.raises(TypeError, match=pattern):
td1 * scalar_td
with pytest.raises(TypeError, match=pattern):
scalar_td * td1
def test_td64arr_mul_too_short_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx[:3]
with pytest.raises(ValueError):
idx * np.array([1, 2])
def test_td64arr_mul_td64arr_raises(self, box_with_array):
idx = TimedeltaIndex(np.arange(5, dtype='int64'))
idx = tm.box_expected(idx, box_with_array)
with pytest.raises(TypeError):
idx * idx
# ------------------------------------------------------------------
# Operations with numeric others
@pytest.mark.parametrize('one', [1, np.array(1), 1.0, np.array(1.0)])
def test_td64arr_mul_numeric_scalar(self, box_with_array, one):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['-59 Days', '-59 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser * (-one)
tm.assert_equal(result, expected)
result = (-one) * tdser
tm.assert_equal(result, expected)
expected = Series(['118 Days', '118 Days', 'NaT'],
dtype='timedelta64[ns]')
expected = tm.box_expected(expected, box_with_array)
result = tdser * (2 * one)
tm.assert_equal(result, expected)
result = (2 * one) * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('two', [2, 2.0, np.array(2), np.array(2.0)])
def test_td64arr_div_numeric_scalar(self, box_with_array, two):
# GH#4521
# divide/multiply by integers
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
expected = Series(['29.5D', '29.5D', 'NaT'], dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, box_with_array)
result = tdser / two
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match='Cannot divide'):
two / tdser
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_rmul_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['1180 Days', '1770 Days', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser * vector
tm.assert_equal(result, expected)
result = vector * tdser
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', ['int64', 'int32', 'int16',
'uint64', 'uint32', 'uint16', 'uint8',
'float64', 'float32', 'float16'])
@pytest.mark.parametrize('vector', [np.array([20, 30, 40]),
pd.Index([20, 30, 40]),
Series([20, 30, 40])],
ids=lambda x: type(x).__name__)
def test_td64arr_div_numeric_array(self, box_with_array, vector, dtype):
# GH#4521
# divide/multiply by integers
xbox = get_upcast_box(box_with_array, vector)
tdser = pd.Series(['59 Days', '59 Days', 'NaT'], dtype='m8[ns]')
vector = vector.astype(dtype)
expected = Series(['2.95D', '1D 23H 12m', 'NaT'],
dtype='timedelta64[ns]')
tdser = tm.box_expected(tdser, box_with_array)
expected = tm.box_expected(expected, xbox)
result = tdser / vector
tm.assert_equal(result, expected)
pattern = ('true_divide cannot use operands|'
'cannot perform __div__|'
'cannot perform __truediv__|'
'unsupported operand|'
'Cannot divide')
with pytest.raises(TypeError, match=pattern):
vector / tdser
if not isinstance(vector, pd.Index):
# Index.__rdiv__ won't try to operate elementwise, just raises
result = tdser / vector.astype(object)
if box_with_array is pd.DataFrame:
expected = [tdser.iloc[0, n] / vector[n]
for n in range(len(vector))]
else:
expected = [tdser[n] / vector[n] for n in range(len(tdser))]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
with pytest.raises(TypeError, match=pattern):
vector.astype(object) / tdser
@pytest.mark.parametrize('names', [(None, None, None),
('Egon', 'Venkman', None),
('NCC1701D', 'NCC1701D', 'NCC1701D')])
def test_td64arr_mul_int_series(self, box_df_fail, names):
# GH#19042 test for correct name attachment
box = box_df_fail # broadcasts along wrong axis, but doesn't raise
tdi = TimedeltaIndex(['0days', '1day', '2days', '3days', '4days'],
name=names[0])
# TODO: Should we be parametrizing over types for `ser` too?
ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1])
import os
import glob
import numpy as np
import nibabel as nib
import pandas as pd
from glmsingle.design.make_design_matrix import make_design
from glmsingle.glmsingle import GLM_single
import time
sub = 2
ses = 1
stimdur = 0.5
tr = 2
proj_path = os.path.join(
'/home',
'adf',
'charesti',
'data',
'arsa-fmri',
'BIDS')
data_path = os.path.join(
proj_path,
'derivatives',
'fmriprep',
'sub-{}',
'ses-{}',
'func')
design_path = os.path.join(
proj_path,
'sub-{}',
'ses-{}',
'func')
runs = glob.glob(
os.path.join(data_path.format(sub, ses), '*preproc*nii.gz'))
runs.sort()
runs = runs[:-1]
eventfs = glob.glob(
os.path.join(design_path.format(sub, ses), '*events.tsv'))
eventfs.sort()
runs = runs[:3]
eventfs = eventfs[:3]
data = []
design = []
for i, (run, eventf) in enumerate(zip(runs, eventfs)):
print(f'run {i}')
y = nib.load(run).get_fdata().astype(np.float32)
dims = y.shape
# y = np.moveaxis(y, -1, 0)
# y = y.reshape([y.shape[0], -1])
n_volumes = y.shape[-1]
# Load onsets and item presented
events_tsv = pd.read_csv(eventf, sep='\t')  # read the events file once
onsets = events_tsv["onset"].values
items = events_tsv["stimnumber"].values
n_events = len(onsets)
# Create design matrix
events = pd.DataFrame()
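# --- Hedged sketch (not in the original script) -----------------------------
# The dump stops right after creating the empty events frame. A plausible
# continuation builds a volumes-by-conditions onset matrix for this run and
# collects it, together with the data, for GLMsingle. Treating `stimnumber`
# as a 0-based condition index is an assumption; in practice the `events`
# frame would more likely be filled in and passed to the imported
# `make_design` helper, whose exact signature is not assumed here.
n_conditions = int(items.max()) + 1
run_design = np.zeros((n_volumes, n_conditions))
for onset, item in zip(onsets, items):
    run_design[int(round(onset / tr)), int(item)] = 1
design.append(run_design)
data.append(y)
# After the loop, `design` and `data` would presumably be passed to
# GLM_single(...).fit(...) along with `stimdur` and `tr` (not verified here).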
from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = {
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
"all": np.NaN,
"any": np.NaN,
"count": 0,
"corrwith": np.NaN,
"first": np.NaN,
"idxmax": np.NaN,
"idxmin": np.NaN,
"last": np.NaN,
"mad": np.NaN,
"max": np.NaN,
"mean": np.NaN,
"median": np.NaN,
"min": np.NaN,
"nth": np.NaN,
"nunique": 0,
"prod": np.NaN,
"quantile": np.NaN,
"sem": np.NaN,
"size": 0,
"skew": np.NaN,
"std": np.NaN,
"sum": 0,
"var": np.NaN,
}
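# A minimal illustration of the mapping above (sketch only, not part of the
# test suite): grouping on a Categorical key with an unobserved category "c"
# and observed=False keeps "c" in the result, yielding 0 for count/size/sum
# and NaN for most other reductions, e.g.
#   df = DataFrame({"key": Categorical(["a", "b"], categories=list("abc")),
#                   "val": [1, 2]})
#   df.groupby("key", observed=False)["val"].count()  # "c" -> 0
#   df.groupby("key", observed=False)["val"].mean()   # "c" -> NaN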
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "<NAME>"], [2, "<NAME>"], [1, "<NAME>"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = Series([1, 2, 3])
df = DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
c = Categorical(["first", "second", "third", "fourth"], ordered=True)
df = DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = DataFrame(
{"payload": [-2, -1], "col": Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for when group by on None resulted in
# coercion of dtype categorical -> float
df = DataFrame({"A": [None] * 3, "B": Categorical(["train", "train", "test"])})
result = df.groupby("A").first()["B"]
expected = Series(
Categorical([], categories=["test", "train"]),
index=Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when categories is ordered, group is ordered by category's order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this ####
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use the same data as test_groupby_sort_categorical, whose categories
# correspond to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when categories is ordered, group is ordered by category's order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
df = DataFrame(
{
"key1": Categorical(list("<KEY>")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", {"dtype": "category"}), ("apply", {})]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
# Accessing a Category that is not in the dataframe
df = DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
with pytest.raises(KeyError, match="'vau'"):
df.groupby("var").apply(
lambda rows: DataFrame(
{"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
)
)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("AB") * 2, categories=list("ABC")),
"value": [0.1] * 4,
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
args = {"nth": [0]}.get(reduction_func, [])
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
for idx in unobserved:
val = result.loc[idx]
assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
# If we expect unobserved values to be zero, we also expect the dtype to be int.
# Except for .sum(). If the observed categories sum to dtype=float (i.e. their
# sums have decimals), then the zeros for the missing categories should also be
# floats.
if zero_or_nan == 0 and reduction_func != "sum":
assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
reduction_func, observed, request
):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two Categorical variables,
# returns the categories that are not in df when observed=False/None
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = DataFrame(
{
"cat_1": Categorical(list("AABB"), categories=list("ABC")),
"cat_2": Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
expected = _results_for_groupbys_with_missing_categories[reduction_func]
if expected is np.nan:
assert res.loc[unobserved_cats].isnull().all().all()
else:
assert (res.loc[unobserved_cats] == expected).all().all()
def test_series_groupby_categorical_aggregation_getitem():
# GH 8870
d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=True, sort=True)
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, expected_values",
[(Series.nunique, [1, 1, 2]), (Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
# 31256
df = DataFrame(
{
"id": [0, 1, 2, 3, 4],
"groups": [0, 1, 1, 2, 2],
"value": Categorical([0, 0, 0, 0, 1]),
}
).set_index("id")
result = df.groupby("groups").agg(func)
expected = DataFrame(
{"value": expected_values}, index=Index([0, 1, 2], name="groups")
)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_non_numeric():
df = DataFrame({"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"])})
expected = DataFrame({"A": [2, 1]}, index=[1, 2])
result = df.groupby([1, 2, 1]).agg(Series.nunique)
tm.assert_frame_equal(result, expected)
result = df.groupby([1, 2, 1]).nunique()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index, when grouping over categorical column with
# first/last. Renamed Categorical instead of DataFrame previously.
df = DataFrame({"A": [1997], "B": Series(["b"], dtype="category").cat.as_ordered()})
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
expected = Series(["b"], index=Index([1997], name="A"), name="B")
tm.assert_series_equal(result, expected)
def test_read_only_category_no_sort():
# GH33410
cats = np.array([1, 2])
cats.flags.writeable = False
df = DataFrame(
{"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
)
expected = DataFrame(data={"a": [2, 6]}, index=CategoricalIndex([1, 2], name="b"))
result = df.groupby("b", sort=False).mean()
tm.assert_frame_equal(result, expected)
def test_sorted_missing_category_values():
# GH 28597
df = DataFrame(
{
"foo": [
"small",
"large",
"large",
"large",
"medium",
"large",
"large",
"medium",
],
"bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
}
)
df["foo"] = (
df["foo"]
.astype("category")
.cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
)
expected = DataFrame(
{
"tiny": {"A": 0, "C": 0},
"small": {"A": 0, "C": 1},
"medium": {"A": 1, "C": 1},
"large": {"A": 3, "C": 2},
}
)
expected = expected.rename_axis("bar", axis="index")
expected.columns = CategoricalIndex(
["tiny", "small", "medium", "large"],
categories=["tiny", "small", "medium", "large"],
ordered=True,
name="foo",
dtype="category",
)
result = df.groupby(["bar", "foo"]).size().unstack()
tm.assert_frame_equal(result, expected)
def test_agg_cython_category_not_implemented_fallback():
# https://github.com/pandas-dev/pandas/issues/31450
df = DataFrame({"col_num": [1, 1, 2, 3]})
df["col_cat"] = df["col_num"].astype("category")
result = df.groupby("col_num").col_cat.first()
expected = Series([1, 2, 3], index=Index([1, 2, 3], name="col_num"), name="col_cat")
tm.assert_series_equal(result, expected)
result = df.groupby("col_num").agg({"col_cat": "first"})
expected = expected.to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH: 28641 groupby drops index, when grouping over categorical column with min/max
ds = Series(["b"], dtype="category").cat.as_ordered()
df = DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = DataFrame({"B": ["b"]}, index=Index([1997], name="A"))
tm.assert_frame_equal(result, expected)
def test_aggregate_categorical_with_isnan():
# GH 29837
df = DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_categorical_transform():
# GH 29037
df = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
}
)
delivery_status_type = pd.CategoricalDtype(
categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
)
df["status"] = df["status"].astype(delivery_status_type)
df["last_status"] = df.groupby("package_id")["status"].transform(max)
result = df.copy()
expected = DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
"last_status": [
"Delivered",
"Delivered",
"Delivered",
"OnTheWay",
"OnTheWay",
"Waiting",
],
}
)
expected["status"] = expected["status"].astype(delivery_status_type)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = Categorical([0, 0, 1, 1])
val = [0, 1, 1, 0]
df = DataFrame({"a": cat, "b": cat, "c": val})
idx = Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
"first": Series([0, np.NaN, np.NaN, 1], idx, name="c"),
"last": Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func]
if observed:
expected = expected.dropna().astype(np.int64)
srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
result = getattr(srs_grp, func)()
| tm.assert_series_equal(result, expected) | pandas._testing.assert_series_equal |
import requests
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from fake_useragent import UserAgent
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import pandas as pd
import numpy as np
import re
import os
import pickle as pk
from collections import deque
import string
import time
import psycopg2 as pg
from pymongo import MongoClient
import leafly.data_preprocess as dp
import leafly.scrape_leafly as sl
ua = UserAgent()
MAIN_URL = 'http://analytical360.com/testresults'
def setup_driver():
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (ua.random)
driver = webdriver.PhantomJS(desired_capabilities=dcap)
driver.set_window_size(1920, 1080)
return driver
def get_headers_cookies(driver):
agent = ua.random # select a random user agent
headers = {
"Connection": "close", # another way to cover tracks
"User-Agent": agent
}
cookies = driver.get_cookies()
cooks = {}
for c in cookies:
cooks[c['name']] = c['value'] # map it to be usable for requests
return headers, cooks
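
# Usage sketch (hedged): how the selenium-derived headers/cookies are meant to be
# reused with plain requests calls, mirroring download_image() further below.
# Requires PhantomJS to be installed, like the rest of this script.
def _requests_with_selenium_session_example():
    driver = setup_driver()
    driver.get(MAIN_URL)
    headers, cooks = get_headers_cookies(driver)
    resp = requests.get(MAIN_URL, headers=headers, cookies=cooks)
    driver.quit()
    return resp.status_code
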
def check_rows(res):
'''
args: takes a requests object
returns: all table rows in doc
checks to make sure the request was successful and the data we want is
there in the format we expect. Otherwise throws an error
'''
soup = bs(res.content, 'lxml')
rows = soup.findAll('tr', {'title': 'Click To View Detail Test Results'})
if len(rows) == 0 or not res.ok:
raise Exception('response returned:', res)
return rows
def get_links(rows):
'''
take requests response object
first checks to make sure there are any links in the row, if not, spits an
error
returns a list of unique links from all the rows
'''
links = set()
for i, r in enumerate(rows):
row_links = r.findAll('a')
if len(row_links) == 0:
raise Exception('no links in row', i)
links = set(row_links) | links
links = list(set([l.get('href') for l in links]))
return links
def get_flower_links(rows):
links = get_links(rows)
flower_links = [l for l in links if re.search('.*flowers.*', l)]
flower_rows = [r for r in rows if re.search('.*flowers.*', r.findAll('a')[0].get('href'))]
return flower_links, flower_rows
def check_groups(links):
'''
args: takes in list of links from analytical360
returns: unique product groups in links (i.e. edibles, flowers, etc)
    checks to make sure the number of groups is still 6
{'concentrates', 'edibles', 'flowers', 'liquids', 'listing', 'topicals'}
'''
groups = [l.split('/')[-2] for l in links]
groups = list(set(groups))
if len(groups) != 6:
raise Exception('number of product groups has changed!')
return groups
def make_links_dataframe(links):
'''
args: list of links
returns: dataframe of links with product group, link
'''
df = pd.DataFrame({'link': links, 'product': [
l.split('/')[-2] for l in links]})
return df
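
# Rough usage sketch for make_links_dataframe(); the URLs are made up and only
# mimic the analytical360 path layout, they are not real sample IDs.
def _make_links_dataframe_example():
    fake_links = [
        'http://analytical360.com/m/flowers/111111',
        'http://analytical360.com/m/edibles/222222',
    ]
    df = make_links_dataframe(fake_links)
    # df has two columns: 'link' (the URL) and 'product' ('flowers' / 'edibles')
    return df
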
def extract_info(link):
'''
args: link to product page
returns: dict or df of properties of product
'''
    res = requests.get(link)
    s = bs(res.content, 'lxml')
    h3s = s.findAll('h3')
    if len(h3s) < 2:
        raise Exception('can\'t find title in:', link)
    name = h3s[1].text.strip()
    # only the product name is parsed so far; return it as a minimal result dict
    return {'name': name}
def get_links_selenium(driver):
# right now only gets one link
return driver.find_element_by_xpath('//*[@id="flowers"]/tbody/tr[1049]/td[1]/a')
def download_image(src, filename, headers, cooks):
r = requests.get(src, headers=headers, cookies=cooks)
with open(filename, 'wb') as f:
for chunk in r:
f.write(chunk)
def downloaded_strain_images():
pass
def get_flower_df(rows):
flower_links, flower_rows = get_flower_links(rows)
# get strain name and if there are cannabinoid percentages
flow_links = []
flow_names = []
flow_thc = []
flow_cbd = []
flow_active = []
nothc = []
nothcStrs = []
for r in flower_rows:
links = r.findAll('a')
strain = links[0].text
thc = links[1].text
cbd = links[2].text
activated = links[3].text
if thc == 'N/A' or re.search('.*\%.*', thc) is None:
nothc.append(r)
continue
flow_links.append(links[0].get('href'))
flow_names.append(strain)
flow_thc.append(thc)
flow_cbd.append(cbd)
flow_active.append(activated)
flow_df = pd.DataFrame({'name':flow_names, 'link':flow_links, 'thc':flow_thc, 'cbd':flow_cbd, 'activated':flow_active})
flow_df = flow_df.drop_duplicates()
return flow_df
def scrape_site(df, base_im_path='analytical360/new_images/', delay=None, sql=None, mongo=None):
'''
goes through analytical360 site and scrapes images and data
    sql / mongo can be set to database names to save info to, so the process can be stopped part way through
    (only the mongo path is actually implemented; the sql branch was never finished)
'''
if sql is not None:
# didn't get this working...couldn't figure out lists in sql
        conn = pg.connect("dbname=" + sql + " host='localhost'")
if mongo is not None:
client = MongoClient()
db = client[mongo[0]]
coll = db[mongo[1]]
driver = setup_driver()
driver.get(MAIN_URL)
headers, cooks = get_headers_cookies(driver)
if not os.path.exists(base_im_path):
os.mkdir(base_im_path)
# pages that aren't really flowers, but concentrates
# others that have broken images links
black_list = set(['http://analytical360.com/m/flowers/604216', 'http://analytical360.com/m/flowers/550371'])
name_black_list = set(['Raw Pulp CJ',
'Batch 35 Spent Trim',
'B21 Spent Trim (CBD)',
'B21 CBD',
'B22 Spent Trim (THC)',
'ACDC x Bubster #14 Male',
'ACDC x Bubster #47 Male',
'Blue Dog #19 Male',
'Blue Dog #31 Male',
'Canna-Tsu #16 Male',
'Canna-Tsu #19 Male',
'Foo Dog #3 Male',
'Foo Dog #11 Male',
'Foo Dog #12 Male',
'Harle-Tsu #2 Male',
'Harle-Tsu #7 Male',
'Miami Blues #24',
'Swiss Gold #6 Male',
'Swiss Gold #18 Male',
'Swiss Gold #26 Male',
'Under Foo #8 Male',
'Under Foo #11 Male',
'Under Foo #27 Male',
'Under Foo #35 Male',
'Harle-Tsu #7Male'])
# broke here first time thru
# startrow = flow_df[flow_df['name'] == 'Mango Haze'].index[0]
# df_remain = flow_df.iloc[startrow:, :]
cannabinoids = []
terpenes = []
im_sources = []
no_imgs = []
names = []
clean_names = []
for i, r in df.iterrows():
if delay is not None:
time.sleep(delay)
link = r['link']
id = link.split('/')[-1]
if link in black_list or r['name'] in name_black_list or re.search('.*male.*', r['name'], re.IGNORECASE) is not None or re.search('.*raw\s*pulp.*', r['name'], re.IGNORECASE) is not None or re.search('.*spent\s+trim.*', r['name'], re.IGNORECASE) is not None:
continue
clean_name = re.sub('/', '-', r['name'])
clean_name = re.sub('[ + ' + string.punctuation + '\s]+', '', clean_name).lower()
clean_names.append(clean_name)
save_path = base_im_path + clean_name + id + '.jpg'
if mongo is not None and coll.find({'link':link}).count() != 0:
print('already processed', r['name'])
continue
print(r['name'])
names.append(r['name'])
driver.get(link)
        print(link)
        isedible = False  # reset per product; set to True below if the first table row mentions serving size
        try:
img = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[5]/div/div[1]/img[1]')
src = img.get_attribute('src')
im_sources.append(src)
print(src)
if os.path.exists(save_path):
print(r['name'], 'already saved image')
else:
print(save_path)
if not isedible:
try:
download_image(src, save_path, headers, cooks)
except:
no_imgs.append(r)
im_sources.pop()
src = ''
except:
no_imgs.append(r)
src = ''
try:
table1 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[7]/div/div[1]/ul')
except:
cannabinoids.append([])
terpenes.append([])
continue
table1soup = bs(table1.get_attribute('innerHTML'), 'lxml')
table1rows = [l.get_text() for l in table1soup.findAll('li')]
isedible = False
if re.search('serving\s*size', table1rows[0], re.IGNORECASE) is not None:
isedible = True
cannabinoids.append(table1rows)
try:
table2 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[8]/div/div/ul')
except:
try:
table2 = driver.find_element_by_xpath('//*[@id="mainwrapper"]/div[4]/div[1]/div[9]/div/div/ul')
except:
terpenes.append([])
continue
table2soup = bs(table2.get_attribute('innerHTML'), 'lxml')
table2rows = [l.get_text() for l in table2soup.findAll('li')]
terpenes.append(table2rows)
        if mongo is not None:
            coll.insert_one(
                {'cannabinoids': table1rows,
                 'terpenes': table2rows,
                 'clean_name': clean_name,
                 'link': link,
                 'im_source': src,
                 'isedible': isedible,
                 'save_path': save_path,
                 'name': r['name']})
    if mongo is not None:
        client.close()
return cannabinoids, terpenes, im_sources, no_imgs, names, clean_names
def save_raw_scrape(cannabinoids, terpenes, no_imgs, im_sources, names, clean_names, prefix=None):
    if prefix is None:
        prefix = ''
    # pickle files must be opened in binary mode
    pk.dump(cannabinoids, open('analytical360/' + prefix + 'cannabinoids.pk', 'wb'), 2)
    pk.dump(terpenes, open('analytical360/' + prefix + 'terpenes.pk', 'wb'), 2)
    pk.dump(no_imgs, open('analytical360/' + prefix + 'no_imgs.pk', 'wb'), 2)
    pk.dump(im_sources, open('analytical360/' + prefix + 'im_sources.pk', 'wb'), 2)
    pk.dump(names, open('analytical360/' + prefix + 'names.pk', 'wb'), 2)
    pk.dump(clean_names, open('analytical360/' + prefix + 'clean_names.pk', 'wb'), 2)
def load_raw_scrape(prefix=None):
    if prefix is None:
        prefix = ''
    # pickle files must be opened in binary mode
    cannabinoids = pk.load(open('analytical360/' + prefix + 'cannabinoids.pk', 'rb'))
    terpenes = pk.load(open('analytical360/' + prefix + 'terpenes.pk', 'rb'))
    no_imgs = pk.load(open('analytical360/' + prefix + 'no_imgs.pk', 'rb'))
    im_sources = pk.load(open('analytical360/' + prefix + 'im_sources.pk', 'rb'))
    names = pk.load(open('analytical360/' + prefix + 'names.pk', 'rb'))
    clean_names = pk.load(open('analytical360/' + prefix + 'clean_names.pk', 'rb'))
return cannabinoids, terpenes, no_imgs, im_sources, names, clean_names
def parse_raw_scrape(cannabinoids, terpenes, names):
    '''
    parses raw scrape data for cannabinoids and terpenes. Returns a dataframe
    with one row per scraped product and one column per cannabinoid/terpene
    (plus the product name).
    '''
trail = deque([0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1])
cannabinoid_strs = deque(['thc-a', 'thc', 'cbn', 'thc total', 'thc-total', 'cbd-a', 'cbd', 'cbd-total', 'cbd total', 'cbg', 'cbc', 'activated total', 'activated', 'active'])
c_dict_keys = ['thca', 'thc', 'cbn', 'thc_total', 'cbda', 'cbd', 'cbd_total', 'cbg', 'cbc', 'activated_total']
conversion_dict = {'thc-a':'thca',
'thc total':'thc_total',
'thc-total':'thc_total',
'cbd-a':'cbda',
'cbd-total':'cbd_total',
'cbd total':'cbd_total',
'activated total':'activated_total',
'activated':'activated_total',
                       'active':'activated_total'}  # converts similar strings to the dict key for the cannabinoid dict
cannabinoid_dict = {}
screen_tups = list(zip(list(range(len(trail))), trail, cannabinoid_strs))
for i, cann in enumerate(cannabinoids):
print(i)
temp_cann = c_dict_keys[:]
#cannabinoid_dict.setdefault('name', []).append(names[i])
for ca in cann:
for j, t, c in screen_tups:
has_str, num = find_string(ca, c, t)
if has_str:
# idx = list(cannabinoid_strs).index(c)
# cannabinoid_strs.rotate(-idx) # move that entry to the beginning of the list
# trail.rotate(-idx)
# screen_tups = zip(range(len(trail)), trail, cannabinoid_strs)
print('found', c, ca)
if c in conversion_dict:
cannabinoid_dict.setdefault(conversion_dict[c], []).append(num)
temp_cann.remove(conversion_dict[c])
else:
cannabinoid_dict.setdefault(c, []).append(num)
temp_cann.remove(c)
break
if len(temp_cann) > 0:
print('didn\'t scrape:', temp_cann)
for t in temp_cann:
cannabinoid_dict.setdefault(t, []).append('')
terp_strs = deque(['beta-Pinene',
'Humulene',
'Limonene',
'alpha-Pinene',
'Caryophyllene',
'Beta Pinene',
'Linalool',
'Caryophyllene oxide',
'Myrcene',
'TERPENE-TOTAL',
'Terpinolene',
'Ocimene',
'Alpha Pinene'])
t_dict_keys = ['beta_pinene',
'alpha_pinene',
'caryophyllene_oxide',
'Humulene',
'Limonene',
'Caryophyllene',
'Linalool',
'Myrcene',
'Terpinolene',
'Ocimene',
'total_terpenes']
# converts similar strings to the dict key for terp dict
terp_conv_dict = {'beta-Pinene':'beta_pinene',
'Beta Pinene':'beta_pinene',
'alpha-Pinene':'alpha_pinene',
'Alpha Pinene':'alpha_pinene',
'Caryophyllene oxide':'caryophyllene_oxide',
'TERPENE-TOTAL':'total_terpenes'}
terp_dict = {}
for i, terp in enumerate(terpenes):
print(i)
temp_cann = t_dict_keys[:]
#terp_dict.setdefault('name', []).append(names[i])
for ta in terp:
for c in terp_strs:
has_str, num = find_string(ta, c)
if has_str:
idx = list(terp_strs).index(c)
print('found', c, ta)
if c in terp_conv_dict:
terp_dict.setdefault(terp_conv_dict[c], []).append(num)
temp_cann.remove(terp_conv_dict[c])
else:
terp_dict.setdefault(c, []).append(num)
temp_cann.remove(c)
break
if len(temp_cann) > 0:
print('didn\'t scrape:', temp_cann)
for t in temp_cann:
terp_dict.setdefault(t, []).append('')
cannabinoid_dict['name'] = names
for k in cannabinoid_dict:
print(k, len(cannabinoid_dict[k]))
for k in terp_dict:
print(k, len(terp_dict[k]))
cdf = pd.DataFrame(cannabinoid_dict)
tdf = pd.DataFrame(terp_dict)
total_df = cdf.merge(tdf, left_index=True, right_index=True)
return total_df
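
# Hypothetical mini-run of parse_raw_scrape(): the rows below are hand-written to
# look like the scraped '<value> <analyte>' strings, not actual lab results.
def _parse_raw_scrape_example():
    cannabinoids = [['18.2 THC-A', '0.4 CBD', '19.1 Activated Total']]
    terpenes = [['0.8 Beta Pinene', '1.2 TERPENE-TOTAL']]
    names = ['fakestrain']
    df = parse_raw_scrape(cannabinoids, terpenes, names)
    # one row; e.g. df.loc[0, 'thca'] == '18.2' and df.loc[0, 'total_terpenes'] == '1.2',
    # with '' for every analyte that was not reported
    return df
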
def find_string(search_str, str_to_find='THC-A', trail=False):
if search_str.find('8-THC') != -1:
return 0, 0
# if search_str.find('< 0.01 TERPENE-TOTAL') != -1:
# return 1, 0
if trail:
find_str = '.*' + str_to_find + '.*'
else:
find_str = '.*' + str_to_find + '$'
has_str = 0
res = re.search(find_str, search_str, re.IGNORECASE)
if res:
num = re.search('[\d\.]*', search_str).group(0)
        if re.search(r'<\s*0\.01', search_str) is not None:
            return 1, 0
return 1, num
return 0, 0
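
# Small behavioural sketch for find_string(); the inputs are invented examples of
# the '<value> <analyte>' strings seen on the result pages.
def _find_string_example():
    assert find_string('18.2 THC-A', 'thc-a', trail=False) == (1, '18.2')
    assert find_string('0.4 CBD', 'thc-a', trail=False) == (0, 0)
    # "< 0.01" measurements are treated as present but zero
    assert find_string('< 0.01 CBD', 'cbd', trail=False) == (1, 0)
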
def check_for_string(cannabinoids, str_to_find='THC-A', trail=True):
if trail:
find_str = '.*' + str_to_find + '.*'
else:
find_str = '.*' + str_to_find
#c = [' '.join(r) for r in cannabinoids]
has_str = []
for c in cannabinoids:
has_str_val = 0
for j in c:
res = re.search(find_str, j, re.IGNORECASE)
if res:
has_str_val = 1
break
has_str.append(has_str_val)
return has_str
def check_if_fields_present():
trail = [0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1]
cannabinoid_strs = ['thc-a', 'thc', 'cbn', 'thc total', 'thc-total', 'cbd-a', 'cbd', 'cbd-total', 'cbd total', 'cbg', 'cbc', 'activated total', 'activated', 'active']
for t, c in zip(trail, cannabinoid_strs):
has_str = check_for_string(cannabinoids, c, t)
print(c, np.mean(has_str))
def stuff():
testdf = pd.DataFrame({'name':names})
testdf['name'].value_counts()[testdf['name'].value_counts() > 1]
def clean_flow_df(df, clean_names=None):
'''
converts strings with % into floats
'''
new_df = df.copy()
for c in ['activated', 'thc', 'cbd']:
new_df[c] = new_df[c].apply(lambda x: x.strip('%'))
        new_df.loc[new_df[c] == '< 0.01', c] = 0  # .loc avoids pandas' chained-assignment pitfall
new_df[c] = new_df[c].astype('float64')
if clean_names is not None:
new_df['clean_name'] = new_df['name'].apply(clean_a_name)
new_df['im_name'] = [x['clean_name'] + x['link'].split('/')[-1] + '.jpg' for i, x in new_df.iterrows()]
#for i, n in enumerate(clean_names):
# new_df.set_value(i, 'im_name', n + df.iloc[i]['link'].split('/')[-1] + '.jpg')
return new_df
def clean_a_name(name_str):
clean_name = re.sub('/', '-', name_str)
clean_name = re.sub('[ + ' + string.punctuation + '\s]+', '', clean_name).lower()
return clean_name
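
# Quick sanity-check sketch for clean_a_name(); the strain names are invented.
def _clean_a_name_example():
    assert clean_a_name('Blue Dream #5') == 'bluedream5'
    assert clean_a_name('AC/DC Strain') == 'acdcstrain'
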
def match_up_leafly_names(nameset):
''' checks how many names roughly match in the leafly database and
analytical360 db
    args: nameset -- set of names found from analytical360 to match with
leafly strain names
'''
nameset = nameset.copy()
nameset = [clean_a_name(n) for n in nameset]
strains = sl.load_strain_list()
strain_clean_names = [clean_a_name(n.split('/')[-1]) for n in strains]
# to get short names:
short_names = []
for s in strain_clean_names:
if len(s) < 4:
short_names.append(s)
skip_set = set(['ice', 'avi', 'ash', 'haze', 'thai', 'wsu', 'goo', 'or', 'ogkush', 'goat', 'flo', 'fireog', 'bsc', 'b4'])
matches = []
# translation from some leafly strain names to specific analytical360 names
trans_dict = {'k1':'kk1',
'j1':'j1ridgewayg'}
for s in strain_clean_names:
# these names match too much, so we need to look for exact matches for now
if s in skip_set:
for f in nameset:
if f == s:
matches.append((s, f))
break # don't need to look through the rest
# of the nameset if this is true
else:
for f in nameset:
if f.find(s) != -1 and f.find(s + 'x') == -1:
matches.append((s, f))
return matches
# leafly_df = dp.load_data()
# leafly_df['clean_name'] = leafly_df['product'].apply(clean_a_name)
if __name__ == "__main__":
# attempt using selenium
# driver = setup_driver()
# driver.get('http://analytical360.com/testresults')
# links = get_links_selenium(driver)
# using requests: finding it hard to get all the correct entries
scrape_full_site = False
if scrape_full_site:
res = requests.get(MAIN_URL)
soup = bs(res.content, 'lxml')
rows = check_rows(res)
flow_df = get_flower_df(rows)
flow_df.to_pickle('analytical360/flow_df.pk')
cannabinoids, terpenes, im_sources, no_imgs, names, clean_names = scrape_site(flow_df)
save_raw_scrape(cannabinoids, terpenes, no_imgs, im_sources, names, clean_names)
else:
flow_df = | pd.read_pickle('analytical360/flow_df.pk') | pandas.read_pickle |
import pandas as pd
import numpy as np
import matplotlib.pyplot as pl
import os
from scipy import stats
from tqdm import tqdm
import mdtraj as md
########################################################
def get_3drobot_native(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
energy_native = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
energy_native.append(df['loss'].values[0])
energy_native = np.array(energy_native)
print(energy_native, np.mean(energy_native), np.min(energy_native), np.max(energy_native), np.std(energy_native))
def plot_3drobot(data_flag):
root_dir = '/home/hyang/bio/erf/data/decoys/3DRobot_set'
# pdb_list = pd.read_csv('pdb_local_rot.txt')['pdb'].values
# pdb_list = pd.read_csv('pdb_profile_diff.txt')['pdb'].values
# pdb_list = pd.read_csv(f'{root_dir}/pdb_profile_diff_match.txt')['pdb'].values
pdb_list = pd.read_csv(f'{root_dir}/pdb_no_missing_residue.csv')['pdb'].values
# data_flag = 'exp005_v2'
# data_flag = 'exp5'
# data_flag = 'exp6'
# data_flag = 'exp12'
# data_flag = 'exp14'
# data_flag = 'exp17'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp50'
# data_flag = 'exp50_relax'
# data_flag = 'exp49'
# data_flag = 'exp49_relax'
# data_flag = 'exp54'
# data_flag = 'exp61'
# data_flag = 'rosetta'
# data_flag = 'rosetta_relax'
# data_flag = 'rosetta_cen'
# if not os.path.exists(f'{root_dir}/fig_3drobot_{data_flag}'):
# os.system(f'mkdir -p {root_dir}/fig_3drobot_{data_flag}')
correct = 0
rank = []
for pdb_id in pdb_list:
df = pd.read_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv')
decoy_name = df['NAME'].values
assert(decoy_name[0] == 'native.pdb')
ind = (df['loss'] != 999)
loss = df['loss'][ind].values
rmsd = df['RMSD'][ind].values
if np.argmin(loss) == 0:
correct += 1
num = np.arange(loss.shape[0]) + 1
rank_i = num[np.argsort(loss) == 0][0]
rank.append(rank_i)
if rank_i > 1:
print(pdb_id, rmsd[np.argmin(loss)])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
# pl.savefig(f'{root_dir}/fig_3drobot_{data_flag}/{pdb_id}_score.pdf')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
print(rank)
fig = pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rank.pdf')
pl.close(fig)
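
# Aside (illustrative only): the rank bookkeeping above boils down to "where does the
# native structure land when decoys are sorted by energy". A standalone version with
# made-up energies:
def _native_rank_example():
    import numpy as np
    energies = np.array([2.0, 1.5, 2.8, 4.0])  # index 0 is the native structure
    order = np.argsort(energies)               # indices from lowest to highest energy
    rank_of_native = np.where(order == 0)[0][0] + 1
    return rank_of_native                      # 2: exactly one decoy scores below the native
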
########################################################
def plot_casp11_loss():
# pdb_list = pd.read_csv('pdb_list_new.txt')['pdb'].values
pdb_list = pd.read_csv('pdb_no_need_copy_native.txt')['pdb'].values
flist = pd.read_csv('list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
df_tm = pd.read_csv('casp11_decoy.csv')
tm_score_dict = {x: y for x, y in zip(df_tm['Target'], df_tm['Decoys'])}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp15'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
data_flag = 'exp61'
if not os.path.exists(f'fig_casp11_{data_flag}'):
os.system(f'mkdir fig_casp11_{data_flag}')
correct = 0
rank = []
tm_score = []
for pdb_id in pdb_list:
data_path = f'data_casp11_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
tm_score.append(tm_score_dict[pdb_id])
loss = df['loss'].values
num = np.arange(loss.shape[0])
i = (decoy_name == f'{pdb_id}.native.pdb')
if num[i] == np.argmin(loss):
# print(num.shape[0] - num[i])
correct += 1
rank.append(num[np.argsort(loss) == num[i]][0] + 1)
fig = pl.figure()
pl.plot(num, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([num[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([num[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([num[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
pl.xlabel('num')
pl.ylabel('energy score')
pl.savefig(f'fig_casp11_{data_flag}/{pdb_id}_score.pdf')
pl.close(fig)
rank = np.array(rank)
tm_score = np.array(tm_score)
pl.figure()
pl.hist(rank, bins=np.arange(21)+0.5)
# pl.figure()
# pl.plot(tm_score, rank, 'bo')
a = (rank <= 5)
b = (rank > 5)
pl.figure()
pl.hist(tm_score[a], bins=np.arange(9)*0.1+0.2, label='rank=1 or 2', histtype='stepfilled')
pl.hist(tm_score[b], bins=np.arange(9)*0.1+0.2, label='rank>10', histtype='step')
pl.xlabel('Best TM-score in decoys')
pl.ylabel('Num')
pl.legend(loc=2)
########################################################
def plot_casp11(data_flag):
# plot RMSD vs. loss for CASP11
root_dir = '/home/hyang/bio/erf/data/decoys/casp11'
pdb_list = pd.read_csv(f'{root_dir}/casp11_rmsd/casp11_rmsd.txt')['pdb']
flist = pd.read_csv(f'{root_dir}/list_casp11.txt')['fname'].values
casp_dict = {x.split('#')[1][:5]: x.split('_')[0] for x in flist}
# data_flag = 'exp3_v2'
# data_flag = 'exp5'
# data_flag = 'exp7'
# data_flag = 'exp13'
# data_flag = 'exp21'
# data_flag = 'exp24'
# data_flag = 'exp29'
# data_flag = 'exp33'
# data_flag = 'exp35'
# data_flag = 'exp61'
for pdb_id in pdb_list:
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['NAME'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
df2 = pd.read_csv(f'{root_dir}/casp11_rmsd/{pdb_id}_rmsd.csv')
rmsd = df2['rmsd'].values
assert(rmsd.shape[0] == loss.shape[0])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
i = (decoy_name == f'{pdb_id}.Zhang-Server_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'g^', markersize=12, label='zhang')
i = (decoy_name == f'{pdb_id}.QUARK_model1.pdb')
pl.plot([rmsd[i]], [loss[i]], 'c*', markersize=12, label='quark')
i = (decoy_name == f'{pdb_id}.native.pdb')
pl.plot([rmsd[i]], [loss[i]], 'rs', markersize=12, label='native')
pdb_id = casp_dict[pdb_id]
pl.title(f'{pdb_id}')
a = max(12, rmsd.max())
pl.xlim(-1, a)
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/rmsd_{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def prepare_casp13():
# prepare casp13 decoys
df = pd.read_csv('flist.txt')
pdb_count = df['pdb'].value_counts()
pdb_list = []
for pdb, count in zip(pdb_count.index, pdb_count.values):
if count > 1:
pdb_list.append(pdb)
else:
pdb_list.append(pdb + '-D1')
pdb_list = np.array(pdb_list)
pdb_list.sort()
df2 = pd.DataFrame({'pdb': pdb_list})
df2.to_csv('pdb_list.txt', index=False)
def plot_casp13(data_flag, casp_id='casp13', casp_score_type='GDT_TS'):
# plot results of casp13 / casp14 decoys
root_dir = f'/home/hyang/bio/erf/data/decoys/{casp_id}'
if casp_id == 'casp13':
pdb_list = pd.read_csv(f'{root_dir}/pdb_list_domain.txt')['pdb'].values
pdb_ids = [x.split('-')[0] for x in pdb_list]
else:
pdb_list = pd.read_csv(f'{root_dir}/pdb_list.txt')['pdb'].values
pdb_ids = pdb_list
# data_flag = 'exp61'
# if not os.path.exists(f'fig_casp13_{data_flag}'):
# os.system(f'mkdir fig_casp13_{data_flag}')
pearsonr_list = []
pearsonp_list = []
used_pdb_list = []
casp_score_max = []
casp_score_min = []
rank_1 = 0
for pdb_id, pdb_casp_name in zip(pdb_ids, pdb_list):
data_path = f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_decoy_loss.csv'
if not os.path.exists(data_path):
continue
df = pd.read_csv(data_path)
decoy_name = df['pdb'].values
# ind = (df['loss'] != 999)
# loss = df['loss'][ind].values
loss = df['loss'].values
if not os.path.exists(f'{root_dir}/casp_score/{pdb_casp_name}.txt'):
continue
df2 = pd.read_csv(f'{root_dir}/casp_score/{pdb_casp_name}.txt', sep='\s+')
casp_model = df2['Model']
if (casp_id == 'casp13') & (pdb_casp_name.endswith('-D1')):
casp_model = df2['Model'].apply(lambda x: x[:-3])
if casp_score_type == 'GDT_TS':
casp_score_data = df2['GDT_TS'].values
elif casp_score_type == 'RMSD_CA':
casp_score_data = df2['RMS_CA'].values
else:
raise ValueError('casp score type should be GDT_TS / RMSD_CA')
casp_dict = {x: y for x, y in zip(casp_model, casp_score_data)}
casp_score = []
for x in decoy_name:
try:
casp_score.append(casp_dict[x])
except KeyError:
casp_score.append(-1)
casp_score = np.array(casp_score)
idx = (casp_score > 0) & (loss > 0)
casp_score_good = casp_score[idx]
loss_good = loss[idx]
decoy_name_good = decoy_name[idx]
# if np.argmax(casp_score_good) == np.argmin(loss_good):
# rank_1 += 1
top5_idx = np.argpartition(loss_good, 5)[:5]
best_gdt_idx = np.argmax(casp_score_good)
if best_gdt_idx in top5_idx:
print(best_gdt_idx, top5_idx)
rank_1 += 1
print(pdb_casp_name, decoy_name_good[best_gdt_idx], decoy_name_good[top5_idx])
pearsonr = stats.pearsonr(casp_score_good, loss_good)
pearsonr_list.append(pearsonr[0])
pearsonp_list.append(pearsonr[1])
used_pdb_list.append(pdb_id)
casp_score_max.append(casp_score[idx].max())
casp_score_min.append(casp_score[idx].min())
df_i = pd.DataFrame({'pdb': decoy_name_good, casp_score_type: casp_score_good, 'energy': loss_good})
df_i.to_csv(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_casp_score_{casp_score_type}_energy.csv', index=False)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score[idx], loss[idx], 'bo')
pl.title(f'{pdb_id}')
# a = max(12, rmsd.max())
# pl.xlim(-1, a)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}.pdf')
pl.close(fig)
fig = pl.figure()
# pl.plot(100.0, loss[0], 'rs')
pl.plot(casp_score_good, loss_good, 'bo')
for i in range(loss_good.shape[0]):
pl.text(casp_score_good[i], loss_good[i], decoy_name_good[i].split('S')[1][:-3], fontsize=6)
pl.title(f'{pdb_id}')
y_min = loss_good.min()
y_max = loss_good.max()
pl.ylim(y_min - (y_max - y_min) * 0.01, y_min + (y_max - y_min) * 0.15)
# a = max(12, rmsd.max())
pl.xlim(0, 100)
pl.xlabel(f'CASP {casp_score_type}')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/{pdb_id}_{casp_score_type}_zoom.pdf')
pl.close(fig)
print(f'rank_1 = {rank_1}')
df = pd.DataFrame({'pdb': used_pdb_list, 'pearsonr': pearsonr_list, 'pearsonp': pearsonp_list,
'casp_score_max': casp_score_max, 'casp_score_min': casp_score_min})
df.to_csv(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.txt', index=False)
fig = pl.figure()
if casp_score_type == 'GDT_TS':
pearsonr_bins = np.arange(11)*0.1-1
elif casp_score_type == 'RMSD_CA':
pearsonr_bins = np.arange(11)*0.1
else:
raise ValueError('casp score type should be gdt_ts / rmsd_ca')
pl.hist(df['pearsonr'], bins=pearsonr_bins)
pl.xlabel(r'Pearson $\rho$')
pl.ylabel('N')
pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_{casp_score_type}.pdf')
pl.close(fig)
# casp_score_max = df['casp_score_max'].values
# fig = pl.figure()
# idx = (casp_score_max >= 50)
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_1.pdf')
# pl.close(fig)
# fig = pl.figure()
# idx = (casp_score_max < 50)
# pl.xlabel(r'Pearson $\rho$')
# pl.ylabel('N')
# pl.hist(df['pearsonr'][idx], bins=np.arange(11)*0.1-1)
# pl.savefig(f'{root_dir}/decoy_loss_{data_flag}/pearsonr_2.pdf')
# pl.close(fig)
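
# Illustrative helper (not called above): condenses the "is the best-GDT model among the
# five lowest-energy decoys" check and the Pearson correlation into one function, using
# invented scores.
def _top5_and_pearson_example():
    import numpy as np
    from scipy import stats
    energy = np.array([5.0, 1.0, 2.0, 3.0, 4.0, 6.0])
    gdt = np.array([40.0, 80.0, 70.0, 60.0, 50.0, 30.0])
    top5_idx = np.argpartition(energy, 5)[:5]  # indices of the 5 lowest-energy decoys
    hit = np.argmax(gdt) in top5_idx           # True here: best GDT is among the top 5
    rho, pval = stats.pearsonr(gdt, energy)    # rho == -1.0 for this toy data
    return hit, rho, pval
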
########################################################
def plot_ru(decoy_set, decoy_loss_dir):
# decoy_set = '4state_reduced'
# decoy_set = 'lattice_ssfit'
# decoy_set = 'lmds'
# decoy_set = 'lmds_v2'
root_dir = f'/home/hyang/bio/erf/data/decoys/rudecoy/multiple/{decoy_set}'
# decoy_loss_dir = 'exp61'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
pdb_list = df['pdb'].values
loss = df['loss'].values
rmsd = df['score'].values
native_name = f'{pdb_id}.pdb'
i_native = np.arange(pdb_list.shape[0])[(pdb_list == native_name)]
i = np.argmin(loss)
print(i_native, i, pdb_list[i])
fig = pl.figure()
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[i_native]], [loss[i_native]], 'rs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def plot_md_trj(decoy_loss_dir):
# plot the MD trajectory data
root_dir = f'/home/hyang/bio/openmm/data'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pdb = df['pdb'].values
# plot RMSD vs. Energy
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.plot([rmsd[0]], [loss[0]], 'gs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'g^', markersize=12)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'g.', label='md_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'c.', label='md_T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
pl.subplot(211)
pl.plot(rmsd[idx == 1], 'g', label='md_T300')
pl.plot(rmsd[idx == 2], 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss[idx == 1], 'g')
pl.plot(loss[idx == 2], 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
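
# Aside: the T300/T500 bookkeeping used above (and again in the later plotting helpers)
# can be written without the explicit loop; a sketch with made-up decoy file names.
def _label_md_frames_example():
    import numpy as np
    import pandas as pd
    pdb = pd.Series(['native.pdb', 'T300_0001.pdb', 'T500_0001.pdb', 'T300_0002.pdb'])
    idx = np.where(pdb.str.startswith('T300'), 1,
                   np.where(pdb.str.startswith('T500'), 2, 0))
    return idx  # array([0, 1, 2, 1])
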
def plot_md_trj2():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/cullpdb_val_deep/'
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
# pdb_id_list = ['3KXT']
for pdb_id in pdb_id_list:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_T300_energy_rmsd.csv')
loss1 = df1['energy'].values
rmsd1 = df1['rmsd'].values
df2 = pd.read_csv(f'{root_dir}/{pdb_id}_T500_energy_rmsd.csv')
loss2 = df2['energy'].values
rmsd2 = df2['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'g.', label='T300')
pl.plot(rmsd2, loss2, 'c.', label='T500')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'g', label='md_T300')
pl.plot(rmsd2, 'c', label='md_T500')
pl.ylabel('RMSD')
pl.legend()
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'g')
pl.plot(loss2, 'c')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_md_trj3():
# plot the MD trajectory data
root_dir = '/home/hyang/bio/erf/data/decoys/md/BPTI'
df = pd.read_csv(f'{root_dir}/BPTI_energy_rmsd.csv')
loss1 = df['energy'].values
rmsd1 = df['rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot(rmsd1, loss1, 'g.', markersize=0.01)
pl.title('BPTI')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/BPTI_score.jpg')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'b.', markersize=0.01)
pl.ylabel('RMSD')
pl.title('BPTI')
pl.subplot(212)
pl.plot(loss1, 'g.', markersize=0.01)
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/BPTI_rmsd_energy_time.jpg')
pl.close(fig)
def plot_bd_trj():
# plot the mixed Langevin dynamics trajectory data
root_dir = '/home/hyang/bio/erf/data/fold/exp205dynamics_val_deep501/'
pdb_selected = pd.read_csv(f'/home/hyang/bio/erf/data/fold/cullpdb_val_deep/sample.csv')['pdb'].values
pdb_selected = np.append(np.array(['1BPI_A']), pdb_selected)
for pdb_id in pdb_selected:
df1 = pd.read_csv(f'{root_dir}/{pdb_id}_energy.csv')
loss1 = df1['sample_energy'].values
rmsd1 = df1['sample_rmsd'].values
# plot RMSD vs. Energy
fig = pl.figure()
pl.plot([rmsd1[0]], [loss1[0]], 'gs', markersize=12)
pl.plot(rmsd1, loss1, 'go')
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
# plot RMSD vs. time & Energy vs. time
fig = pl.figure()
pl.subplot(211)
pl.plot(rmsd1, 'go')
pl.ylabel('RMSD')
pl.title(f'{pdb_id}')
pl.subplot(212)
pl.plot(loss1, 'bs')
pl.ylabel('energy score')
pl.xlabel('time-steps')
pl.savefig(f'{root_dir}/{pdb_id}_rmsd_energy_time.pdf')
pl.close(fig)
def plot_openmm2():
root_dir = f'/home/hyang/bio/openmm/data'
decoy_loss_dir = 'exp63_65'
if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
fig = pl.figure()
df = pd.read_csv(f'{root_dir}/exp61/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values * 15.0
rmsd = df['rmsd'].values
pl.plot(rmsd, loss, 'g.')
pl.plot([rmsd[0]], [loss[0]], 'g^', markersize=12)
df = pd.read_csv(f'{root_dir}/exp63/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot(rmsd, loss, 'bo')
pl.plot([rmsd[0]], [loss[0]], 'bs', markersize=12)
df = pd.read_csv(f'{root_dir}/exp65/{pdb_id}_decoy_loss.csv')
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot(rmsd, loss, 'c.')
pl.plot([rmsd[0]], [loss[0]], 'cs', markersize=12)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{decoy_loss_dir}/{pdb_id}_score.pdf')
pl.close(fig)
########################################################
def plot_make_decoy_relax():
# plot the relaxed decoys
root_dir = f'/Users/Plover/study/bio/play/erf/data/fold/exp61anneal_val_deep_loop_relax'
# if not os.path.exists(f'{root_dir}/{decoy_loss_dir}'):
# os.system(f'mkdir -p {root_dir}/{decoy_loss_dir}')
pdb_id_list = pd.read_csv(f'{root_dir}/sample.csv')['pdb'].values
for pdb_id in pdb_id_list:
df = pd.read_csv(f'{root_dir}/{pdb_id}_energy.csv')
# pdb_list = df['pdb'].values
sample_energy = df['sample_energy'].values
sample_energy_relaxed = df['sample_energy_relaxed'].values
sample_rmsd = df['sample_rmsd'].values
sample_relaxed_rmsd = df['sample_relaxed_rmsd'].values
fig = pl.figure()
pl.plot(sample_rmsd, sample_energy, 'g.', label='sample')
pl.plot(sample_relaxed_rmsd, sample_energy_relaxed, 'b.', label='sample_relaxed')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ro', markersize=10)
pl.plot([sample_relaxed_rmsd[0]], [sample_energy_relaxed[0]], 'rs', markersize=10)
pl.legend()
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
def plot_make_decoy(data_flag):
# plot the decoys without relax
root_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}anneal_val_deep_loop'
pdb_id_list = pd.read_csv(f'{root_dir}/../cullpdb_val_deep/sample.csv')['pdb'].values
for pdb_id in pdb_id_list:
if not os.path.exists(f'{root_dir}/{pdb_id}_energy.csv'):
continue
df = pd.read_csv(f'{root_dir}/{pdb_id}_energy.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
fig = pl.figure()
pl.plot(sample_rmsd, sample_energy, 'g.', label='sample')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ro', markersize=10)
pl.legend()
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.pdf')
pl.close(fig)
def plot_openmm_bd_loop_decoy(data_flag, plot_frag=False, plot_loop=False, flag='', pdb_all=False):
# plot decoys from openmm / loop / Brownian dynamics together
md_root_dir = f'/home/hyang/bio/openmm/data/{data_flag}'
loop_root_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}anneal_val_deep_loop'
bd_root_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}dynamics_val_deep{flag}'
# plot_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}_md_bd_loop{flag}_plot'
plot_dir = bd_root_dir
frag_root_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}frag_deep/'
if not os.path.exists(f'{plot_dir}'):
os.system(f'mkdir -p {plot_dir}')
pdb_id_list = ['3KXT']
if pdb_all:
pdb_id_list = pd.read_csv(f'{md_root_dir}/../list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
fig = pl.figure()
df = pd.read_csv(f'{md_root_dir}/{pdb_id}_decoy_loss.csv')
pdb = df['pdb'].values
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot([rmsd[0]], [loss[0]], 'gs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'g^', markersize=9)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'g.', label='md_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'c.', label='md_T500')
if plot_loop:
df = pd.read_csv(f'{loop_root_dir}/{pdb_id}_A_energy.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'b.', label='loop')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'bo', markersize=10)
df = pd.read_csv(f'{bd_root_dir}/{pdb_id}_A_energy.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'r.', label='Brownian')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ro', markersize=10)
if plot_frag:
df = pd.read_csv(f'{frag_root_dir}/{pdb_id}_A_energy_all.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'm.', label='Fragment')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ro', markersize=10)
pl.legend()
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{plot_dir}/{pdb_id}_score.pdf')
pl.close(fig)
def plot_openmm_loop_decoy2():
# plot two experiments together
md1_root_dir = f'/Users/Plover/study/bio/play/erf/data/fold/openmm/exp65'
loop1_root_dir = f'/Users/Plover/study/bio/play/erf/data/fold/exp65anneal_val_deep_loop'
md2_root_dir = f'/Users/Plover/study/bio/play/erf/data/fold/openmm/exp63'
loop2_root_dir = f'/Users/Plover/study/bio/play/erf/data/fold/exp63anneal_val_deep_loop'
plot_dir = f'/Users/Plover/study/bio/play/erf/data/fold/exp63_65_md_loop_plot'
pdb_id_list = pd.read_csv(f'{md1_root_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb_id in pdb_id_list:
fig = pl.figure()
df = pd.read_csv(f'{md1_root_dir}/{pdb_id}_decoy_loss.csv')
pdb = df['pdb'].values
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot([rmsd[0]], [loss[0]], 'gs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'g^', markersize=12)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'g.', label='md1_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'c.', label='md1_T500')
df = pd.read_csv(f'{md2_root_dir}/{pdb_id}_decoy_loss.csv')
pdb = df['pdb'].values
idx = np.zeros(pdb.shape)
for i in range(pdb.shape[0]):
if pdb[i].startswith('T300'):
idx[i] = 1
elif pdb[i].startswith('T500'):
idx[i] = 2
loss = df['loss'].values
rmsd = df['rmsd'].values
pl.plot([rmsd[0]], [loss[0]], 'rs', markersize=12)
pl.plot([rmsd[1]], [loss[1]], 'r^', markersize=12)
pl.plot(rmsd[idx == 1], loss[idx == 1], 'r.', label='md2_T300')
pl.plot(rmsd[idx == 2], loss[idx == 2], 'm.', label='md2_T500')
df = pd.read_csv(f'{loop1_root_dir}/{pdb_id}_A_energy.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'b.', label='loop1')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'bo', markersize=10)
df = pd.read_csv(f'{loop2_root_dir}/{pdb_id}_A_energy.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'k.', label='loop2')
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ko', markersize=10)
pl.legend()
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{plot_dir}/{pdb_id}_score.pdf')
pl.close(fig)
def plot_fold_decoy():
# plot folding decoys from annealing
root_dir = f'/home/hyang/bio/erf/data/fold/exp63anneal_val_deep_fd3'
pdb_id = '3SOL_A'
fig = pl.figure()
for i in range(2):
if not os.path.exists(f'{root_dir}/{pdb_id}_energy_{i}.csv'):
continue
df = pd.read_csv(f'{root_dir}/{pdb_id}_energy_{i}.csv')
sample_energy = df['sample_energy'].values
sample_rmsd = df['sample_rmsd'].values
pl.plot(sample_rmsd, sample_energy, 'g.', markersize=3)
pl.plot([sample_rmsd[0]], [sample_energy[0]], 'ro', markersize=10)
pl.title(f'{pdb_id}')
pl.xlabel('RMSD')
pl.ylabel('energy score')
pl.savefig(f'{root_dir}/{pdb_id}_score.png')
pl.close(fig)
def plot_decoy_seq(data_flag):
# plot decoy seq
root_dir = f'/home/hyang/bio/erf/data/decoys/decoys_seq/{data_flag}'
# decoy_flag_list = ['random', 'shuffle', 'type2', 'type9', 'type2LD']
decoy_flag_list = ['random', 'shuffle', 'type9']
df = pd.read_csv(f'{root_dir}/hhsuite_CB_cullpdb_val_no_missing_residue_sample_loss.csv')
pdb_list = df['pdb'].values
native_energy = {x: y for x, y in zip(df['pdb'], df['loss_native'])}
pl.figure()
for decoy_flag in decoy_flag_list:
rank = []
delta_loss = np.array([])
for pdb in pdb_list:
loss = pd.read_csv(f'{root_dir}/{pdb}_{decoy_flag}_loss.csv')['loss'].values
loss_native = native_energy[pdb]
rank.append(loss[loss < loss_native].shape[0] + 1)
if loss_native > loss.min():
print(decoy_flag, pdb, loss_native, loss.min())
delta_loss = np.append(delta_loss, loss - loss_native)
rank = np.array(rank)
# plot the energy gap
pl.hist(delta_loss, linewidth=2, label=decoy_flag, histtype='step')
pl.xlabel('E(decoy) - E(native)', fontsize=14)
pl.ylabel('Num', fontsize=14)
pl.legend()
pl.savefig(f'{root_dir}/delta_loss_{data_flag}.pdf')
pl.close()
    # plot the type2LD result separately (note: delta_loss here still holds the last decoy set from the loop above)
pl.figure()
pl.hist(delta_loss, label='type2LD', histtype='step')
pl.xlabel('E(decoy) - E(native)')
pl.ylabel('Num')
pl.legend()
pl.savefig(f'{root_dir}/delta_loss_type2LD.pdf')
pl.close()
def plot_polyAA(data_flag):
# plot polyAA decoys
root_dir = f'/home/hyang/bio/erf/data/decoys/decoys_seq/{data_flag}'
amino_acids = pd.read_csv(f'{root_dir}/../../../amino_acids.csv')
decoy_flag = 'polyAA'
df = pd.read_csv(f'{root_dir}/hhsuite_CB_cullpdb_val_no_missing_residue_sample_loss.csv')
pdb_list = df['pdb'].values
loss_all = np.zeros((len(pdb_list), 20))
for i, pdb in enumerate(pdb_list):
loss = pd.read_csv(f'{root_dir}/{pdb}_{decoy_flag}_loss.csv')['loss'].values
loss_all[i] = loss
for i in range(20):
pl.figure()
pl.plot(df['loss_native'], loss_all[:, i], 'bo')
pl.title('poly-' + amino_acids.AA3C.values[i])
pl.xlabel('E(native)')
pl.ylabel('E(decoy)')
pl.plot(df['loss_native'], df['loss_native'], ls='--', color='g')
pl.savefig(f'{root_dir}/{data_flag}_{amino_acids.AA3C.values[i]}.pdf')
pl.close()
# plot all 20 polyAA figures in one big figure
fig = pl.figure(figsize=(10, 8))
# ax0 = fig.add_subplot(111) # The big subplot
# ax0.set(xticklabels=[], yticklabels=[]) # remove the tick labels
# ax0.tick_params(left=False, bottom=False) # remove the ticks
for i in range(20):
ax = fig.add_subplot(5, 4, i+1)
ax.plot(df['loss_native'], loss_all[:, i], 'bo', markersize=6)
pl.plot(df['loss_native'], df['loss_native'], ls='-', color='g')
ax.set(xticklabels=[], yticklabels=[]) # remove the tick labels
pl.title('poly-' + amino_acids.AA3C.values[i])
# ax0.set_xlabel('E(native)', fontsize=14)
# ax0.set_ylabel('E(decoy)', fontsize=14)
fig.text(0.5, 0.05, 'E(native)', fontsize=14, ha='center')
fig.text(0.05, 0.5, 'E(decoy)', fontsize=14, va='center', rotation='vertical')
pl.subplots_adjust(bottom=0.1, left=0.1, top=0.9, right=0.95, wspace=0.3, hspace=0.3)
pl.savefig(f'{root_dir}/{data_flag}_poly_AA_all.pdf')
pl.close()
def plot_bd_md_rmsf(data_flag, flag='', pdb_all=False):
bd_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}dynamics_val_deep{flag}'
md_dir = '/home/hyang/bio/openmm/data/'
pdb_list = ['3KXT']
if pdb_all:
pdb_list = pd.read_csv(f'{md_dir}/list', header=None, names=['pdb'])['pdb'].values
for pdb in tqdm(pdb_list):
trj_md = md.load(f'{md_dir}/{pdb}/production_T300.dcd',
top=f'{md_dir}/{pdb}/production_T300.pdb')
trj_md2 = md.load(f'{md_dir}/{pdb}/production2_T300.dcd',
top=f'{md_dir}/{pdb}/production_T300.pdb')
trj_bd = md.load(f'{bd_dir}/{pdb}_A_sample.pdb')
a = np.arange(trj_bd.xyz.shape[0])
idx = (a == 0) | (a > len(a)/2) # frame 0 is used for coordinates alignments
trj_bd_eq = trj_bd[idx]
rmsd_bd = md.rmsd(trj_bd, trj_bd, frame=0)
rmsf_bd = md.rmsf(trj_bd, trj_bd, frame=0)
rmsf_bd_eq = md.rmsf(trj_bd_eq, trj_bd_eq, frame=0)
def get_rmsd_rmsf_md(trj_md):
trj_md_pro = trj_md.remove_solvent()
top = trj_md_pro.topology
# use CA to calculate RMSD
ca_idx = top.select("name == 'CA'")
xyz = trj_md_pro.xyz
xyz_ca = xyz[:, ca_idx]
# use CB to calculate RMSD
ca_gly = top.select('(name == CA) and (resname == GLY)')
cb = top.select('name == CB')
beads = np.append(ca_gly, cb)
beads = np.sort(beads)
xyz_cb = xyz[:, beads]
b = np.arange(xyz_cb.shape[0])
idx = (b == 0) | (b > len(b)/2) # frame 0 is used for coordinates alignments
xyz_cb_eq = xyz_cb[idx]
t_ca = md.Trajectory(xyz=xyz_ca, topology=None)
rmsd_md_ca = md.rmsd(t_ca, t_ca, frame=0)
rmsf_md_ca = md.rmsf(t_ca, t_ca, frame=0)
t_cb = md.Trajectory(xyz=xyz_cb, topology=None)
rmsd_md_cb = md.rmsd(t_cb, t_cb, frame=0)
rmsf_md_cb = md.rmsf(t_cb, t_cb, frame=0)
t_cb_eq = md.Trajectory(xyz=xyz_cb_eq, topology=None)
# rmsd2_cb_eq = md.rmsd(t_cb_eq, t_cb_eq, frame=0)
rmsf_md_cb_eq = md.rmsf(t_cb_eq, t_cb_eq, frame=0)
return rmsd_md_ca, rmsd_md_cb, rmsf_md_ca, rmsf_md_cb, rmsf_md_cb_eq
rmsd_md_ca, rmsd_md_cb, rmsf_md_ca, rmsf_md_cb, rmsf_md_cb_eq = get_rmsd_rmsf_md(trj_md)
rmsd_md2_ca, rmsd_md2_cb, rmsf_md2_ca, rmsf_md2_cb, rmsf_md2_cb_eq = get_rmsd_rmsf_md(trj_md2)
fig = pl.figure()
pl.plot(rmsd_bd*10, label='erf')
pl.plot(rmsd_md_ca*10, label='MD_CA')
pl.plot(rmsd_md_cb*10, label='MD_CB')
pl.plot(rmsd_md2_ca*10, label='MD2_CA')
pl.plot(rmsd_md2_cb*10, label='MD2_CB')
pl.legend()
pl.title('RMSD')
pl.xlabel('steps')
pl.ylabel(r'RMSD [$\AA$]')
pl.savefig(f'{bd_dir}/{pdb}_A_rmsd.pdf')
pl.close(fig)
fig = pl.figure()
pl.plot(rmsf_bd*10, label='erf')
pl.plot(rmsf_bd_eq*10, label='erf_eq')
pl.plot(rmsf_md_ca*10, label='MD_CA')
pl.plot(rmsf_md_cb*10, label='MD_CB')
pl.plot(rmsf_md_cb_eq*10, label='MD_CB_eq')
pl.plot(rmsf_md2_ca*10, label='MD2_CA')
pl.plot(rmsf_md2_cb*10, label='MD2_CB')
pl.plot(rmsf_md2_cb_eq*10, label='MD2_CB_eq')
pl.legend()
pl.title('RMSF')
        pl.xlabel('Residue Number')
pl.ylabel(r'RMSF [$\AA$]')
pl.savefig(f'{bd_dir}/{pdb}_A_rmsf.pdf')
pl.close(fig)
fig = pl.figure()
pl.plot(rmsf_bd*10, label='erf')
pl.plot(rmsf_bd_eq*10, label='erf_eq')
pl.plot(rmsf_md2_cb*10, label='MD2_CB')
pl.plot(rmsf_md2_cb_eq*10, label='MD2_CB_eq')
pl.legend()
pl.title('RMSF')
        pl.xlabel('Residue Number')
pl.ylabel(r'RMSF [$\AA$]')
pl.savefig(f'{bd_dir}/{pdb}_A_rmsf_simple.pdf')
pl.close(fig)
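# Usage sketch for plot_bd_md_rmsf (the flag values are hypothetical; assumes the Brownian-dynamics samples
# under {data_flag}dynamics_val_deep{flag} and the OpenMM trajectories under md_dir already exist):
#   plot_bd_md_rmsf('exp61_', flag='', pdb_all=False)   # compare erf sampling with MD for 3KXT only
#   plot_bd_md_rmsf('exp61_', flag='', pdb_all=True)    # loop over every pdb listed in the MD directory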
def plot_bd_md_rmsf2(data_flag, flag_list, pdb_all=False, debug=False, b_factor=False):
md_dir = '/home/hyang/bio/openmm/data/'
    pdb_list = ['3KXT']
    pdb_list = ['1BPI']  # note: this overrides the previous line; keep whichever single test case is needed
if pdb_all:
pdb_list = pd.read_csv(f'{md_dir}/list', header=None, names=['pdb'])['pdb'].values
def get_rmsd_rmsf_md(trj_md):
trj_md_pro = trj_md.remove_solvent()
top = trj_md_pro.topology
# use CA to calculate RMSD
ca_idx = top.select("name == 'CA'")
xyz = trj_md_pro.xyz
xyz_ca = xyz[:, ca_idx]
# use CB to calculate RMSD
ca_gly = top.select('(name == CA) and (resname == GLY)')
cb = top.select('name == CB')
beads = np.append(ca_gly, cb)
beads = np.sort(beads)
xyz_cb = xyz[:, beads]
b = np.arange(xyz_cb.shape[0])
idx = (b == 0) | (b > len(b) / 2) # frame 0 is used for coordinates alignments
xyz_cb_eq = xyz_cb[idx]
t_ca = md.Trajectory(xyz=xyz_ca, topology=None)
rmsd_md_ca = md.rmsd(t_ca, t_ca, frame=0)
rmsf_md_ca = md.rmsf(t_ca, t_ca, frame=0)
t_cb = md.Trajectory(xyz=xyz_cb, topology=None)
rmsd_md_cb = md.rmsd(t_cb, t_cb, frame=0)
rmsf_md_cb = md.rmsf(t_cb, t_cb, frame=0)
t_cb_eq = md.Trajectory(xyz=xyz_cb_eq, topology=None)
# rmsd2_cb_eq = md.rmsd(t_cb_eq, t_cb_eq, frame=0)
rmsf_md_cb_eq = md.rmsf(t_cb_eq, t_cb_eq, frame=0)
return rmsd_md_ca, rmsd_md_cb, rmsf_md_ca, rmsf_md_cb, rmsf_md_cb_eq
for pdb in tqdm(pdb_list):
if debug:
trj_md = md.load(f'{md_dir}/{pdb}/production_T300.dcd',
top=f'{md_dir}/{pdb}/production_T300.pdb')
rmsd_md_ca, rmsd_md_cb, rmsf_md_ca, rmsf_md_cb, rmsf_md_cb_eq = get_rmsd_rmsf_md(trj_md)
trj_md2 = md.load(f'{md_dir}/{pdb}/production2_T300.dcd',
top=f'{md_dir}/{pdb}/production_T300.pdb')
rmsd_md2_ca, rmsd_md2_cb, rmsf_md2_ca, rmsf_md2_cb, rmsf_md2_cb_eq = get_rmsd_rmsf_md(trj_md2)
rmsf_delta = []
rmsf_eq_delta = []
rmsf_delta_ref = []
rmsf_eq_delta_ref = []
for flag in tqdm(flag_list):
bd_dir = f'/home/hyang/bio/erf/data/fold/{data_flag}dynamics_val_deep{flag}'
trj_bd = md.load(f'{bd_dir}/{pdb}_A_sample.pdb')
a = np.arange(trj_bd.xyz.shape[0])
idx = (a == 0) | (a > len(a) / 2) # frame 0 is used for coordinates alignments
trj_bd_eq = trj_bd[idx]
rmsd_bd = md.rmsd(trj_bd, trj_bd, frame=0)
rmsf_bd = md.rmsf(trj_bd, trj_bd, frame=0)
rmsf_bd_eq = md.rmsf(trj_bd_eq, trj_bd_eq, frame=0)
# save RMSF
df_rmsf = pd.DataFrame({'RMSF_bd': rmsf_bd, 'RMSF_bd_eq': rmsf_bd_eq,
'RMSF_CB': rmsf_md2_cb, 'RMSF_CB_eq': rmsf_md2_cb_eq})
df_rmsf.to_csv(f'{bd_dir}/{pdb}_{flag}_rmsf.csv', index=False)
            # compute RMSF differences between Brownian dynamics and MD
# rmsf_delta.append(np.sqrt(np.mean((rmsf_bd - rmsf_md2_cb)**2))) # root mean square of delta
# rmsf_eq_delta.append(np.sqrt(np.mean((rmsf_bd_eq - rmsf_md2_cb_eq)**2)))
rmsf_delta.append(np.mean(np.abs(rmsf_bd - rmsf_md2_cb)))
rmsf_eq_delta.append(np.mean(np.abs(rmsf_bd_eq - rmsf_md2_cb_eq)))
rmsf_delta_ref.append(np.mean(np.abs(rmsf_md2_cb - np.mean(rmsf_md2_cb))))
rmsf_eq_delta_ref.append(np.mean(np.abs(rmsf_md2_cb_eq - np.mean(rmsf_md2_cb_eq))))
fig = pl.figure()
pl.plot(rmsd_bd*10, label='erf')
if debug:
pl.plot(rmsd_md_ca*10, label='MD_CA')
pl.plot(rmsd_md_cb*10, label='MD_CB')
pl.plot(rmsd_md2_ca*10, label='MD2_CA')
pl.plot(rmsd_md2_cb*10, label='MD2_CB')
pl.legend()
pl.title('RMSD')
pl.xlabel('steps')
pl.ylabel(r'RMSD [$\AA$]')
pl.savefig(f'{bd_dir}/{pdb}_A_rmsd.pdf')
pl.close(fig)
if b_factor:
                df_b_factor = pd.read_csv(f'/home/hyang/bio/erf/data/fold/cullpdb_val_deep/{pdb}_A_bead.csv')
# ********************************************************************************** #
# #
#                   Project: FastClassAI workbench                                     #
# #
# Author: <NAME> #
# Contact: <EMAIL> #
# #
# This notebook is a part of Skin AanaliticAI development kit, created #
# for evaluation of public datasets used for skin cancer detection with #
# large number of AI models and data preparation pipelines. #
# #
# License: MIT #
# Copyright (C) 2021.01.30 <NAME> #
# https://opensource.org/licenses/MIT #
# #
# ********************************************************************************** #
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os # allow changing, and navigating files and folders,
import sys
import re # module to use regular expressions,
import glob # lists names in folders that match Unix shell patterns
import random # functions that use and generate random numbers
import cv2
import numpy as np # support for multi-dimensional arrays and matrices
import pandas as pd # library for data manipulation and analysis
import seaborn as sns # advanced plots, for statistics,
import matplotlib as mpl # to get some basic functions, helping with plot making
import scipy.cluster.hierarchy as sch
import matplotlib.pyplot as plt # for making plots,
from PIL import Image, ImageDraw
import matplotlib.gridspec
from scipy.spatial import distance
from scipy.cluster import hierarchy
from matplotlib.font_manager import FontProperties
from scipy.cluster.hierarchy import leaves_list, ClusterNode, leaders
from sklearn.metrics import accuracy_score
import graphviz # allows visualizing decision trees,
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ParameterGrid
from sklearn.dummy import DummyClassifier
from sklearn.tree import DecisionTreeClassifier # accepts only numerical data
from sklearn.tree import export_graphviz
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
#. require for plots below,
from src.utils.image_augmentation import * # to create batch_labels files,
from src.utils.data_loaders import load_encoded_imgbatch_using_logfile, load_raw_img_batch, load_raw_img_batch_with_custom_datagen
from src.utils.tools_for_plots import create_class_colors_dict
from src.utils.example_plots_after_clustering import plot_img_examples, create_spaces_between_img_clusters, plot_img_examples_from_dendrogram
from src.utils.annotated_pie_charts import annotated_pie_chart_with_class_and_group, prepare_img_classname_and_groupname
#############################################
import tensorflow as tf
import PIL.Image as Image
import tensorflow.keras as keras
from tensorflow.keras import backend as K # used for housekeeping of tf models,
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import Sequential
from tensorflow.keras import activations
from tensorflow.keras import initializers
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras import optimizers
from tensorflow.keras import losses
from tensorflow.keras import metrics
from sklearn.metrics import confusion_matrix
#. os.environ['KMP_DUPLICATE_LIB_OK']='True' # To avoid restarting the kernel with keras; preferred solution: use conda install nomkl
#############################################
# Function, ................................................................................
def plot_NN_loss_acc(*, model_history_df, title="", n_mean=3, figsize=(8,4), top=0.75):
    ''' Small function to plot loss and accuracy over epochs using data created with the keras history.history attribute;
        the dataframe columns should be called acc, loss, val_acc and val_loss.
        # ...
        . model_history_df : dataframe, created from the keras history.history attribute (see above)
        . n_mean           : int, how many of the last epochs to average for the value shown in each subplot title
        . title            : str, plot title
        . figsize, top     : passed to plt.subplots() and plt.subplots_adjust(), respectively
    '''
#.. figure, axes,
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=figsize)
fig.suptitle(title)
    #.. Plot loss values
ax1.plot(model_history_df.loc[:,'loss'], label='train loss')
ax1.plot(model_history_df.loc[:,'val_loss'], label='val loss')
ax1.set_title('Mean validation loss {:.3f}'.format(np.mean(model_history_df.loc[:, 'val_loss'][-n_mean:])))
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss value')
ax1.grid(ls="--", color="grey")
ax1.legend()
#.. Plot accuracy values
ax2.plot(model_history_df.loc[:, 'acc'], label='train acc')
ax2.plot(model_history_df.loc[:, 'val_acc'], label='val acc')
ax2.set_title('Mean validation acc {:.3f}'.format(
np.mean(model_history_df.loc[:, 'val_acc'][-n_mean:])))
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy')
ax2.set_ylim(0,1)
ax2.grid(ls="--", color="grey")
ax2.legend()
# ...
plt.tight_layout()
plt.subplots_adjust(top=top)
plt.show()
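# Usage sketch (assumes `history` is the object returned by keras model.fit(); note that newer
# tf.keras versions name the accuracy columns 'accuracy'/'val_accuracy' rather than 'acc'/'val_acc'):
#   history_df = pd.DataFrame(history.history)
#   plot_NN_loss_acc(model_history_df=history_df, title="cnn baseline", n_mean=3)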
# Function, .................................................................................
def create_convNN(*, input_size, output_size, kwargs, verbose=False):
    ''' Function to build a CNN with two convolutional layers, one optional hidden dense layer and one output layer.
        Note: passing kwargs as a packed dictionary is intentional,
        so that the source of each parameter stays easy to read in the code.
    '''
run=True
K.clear_session()
if run==True:
# Convolutional Network, ........................
model = keras.Sequential()
#.. 1st cnn, layer
model.add(keras.layers.Conv2D(
filters=kwargs['Conv2D_1__filters'],
kernel_size=kwargs['Conv2D_1__kernel_size'],
strides=kwargs['Conv2D_1__stride'],
activation=kwargs['Conv2D_1__activation'],
input_shape=input_size
))
#.. maxpool 1.
model.add(keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_1__pool_size']))
#.. 2nd cnn layer,
model.add(keras.layers.Conv2D(
filters=kwargs['Conv2D_2__filters'],
kernel_size=kwargs['Conv2D_2__kernel_size'],
strides=kwargs['Conv2D_2__stride'],
activation=kwargs['Conv2D_2__activation'],
))
#.. maxpool 2,
model.add(keras.layers.MaxPool2D(pool_size=kwargs['MaxPool2D_2__pool_size']))
# flatten the results, .........................
model.add(keras.layers.Flatten())
# dense nn, ....................................
if kwargs["model"]=="two_dense_layers":
#.. First hidden layer
model.add(Dense(
units=kwargs['h1_unit_size'],
activation=kwargs["h1_activation"],
kernel_regularizer=tf.keras.regularizers.l2(kwargs['h1_l2']),
kernel_initializer=initializers.VarianceScaling(scale=2.0, seed=0)
))
model.add(tf.keras.layers.Dropout(kwargs["h1_Dropout"]))
else:
pass
#.. Output layer
model.add(Dense(
units=output_size,
activation=kwargs["out_activation"],
kernel_regularizer=tf.keras.regularizers.l2(kwargs['out_l2']),
kernel_initializer=initializers.VarianceScaling(scale=1.0, seed=0)
))
        # Define Loss Function and Training Operation
model.compile(
optimizer= kwargs["optimizer"],
loss= losses.categorical_crossentropy,
metrics= kwargs["metrics"] # even one arg must be in the list
)
if verbose==True:
model.summary()
else:
pass
return model
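# Example parameter dictionary for create_convNN (values are illustrative only; the keys must match
# the kwargs[...] lookups inside the function above):
#   cnn_kwargs = {
#       "Conv2D_1__filters": 32, "Conv2D_1__kernel_size": 3, "Conv2D_1__stride": 1, "Conv2D_1__activation": "relu",
#       "MaxPool2D_1__pool_size": 2,
#       "Conv2D_2__filters": 64, "Conv2D_2__kernel_size": 3, "Conv2D_2__stride": 1, "Conv2D_2__activation": "relu",
#       "MaxPool2D_2__pool_size": 2,
#       "model": "two_dense_layers", "h1_unit_size": 128, "h1_activation": "relu", "h1_l2": 0.001, "h1_Dropout": 0.5,
#       "out_activation": "softmax", "out_l2": 0.001,
#       "optimizer": "adam", "metrics": ["acc"],
#   }
#   model = create_convNN(input_size=(128, 128, 3), output_size=5, kwargs=cnn_kwargs, verbose=True)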
# Function, .....................................................................................
def cnn_gridSearch(*,
grid,
method_name="cnn",
module_name,
PATH_raw,
                   train_subset_name,   # a single subset name (str), not a list!
test_subset_names,
# ...
store_predictions=True,
track_progres=True,
verbose=False,
model_fit_verbose=0,
plot_history=False # applied only if verbose==True,
):
if track_progres==True:
print(f"{module_name} _________________________________________ { | pd.to_datetime('now') | pandas.to_datetime |
#!/usr/bin/python3
############ FLYCOP ############
# Author: <NAME>, <NAME>
# April 2018, April 2021
################################
"""
EcPp3 - Glycosilation project. Preliminary Analysis of the configurationsResults.txt file:
"""
import re
import sys
import os.path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import seaborn as sns
# import shutil, errno
# import cobra
# import tabulate
# import getopt
# import copy
# import csv
# import math
# import cobra.flux_analysis.variability
# import massedit
# import subprocess
# import statistics
# import importlib
# import optlang
# import spec
# PARSING PARAMETERS
# -----------------------------------------------------------------------------
# Reading the arguments given by command line
domainName = sys.argv[1]
id_number = sys.argv[2]
# -----------------------------------------------------------------------------
###############################################################################
###############################################################################
# PRE-PROCESSING 'FLYCOP_config_V0_log'
# -----------------------------------------------------------------------------
# ERROR COUNT
ZeroDivisionError_count = 0 # Error count for ZeroDivisionError
nonOptimalSolution_count = 0 # Error count for "model solution was not optimal"
# -----------------------------------------------------------------------------
# Original Analysis of 'FLYCOP_"+domainName+"_"+id_number+"_log.txt'
# This is the logFile after FLYCOP run
# -----------------------------------------------------------------------------
input_file = "FLYCOP_"+domainName+"_"+id_number+"_log.txt"
output1 = open("nonOptimalError_configurations.txt", "w") # in LogError directory
with open(input_file, "r") as file:
lines = file.readlines()
last_error = ""
for line in lines:
if re.match("\[WARN \] \[PROCESS-ERR\]", line):
# ZeroDivisionError case
if re.findall("ZeroDivisionError: float division by zero", line.strip("\n")):
ZeroDivisionError_count += 1
last_error = "ZeroDivisionError"
# Non-optimal solution case
if re.findall("Exception: model solution was not optimal", line.strip("\n")):
nonOptimalSolution_count += 1
last_error = "NonOptimal"
if re.match("\[ERROR\]", line) and last_error == "NonOptimal":
if re.findall("The following algorithm call failed", line.strip("\n")): # NOTE THAT NEXT LINE might need to be adapted as well
extract = re.findall("-p1_sucr1 '[-]*[0.|\d.]*[\d]+' -p2_biomassEc '[-]*[0.|\d.]*[\d]+' -p3_biomassEc_glyc '[-]*[0.|\d.]*[\d]+' -p4_frc2 '[-]*[0.|\d.]*[\d]+' -p5_biomassKT '[-]*[0.|\d.]*[\d]+' -p6_nh4_Ec '[-]*[0.|\d.]*[\d]+' -p7_nh4_KT '[-]*[0.|\d.]*[\d]+' -p8_n_models '[\w]+'", line.strip("\n"))
output1.write(str(extract[0])+"\n")
last_error = ""
output1.close()
# WRITE A BRIEF ERROR SUMMARY
configs_summary = open("ConfigurationsSummary.txt", "w") # in PreliminaryAnalysis directory
configs_summary.write("-------------------------------------------------------\n")
configs_summary.write("ERROR SUMMARY\n")
configs_summary.write("-------------------------------------------------------\n")
configs_summary.write("Number of ZeroDivisionError configurations found: "+str(ZeroDivisionError_count)+"\n")
configs_summary.write("Number of nonOptimalSolution configurations found: "+str(nonOptimalSolution_count)+"\n")
configs_summary.write("Total of ERROR configurations found: "+str(ZeroDivisionError_count + nonOptimalSolution_count)+"\n")
configs_summary.close()
# -----------------------------------------------------------------------------
# Create a file with base configurations for non-optimal solutions (FLYCOP)
# Used in further comparison
# -----------------------------------------------------------------------------
output2 = open("nonOptimalConfigs_asStrings.txt", "w")
with open("nonOptimalError_configurations.txt", "r") as file:
lines = file.readlines()
for line in lines:
config = ""
        params = re.findall("'[-]*[0.|\d.]*[\d]+'|'[\w]'", line)  # TODO: double-check that this regular expression matches as intended
for parameter in params:
if re.findall("'[-]*[0.|\d.]*[\d]+'", parameter) or re.findall("'[\w]'", parameter):
parameter = float(parameter.replace("\'", ""))
config += ","+str(parameter) if config else str(parameter)
output2.write(config+"\n")
output2.close()
os.remove("nonOptimalError_configurations.txt")
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# FURTHER CLASSIFYING RECORDS in configurationsResults.txt file
# -----------------------------------------------------------------------------
# Read configurationsResults.txt file
# configResults = pd.read_excel("configurationsResults_Scenario"+id_number+".xlsx", header = True, engine="openpyxl") # update python3.5
configResults = pd.read_csv("configurationsResults_Scenario"+id_number+".txt", sep = "\t", header='infer')
configResults["ConfigKey"] = "Acceptable" # New binary classification column
# INITIAL FILTERING: NON-ACCEPTABLE (NonOptimalConfig_Error) vs. ACCEPTABLE configurations
# -----------------------------------------------------------------------------
# Check if the file is empty (i.e. no 'nonOptimal' configurations found)
nonOptimal_flag = True if os.path.getsize("nonOptimalConfigs_asStrings.txt") else False
if nonOptimal_flag:
    nonOptimal_file = pd.read_csv("nonOptimalConfigs_asStrings.txt", sep="\t", header=None, names=["BaseConfig"])  # single column, one parameter configuration per line, no header row
    for row in nonOptimal_file.itertuples():
        bad_config = row[1]
        # flag every row whose base configuration matches a non-optimal one (boolean indexing avoids the nested loop)
        configResults.loc[configResults.BaseConfig == bad_config, "ConfigKey"] = "NonOptimal"
# FURTHER SORTING:
# -----------------------------------------------------------------------------
# Sorting by 'ConfigKey': first 'Acceptable', then 'NonOptimal' configurations (make sure this order is preserved)
# Sorting by 'ID_SD': excessive SD at the end of each group
# Within the last group, from highest to lowest fitness value
configResults = configResults.sort_values(by=["ConfigKey", 'ID_SD', 'fitFunc'], ascending=[True, True, False]) # CHANGE sorting_order if desired
# Remove repeated index_labels
for column in configResults.columns:
configResults = configResults.drop([column], axis = 1) if re.findall("Unnamed: .*", column) else configResults
# Save modified configurationsResults.txt file (overwriting the previous file)
configResults.to_csv("configurationsResults_Scenario"+id_number+".txt", sep = "\t", header='infer', index = False)
###############################################################################
# WRITE A BRIEF SUMMARY OF ACCEPTABLE vs. NON-ACCEPTABLE
# ------------------------------------------------------
configs_summary = open("ConfigurationsSummary.txt", "a") # in PreliminaryAnalysis directory
configs_summary.write("\n\n\n-------------------------------------------------------\n")
configs_summary.write("BRIEF SUMMARY OF CONFIGURATIONS\n")
configs_summary.write("-------------------------------------------------------\n")
# -------------------------
# ACCEPTABLE configurations
# -------------------------
configResults_acceptable = configResults[configResults["ConfigKey"] == "Acceptable"] # Fraction of dataframe 'Acceptable'
# Biomass Loss
biomass_loss_cases_acc = configResults_acceptable[configResults_acceptable["DeadTracking"] == 1]
biomass_loss_cases_accSD_acc = biomass_loss_cases_acc[biomass_loss_cases_acc["ID_SD"] == 0]
# No Biomass Loss
non_biomass_loss_cases_acc = configResults_acceptable[configResults_acceptable["DeadTracking"] == 0]
non_biomass_loss_cases_accSD_acc = non_biomass_loss_cases_acc[non_biomass_loss_cases_acc["ID_SD"] == 0]
configs_summary.write("ACCEPTABLE CONFIGURATIONS\n")
configs_summary.write("-------------------------------------------------------")
configs_summary.write("\nTotal of acceptable configurations: "+str(len(configResults_acceptable))+"\n")
configs_summary.write("\nTotal of acceptable configurations with biomass loss: "+str(biomass_loss_cases_acc.count()[0])+"\n")
configs_summary.write("\t - of which, the number of configurations with ACCEPTABLE SD (< 10% avgFit) is: "+str(biomass_loss_cases_accSD_acc.count()[0])+"\n")
configs_summary.write("\nTotal of acceptable configurations with NO biomass loss: "+str(non_biomass_loss_cases_acc.count()[0])+"\n")
configs_summary.write("\t - of which, the number of configurations with ACCEPTABLE SD (< 10% avgFit) is: "+str(non_biomass_loss_cases_accSD_acc.count()[0])+"\n\n")
# Fraction of dataframe 'Acceptable' with acceptable SD for further plotting
# NO further subdivision in biomass loss vs. non-biomass loss
# -----------------------------------------------------------------------------
configResults_acceptable_SDok = configResults_acceptable[configResults_acceptable["ID_SD"] == 0]
# ------------------------------------------------------
# NON-ACCEPTABLE (NonOptimalConfig_Error) configurations
# ------------------------------------------------------
configs_summary.write("\nNON-OPTIMAL CONFIGURATIONS\n")
configs_summary.write("-------------------------------------------------------")
if nonOptimal_flag:
configResults_nonOpt = configResults[configResults["ConfigKey"] == "NonOptimal"] # Fraction of dataframe 'NonOptimal'
# Biomass Loss
biomass_loss_cases_nonOpt = configResults_nonOpt[configResults_nonOpt["DeadTracking"] == 1]
biomass_loss_cases_accSD_nonOpt = biomass_loss_cases_nonOpt[biomass_loss_cases_nonOpt["ID_SD"] == 0]
# No Biomass Loss
non_biomass_loss_cases_nonOpt = configResults_nonOpt[configResults_nonOpt["DeadTracking"] == 0]
non_biomass_loss_cases_accSD_nonOpt = non_biomass_loss_cases_nonOpt[non_biomass_loss_cases_nonOpt["ID_SD"] == 0]
configs_summary.write("\nTotal of configurations with NonOptimalConfig_error: "+str(len(configResults_nonOpt))+"\n")
configs_summary.write("\nTotal of configurations with NonOptimalConfig_error and biomass loss: "+str(biomass_loss_cases_nonOpt.count()[0])+"\n")
configs_summary.write("\t - of which, the number of configurations with ACCEPTABLE SD (< 10% avgFit) is: "+str(biomass_loss_cases_accSD_nonOpt.count()[0])+"\n")
configs_summary.write("\nTotal of configurations with NonOptimalConfig_error and NO biomass loss: "+str(non_biomass_loss_cases_nonOpt.count()[0])+"\n")
configs_summary.write("\t - of which, the number of configurations with ACCEPTABLE SD (< 10% avgFit) is: "+str(non_biomass_loss_cases_accSD_nonOpt.count()[0])+"\n\n")
# Fraction of dataframe 'NonOptimal' with acceptable SD for further plotting
# NO further subdivision in biomass loss vs. non-biomass loss
# -------------------------------------------------------------------------
configResults_nonOpt_SDok = configResults_nonOpt[configResults_nonOpt["ID_SD"] == 0]
else:
configs_summary.write("\nNon-optimal configurations not found\n")
# ---------------------
configs_summary.close()
###############################################################################
###############################################################################
###############################################################################
###############################################################################
# PLOTTING THE DISTRIBUTION OF CONFIGURATIONS DEPENDING ON CONSORTIUM ARCHITECTURE
# --------------------------------------------------------------------------------
def basic_boxplot_scatter(dataframe, x_var, y_var, x_label, y_label, filename, plot_title):
fig = plt.figure(num=0, clear=True, figsize=(7, 7))
ax_boxplot = sns.boxplot(x = x_var, y = y_var, data = dataframe, boxprops=dict(alpha=0.2))
sns.stripplot(x=x_var, y=y_var, jitter = True, data = dataframe)
ax_boxplot.set(xlabel = x_label, ylabel = y_label)
plt.title(plot_title, fontsize = 14)
fig.savefig(filename+".png")
plt.close(fig)
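# Usage sketch for basic_boxplot_scatter (the column names below are built in configResults above):
#   basic_boxplot_scatter(configResults, "ConfigKey", "fitFunc", "Configuration Key", "Fitness (mM/gL)",
#                         "fitness_by_configKey", "Fitness per configuration group")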
# y_variable_count: name of the column whose value counts give the height of each bar in the barplot
# x_categories: ordered list of categories on the x axis
def basic_barplot(dataframe, y_variable_count, y_categories, x_categories, x_label, y_label, filename, plot_title):
# Lists for each y_category, depending on the y_variable_count (Dictionary format)
y_counts_dict = {}
for y_category in y_categories:
y_cat_values = []
for x_category in x_categories:
x_category_prefilter_df = dataframe[dataframe["ConfigKey"] == x_category]
y_cat_values.append(x_category_prefilter_df[x_category_prefilter_df[y_variable_count] == y_category].count()[0])
y_counts_dict[y_category] = y_cat_values
# Create figure
# fig = plt.figure(num=0, clear=True, figsize=(7, 7))
fig, axes = plt.subplots(num=0, clear=True, figsize=(7, 7))
colors = cm.rainbow(np.linspace(0, 1, len(y_categories)))
bottom_flag = False
for n_categories in range(len(y_categories)):
if not bottom_flag:
axes.bar(x_categories, y_counts_dict[y_categories[n_categories]], color=colors[n_categories], label=y_categories[n_categories])
bottom = np.array(y_counts_dict[y_categories[n_categories]])
bottom_flag = True
else:
axes.bar(x_categories, y_counts_dict[y_categories[n_categories]], color=colors[n_categories], bottom=bottom, label=y_categories[n_categories])
bottom = bottom + np.array(y_counts_dict[y_categories[n_categories]])
# Legend, labels, title
plt.legend(loc="lower left", bbox_to_anchor=(0.8,1.0))
plt.ylabel(y_label)
plt.xlabel(x_label)
plt.title(plot_title, fontsize = 12)
# Adjust y-axis
# plt.ylim(top=len(dataframe))
plt.yticks(np.linspace(0, len(dataframe), len(dataframe)//10))
# Bar labels
bars = axes.patches # Bars
labels = [bars[i].get_height() for i in range(len(bars))] # Labels
prev_height = 0
for bar, label in zip(bars, labels):
height = bar.get_height() + prev_height
prev_height = height
axes.text(bar.get_x() + bar.get_width() / 2, height - 10, label, fontfamily='sans', fontsize=12, color = "black")
# Save and close figure
fig.savefig(filename+".png")
plt.close(fig)
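# Usage sketch for basic_barplot: stacked bars counting 'Consortium_Arch' values within each 'ConfigKey'
# group (the category lists must match the values actually present in the dataframe):
#   basic_barplot(configResults, y_variable_count="Consortium_Arch", y_categories=["2models", "3models"],
#                 x_categories=["Acceptable", "NonOptimal"], x_label="Configuration Key",
#                 y_label="Number of configurations", filename="arch_counts", plot_title="Architecture counts")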
# CONSIDER WHETHER NON-OPTIMAL CONFIGURATIONS WERE FOUND OR NOT
# -------------------------------------------------------------
if nonOptimal_flag: # NonOptimal configurations found
combined_configResults_SDok = pd.concat([configResults_acceptable_SDok, configResults_nonOpt_SDok], ignore_index = True) # Combined dataframe for plotting
model_architecture_categories = combined_configResults_SDok["Consortium_Arch"].unique().tolist()
# BARPLOT OF MODEL ARCHITECTURE vs. Acceptable or NonOptimal configurations
basic_barplot(combined_configResults_SDok, y_variable_count="Consortium_Arch", y_categories = model_architecture_categories,
x_categories=["Nonoptimal", "Acceptable"], x_label="Configuration Key", y_label="Consortium Architecture",
filename="consortiumArchitectureEvaluation", plot_title="Consortium Architecture Evaluation I")
# --------------------------------------------------------------------------------------
# ConfigKey classification with further subdivision of biomass loss vs. non-biomass loss
# --------------------------------------------------------------------------------------
# NON-OPTIMAL CONFIGURATIONS
configResults_nonOpt_SDok_nonBL = configResults_nonOpt_SDok[configResults_nonOpt_SDok["DeadTracking"] == 0]
configResults_nonOpt_SDok_nonBL["ConfigKey"] = "Nonoptimal_nonBL"
configResults_nonOpt_SDok_BL = configResults_nonOpt_SDok[configResults_nonOpt_SDok["DeadTracking"] == 1]
configResults_nonOpt_SDok_BL["ConfigKey"] = "Nonoptimal_BL"
combined_nonOptimal = pd.concat([configResults_nonOpt_SDok_nonBL, configResults_nonOpt_SDok_BL], ignore_index = True) # Combined dataframe (Non-optimal)
# ACCEPTABLE CONFIGURATIONS
configResults_acceptable_SDok_nonBL = configResults_acceptable_SDok[configResults_acceptable_SDok["DeadTracking"] == 0]
configResults_acceptable_SDok_nonBL["ConfigKey"] = "Acceptable_nonBL"
configResults_acceptable_SDok_BL = configResults_acceptable_SDok[configResults_acceptable_SDok["DeadTracking"] == 1]
configResults_acceptable_SDok_BL["ConfigKey"] = "Acceptable_BL"
combined_acceptable = pd.concat([configResults_acceptable_SDok_nonBL, configResults_acceptable_SDok_BL], ignore_index = True) # Combined dataframe (acceptable)
# FINAL COMBINED DATAFRAME
final_combined = pd.concat([combined_nonOptimal, combined_acceptable], ignore_index = True) # biomass loss + non-biomass loss
# final_combined_nonBL = pd.concat([configResults_nonOpt_SDok_nonBL, configResults_acceptable_SDok_nonBL], ignore_index = True) # just non-biomass loss
model_architecture_categories = final_combined["Consortium_Arch"].unique().tolist()
# BARPLOT OF MODEL ARCHITECTURE vs. Acceptable or NonOptimal configurations, with further subdivision of biomass loss vs. non-biomass loss
basic_barplot(final_combined, y_variable_count="Consortium_Arch", y_categories = model_architecture_categories,
x_categories=["Nonoptimal_nonBL", "Nonoptimal_BL", "Acceptable_nonBL", "Acceptable_BL"],
x_label="Configuration Key (BL vs. nonBL)", y_label="Consortium Architecture",
filename="consortiumArchitectureEvaluation_BLvsnonBL", plot_title="Consortium Architecture Evaluation II")
# --------------------------------------------------------------------------------------
# ConfigKey classification with further subdivision depending on model architecture
# Acceptable + Non-optimal configurations:
# - with acceptable SD
# - without biomass loss
# --------------------------------------------------------------------------------------
# NON-OPTIMAL CONFIGURATIONS
    configResults_nonOpt_SDok_nonBL_2models = configResults_nonOpt_SDok_nonBL[configResults_nonOpt_SDok_nonBL["Consortium_Arch"] == "2models"]
    configResults_nonOpt_SDok_nonBL_2models["ConfigKey"] = "NonOptimal_2models"
    configResults_nonOpt_SDok_nonBL_3models = configResults_nonOpt_SDok_nonBL[configResults_nonOpt_SDok_nonBL["Consortium_Arch"] == "3models"]
    configResults_nonOpt_SDok_nonBL_3models["ConfigKey"] = "NonOptimal_3models"
combined_nonOptimal = pd.concat([configResults_nonOpt_SDok_nonBL_2models, configResults_nonOpt_SDok_nonBL_3models], ignore_index = True) # Combined dataframe (non-optimal)
# ACCEPTABLE
configResults_acceptable_SDok_nonBL_2models = configResults_acceptable_SDok_nonBL[configResults_acceptable_SDok_nonBL["Consortium_Arch"] == "2models"]
configResults_acceptable_SDok_nonBL_2models["ConfigKey"] = "Acceptable_2models"
configResults_acceptable_SDok_nonBL_3models = configResults_acceptable_SDok_nonBL[configResults_acceptable_SDok_nonBL["Consortium_Arch"] == "3models"]
configResults_acceptable_SDok_nonBL_3models["ConfigKey"] = "Acceptable_3models"
combined_acceptable = pd.concat([configResults_acceptable_SDok_nonBL_2models, configResults_acceptable_SDok_nonBL_3models], ignore_index = True) # Combined dataframe (acceptable)
# FINAL COMBINED DATAFRAME
final_combined = pd.concat([combined_nonOptimal, combined_acceptable], ignore_index = True) # biomass loss + non-biomass loss
# PLOT OF FITNESS vs. Acceptable or NonOptimal configurations, with further subdivision depending on model architecture (categorical, scatter-boxplot)
basic_boxplot_scatter(final_combined, "ConfigKey", "fitFunc", "Configuration Key - model arch", "Fitness (mM/gL)",
"consortiumArchitectureEvaluation_fitness", "Consortium Architecture Evaluation III")
else: # NonOptimal configurations NOT found
# --------------------------------------------------------------------------------------
# ACCEPTABLE CONFIGURATIONS with further subdivision of biomass loss vs. non-biomass loss
# --------------------------------------------------------------------------------------
configResults_acceptable_SDok_nonBL = configResults_acceptable_SDok[configResults_acceptable_SDok["DeadTracking"] == 0]
configResults_acceptable_SDok_nonBL["ConfigKey"] = "Acceptable_nonBL"
configResults_acceptable_SDok_BL = configResults_acceptable_SDok[configResults_acceptable_SDok["DeadTracking"] == 1]
configResults_acceptable_SDok_BL["ConfigKey"] = "Acceptable_BL"
combined_acceptable = pd.concat([configResults_acceptable_SDok_nonBL, configResults_acceptable_SDok_BL], ignore_index = True) # Combined dataframe (non-optimal)
model_architecture_categories = combined_acceptable["Consortium_Arch"].unique().tolist()
# BARPLOT OF MODEL ARCHITECTURE vs. Acceptable config with / without biomass loss (categorical, scatter-boxplot)
basic_barplot(combined_acceptable, y_variable_count="Consortium_Arch", y_categories = model_architecture_categories,
x_categories=["Acceptable_nonBL", "Acceptable_BL"], x_label="Acceptable config - BL vs. nonBL", y_label="Consortium Architecture",
filename="consortiumArchitectureEvaluation_BLvsnonBL", plot_title="Consortium Architecture Evaluation I")
# --------------------------------------------------------------------------------------
# ACCEPTABLE CONFIGURATIONS with further subdivision of biomass loss vs. non-biomass loss
# - with acceptable SD
# - without biomass loss
# --------------------------------------------------------------------------------------
# ACCEPTABLE
configResults_acceptable_SDok_nonBL_2models = configResults_acceptable_SDok_nonBL[configResults_acceptable_SDok_nonBL["Consortium_Arch"] == "2models"]
configResults_acceptable_SDok_nonBL_2models["ConfigKey"] = "Acceptable_2models"
configResults_acceptable_SDok_nonBL_3models = configResults_acceptable_SDok_nonBL[configResults_acceptable_SDok_nonBL["Consortium_Arch"] == "3models"]
configResults_acceptable_SDok_nonBL_3models["ConfigKey"] = "Acceptable_3models"
    combined_acceptable = pd.concat([configResults_acceptable_SDok_nonBL_2models, configResults_acceptable_SDok_nonBL_3models], ignore_index = True)  # Combined dataframe (acceptable)
import pandas as pd
import numpy as np
from collections import Counter, defaultdict
class Hand:
def __init__(self, name, leap_limit=15):
self.name = name
self.leap_limit = leap_limit
def build_from_data(self, fingering_data):
data_size = len(fingering_data)
init_count = Counter()
transition_count = Counter()
emission_count = defaultdict(Counter)
for idx, data in enumerate(fingering_data):
print(f"Processing: ({idx + 1}/{data_size})")
init, transition, emission = self._count_fingering(
data, limit=self.leap_limit)
init_count += init
transition_count += transition
for k, counter in emission.items():
emission_count[k].update(counter)
self.init_prob = self._init_count_to_prob(init_count)
self.transition_prob = self._transition_count_to_prob(transition_count)
self.emission_prob = self._emission_count_to_prob(emission_count)
def build_from_params(self, init, transition, emission):
self.init_prob = init
self.transition_prob = transition
self.emission_prob = emission
def decoding(self, observations):
n_state = len(self.init_prob)
obs_len = len(observations)
delta = np.zeros((n_state, obs_len + 1))
psi = np.zeros((n_state, obs_len), dtype=int)
delta[:, 0] = np.log(self.init_prob)
for i, (pitch, time) in enumerate(
zip(observations.pitch_diff, observations.time_diff)
):
delta_mat = np.tile(delta[:, i], (n_state, 1)).transpose()
prod = delta_mat + \
np.log(self.transition_prob) + \
np.log(self.emission_prob[pitch])
if time < 0.03:
if self.name == "R":
if pitch[0] > 0:
prod[np.tril_indices(n_state)] -= 5
else:
prod[np.triu_indices(n_state)] -= 5
else:
if pitch[0] > 0:
prod[np.triu_indices(n_state)] -= 5
else:
prod[np.tril_indices(n_state)] -= 5
delta[:, i + 1] = np.amax(prod, axis=0)
psi[:, i] = prod.argmax(axis=0) + 1
opt_path = [np.argmax(delta[:, obs_len]) + 1]
for i in range(obs_len - 1, -1, -1):
opt_path.append(psi[opt_path[-1] - 1, i])
return opt_path[::-1]
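    # Note on inputs (a sketch, based on how observations are built elsewhere in this module):
    # `observations` is expected to carry `pitch_diff` (tuples of key-position differences, see
    # _pitch_to_key / pos_diff below) and `time_diff` (inter-onset times, assumed to be in seconds).
    # A typical call might look like:
    #   right = Hand("R"); right.build_from_data(training_pieces)
    #   fingering = right.decoding(observations)   # list of finger numbers in 1..5, one per note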
def _count_fingering(self, fingering_data, limit=15):
hidden_state = list(
zip(
fingering_data.fingernum.shift(fill_value=0),
fingering_data.fingernum,
)
)
pos_x, pos_y = zip(*fingering_data.pitch.map(self._pitch_to_key))
model = pd.DataFrame(
{"hidden_state": hidden_state, "pos_x": pos_x, "pos_y": pos_y}
)
model["pos_diff"] = list(
zip(
model.pos_x.diff()
.fillna(0, downcast="infer")
.apply(lambda x: limit if x > limit else x)
.apply(lambda x: -limit if x < -limit else x),
model.pos_y.diff().fillna(0, downcast="infer"),
)
)
# First observation only
init = Counter([model.hidden_state[0][1]])
# Without first observation
transition = Counter(model.hidden_state[1:])
# Emission
emission = {
state: Counter(model[model.hidden_state == state].pos_diff)
for state in set(model.hidden_state[1:])
}
return (init, transition, Counter(emission))
def _pitch_to_key(self, pitch: str):
posx = {"C": 0, "D": 1, "E": 2, "F": 3,
"G": 4, "A": 5, "B": 6}[pitch[0]]
posy = 0
if pitch[1].isdigit():
posx += (int(pitch[1]) - 4) * 7
elif pitch[1] == "#":
posy = 1
posx += (int(pitch[2]) - 4) * 7
elif pitch[1] == "b" or pitch[1] == "-":
posy = 1
posx += (int(pitch[2]) - 4) * 7 - 1
return (posx, posy)
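    # Examples of the mapping above: "C4" -> (0, 0), "D4" -> (1, 0), "C5" -> (7, 0), "C#4" -> (0, 1),
    # "Bb3" -> (-2, 1); x counts white-key positions relative to C4, y is 1 for sharps/flats.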
def _normalize(self, v):
return v / v.sum(axis=0)
def _init_count_to_prob(self, init_count):
init_prob = np.zeros(5)
for key, value in init_count.items():
if key < 0:
init_prob[-key - 1] = value
else:
init_prob[key - 1] = value
return self._normalize(init_prob)
def _transition_count_to_prob(self, transition_count):
transition_prob = np.zeros((5, 5))
for key, value in transition_count.items():
if key[0] < 0 and key[1] < 0:
transition_prob[-key[0] - 1, -key[1] - 1] = value
else:
transition_prob[key[0] - 1, key[1] - 1] = value
return np.apply_along_axis(self._normalize, axis=1, arr=transition_prob)
def _series_to_matrix(self, emission_prob):
out_prob = np.zeros((5, 5))
for key, value in emission_prob.items():
if key[0] < 0 and key[1] < 0:
out_prob[-key[0] - 1, -key[1] - 1] = value
else:
out_prob[key[0] - 1, key[1] - 1] = value
return out_prob
def _emission_count_to_prob(self, emission_count):
count_df = pd.DataFrame.from_dict(
emission_count).fillna(0, downcast="infer")
for i in range(-self.leap_limit, self.leap_limit + 1):
if (i, -1) not in count_df.index:
print((i, -1))
row = pd.Series(0, index=count_df.columns, name=(i, -1))
count_df = count_df.append(row)
if (i, 0) not in count_df.index:
print((i, 0))
row = pd.Series(0, index=count_df.columns, name=(i, 0))
count_df = count_df.append(row)
if (i, 1) not in count_df.index:
print((i, 1))
                row = pd.Series(0, index=count_df.columns, name=(i, 1))
import pytest
import pandas as pd
from pandas.core.internals import ExtensionBlock
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseConstructorsTests(BaseExtensionTests):
def test_array_from_scalars(self, data):
scalars = [data[0], data[1], data[2]]
result = data._from_sequence(scalars)
assert isinstance(result, type(data))
def test_series_constructor(self, data):
result = pd.Series(data)
assert result.dtype == data.dtype
assert len(result) == len(data)
assert isinstance(result._data.blocks[0], ExtensionBlock)
assert result._data.blocks[0].values is data
# Series[EA] is unboxed / boxed correctly
result2 = pd.Series(result)
assert result2.dtype == data.dtype
assert isinstance(result2._data.blocks[0], ExtensionBlock)
@pytest.mark.parametrize("from_series", [True, False])
def test_dataframe_constructor_from_dict(self, data, from_series):
if from_series:
data = pd.Series(data)
result = pd.DataFrame({"A": data})
assert result.dtypes['A'] == data.dtype
assert result.shape == (len(data), 1)
assert isinstance(result._data.blocks[0], ExtensionBlock)
def test_dataframe_from_series(self, data):
        result = pd.DataFrame(pd.Series(data))
"""
Copyright 2019 <NAME>.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
import datetime as dt
from enum import auto, Enum
from typing import List, Dict, Tuple, Union
import pandas as pd
from gs_quant.api.gs.data import GsDataApi
from gs_quant.api.gs.risk_models import GsFactorRiskModelApi, GsRiskModelApi
from gs_quant.errors import MqValueError, MqRequestError
from gs_quant.markets.factor import Factor
from gs_quant.models.factor_risk_model_utils import build_asset_data_map, build_factor_data_map, \
build_factor_data_dataframe, build_pfp_data_dataframe, get_isc_dataframe, get_covariance_matrix_dataframe, \
get_closest_date_index, divide_request, batch_and_upload_partial_data, risk_model_data_to_json, get_universe_size
from gs_quant.target.risk_models import RiskModel as RiskModelBuilder, RiskModelEventType
from gs_quant.target.risk_models import RiskModelData, RiskModelCalendar, RiskModelFactor, \
DataAssetsRequest, Measure, CoverageType, UniverseIdentifier, Entitlements, Term, \
RiskModelUniverseIdentifierRequest, FactorType
class ReturnFormat(Enum):
"""Alternative format for data to be returned from get_data functions"""
JSON = auto()
DATA_FRAME = auto()
class RiskModel:
""" Risk Model Class """
def __init__(self,
id_: str,
name: str,
entitlements: Union[Dict, Entitlements] = None,
description: str = None):
self.__id: str = id_
self.__name: str = name
self.__description: str = description
self.__entitlements: Entitlements = entitlements if entitlements and isinstance(entitlements, Entitlements) \
else Entitlements.from_dict(entitlements) if entitlements and isinstance(entitlements, Dict) else None
@property
def id(self) -> str:
""" Get risk model id """
return self.__id
@property
def name(self) -> str:
""" Get risk model name """
return self.__name
@name.setter
def name(self, name: str):
""" Set risk model name """
self.__name = name
@property
def description(self) -> str:
""" Get risk model description """
return self.__description
@description.setter
def description(self, description: str):
""" Set risk model description """
self.__description = description
@property
def entitlements(self) -> Entitlements:
""" Get risk model entitlements """
return self.__entitlements
@entitlements.setter
def entitlements(self, entitlements: Union[Entitlements, Dict]):
""" Set risk model entitlements """
self.__entitlements = entitlements
def delete(self):
""" Delete existing risk model object from Marquee """
return GsRiskModelApi.delete_risk_model(self.id)
def get_dates(self, start_date: dt.date = None, end_date: dt.date = None, event_type: RiskModelEventType = None) \
-> List[dt.date]:
""" Get risk model dates for existing risk model
:param start_date: list returned including and after start_date
:param end_date: list returned up to and including end_date
:param event_type: which event type to retrieve
:return: list of dates where risk model data is present
"""
return [dt.datetime.strptime(date, "%Y-%m-%d").date() for date in
GsRiskModelApi.get_risk_model_dates(self.id, start_date, end_date, event_type=event_type)]
def get_calendar(self, start_date: dt.date = None, end_date: dt.date = None) -> RiskModelCalendar:
""" Get risk model calendar for existing risk model between start and end date
:param start_date: list returned including and after start_date
:param end_date: list returned up to and including end_date
:return: RiskModelCalendar for model
"""
calendar = GsRiskModelApi.get_risk_model_calendar(self.id)
if not start_date and not end_date:
return calendar
start_idx = get_closest_date_index(start_date, calendar.business_dates, 'after') if start_date else 0
end_idx = get_closest_date_index(end_date, calendar.business_dates, 'before') if end_date else len(
calendar.business_dates)
return RiskModelCalendar(calendar.business_dates[start_idx:end_idx + 1])
def upload_calendar(self, calendar: RiskModelCalendar):
""" Upload risk model calendar to existing risk model
:param calendar: RiskModelCalendar containing list of dates where model data is expected
"""
return GsRiskModelApi.upload_risk_model_calendar(self.id, calendar)
def get_missing_dates(self, start_date: dt.date = None, end_date: dt.date = None) -> List[dt.date]:
""" Get any dates where data is not published according to expected days returned from the risk model calendar
:param end_date: date to truncate missing dates at
If no end_date is provided, end_date defaults to T-1 date according
to the risk model calendar
"""
posted_dates = self.get_dates()
if not start_date:
start_date = posted_dates[0]
if not end_date:
end_date = dt.date.today() - dt.timedelta(days=1)
calendar = [dt.datetime.strptime(date, "%Y-%m-%d").date() for date in self.get_calendar(
start_date=start_date,
end_date=end_date).business_dates]
return [date for date in calendar if date not in posted_dates]
def get_most_recent_date_from_calendar(self) -> dt.date:
""" Get T-1 date according to risk model calendar """
yesterday = dt.date.today() - dt.timedelta(1)
calendar = self.get_calendar(end_date=yesterday).business_dates
return dt.datetime.strptime(calendar[len(calendar) - 1], '%Y-%m-%d').date()
def __str__(self):
return self.id
def __repr__(self):
s = "{}('{}','{}'".format(self.__class__.__name__, self.id, self.name)
if self.entitlements is not None:
s += ", entitlements={}".format(self)
if self.description is not None:
s += ", description={}".format(self)
s += ")"
return s
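# Usage sketch (the model id and dates are placeholders; access depends on your Marquee entitlements):
#   model = FactorRiskModel.get('MODEL_ID')
#   dates = model.get_dates(start_date=dt.date(2021, 1, 4), end_date=dt.date(2021, 1, 29))
#   factors = model.get_factor_data(factor_type=FactorType.Factor)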
class FactorRiskModel(RiskModel):
""" Factor Risk Model used for calculating asset level factor risk"""
def __init__(self,
id_: str,
name: str,
coverage: CoverageType,
term: Term,
universe_identifier: UniverseIdentifier,
vendor: str,
version: float,
entitlements: Union[Dict, Entitlements] = None,
description: str = None):
""" Create new factor risk model object
:param id_: risk model id (cannot be changed)
:param name: risk model name
:param coverage: coverage of risk model asset universe
:param term: horizon term
:param universe_identifier: identifier used in asset universe upload (cannot be changed)
:param vendor: risk model vendor
:param version: version of model
:param entitlements: entitlements associated with risk model
:param description: risk model description
:return: FactorRiskModel object
"""
super().__init__(id_, name, entitlements=entitlements, description=description)
self.__coverage = coverage
self.__term = term
self.__universe_identifier = universe_identifier
self.__vendor = vendor
self.__version = version
@property
def vendor(self) -> str:
""" Get risk model vendor """
return self.__vendor
@vendor.setter
def vendor(self, vendor):
""" Set risk model vendor """
self.__vendor = vendor
@property
def universe_identifier(self) -> UniverseIdentifier:
""" Get risk model universe identifier """
return self.__universe_identifier
@property
def term(self) -> Term:
""" Get risk model term """
return self.__term
@term.setter
def term(self, term: Term):
""" Set risk model term """
self.__term = term
@property
def version(self) -> float:
""" Get risk model version """
return self.__version
@version.setter
def version(self, version: float):
""" Set risk model version """
self.__version = version
@property
def coverage(self) -> CoverageType:
""" Get risk model coverage """
return self.__coverage
@coverage.setter
def coverage(self, coverage: CoverageType):
""" Set risk model coverage """
self.__coverage = coverage
@classmethod
def from_target(cls, model: RiskModelBuilder):
uid = model.universe_identifier
return FactorRiskModel(
model.id,
model.name,
model.coverage if isinstance(model.coverage, CoverageType) else CoverageType(model.coverage),
model.term if isinstance(model.term, Term) else Term(model.term),
uid if isinstance(uid, UniverseIdentifier) else UniverseIdentifier(uid) if uid else None,
model.vendor,
model.version,
entitlements=model.entitlements,
description=model.description
)
@classmethod
def from_many_targets(cls, models: Tuple[RiskModelBuilder, ...]):
return [cls.from_target(model) for model in models]
@classmethod
def get(cls, model_id: str):
""" Get a factor risk model from Marquee
:param model_id: risk model id corresponding to Marquee Factor Risk Model
:return: Factor Risk Model object """
model = GsRiskModelApi.get_risk_model(model_id)
return cls.from_target(model)
@classmethod
def get_many(cls,
ids: List[str] = None,
terms: List[str] = None,
vendors: List[str] = None,
names: List[str] = None,
coverages: List[str] = None,
limit: int = None):
""" Get a factor risk model from Marquee
:param ids: list of model identifiers in Marquee
:param terms: list of model terms
:param vendors: list of model vendors
:param names: list of model names
:param coverages: list of model coverages
:param limit: limit of number of models in response
:return: Factor Risk Model object
"""
models = GsRiskModelApi.get_risk_models(ids=ids,
terms=terms,
vendors=vendors,
names=names,
coverages=coverages,
limit=limit)
return cls.from_many_targets(models)
def save(self):
""" Upload current Factor Risk Model object to Marquee """
new_model = RiskModelBuilder(self.coverage,
self.id,
self.name,
self.term,
self.universe_identifier,
self.vendor,
self.version,
description=self.description,
entitlements=self.entitlements)
GsRiskModelApi.create_risk_model(new_model)
def update(self):
""" Update factor risk model object on Marquee """
updated_model = RiskModelBuilder(self.coverage,
self.id,
self.name,
self.term,
self.universe_identifier,
self.vendor,
self.version,
description=self.description,
entitlements=self.entitlements)
GsRiskModelApi.update_risk_model(updated_model)
def get_factor(self, name: str) -> Factor:
""" Get risk model factor from its name
:param name: factor name associated with risk model
:return: Factor object
"""
name_matches = [f for f in self.get_factor_data(format=ReturnFormat.JSON) if f['name'] == name]
if not name_matches:
            raise MqValueError(f'Factor with name {name} does not exist in risk model {self.id}')
factor = name_matches.pop()
return Factor(risk_model_id=self.id,
id_=factor['identifier'],
type_=factor['type'],
name=factor.get('name'),
category=factor.get('factorCategory'),
tooltip=factor.get('tooltip'),
description=factor.get('description'),
glossary_description=factor.get('glossaryDescription'))
def get_many_factors(self):
factors = self.get_factor_data(format=ReturnFormat.JSON)
return [Factor(risk_model_id=self.id,
id_=f['identifier'],
type_=f['type'],
name=f.get('name'),
category=f.get('factorCategory'),
tooltip=f.get('tooltip'),
description=f.get('description'),
glossary_description=f.get('glossaryDescription')) for f in factors]
def save_factor_metadata(self, factor_metadata: RiskModelFactor):
""" Add metadata to a factor in a risk model
:param factor_metadata: factor metadata object
"""
try:
GsFactorRiskModelApi.get_risk_model_factor(self.id, factor_id=factor_metadata.identifier)
except MqRequestError:
GsFactorRiskModelApi.create_risk_model_factor(self.id, factor_metadata)
GsFactorRiskModelApi.update_risk_model_factor(self.id, factor_metadata)
def delete_factor_metadata(self, factor_id: str):
""" Delete a factor's metadata from a risk model
:param factor_id: factor id associated with risk model's factor
"""
GsFactorRiskModelApi.delete_risk_model_factor(self.id, factor_id)
def get_factor_data(self,
start_date: dt.date = None,
end_date: dt.date = None,
identifiers: List[str] = None,
include_performance_curve: bool = False,
category_filter: List[str] = None,
factor_type: FactorType = None,
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get factor data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param identifiers: list of factor ids associated with risk model
:param include_performance_curve: request to include the performance curve of the factors
:param category_filter: filter the results to those having one of the specified categories. \
Default is to return all results
        :param factor_type: optional filter to return only factors of the given type
:param format: which format to return the results in
:return: risk model factor data
"""
factor_data = GsFactorRiskModelApi.get_risk_model_factor_data(
self.id,
start_date,
end_date,
identifiers,
include_performance_curve
)
if factor_type:
factor_data = [factor for factor in factor_data if factor['type'] == factor_type.value]
if category_filter:
if factor_type == FactorType.Category:
print('Category Filter is not applicable for the Category FactorType')
else:
factor_data = [factor for factor in factor_data if factor['factorCategory'] in category_filter]
if format == ReturnFormat.DATA_FRAME:
factor_data = pd.DataFrame(factor_data)
return factor_data
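    # Example call (a sketch; the dates and category filter below are illustrative):
    #   model.get_factor_data(start_date=dt.date(2021, 1, 4), end_date=dt.date(2021, 1, 29),
    #                         category_filter=['Style'], factor_type=FactorType.Factor)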
def get_asset_universe(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get asset universe data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: risk model universe
"""
if not assets.universe and not end_date:
end_date = start_date
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Asset_Universe],
limit_factors=False
).get('results')
dates = [dt.datetime.strptime((data.get('date')), '%Y-%m-%d').date() for data in results]
universe = [data.get('assetData').get('universe') for data in results]
dates_to_universe = dict(zip(dates, universe))
if format == ReturnFormat.DATA_FRAME:
dates_to_universe = pd.DataFrame(dates_to_universe)
return dates_to_universe
def get_historical_beta(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get historical beta data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: historical beta for assets requested
"""
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Historical_Beta, Measure.Asset_Universe],
limit_factors=False
).get('results')
universe = assets.universe if assets.universe else results[0].get('assetData').get('universe')
historical_beta = build_asset_data_map(results, universe, 'historicalBeta')
if format == ReturnFormat.DATA_FRAME:
historical_beta = pd.DataFrame(historical_beta)
return historical_beta
def get_total_risk(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get total risk data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: total risk for assets requested
"""
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Total_Risk, Measure.Asset_Universe],
limit_factors=False
).get('results')
universe = assets.universe if assets.universe else results[0].get('assetData').get('universe')
total_risk = build_asset_data_map(results, universe, 'totalRisk')
if format == ReturnFormat.DATA_FRAME:
total_risk = pd.DataFrame(total_risk)
return total_risk
def get_specific_risk(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get specific risk data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: specific risk for assets requested
"""
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Specific_Risk, Measure.Asset_Universe],
limit_factors=False
).get('results')
universe = assets.universe if assets.universe else results[0].get('assetData').get('universe')
specific_risk = build_asset_data_map(results, universe, 'specificRisk')
if format == ReturnFormat.DATA_FRAME:
specific_risk = pd.DataFrame(specific_risk)
return specific_risk
def get_residual_variance(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(
RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get residual variance data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: residual variance for assets requested
"""
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Residual_Variance, Measure.Asset_Universe],
limit_factors=False
).get('results')
universe = assets.universe if assets.universe else results[0].get('assetData').get('universe')
residual_variance = build_asset_data_map(results, universe, 'residualVariance')
if format == ReturnFormat.DATA_FRAME:
residual_variance = pd.DataFrame(residual_variance)
return residual_variance
def get_specific_return(self,
start_date: dt.date,
end_date: dt.date = None,
assets: DataAssetsRequest = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, []),
format: ReturnFormat = ReturnFormat.DATA_FRAME) -> Union[List[Dict], pd.DataFrame]:
""" Get specific return data for existing risk model
:param start_date: start date for data request
:param end_date: end date for data request
:param assets: DataAssetsRequest object with identifier and list of assets to retrieve for request
:param format: which format to return the results in
:return: specific returns for assets requested
"""
results = GsFactorRiskModelApi.get_risk_model_data(
model_id=self.id,
start_date=start_date,
end_date=end_date,
assets=assets,
measures=[Measure.Specific_Return, Measure.Asset_Universe],
limit_factors=False
).get('results')
universe = assets.universe if assets.universe else results[0].get('assetData').get('universe')
specific_return = build_asset_data_map(results, universe, 'specificReturn')
if format == ReturnFormat.DATA_FRAME:
specific_return = pd.DataFrame(specific_return)
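# --- Usage sketch (not part of the module above) -------------------------------
# ``model`` stands for an instance of the risk-model class whose methods are
# defined above; how it is constructed (and the imports for ``dt``,
# ``DataAssetsRequest`` and ``RiskModelUniverseIdentifierRequest``) follows the
# module's own setup, which is outside this excerpt.
def _example_specific_risk(model, gsids, start, end):
    """Pull specific risk for a handful of assets as a date-indexed DataFrame."""
    assets = DataAssetsRequest(RiskModelUniverseIdentifierRequest.gsid, gsids)
    return model.get_specific_risk(start_date=start, end_date=end, assets=assets)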
import os
from datetime import datetime
from unittest import main, TestCase
from unittest.mock import patch
import xml.etree.ElementTree as ET
import pandas as pd
import numpy as np
from serenata_toolbox.datasets import helpers
class TestDatasetsHelpersXml(TestCase):
def setUp(self):
self.sampleXml = ET.fromstring("""<?xml version=\"1.0\" encoding=\"utf-8\"?>
<root>
<simpleText> Sample text </simpleText>
<brDate>31/05/2017</brDate>
<usDate>04/30/2017</usDate>
<brDateTime>31/05/2017 23:59:59</brDateTime>
<usDateTime>04/30/2017 11:59:59PM</usDateTime>
</root>
""")
def test_extract_text(self):
extracted = helpers.xml_extract_text(self.sampleXml, 'simpleText')
expected = 'Sample text'
self.assertEqual(expected, extracted)
def test_extract_date_default_to_br_format(self):
extracted = helpers.xml_extract_date(self.sampleXml, 'brDate')
expected = datetime(2017, 5, 31, 0, 0)
self.assertEqual(expected, extracted)
def test_extract_date_supports_custom_format(self):
extracted = helpers.xml_extract_date(self.sampleXml, 'usDate', '%m/%d/%Y')
expected = datetime(2017, 4, 30, 0, 0)
self.assertEqual(expected, extracted)
def test_extract_datetime_default_to_br_format(self):
extracted = helpers.xml_extract_datetime(self.sampleXml, 'brDateTime')
expected = datetime(2017, 5, 31, 23, 59, 59)
self.assertEqual(expected, extracted)
def test_extract_datetime_supports_custom_format(self):
extracted = helpers.xml_extract_datetime(self.sampleXml, 'usDateTime', '%m/%d/%Y %I:%M:%S%p')
expected = datetime(2017, 4, 30, 23, 59, 59)
self.assertEqual(expected, extracted)
class TestDatasetsHelpersDataframes(TestCase):
def test_translate_column(self):
records = [
['masculino'],
['feminino'],
['masculino'],
['feminino'],
]
df = pd.DataFrame(records, columns=['gender'])
from flask import Flask, render_template, request, Markup, jsonify, url_for, redirect
# import flask_login
# For dataset reading and writing
import pandas as pd
from pathlib import Path
unlabeled_dataset = pd.read_csv('./dataset/all_unlabeled_data.tsv', sep='\t')
out_path = './dataset/labeled_dataset.pkl'
out_file = Path(out_path)
if out_file.exists():
labeled_dataset = pd.read_pickle(out_path)
else:
labeled_dataset = pd.DataFrame(columns=['tagger_name', 'id', 'medicine', 'effect', 'text', 'relation', 'outcome', 'case'])
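# --- Sketch (not part of the original app) -------------------------------------
# One plausible way a labelled row could be appended and persisted between
# requests; the helper name and its call site inside a Flask route are
# assumptions, not taken from the source.
def save_label(row: dict) -> None:
    global labeled_dataset
    labeled_dataset = pd.concat([labeled_dataset, pd.DataFrame([row])], ignore_index=True)
    labeled_dataset.to_pickle(out_path)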
__author__ = 'lucabasa'
__version__ = '1.0'
__status__ = 'development'
import numpy as np
import pandas as pd
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold, RandomizedSearchCV
import lightgbm as lgb
import xgboost as xgb
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
def lightgbm_train(train, test, target, kfolds):
param = {'num_leaves': 111,
'min_data_in_leaf': 150,
'objective': 'regression',
'max_depth': 9,
'learning_rate': 0.005,
"boosting": "gbdt",
"feature_fraction": 0.7522,
"bagging_freq": 1,
"bagging_fraction": 0.7083 ,
"bagging_seed": 11,
"metric": 'rmse',
"lambda_l1": 0.2634,
"random_seed": 133,
"verbosity": -1}
'''
param = {'num_leaves': 50,
'min_data_in_leaf': 11,
'objective': 'regression',
'max_depth': 5,
'learning_rate': 0.005,
"boosting": "gbdt",
"feature_fraction": 0.8791,
"bagging_freq": 1,
"bagging_fraction": 0.9238 ,
"bagging_seed": 11,
"metric": 'rmse',
"lambda_l1": 4.8679,
"random_seed": 133,
"verbosity": -1}
'''
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = lgb.Dataset(train.iloc[trn_idx][comm_cols],
label=target.iloc[trn_idx]
)
val_data = lgb.Dataset(train.iloc[val_idx][comm_cols],
label=target.iloc[val_idx]
)
num_round = 10000
clf = lgb.train(param,
trn_data,
num_round,
valid_sets = [trn_data, val_data],
verbose_eval=500,
early_stopping_rounds = 300)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols], num_iteration=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = comm_cols
fold_importance_df["importance"] = clf.feature_importance()
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions += clf.predict(test[comm_cols], num_iteration=clf.best_iteration) / kfolds.n_splits
print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))
return predictions, mean_squared_error(oof, target)**0.5, feature_importance_df, oof
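# --- Usage sketch (not part of the original module) ----------------------------
# How the CV helpers in this module are meant to be called; the same signature
# applies to ``xgb_train`` and ``rf_train`` below. ``train_df``/``test_df`` must
# share feature columns and ``target`` must align row-wise with ``train_df``.
def _demo_run_cv(train_df, test_df, target):
    kfolds = KFold(n_splits=5, shuffle=True, random_state=345)
    preds, cv_rmse, feat_imp, oof = lightgbm_train(train_df, test_df, target, kfolds)
    return preds, cv_rmse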
def xgb_train(train, test, target, kfolds):
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = train.iloc[trn_idx][comm_cols]
val_data = train.iloc[val_idx][comm_cols]
trn_target = target.iloc[trn_idx]
val_target = target.iloc[val_idx]
clf = xgb.XGBRegressor(n_estimators=10000,
learning_rate=0.05,
max_depth=6,
n_jobs=6,
subsample=0.99,
random_state=408,
gamma=0.0217,
reg_alpha=0.9411,
colsample_bytree=0.3055).fit(trn_data, trn_target,
eval_set=[(val_data, val_target)],
eval_metric='rmse',
early_stopping_rounds=200,
verbose=500)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols],
ntree_limit=clf.best_iteration)
fold_importance_df = pd.DataFrame()
fold_importance_df["feature"] = comm_cols
fold_importance_df["importance"] = clf.feature_importances_
fold_importance_df["fold"] = fold_ + 1
feature_importance_df = pd.concat([feature_importance_df, fold_importance_df], axis=0)
predictions += clf.predict(test[comm_cols], ntree_limit=clf.best_iteration) / kfolds.n_splits
print("CV score: {:<8.5f}".format(mean_squared_error(oof, target)**0.5))
return predictions, mean_squared_error(oof, target)**0.5, feature_importance_df, oof
def rf_train(train, test, target, kfolds):
oof = np.zeros(len(train))
predictions = np.zeros(len(test))
feature_importance_df = pd.DataFrame()
comm_cols = list(set(train.columns).intersection(test.columns))
'''
grid_param = {'max_depth': np.arange(3,30),
'min_samples_split': np.arange(2, 50),
'min_samples_leaf': np.arange(1,40),
'max_features': ['sqrt', 'log2', None]}
print('Optimizing parameters')
grid = RandomizedSearchCV(RandomForestRegressor(n_estimators=300, n_jobs=4, random_state=345),
param_distributions=grid_param, n_iter=20, cv=kfolds,
random_state=654, n_jobs=-1, scoring='neg_mean_squared_error', verbose=3)
grid.fit(train[comm_cols], target)
best_forest = grid.best_estimator_
print(grid.best_params_)
print(round( (-grid.best_score_ )**0.5 ,3))
'''
best_forest = RandomForestRegressor(n_estimators=1000, n_jobs=-1, random_state=32, max_depth=20, max_features='sqrt')
for fold_, (trn_idx, val_idx) in enumerate(kfolds.split(train.values, target.values)):
print("fold n°{}".format(fold_))
trn_data = train.iloc[trn_idx][comm_cols]
val_data = train.iloc[val_idx][comm_cols]
trn_target = target.iloc[trn_idx]
val_target = target.iloc[val_idx]
clf = best_forest.fit(trn_data, trn_target)
oof[val_idx] = clf.predict(train.iloc[val_idx][comm_cols])
fold_importance_df = pd.DataFrame()
import os
import boto3
import pandas as pd
bucket_name = os.getenv("S3_BUCKET_NAME")
s3 = boto3.resource("s3")
s3_alec = s3.Bucket(bucket_name)
simulation_ids_list = []
scenario_ids_list = []
# Fetch simulation data
for i in ["applications", "outcomes", "portfolios", "scenarios"]:
file_paths_tmp = [f.key for f in s3_alec.objects.filter(Prefix=f"{i}/")]
simulation_ids_tmp = [
simulation_id.split("/")[2].split(".")[0] for simulation_id in file_paths_tmp
]
simulation_ids_list.extend(simulation_ids_tmp)
scenario_ids_tmp = [simulation_id.split("/")[1] for simulation_id in file_paths_tmp]
scenario_ids_list.extend(scenario_ids_tmp)
simulation_ids = pd.DataFrame({"simulation_id": simulation_ids_list})
simulation_ids = simulation_ids.value_counts().reset_index()
simulation_ids = simulation_ids.loc[
simulation_ids.loc[:, 0] == simulation_ids.loc[:, 0].max(), "simulation_id"
].tolist()
scenario_ids = pd.DataFrame({"scenario_id": scenario_ids_list})
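# --- Sketch (not part of the original script) ----------------------------------
# The key layout the parsing above assumes: "<prefix>/<scenario_id>/<simulation_id>.<ext>".
# The example key (and its ".parquet" extension) is made up for illustration.
_example_key = "applications/scenario-a/sim-001.parquet"
assert _example_key.split("/")[1] == "scenario-a"               # scenario id
assert _example_key.split("/")[2].split(".")[0] == "sim-001"    # simulation id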
""" this will read the goes_r data"""
import pandas as pd
import xarray as xr
try:
import s3fs
has_s3fs = True
except ImportError:
print(
"Please install s3fs if retrieving from the Amazon S3 Servers. Otherwise continue with local data"
)
has_s3fs = False
try:
import h5py # noqa: F401
has_h5py = True
except ImportError:
print("Please install h5py to open files from the Amazon S3 servers.")
has_h5py = False
try:
import h5netcdf # noqa: F401
has_h5netcdf = True
except ImportError:
print("Please install h5netcdf to open files from the Amazon S3 servers.")
has_h5netcdf = False
from ..grids import _geos_16_grid
def _get_swath_from_fname(fname):
vert_grid_num = fname.split(".")[-4].split("v")[-1]
hori_grid_num = fname.split(".")[-4].split("v")[0].split("h")[-1]
return hori_grid_num, vert_grid_num
def _get_time_from_fname(fname):
import pandas as pd
u = pd.Series([fname.split(".")[-2]])
date = pd.to_datetime(u, format="%Y%j%H%M%S")[0]
return date
def _open_single_file(fname):
# open the file
dset = xr.open_dataset(fname)
dset = dset.rename({"t": "time"})
# get the area def
area = _geos_16_grid(dset)
dset.attrs["area"] = area
# get proj4 string
dset.attrs["proj4_srs"] = area.proj_str
# get longitude and latitudes
lon, lat = area.get_lonlats_dask()
dset.coords["longitude"] = (("y", "x"), lon)
dset.coords["latitude"] = (("y", "x"), lat)
for i in dset.variables:
dset[i].attrs["proj4_srs"] = area.proj_str
dset[i].attrs["area"] = area
# expand dimensions for time
dset = dset.expand_dims("time")
return dset
def open_dataset(date=None, filename=None, satellite="16", product=None):
g = GOES()
if filename is None:
try:
if date is None:
raise ValueError
if product is None:
raise ValueError
except ValueError:
print("Please provide a date and product to be able to retrieve data from Amazon S3")
ds = g.open_amazon_file(date=date, satellite=satellite, product=product)
else:
ds = g.open_local(filename)
return ds
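# --- Usage sketch (not part of the original module) ----------------------------
# ``date_to_url`` appends a "<year>/<day-of-year>/<hour>/" prefix to the product
# URL; the timestamp below is arbitrary. The commented call shows the intended
# entry point for pulling a product from the public NOAA bucket.
assert pd.Timestamp("2021-06-01 12:00").strftime("%Y/%j/%H/") == "2021/152/12/"
# ds = open_dataset(date="2021-06-01 12:00", satellite="16", product="ABI-L2-AODF")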
class GOES:
def __init__(self):
self.date = None
self.satellite = "16"
self.product = "ABI-L2-AODF"
self.baseurl = f"s3://noaa-goes{self.satellite}/"
self.url = f"{self.baseurl}"
self.filename = None
self.fs = None
def _update_baseurl(self):
self.baseurl = f"s3://noaa-goes{self.satellite}/"
def set_product(self, product=None):
try:
if product is None:
raise ValueError
else:
self.url = f"{self.baseurl}{product}/"
except ValueError:
print("kwarg product must have a value")
def get_products(self):
products = [value.split("/")[-1] for value in self.fs.ls(self.baseurl)[:-1]]
return products
def date_to_url(self):
date = pd.Timestamp(self.date)
date_url_bit = date.strftime("%Y/%j/%H/")
self.url = f"{self.url}{date_url_bit}"
def _get_files(self, url=None):
try:
files = self.fs.ls(url)
if len(files) < 1:
raise ValueError
else:
return files
except ValueError:
print("Files not available for product and date")
def _get_closest_date(self, files=[]):
file_dates = [pd.to_datetime(f.split("_")[-1][:-4], format="c%Y%j%H%M%S") for f in files]
date = pd.Timestamp(self.date)
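# --- Sketch (not part of the original module) ----------------------------------
# The timestamp encoding ``_get_closest_date`` expects at the end of a GOES file
# name ("..._c<YYYY><DDD><HHMMSS><tenths>.nc"); the file name below is made up.
_example_name = "OR_ABI-L2-AODF-M6_G16_c20211521200001.nc"
_scan_time = pd.to_datetime(_example_name.split("_")[-1][:-4], format="c%Y%j%H%M%S")
# -> Timestamp('2021-06-01 12:00:00')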
import os, inspect, sys
from collections import OrderedDict
from pathlib import Path
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
grandpadir = os.path.dirname(currentdir)
sys.path.insert(0, grandpadir)
from utils import helper_functions
from configpkg import ConfigMger, DatasetConfig
from holders.Dataset import Dataset
from utils.helper_functions import sort_files_by_dim, read_nav_files
from utils.shared_names import *
from comparison.comparison_utils import get_dataset_name
from utils.pseudo_samples import PseudoSamplesMger
import pandas as pd
import json
import numpy as np
from pipeline.automl.automl_constants import MAX_FEATURES
import matplotlib.pyplot as plt
pipeline = 'results_predictive_grouping'
test_confs = [
{'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'test'}
]
# conf = {'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'test'}
# conf = {'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'test'}
synth_confs =[
{'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'synthetic'},
{'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'synthetic'},
{'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'synthetic'}
]
# conf = {'path': Path('..', pipeline, 'lof'), 'detector': 'lof', 'type': 'real'}
# conf = {'path': Path('..', pipeline, 'iforest'), 'detector': 'iforest', 'type': 'real'}
# conf = {'path': Path('..', pipeline, 'loda'), 'detector': 'loda', 'type': 'real'}
confs_to_analyze = synth_confs
def analyze():
fig_name = None
prec_dict = OrderedDict()
recall_dict = OrderedDict()
for conf in confs_to_analyze:
if fig_name is None:
fig_name = 'real_features_perf.png' if conf['type'] == 'real' else 'synth-features-corr.png'
nav_files = read_nav_files(conf['path'], conf['type'])
nav_files = sort_files_by_dim(nav_files)
feature_perf_prec, feature_perf_recall = analysis_per_nav_file(nav_files, conf)
prec_dict[conf['detector']] = feature_perf_prec
recall_dict[conf['detector']] = feature_perf_recall
# tmp_dict = {'lof': None, 'iforest': None, 'loda': None}
# indexes = ['PROTEUS_{fs}', 'PROTEUS_{ca-lasso}', 'PROTEUS_{shap}', 'PROTEUS_{loda}']
# columns = ['S 20d (10%)', 'S 40d (10%)', 'S 60d (10%)', 'S 80d (10%)', 'S 100d (10%)']
# for k in tmp_dict.keys():
# if k == 'loda':
# tmp_dict[k] = pd.DataFrame(np.random.rand(4, 5), index=indexes, columns=columns)
# else:
# tmp_dict[k] = pd.DataFrame(np.random.rand(3, 5), index=indexes[0:-1], columns=columns)
# plot_dataframes(tmp_dict, tmp_dict.copy(), fig_name)
plot_dataframes(prec_dict, recall_dict, fig_name)
def analysis_per_nav_file(nav_files, conf):
dataset_names = []
feature_perf_prec = pd.DataFrame()
feature_perf_recall = pd.DataFrame()
"""
Created on Jun 30, 2013
Wrappers around scipy.stats statistical tests to use
Pandas data-structures and return Pandas objects as
results.
@author: agross
"""
import pandas as pd
import numpy as np
from scipy import stats
def _match_series(a, b):
"""
Matches two series on shared data.
(copied from Processing.Helpers to remove that dependency,
public use should go through Processing.Helpers)
"""
a, b = a.align(b, join='inner', copy=False)
valid = pd.notnull(a) & pd.notnull(b)
a = a[valid]
if not a.index.is_unique:
a = a.groupby(lambda s: s).first() # some sort of duplicate index bug
b = b[valid]
if not b.index.is_unique:
b = b.groupby(lambda s: s).first()
return a, b
def _split_on_index(s, matched=False, n_groups=2):
"""
Splits a series on the second level of its index.
"""
d = s.unstack()
if matched is True:
d = d.dropna()
assert(d.shape[1] == n_groups)
a, b = [i[1] for i in d.iteritems()]
return a, b
def wilcoxon_pandas(a, b=None):
"""
Wrapper for the Wilcoxon signed-rank test on pandas matched samples
------------------------------------------------
a,b: matched measurements
OR
a: Series of matched measurements with assignment on second level
of multi-index.
"""
if isinstance(b, pd.Series):
a, b = _match_series(a, b)
elif b is None and isinstance(a.index, pd.MultiIndex):
a, b = _split_on_index(a, matched=True)
z, p = stats.wilcoxon(a, b)
return pd.Series({'T': z, 'p': p})
def ttest_rel(a, b=None):
"""
Wrapper to do a paired t-test on pandas matched samples
------------------------------------------------
a,b: matched measurements
OR
a: Series of matched measurements with assignment on second level
of multi-index.
"""
if isinstance(b, pd.Series):
a, b = _match_series(a, b)
elif b is None and isinstance(a.index, pd.MultiIndex):
a, b = _split_on_index(a, matched=True)
z, p = stats.ttest_rel(a, b)
return pd.Series({'t': z, 'p': p})
def anova(hit_vec, response_vec, min_size=5):
"""
Wrapper to do a one way anova on pandas Series
------------------------------------------------
hit_vec: Series of labels
response_vec: Series of measurements
"""
if hit_vec.value_counts().min() < min_size:
return np.nan
hit_vec, response_vec = _match_series(hit_vec, response_vec)
res = stats.f_oneway(*[response_vec[hit_vec == num] for num in
hit_vec.unique()])
return pd.Series(res, index=['F', 'p'])
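# --- Usage sketch (not part of the original module) ----------------------------
# The wrappers above on toy data; index labels are what get matched.
_a = pd.Series([1.0, 2.0, 3.0, 4.0], index=list('abcd'))
_b = pd.Series([1.5, 2.5, 2.5, 5.0], index=list('abcd'))
_paired = ttest_rel(_a, _b)                       # Series with 't' and 'p'
_groups = pd.Series([0, 0, 1, 1], index=list('abcd'))
_oneway = anova(_groups, _a, min_size=1)          # Series with 'F' and 'p'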
from functools import partial
from typing import Callable, Dict, Iterable, Optional, Union
import pandas as pd
import torch
from torch import nn
from torch.optim.lr_scheduler import ReduceLROnPlateau
from collie.interactions import (ApproximateNegativeSamplingInteractionsDataLoader,
Interactions,
InteractionsDataLoader)
from collie.model import MultiStagePipeline, ScaledEmbedding, ZeroEmbedding
from collie.utils import get_init_arguments, merge_docstrings
INTERACTIONS_LIKE_INPUT = Union[ApproximateNegativeSamplingInteractionsDataLoader,
Interactions,
InteractionsDataLoader]
class ColdStartModel(MultiStagePipeline):
# NOTE: the full docstring is merged in with ``MultiStagePipeline``'s using
# ``merge_docstrings``. Only the description of new or changed parameters are included in this
# docstring
"""
Training pipeline for a matrix factorization model optimized for the cold-start problem.
Many recommendation models suffer from the cold start problem, when a model is unable to provide
adequate recommendations for a new item until enough users have interacted with it. But, if
users only interact with recommended items, the item will never be recommended, and thus the
model will never improve recommendations for this item.
The ``ColdStartModel`` attempts to bypass this by limiting the item space down to "item
buckets", training a model on this as the item space, then expanding out to all items. During
this expansion, the learned-embeddings of each bucket is copied over to each corresponding
item, providing a smarter initialization than a random one for both existing and new items.
Now, when we have a new item, we can use its bucket embedding as an initialization into a model.
The stages in a ``ColdStartModel`` are, in order:
1. ``item_buckets``
Matrix factorization with item embeddings and bias terms bucketed by
``item_buckets`` argument. Unlike in the next stage, many items may map on to a single
bucket, and this will share the same embedding and bias representation. The model should
learn user preference for buckets in this stage.
2. ``no_buckets``
Standard matrix factorization as we do in ``MatrixFactorizationModel``. However, upon
advancing to this stage, the item embeddings are initialized with their bucketed embedding
value (and same for biases). Not only does this provide better initialization than random,
but allows new items to be incorporated into the model without training by using their
item bucket embedding and bias terms at prediction time.
Note that the cold start problem exists for new users as well, but this functionality will be
added to this model in a future version.
All ``ColdStartModel`` instances are subclasses of the ``LightningModule`` class provided by
PyTorch Lightning. This means to train a model, you will need a
``collie.model.CollieTrainer`` object, but the model can be saved and loaded without this
``Trainer`` instance. Example usage may look like:
.. code-block:: python
from collie.model import ColdStartModel, CollieTrainer
# instantiate and fit a ``ColdStartModel`` as expected
model = ColdStartModel(train=train, item_buckets=item_buckets)
trainer = CollieTrainer(model)
trainer.fit(model)
# train for X more epochs on the next stage, ``no_buckets``
trainer.max_epochs += X
model.advance_stage()
trainer.fit(model)
model.eval()
# do evaluation as normal with ``model``
# get item-item recommendations for a new item by using the bucket ID, Z
similar_items = model.item_bucket_item_similarity(item_bucket_id=Z)
model.save_model(filename='model.pth')
new_model = ColdStartModel(load_model_path='model.pth')
# do evaluation as normal with ``new_model``
Note
----
The ``forward`` calculation will be different depending on the stage that is set. Note this
when evaluating / saving and loading models in.
Parameters
----------
item_buckets: torch.tensor, 1-d
An ordered iterable containing the bucket ID for each item ID. For example, if you have
five films and are going to bucket by primary genre, and your data looks like:
* Item ID: 0, Genre ID: 1
* Item ID: 1, Genre ID: 0
* Item ID: 2, Genre ID: 2
* Item ID: 3, Genre ID: 2
* Item ID: 4, Genre ID: 1
Then ``item_buckets`` would be: ``[1, 0, 2, 2, 1]``
embedding_dim: int
Number of latent factors to use for user and item embeddings
dropout_p: float
Probability of dropout
item_buckets_stage_lr: float
Learning rate for user parameters and item bucket parameters optimized during the
``item_buckets`` stage
no_buckets_stage_lr: float
Learning rate for user parameters and item parameters optimized during the ``no_buckets``
stage
item_buckets_stage_optimizer: str or torch.optim.Optimizer
Optimizer used for user parameters and item bucket parameters optimized during the
``item_buckets`` stage. If a string, one of the following supported optimizers:
* ``'sgd'`` (for ``torch.optim.SGD``)
* ``'adam'`` (for ``torch.optim.Adam``)
no_buckets_stage_optimizer: str or torch.optim.Optimizer
Optimizer used for user parameters and item parameters optimized during the ``no_buckets``
stage. If a string, one of the following supported optimizers:
* ``'sgd'`` (for ``torch.optim.SGD``)
* ``'adam'`` (for ``torch.optim.Adam``)
"""
def __init__(self,
train: INTERACTIONS_LIKE_INPUT = None,
val: INTERACTIONS_LIKE_INPUT = None,
item_buckets: Iterable[int] = None,
embedding_dim: int = 30,
dropout_p: float = 0.0,
sparse: bool = False,
item_buckets_stage_lr: float = 1e-3,
no_buckets_stage_lr: float = 1e-3,
lr_scheduler_func: Optional[torch.optim.lr_scheduler._LRScheduler] = partial(
ReduceLROnPlateau,
patience=1,
verbose=False,
),
weight_decay: float = 0.0,
item_buckets_stage_optimizer: Union[str, torch.optim.Optimizer] = 'adam',
no_buckets_stage_optimizer: Union[str, torch.optim.Optimizer] = 'adam',
loss: Union[str, Callable[..., torch.tensor]] = 'hinge',
metadata_for_loss: Optional[Dict[str, torch.tensor]] = None,
metadata_for_loss_weights: Optional[Dict[str, float]] = None,
load_model_path: Optional[str] = None,
map_location: Optional[str] = None):
optimizer_config_list = None
num_item_buckets = None
if load_model_path is None:
# TODO: separate out optimizer and bias optimizer somehow
optimizer_config_list = [
{
'lr': item_buckets_stage_lr,
'optimizer': item_buckets_stage_optimizer,
'parameter_prefix_list': [
'user_embed',
'user_bias',
'item_bucket_embed',
'item_bucket_bias',
],
'stage': 'item_buckets',
},
{
'lr': no_buckets_stage_lr,
'optimizer': no_buckets_stage_optimizer,
'parameter_prefix_list': [
'user_embed',
'user_bias',
'item_embed',
'item_bias',
],
'stage': 'no_buckets',
},
]
if not isinstance(item_buckets, torch.Tensor):
item_buckets = torch.tensor(item_buckets)
# data quality checks for ``item_buckets``
assert item_buckets.dim() == 1, (
f'``item_buckets`` must be 1-dimensional, not {item_buckets.dim()}-dimensional!'
)
if len(item_buckets) != train.num_items:
raise ValueError(
'Length of ``item_buckets`` must be equal to the number of items in the '
f'dataset: {len(item_buckets)} != {train.num_items}.'
)
if min(item_buckets) != 0:
raise ValueError(f'``item_buckets`` IDs must start at 0, not {min(item_buckets)}!')
num_item_buckets = item_buckets.max().item() + 1
super().__init__(optimizer_config_list=optimizer_config_list,
num_item_buckets=num_item_buckets,
**get_init_arguments())
__doc__ = merge_docstrings(MultiStagePipeline, __doc__, __init__)
def _move_any_external_data_to_device(self):
"""Move the item buckets to the device before training."""
self.hparams.item_buckets = self.hparams.item_buckets.to(self.device)
def _copy_weights(self, old: nn.Embedding, new: nn.Embedding, buckets: torch.tensor) -> None:
new.weight.data.copy_(old.weight.data[buckets])
def set_stage(self, stage: str) -> None:
"""Set the stage for the model."""
current_stage = self.hparams.stage
if stage in self.hparams.stage_list:
if current_stage == 'item_buckets' and stage == 'no_buckets':
print('Copying over item embeddings...')
self._copy_weights(self.item_bucket_biases,
self.item_biases,
self.hparams.item_buckets)
self._copy_weights(self.item_bucket_embeddings,
self.item_embeddings,
self.hparams.item_buckets)
else:
raise ValueError(
f'"{stage}" is not a valid stage, please choose one of {self.hparams.stage_list}'
)
self.hparams.stage = stage
print(f'Set ``self.hparams.stage`` to "{stage}"')
def _setup_model(self, **kwargs) -> None:
"""
Method for building model internals that rely on the data passed in.
This method will be called after `prepare_data`.
"""
# define initial embedding groups
self.item_bucket_biases = ZeroEmbedding(
num_embeddings=self.hparams.num_item_buckets,
embedding_dim=1,
sparse=self.hparams.sparse,
)
self.item_bucket_embeddings = ScaledEmbedding(
num_embeddings=self.hparams.num_item_buckets,
embedding_dim=self.hparams.embedding_dim,
sparse=self.hparams.sparse,
)
# define fine-tuned embedding groups
self.user_biases = ZeroEmbedding(
num_embeddings=self.hparams.num_users,
embedding_dim=1,
sparse=self.hparams.sparse
)
self.item_biases = ZeroEmbedding(
num_embeddings=self.hparams.num_items,
embedding_dim=1,
sparse=self.hparams.sparse,
)
self.user_embeddings = ScaledEmbedding(
num_embeddings=self.hparams.num_users,
embedding_dim=self.hparams.embedding_dim,
sparse=self.hparams.sparse
)
self.item_embeddings = ScaledEmbedding(
num_embeddings=self.hparams.num_items,
embedding_dim=self.hparams.embedding_dim,
sparse=self.hparams.sparse,
)
self.dropout = nn.Dropout(p=self.hparams.dropout_p)
def forward(self, users: torch.tensor, items: torch.tensor) -> torch.tensor:
"""
Forward pass through the model.
Parameters
----------
users: tensor, 1-d
Array of user indices
items: tensor, 1-d
Array of item indices
Returns
-------
preds: tensor, 1-d
Predicted ratings or rankings
"""
user_embeddings = self.user_embeddings(users)
user_biases = self.user_biases(users)
if self.hparams.stage == 'item_buckets':
# transform item IDs to item bucket IDs
items = self.hparams.item_buckets[items]
item_embeddings = self.item_bucket_embeddings(items)
item_biases = self.item_bucket_biases(items)
elif self.hparams.stage == 'no_buckets':
item_embeddings = self.item_embeddings(items)
item_biases = self.item_biases(items)
pred_scores = (
torch.mul(self.dropout(user_embeddings), self.dropout(item_embeddings)).sum(axis=1)
+ user_biases.squeeze(1)
+ item_biases.squeeze(1)
)
return pred_scores.squeeze()
def item_bucket_item_similarity(self, item_bucket_id: int) -> pd.Series:
"""
Get most similar item indices to a item bucket by cosine similarity.
Cosine similarity is computed with item and item bucket embeddings from a trained model.
Parameters
----------
item_bucket_id: int
Returns
-------
sim_score_idxs: pd.Series
Sorted values as cosine similarity for each item in the dataset with the index being
the item ID
"""
item_bucket_embeddings = self.item_bucket_embeddings.weight.data
item_bucket_embeddings = (
item_bucket_embeddings / item_bucket_embeddings.norm(dim=1)[:, None]
)
item_embeddings = self._get_item_embeddings()
item_embeddings = item_embeddings / item_embeddings.norm(dim=1)[:, None]
sim_score_idxs = (
torch.matmul(item_bucket_embeddings[[item_bucket_id], :],
item_embeddings.transpose(1, 0))
.detach()
.cpu()
.numpy()
.squeeze()
)
sim_score_idxs_series = pd.Series(sim_score_idxs)
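# --- Sketch (not part of the original module) ----------------------------------
# The bucket-to-item weight copy ``_copy_weights`` performs when advancing from
# the ``item_buckets`` stage: five items mapped onto three buckets, as in the
# class docstring above.
_buckets = torch.tensor([1, 0, 2, 2, 1])
_bucket_emb = nn.Embedding(3, 4)
_item_emb = nn.Embedding(5, 4)
_item_emb.weight.data.copy_(_bucket_emb.weight.data[_buckets])
# items 0 and 4 now start from bucket 1's embedding; items 2 and 3 from bucket 2's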
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
from datetime import timedelta
import logging
import numpy as np
import pandas as pd
import pystan # noqa F401
from fbprophet.diagnostics import prophet_copy
from fbprophet.models import prophet_stan_model
from fbprophet.make_holidays import get_holiday_names, make_holidays_df
from fbprophet.plot import (
plot,
plot_components,
plot_forecast_component,
seasonality_plot_df,
plot_weekly,
plot_yearly,
plot_seasonality,
)
logger = logging.getLogger('fbprophet')
logger.addHandler(logging.NullHandler())
if len(logger.handlers) == 1:
logging.basicConfig(level=logging.INFO)
class Prophet(object):
"""Prophet forecaster.
Parameters
----------
growth: String 'linear' or 'logistic' to specify a linear or logistic
trend.
changepoints: List of dates at which to include potential changepoints. If
not specified, potential changepoints are selected automatically.
n_changepoints: Number of potential changepoints to include. Not used
if input `changepoints` is supplied. If `changepoints` is not supplied,
then n_changepoints potential changepoints are selected uniformly from
the first `changepoint_range` proportion of the history.
changepoint_range: Proportion of history in which trend changepoints will
be estimated. Defaults to 0.8 for the first 80%. Not used if
`changepoints` is specified.
yearly_seasonality: Fit yearly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
weekly_seasonality: Fit weekly seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
daily_seasonality: Fit daily seasonality.
Can be 'auto', True, False, or a number of Fourier terms to generate.
holidays: pd.DataFrame with columns holiday (string) and ds (date type)
and optionally columns lower_window and upper_window which specify a
range of days around the date to be included as holidays.
lower_window=-2 will include 2 days prior to the date as holidays. Also
optionally can have a column prior_scale specifying the prior scale for
that holiday.
seasonality_mode: 'additive' (default) or 'multiplicative'.
seasonality_prior_scale: Parameter modulating the strength of the
seasonality model. Larger values allow the model to fit larger seasonal
fluctuations, smaller values dampen the seasonality. Can be specified
for individual seasonalities using add_seasonality.
holidays_prior_scale: Parameter modulating the strength of the holiday
components model, unless overridden in the holidays input.
changepoint_prior_scale: Parameter modulating the flexibility of the
automatic changepoint selection. Large values will allow many
changepoints, small values will allow few changepoints.
mcmc_samples: Integer, if greater than 0, will do full Bayesian inference
with the specified number of MCMC samples. If 0, will do MAP
estimation.
interval_width: Float, width of the uncertainty intervals provided
for the forecast. If mcmc_samples=0, this will be only the uncertainty
in the trend using the MAP estimate of the extrapolated generative
model. If mcmc_samples>0, this will be integrated over all model
parameters, which will include uncertainty in seasonality.
uncertainty_samples: Number of simulated draws used to estimate
uncertainty intervals.
"""
def __init__(
self,
growth='linear',
changepoints=None,
n_changepoints=25,
changepoint_range=0.8,
yearly_seasonality='auto',
weekly_seasonality='auto',
daily_seasonality='auto',
holidays=None,
seasonality_mode='additive',
seasonality_prior_scale=10.0,
holidays_prior_scale=10.0,
changepoint_prior_scale=0.05,
mcmc_samples=0,
interval_width=0.80,
uncertainty_samples=1000,
):
self.growth = growth
self.changepoints = pd.to_datetime(changepoints)
if self.changepoints is not None:
self.n_changepoints = len(self.changepoints)
self.specified_changepoints = True
else:
self.n_changepoints = n_changepoints
self.specified_changepoints = False
self.changepoint_range = changepoint_range
self.yearly_seasonality = yearly_seasonality
self.weekly_seasonality = weekly_seasonality
self.daily_seasonality = daily_seasonality
self.holidays = holidays
self.seasonality_mode = seasonality_mode
self.seasonality_prior_scale = float(seasonality_prior_scale)
self.changepoint_prior_scale = float(changepoint_prior_scale)
self.holidays_prior_scale = float(holidays_prior_scale)
self.mcmc_samples = mcmc_samples
self.interval_width = interval_width
self.uncertainty_samples = uncertainty_samples
# Set during fitting or by other methods
self.start = None
self.y_scale = None
self.logistic_floor = False
self.t_scale = None
self.changepoints_t = None
self.seasonalities = {}
self.extra_regressors = OrderedDict({})
self.country_holidays = None
self.stan_fit = None
self.params = {}
self.history = None
self.history_dates = None
self.train_component_cols = None
self.component_modes = None
self.train_holiday_names = None
self.validate_inputs()
def validate_inputs(self):
"""Validates the inputs to Prophet."""
if self.growth not in ('linear', 'logistic'):
raise ValueError(
"Parameter 'growth' should be 'linear' or 'logistic'.")
if ((self.changepoint_range < 0) or (self.changepoint_range > 1)):
raise ValueError("Parameter 'changepoint_range' must be in [0, 1]")
if self.holidays is not None:
if not (
isinstance(self.holidays, pd.DataFrame)
and 'ds' in self.holidays # noqa W503
and 'holiday' in self.holidays # noqa W503
):
raise ValueError("holidays must be a DataFrame with 'ds' and "
"'holiday' columns.")
self.holidays['ds'] = pd.to_datetime(self.holidays['ds'])
has_lower = 'lower_window' in self.holidays
has_upper = 'upper_window' in self.holidays
if has_lower + has_upper == 1:
raise ValueError('Holidays must have both lower_window and ' +
'upper_window, or neither')
if has_lower:
if self.holidays['lower_window'].max() > 0:
raise ValueError('Holiday lower_window should be <= 0')
if self.holidays['upper_window'].min() < 0:
raise ValueError('Holiday upper_window should be >= 0')
for h in self.holidays['holiday'].unique():
self.validate_column_name(h, check_holidays=False)
if self.seasonality_mode not in ['additive', 'multiplicative']:
raise ValueError(
"seasonality_mode must be 'additive' or 'multiplicative'"
)
def validate_column_name(self, name, check_holidays=True,
check_seasonalities=True, check_regressors=True):
"""Validates the name of a seasonality, holiday, or regressor.
Parameters
----------
name: string
check_holidays: bool check if name already used for holiday
check_seasonalities: bool check if name already used for seasonality
check_regressors: bool check if name already used for regressor
"""
if '_delim_' in name:
raise ValueError('Name cannot contain "_delim_"')
reserved_names = [
'trend', 'additive_terms', 'daily', 'weekly', 'yearly',
'holidays', 'zeros', 'extra_regressors_additive', 'yhat',
'extra_regressors_multiplicative', 'multiplicative_terms',
]
rn_l = [n + '_lower' for n in reserved_names]
rn_u = [n + '_upper' for n in reserved_names]
reserved_names.extend(rn_l)
reserved_names.extend(rn_u)
reserved_names.extend([
'ds', 'y', 'cap', 'floor', 'y_scaled', 'cap_scaled'])
if name in reserved_names:
raise ValueError('Name "{}" is reserved.'.format(name))
if (check_holidays and self.holidays is not None and
name in self.holidays['holiday'].unique()):
raise ValueError(
'Name "{}" already used for a holiday.'.format(name))
if (check_holidays and self.country_holidays is not None and
name in get_holiday_names(self.country_holidays)):
raise ValueError(
'Name "{}" is a holiday name in {}.'.format(name, self.country_holidays))
if check_seasonalities and name in self.seasonalities:
raise ValueError(
'Name "{}" already used for a seasonality.'.format(name))
if check_regressors and name in self.extra_regressors:
raise ValueError(
'Name "{}" already used for an added regressor.'.format(name))
def setup_dataframe(self, df, initialize_scales=False):
"""Prepare dataframe for fitting or predicting.
Adds a time index and scales y. Creates auxiliary columns 't', 't_ix',
'y_scaled', and 'cap_scaled'. These columns are used during both
fitting and predicting.
Parameters
----------
df: pd.DataFrame with columns ds, y, and cap if logistic growth. Any
specified additional regressors must also be present.
initialize_scales: Boolean set scaling factors in self from df.
Returns
-------
pd.DataFrame prepared for fitting or predicting.
"""
if 'y' in df:
df['y'] = pd.to_numeric(df['y'])
if np.isinf(df['y'].values).any():
raise ValueError('Found infinity in column y.')
df['ds'] = pd.to_datetime(df['ds'])
if df['ds'].isnull().any():
raise ValueError('Found NaN in column ds.')
for name in self.extra_regressors:
if name not in df:
raise ValueError(
'Regressor "{}" missing from dataframe'.format(name))
df[name] = pd.to_numeric(df[name])
if df[name].isnull().any():
raise ValueError('Found NaN in column ' + name)
df = df.sort_values('ds')
df.reset_index(inplace=True, drop=True)
self.initialize_scales(initialize_scales, df)
if self.logistic_floor:
if 'floor' not in df:
raise ValueError("Expected column 'floor'.")
else:
df['floor'] = 0
if self.growth == 'logistic':
if 'cap' not in df:
raise ValueError(
"Capacities must be supplied for logistic growth in "
"column 'cap'"
)
df['cap_scaled'] = (df['cap'] - df['floor']) / self.y_scale
df['t'] = (df['ds'] - self.start) / self.t_scale
if 'y' in df:
df['y_scaled'] = (df['y'] - df['floor']) / self.y_scale
for name, props in self.extra_regressors.items():
df[name] = ((df[name] - props['mu']) / props['std'])
return df
def initialize_scales(self, initialize_scales, df):
"""Initialize model scales.
Sets model scaling factors using df.
Parameters
----------
initialize_scales: Boolean set the scales or not.
df: pd.DataFrame for setting scales.
"""
if not initialize_scales:
return
if self.growth == 'logistic' and 'floor' in df:
self.logistic_floor = True
floor = df['floor']
else:
floor = 0.
self.y_scale = (df['y'] - floor).abs().max()
if self.y_scale == 0:
self.y_scale = 1
self.start = df['ds'].min()
self.t_scale = df['ds'].max() - self.start
for name, props in self.extra_regressors.items():
standardize = props['standardize']
n_vals = len(df[name].unique())
if n_vals < 2:
standardize = False
if standardize == 'auto':
if set(df[name].unique()) == set([1, 0]):
# Don't standardize binary variables.
standardize = False
else:
standardize = True
if standardize:
mu = df[name].mean()
std = df[name].std()
self.extra_regressors[name]['mu'] = mu
self.extra_regressors[name]['std'] = std
def set_changepoints(self):
"""Set changepoints
Sets m$changepoints to the dates of changepoints. Either:
1) The changepoints were passed in explicitly.
A) They are empty.
B) They are not empty, and need validation.
2) We are generating a grid of them.
3) The user prefers no changepoints be used.
"""
if self.changepoints is not None:
if len(self.changepoints) == 0:
pass
else:
too_low = min(self.changepoints) < self.history['ds'].min()
too_high = max(self.changepoints) > self.history['ds'].max()
if too_low or too_high:
raise ValueError(
'Changepoints must fall within training data.')
else:
# Place potential changepoints evenly through first
# changepoint_range proportion of the history
hist_size = np.floor(
self.history.shape[0] * self.changepoint_range)
if self.n_changepoints + 1 > hist_size:
self.n_changepoints = hist_size - 1
logger.info(
'n_changepoints greater than number of observations. '
'Using {}.'.format(self.n_changepoints)
)
if self.n_changepoints > 0:
cp_indexes = (
np.linspace(0, hist_size - 1, self.n_changepoints + 1)
.round()
.astype(np.int)
)
self.changepoints = (
self.history.iloc[cp_indexes]['ds'].tail(-1)
)
else:
# set empty changepoints
self.changepoints = []
if len(self.changepoints) > 0:
self.changepoints_t = np.sort(np.array(
(self.changepoints - self.start) / self.t_scale))
else:
self.changepoints_t = np.array([0]) # dummy changepoint
@staticmethod
def fourier_series(dates, period, series_order):
"""Provides Fourier series components with the specified frequency
and order.
Parameters
----------
dates: pd.Series containing timestamps.
period: Number of days of the period.
series_order: Number of components.
Returns
-------
Matrix with seasonality features.
"""
# convert to days since epoch
t = np.array(
(dates - pd.datetime(1970, 1, 1))
.dt.total_seconds()
.astype(np.float)
) / (3600 * 24.)
return np.column_stack([
fun((2.0 * (i + 1) * np.pi * t / period))
for i in range(series_order)
for fun in (np.sin, np.cos)
])
@classmethod
def make_seasonality_features(cls, dates, period, series_order, prefix):
"""Data frame with seasonality features.
Parameters
----------
cls: Prophet class.
dates: pd.Series containing timestamps.
period: Number of days of the period.
series_order: Number of components.
prefix: Column name prefix.
Returns
-------
pd.DataFrame with seasonality features.
"""
features = cls.fourier_series(dates, period, series_order)
columns = [
'{}_delim_{}'.format(prefix, i + 1)
for i in range(features.shape[1])
]
return pd.DataFrame(features, columns=columns)
def construct_holiday_dataframe(self, dates):
"""Construct a dataframe of holiday dates.
Will combine self.holidays with the built-in country holidays
corresponding to input dates, if self.country_holidays is set.
Parameters
----------
dates: pd.Series containing timestamps used for computing seasonality.
Returns
-------
dataframe of holiday dates, in holiday dataframe format used in
initialization.
"""
all_holidays = pd.DataFrame()
if self.holidays is not None:
all_holidays = self.holidays.copy()
if self.country_holidays is not None:
year_list = list({x.year for x in dates})
country_holidays_df = make_holidays_df(
year_list=year_list, country=self.country_holidays
)
all_holidays = pd.concat((all_holidays, country_holidays_df), sort=False)
all_holidays.reset_index(drop=True, inplace=True)
# If the model has already been fit with a certain set of holidays,
# make sure we are using those same ones.
if self.train_holiday_names is not None:
# Remove holiday names didn't show up in fit
index_to_drop = all_holidays.index[
np.logical_not(
all_holidays.holiday.isin(self.train_holiday_names)
)
]
all_holidays = all_holidays.drop(index_to_drop)
# Add holiday names in fit but not in predict with ds as NA
holidays_to_add = pd.DataFrame({
'holiday': self.train_holiday_names[
np.logical_not(self.train_holiday_names.isin(all_holidays.holiday))
]
})
all_holidays = pd.concat((all_holidays, holidays_to_add), sort=False)
all_holidays.reset_index(drop=True, inplace=True)
return all_holidays
def make_holiday_features(self, dates, holidays):
"""Construct a dataframe of holiday features.
Parameters
----------
dates: pd.Series containing timestamps used for computing seasonality.
holidays: pd.Dataframe containing holidays, as returned by
construct_holiday_dataframe.
Returns
-------
holiday_features: pd.DataFrame with a column for each holiday.
prior_scale_list: List of prior scales for each holiday column.
holiday_names: List of names of holidays
"""
# Holds columns of our future matrix.
expanded_holidays = defaultdict(lambda: np.zeros(dates.shape[0]))
prior_scales = {}
# Makes an index so we can perform `get_loc` below.
# Strip to just dates.
row_index = pd.DatetimeIndex(dates.apply(lambda x: x.date()))
for _ix, row in holidays.iterrows():
dt = row.ds.date()
try:
lw = int(row.get('lower_window', 0))
uw = int(row.get('upper_window', 0))
except ValueError:
lw = 0
uw = 0
ps = float(row.get('prior_scale', self.holidays_prior_scale))
if np.isnan(ps):
ps = float(self.holidays_prior_scale)
if (
row.holiday in prior_scales and prior_scales[row.holiday] != ps
):
raise ValueError(
'Holiday {} does not have consistent prior scale '
'specification.'.format(row.holiday))
if ps <= 0:
raise ValueError('Prior scale must be > 0')
prior_scales[row.holiday] = ps
for offset in range(lw, uw + 1):
occurrence = dt + timedelta(days=offset)
try:
loc = row_index.get_loc(occurrence)
except KeyError:
loc = None
key = '{}_delim_{}{}'.format(
row.holiday,
'+' if offset >= 0 else '-',
abs(offset)
)
if loc is not None:
expanded_holidays[key][loc] = 1.
else:
# Access key to generate value
expanded_holidays[key]
holiday_features = pd.DataFrame(expanded_holidays)
# Make sure column order is consistent
holiday_features = holiday_features[sorted(holiday_features.columns.tolist())]
prior_scale_list = [
prior_scales[h.split('_delim_')[0]]
for h in holiday_features.columns
]
holiday_names = list(prior_scales.keys())
# Store holiday names used in fit
if self.train_holiday_names is None:
self.train_holiday_names = pd.Series(holiday_names)
return holiday_features, prior_scale_list, holiday_names
def add_regressor(
self, name, prior_scale=None, standardize='auto', mode=None
):
"""Add an additional regressor to be used for fitting and predicting.
The dataframe passed to `fit` and `predict` will have a column with the
specified name to be used as a regressor. When standardize='auto', the
regressor will be standardized unless it is binary. The regression
coefficient is given a prior with the specified scale parameter.
Decreasing the prior scale will add additional regularization. If no
prior scale is provided, self.holidays_prior_scale will be used.
Mode can be specified as either 'additive' or 'multiplicative'. If not
specified, self.seasonality_mode will be used. 'additive' means the
effect of the regressor will be added to the trend, 'multiplicative'
means it will multiply the trend.
Parameters
----------
name: string name of the regressor.
prior_scale: optional float scale for the normal prior. If not
provided, self.holidays_prior_scale will be used.
standardize: optional, specify whether this regressor will be
standardized prior to fitting. Can be 'auto' (standardize if not
binary), True, or False.
mode: optional, 'additive' or 'multiplicative'. Defaults to
self.seasonality_mode.
Returns
-------
The prophet object.
"""
if self.history is not None:
raise Exception(
"Regressors must be added prior to model fitting.")
self.validate_column_name(name, check_regressors=False)
if prior_scale is None:
prior_scale = float(self.holidays_prior_scale)
if mode is None:
mode = self.seasonality_mode
if prior_scale <= 0:
raise ValueError('Prior scale must be > 0')
if mode not in ['additive', 'multiplicative']:
raise ValueError("mode must be 'additive' or 'multiplicative'")
self.extra_regressors[name] = {
'prior_scale': prior_scale,
'standardize': standardize,
'mu': 0.,
'std': 1.,
'mode': mode,
}
return self
def add_seasonality(
self, name, period, fourier_order, prior_scale=None, mode=None
):
"""Add a seasonal component with specified period, number of Fourier
components, and prior scale.
Increasing the number of Fourier components allows the seasonality to
change more quickly (at risk of overfitting). Default values for yearly
and weekly seasonalities are 10 and 3 respectively.
Increasing prior scale will allow this seasonality component more
flexibility, decreasing will dampen it. If not provided, will use the
seasonality_prior_scale provided on Prophet initialization (defaults
to 10).
Mode can be specified as either 'additive' or 'multiplicative'. If not
specified, self.seasonality_mode will be used (defaults to additive).
Additive means the seasonality will be added to the trend,
multiplicative means it will multiply the trend.
Parameters
----------
name: string name of the seasonality component.
period: float number of days in one period.
fourier_order: int number of Fourier components to use.
prior_scale: optional float prior scale for this component.
mode: optional 'additive' or 'multiplicative'
Returns
-------
The prophet object.
"""
if self.history is not None:
raise Exception(
"Seasonality must be added prior to model fitting.")
if name not in ['daily', 'weekly', 'yearly']:
# Allow overwriting built-in seasonalities
self.validate_column_name(name, check_seasonalities=False)
if prior_scale is None:
ps = self.seasonality_prior_scale
else:
ps = float(prior_scale)
if ps <= 0:
raise ValueError('Prior scale must be > 0')
if mode is None:
mode = self.seasonality_mode
if mode not in ['additive', 'multiplicative']:
raise ValueError("mode must be 'additive' or 'multiplicative'")
self.seasonalities[name] = {
'period': period,
'fourier_order': fourier_order,
'prior_scale': ps,
'mode': mode,
}
return self
def add_country_holidays(self, country_name):
"""Add in built-in holidays for the specified country.
These holidays will be included in addition to any specified on model
initialization.
Holidays will be calculated for arbitrary date ranges in the history
and future. See the online documentation for the list of countries with
built-in holidays.
Built-in country holidays can only be set for a single country.
Parameters
----------
country_name: Name of the country, like 'UnitedStates' or 'US'
Returns
-------
The prophet object.
"""
if self.history is not None:
raise Exception(
"Country holidays must be added prior to model fitting."
)
# Validate names.
for name in get_holiday_names(country_name):
# Allow merging with existing holidays
self.validate_column_name(name, check_holidays=False)
# Set the holidays.
if self.country_holidays is not None:
logger.warning(
'Changing country holidays from {} to {}'.format(
self.country_holidays, country_name
)
)
self.country_holidays = country_name
return self
def make_all_seasonality_features(self, df):
"""Dataframe with seasonality features.
Includes seasonality features, holiday features, and added regressors.
Parameters
----------
df: pd.DataFrame with dates for computing seasonality features and any
added regressors.
Returns
-------
pd.DataFrame with regression features.
list of prior scales for each column of the features dataframe.
Dataframe with indicators for which regression components correspond to
which columns.
Dictionary with keys 'additive' and 'multiplicative' listing the
component names for each mode of seasonality.
"""
seasonal_features = []
prior_scales = []
modes = {'additive': [], 'multiplicative': []}
# Seasonality features
for name, props in self.seasonalities.items():
features = self.make_seasonality_features(
df['ds'],
props['period'],
props['fourier_order'],
name,
)
seasonal_features.append(features)
prior_scales.extend(
[props['prior_scale']] * features.shape[1])
modes[props['mode']].append(name)
# Holiday features
holidays = self.construct_holiday_dataframe(df['ds'])
if len(holidays) > 0:
features, holiday_priors, holiday_names = (
self.make_holiday_features(df['ds'], holidays)
)
seasonal_features.append(features)
prior_scales.extend(holiday_priors)
modes[self.seasonality_mode].extend(holiday_names)
# Additional regressors
for name, props in self.extra_regressors.items():
seasonal_features.append(pd.DataFrame(df[name]))
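# --- Sketch (not part of the original module) ----------------------------------
# What ``Prophet.fourier_series`` returns, assuming the full class above: for a
# weekly seasonality of order 3, a (len(dates), 6) matrix of alternating sin/cos
# columns. Relies on the same (older) pandas/numpy APIs the module itself uses.
_demo_dates = pd.Series(pd.date_range('2017-01-01', periods=10))
_demo_weekly = Prophet.fourier_series(_demo_dates, period=7, series_order=3)
assert _demo_weekly.shape == (10, 6)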