prompt | completion | api
---|---|---|
string (lengths 19–1.03M) | string (lengths 4–2.12k) | string (lengths 8–90)
import os
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV, cross_validate
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.linear_model import Lasso, LassoCV, LogisticRegressionCV, LogisticRegression
from sklearn.linear_model import ElasticNet, ElasticNetCV, enet_path
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import auc, roc_curve
from utils import kernel
from mics import classifier_mics
'''
Function names mirror the scikit-learn names wherever possible, so their purpose is easy to recognize.
No dedicated leave-one-out functions are provided: to use leave-one-out, set the number of folds of the K-fold cross-validation parameter to the number of samples (as recommended by the scikit-learn documentation).
Leave-one-out is not recommended inside grid search; with many candidate parameters it makes fitting the model extremely expensive.
'''
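# Editor's sketch (synthetic data, underscore names are illustrative and not part of the
# original module): the leave-one-out setup described above amounts to K-fold CV with as
# many folds as samples, e.g. LassoCV(cv=n_samples).
_X_demo = np.random.RandomState(0).normal(size=(20, 4))
_y_demo = _X_demo[:, 0] + np.random.RandomState(1).normal(scale=0.1, size=20)
_loo_lasso = LassoCV(cv=len(_X_demo)).fit(_X_demo, _y_demo)  # cv = n_samples implements leave-one-out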
class lasso():
    '''A collection of LASSO feature-selection methods; whether to cross-validate is chosen directly in the class.
    Inputs:
        X_train, X_test, y_train, y_test: features and labels of the training and test sets
        feature_name: feature names; the order must match the columns of X
        path: directory where result files are written, defined by the user
        cv_val: bool, whether to run grid-search cross-validation
    '''
def __init__(self, X_train, X_test, y_train, y_test, feature_name, path, cv_val=True):
self.X_train = X_train
self.X_test = X_test
self.y_train = y_train
self.y_test = y_test
self.name = feature_name
self.cv_val = cv_val
self.path = path
def lasso(self, alpha, cv):
        '''Run LASSO feature selection once, keeping the features whose coefficients are non-zero.
        The result contains the selected training and test feature matrices, together with the selected
        feature names and their weights; each name has one weight and the ordering matches.
        Inputs:
            alpha: the alpha parameter (candidate alphas when cross-validating)
            cv: int, number of folds if cross-validation is used
        Outputs:
            best_alpha (only when cross-validation is used): the optimal LASSO penalty parameter
            new_train_feature: selected training-set feature matrix
            new_test_feature: selected test-set feature matrix
            new_feature_name: names of the selected features
            feature_weight: coefficients of the selected features
        '''
if self.cv_val is True:
model_lasso = LassoCV(alphas=alpha, cv=cv)
model_lasso.fit(self.X_train, self.y_train)
coef = pd.Series(model_lasso.coef_)
print("Lasso picked " + str(sum(coef != 0)) + " variables and eliminated the other " + str(
sum(coef == 0)) + " variables")
img_path = os.path.join(self.path, 'lassoCV')
os.makedirs(img_path, exist_ok=True)
            # Best LASSO penalty parameter found by cross-validation
best_alpha = model_lasso.alpha_
print('-----------------------------')
print('Best LASSO alpha:')
print(best_alpha)
            # Keep the features whose LASSO coefficients are non-zero
model = SelectFromModel(model_lasso, prefit=True)
            # Filter both the training and test features with the fitted LASSO model
X_new_train = model.transform(self.X_train)
X_new_test = model.transform(self.X_test)
            # Mask over all features: True for kept features, False for dropped ones
mask = model.get_support()
new_feature_name = []
feature_weight = []
            # Use the mask to collect the names and weights of the kept features
            for keep, feature, weight in zip(mask, self.name, coef):
                if keep:
                    new_feature_name.append(feature)
                    feature_weight.append(weight)
            # Attach the feature names to the kept training and test features
new_train_feature =
|
pd.DataFrame(data=X_new_train, columns=new_feature_name)
|
pandas.DataFrame
|
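A minimal standalone sketch (synthetic data, hypothetical column names) of the mask-and-rename pattern this completion finishes: fit LassoCV, keep the non-zero coefficients with SelectFromModel, and rebuild a labelled DataFrame.
import numpy as np
import pandas as pd
from sklearn.linear_model import LassoCV
from sklearn.feature_selection import SelectFromModel

rng = np.random.RandomState(0)
X = pd.DataFrame(rng.normal(size=(60, 5)), columns=[f"f{i}" for i in range(5)])
y = 2.0 * X["f0"] - X["f3"] + rng.normal(scale=0.1, size=60)

selector = SelectFromModel(LassoCV(cv=5).fit(X, y), prefit=True)
kept = [name for name, keep in zip(X.columns, selector.get_support()) if keep]
new_train_feature = pd.DataFrame(data=selector.transform(X), columns=kept)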
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import abc
import importlib
from pathlib import Path
from typing import Union, Iterable, List
import fire
import numpy as np
import pandas as pd
# pip install baostock
import baostock as bs
from loguru import logger
class CollectorFutureCalendar:
calendar_format = "%Y-%m-%d"
def __init__(self, qlib_dir: Union[str, Path], start_date: str = None, end_date: str = None):
"""
Parameters
----------
qlib_dir:
qlib data directory
start_date
start date
end_date
end date
"""
self.qlib_dir = Path(qlib_dir).expanduser().absolute()
self.calendar_path = self.qlib_dir.joinpath("calendars/day.txt")
self.future_path = self.qlib_dir.joinpath("calendars/day_future.txt")
self._calendar_list = self.calendar_list
_latest_date = self._calendar_list[-1]
self.start_date = _latest_date if start_date is None else pd.Timestamp(start_date)
self.end_date = _latest_date + pd.Timedelta(days=365 * 2) if end_date is None else pd.Timestamp(end_date)
@property
def calendar_list(self) -> List[pd.Timestamp]:
# load old calendar
if not self.calendar_path.exists():
raise ValueError(f"calendar does not exist: {self.calendar_path}")
calendar_df = pd.read_csv(self.calendar_path, header=None)
calendar_df.columns = ["date"]
calendar_df["date"] = pd.to_datetime(calendar_df["date"])
return calendar_df["date"].to_list()
    def _format_datetime(self, datetime_d: Union[str, pd.Timestamp]):
datetime_d = pd.Timestamp(datetime_d)
return datetime_d.strftime(self.calendar_format)
def write_calendar(self, calendar: Iterable):
calendars_list = list(map(lambda x: self._format_datetime(x), sorted(set(self.calendar_list + calendar))))
np.savetxt(self.future_path, calendars_list, fmt="%s", encoding="utf-8")
@abc.abstractmethod
def collector(self) -> Iterable[pd.Timestamp]:
"""
Returns
-------
"""
raise NotImplementedError(f"Please implement the `collector` method")
class CollectorFutureCalendarCN(CollectorFutureCalendar):
def collector(self) -> Iterable[pd.Timestamp]:
lg = bs.login()
if lg.error_code != "0":
raise ValueError(f"login respond error_msg: {lg.error_msg}")
rs = bs.query_trade_dates(
start_date=self._format_datetime(self.start_date), end_date=self._format_datetime(self.end_date)
)
if rs.error_code != "0":
raise ValueError(f"query_trade_dates respond error_msg: {rs.error_msg}")
data_list = []
while (rs.error_code == "0") & rs.next():
data_list.append(rs.get_row_data())
calendar = pd.DataFrame(data_list, columns=rs.fields)
calendar["is_trading_day"] = calendar["is_trading_day"].astype(int)
return
|
pd.to_datetime(calendar[calendar["is_trading_day"] == 1]["calendar_date"])
|
pandas.to_datetime
|
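For reference, a tiny self-contained sketch (toy rows) of the filter-then-parse step that the completion above performs on the baostock result:
import pandas as pd

calendar = pd.DataFrame({
    "calendar_date": ["2024-01-01", "2024-01-02", "2024-01-03"],
    "is_trading_day": [0, 1, 1],
})
trading_days = pd.to_datetime(calendar[calendar["is_trading_day"] == 1]["calendar_date"])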
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
    # allows setting track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df =
|
DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
|
pandas.DataFrame
|
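A small sketch (illustrative values) of the MultiIndex construction that test_mi_data_columns relies on before writing to an HDFStore:
import pandas as pd

idx = pd.MultiIndex.from_arrays(
    [pd.date_range("2000-01-01", periods=3), range(3)], names=["date", "id"]
)
df = pd.DataFrame({"a": [1.1, 1.2, 1.3]}, index=idx)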
import os
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from .. import read_sql
@pytest.fixture(scope="module") # type: ignore
def postgres_url() -> str:
conn = os.environ["POSTGRES_URL"]
return conn
@pytest.mark.xfail
def test_on_non_select(postgres_url: str) -> None:
query = "CREATE TABLE non_select(id INTEGER NOT NULL)"
df = read_sql(postgres_url, query)
def test_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_float) FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"sum": pd.Series([10.9, 5.2, -10.0], dtype="float64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation(postgres_url: str) -> None:
query = "SELECT test_bool, SUM(test_int) AS test_int FROM test_table GROUP BY test_bool"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
"test_int": pd.Series([4, 5, 1315], dtype="Int64")
}
)
assert_frame_equal(df, expected, check_names=True)
def test_aggregation2(postgres_url: str) -> None:
query = "select DISTINCT(test_bool) from test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(3),
data={
"test_bool": pd.Series([None, False, True], dtype="boolean"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_partition_on_aggregation2(postgres_url: str) -> None:
query = "select MAX(test_int), MIN(test_int) from test_table"
df = read_sql(postgres_url, query,
partition_on="max", partition_num=2)
expected = pd.DataFrame(
index=range(1),
data={
"max": pd.Series([1314], dtype="Int64"),
"min": pd.Series([0], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_udf(postgres_url: str) -> None:
query = "select increment(test_int) as test_int from test_table ORDER BY test_int"
df = read_sql(postgres_url, query,
partition_on="test_int", partition_num=2)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 3, 4, 5, 1315], dtype="Int64"),
}
)
assert_frame_equal(df, expected, check_names=True)
def test_manual_partition(postgres_url: str) -> None:
queries = [
"SELECT * FROM test_table WHERE test_int < 2",
"SELECT * FROM test_table WHERE test_int >= 2",
]
df = read_sql(postgres_url, query=queries)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 0, 2, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, 5, None, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "a", "str2", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 3.1, 2.2, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, None, False, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_without_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(postgres_url, query)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition(postgres_url: str) -> None:
query = "SELECT * FROM test_table"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int": pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64"),
"test_nullint": pd.Series([3, None, 5, 7, 9, 2], dtype="Int64"),
"test_str": pd.Series(
["str1", "str2", "a", "b", "c", None], dtype="object"
),
"test_float": pd.Series([None, 2.2, 3.1, 3, 7.8, -10], dtype="float64"),
"test_bool": pd.Series(
[True, False, None, False, None, True], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_without_partition_range(postgres_url: str) -> None:
query = "SELECT * FROM test_table where test_float > 3"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_num=3,
)
expected = pd.DataFrame(
index=range(2),
data={
"test_int": pd.Series([0, 4], dtype="Int64"),
"test_nullint": pd.Series([5, 9], dtype="Int64"),
"test_str": pd.Series(
["a", "c"], dtype="object"
),
"test_float": pd.Series([3.1, 7.8], dtype="float64"),
"test_bool": pd.Series(
[None, None], dtype="boolean"
),
},
)
assert_frame_equal(df, expected, check_names=True)
def test_read_sql_with_partition_and_selection(postgres_url: str) -> None:
query = "SELECT * FROM test_table WHERE 1 = 3 OR 2 = 2"
df = read_sql(
postgres_url,
query,
partition_on="test_int",
partition_range=(0, 2000),
partition_num=3,
)
expected = pd.DataFrame(
index=range(6),
data={
"test_int":
|
pd.Series([1, 2, 0, 3, 4, 1314], dtype="Int64")
|
pandas.Series
|
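These expected frames lean on pandas nullable extension dtypes; a compact sketch of the pattern (values are illustrative):
import pandas as pd
from pandas.testing import assert_frame_equal

expected = pd.DataFrame({
    "test_int": pd.Series([1, 2, None], dtype="Int64"),
    "test_bool": pd.Series([True, None, False], dtype="boolean"),
})
assert_frame_equal(expected, expected.copy(), check_names=True)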
import sys
import unittest
import pandas as pd
from pysgrs import interfaces
from pysgrs import ciphers
from pysgrs import toolbox
from pysgrs import errors
from pysgrs import settings
class TestBruteForceBreaker:
ciphers = None
breaker = None
paths = (settings.resources / 'texts/fr').glob("*.txt")
plaintexts = []
ciphertexts = []
results = []
def load_texts(self):
for path in self.paths:
with path.open(encoding='utf-8') as fh:
text = fh.read()
plaintext = toolbox.AsciiCleaner.strip_accents(text)
self.plaintexts.append({
"text": text,
"normalized": plaintext,
"score": self.breaker.score.score(plaintext)
})
def generate_ciphertexts(self):
for plaintext in self.plaintexts:
for cipher in self.ciphers.generate():
ciphertext = cipher.encipher(plaintext["normalized"])
obj = {
"configuration": cipher.configuration(),
"cipher": cipher,
"plaintext": plaintext,
"ciphertext": ciphertext
}
self.ciphertexts.append(obj)
def apply_breaker(self):
for ciphertext in self.ciphertexts:
results = []
for result in self.breaker.attack(ciphertext["ciphertext"]):
results.append(result)
self.results.append(results)
def setUp(self):
self.load_texts()
self.generate_ciphertexts()
self.apply_breaker()
def test_complete_attack_results(self):
for (solution, results) in zip(self.ciphertexts, self.results):
df =
|
pd.DataFrame(results)
|
pandas.DataFrame
|
"""
Commands for making opening one or more dicoms very easy
"""
import os
from collections import namedtuple
from typing import Any
import numpy as np
import pandas as pd
import pydicom
from .utils import _rel_glob, BASE_DIR
type_info = namedtuple('type_info',
['inferrable', 'realtype', 'has_nulltype', 'length',
'is_complex'])
def _remove_empty_columns(in_df):
empty_cols = dict(filter(lambda kv: kv[1] > 0,
in_df.apply(_countmissingvalues,
axis=0).to_dict().items()))
# remove missing columns
return in_df[
[ccol for ccol in in_df.columns if empty_cols.get(ccol, 0) == 0]]
def safe_type_infer(x):
return type_info(True, type(x), has_nulltype=False,
length=_tlen(x), is_complex=False)
def _identify_column_types(in_df_dict):
return dict([(k, safe_type_infer(v)) for (k, v) in in_df_dict.items()])
def _dicoms_to_dict(dicom_list):
fvr = lambda x: None if x.first_valid_index() is None else x[
x.first_valid_index()]
out_list = []
for in_dicom in dicom_list:
temp_dict = {a.name: a.value for a in in_dicom.iterall()}
if in_dicom.__dict__.get('_pixel_array', None) is not None:
temp_dict['Pixel Array'] = in_dicom.pixel_array.tolist()
out_list += [temp_dict]
df_dicom =
|
pd.DataFrame(out_list)
|
pandas.DataFrame
|
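A brief sketch (fake records) of the list-of-dicts to DataFrame step completed above, followed by the same drop-columns-with-missing-values idea as _remove_empty_columns:
import pandas as pd

records = [{"PatientID": "p1", "Modality": "CT"},
           {"PatientID": "p2", "Modality": None}]
df_dicom = pd.DataFrame(records)
df_clean = df_dicom[[c for c in df_dicom.columns if df_dicom[c].isna().sum() == 0]]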
import getpass
import pandas as pd
from PySide2 import QtGui
from PySide2.QtWidgets import QMainWindow, QMessageBox
from components.mensagens import Mensagens
from dao.relatorio_dao import RelatorioDao
from view.ui_tela_relatorio_chamados import Ui_RelatorioChamado
class TelaRelatorioChamado(QMainWindow, Ui_RelatorioChamado):
"""Classe da tela de relatório de chamados.
Esta classe tem por finalidade gerar vários relatórios conforme necessidade do usuário.
"""
def __init__(self):
super(TelaRelatorioChamado, self).__init__()
self.setupUi(self)
self.setWindowTitle("Relatório de Chamados")
self.setFixedSize(400, 466)
self.popula_combo_solucao()
self.mensagem = Mensagens()
self.btn_cancelar.clicked.connect(self.close)
self.btn_gerar_solucao.clicked.connect(self.gerar_relatorio_solucao)
"""Função que chamado o método de gerar relatório de soluções."""
self.btn_gerar_data.clicked.connect(self.gerar_relatorio_chamado_data)
"""Função que chamado o método de gerar relatório por Data."""
self.btn_gerar_tipo.clicked.connect(self.gerar_relatorio_tipo_chamado)
"""Função que chamado o método de gerar relatório por tipo."""
self.btn_gerar_status.clicked.connect(self.gerar_relatorio_status_chamado)
"""Função que chamado o método de gerar relatório por Status."""
self.btn_gerar_relatorio_padrao.clicked.connect(self.gerar_relatorio_padrao)
"""Função que chamado o método de gerar relatório padrão."""
def popula_combo_solucao(self):
"""Popular combo solução
Popula a combo de solução com o nome das soluções cadastradas.
:return: Lista de Soluções.
"""
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.consulta_nome_solucao()
for i in resultado:
self.combo_solucao.addItem(str(i[0]))
def gerar_relatorio_chamado_data(self):
"""Gerar relatório por data.
Gera um relatório tendo como parametro a data e salva em .xlsx.
:return: Arquivo .xlsx
"""
user_windows = getpass.getuser()
if self.txt_data.text() == "":
self.mensagem.mensagem_campo_vazio('DATA')
else:
data = self.txt_data.text()
if self.radio_numero_chamado.isChecked():
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_chamado_data_ordenado_por_numero(data)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
dados.columns = ['Chamado', 'Contrato', 'Cliente', 'Endereço', 'Contato', 'Telefone', 'E-mail',
'Problema', 'Observação', 'Status', 'Tipo de Chamado', 'Solução',
'Data Abertura',
'Data Fechamamento']
data_formatada = data.replace('/', '_')
dados.to_excel(f'c:\\Users\\{user_windows}\\Downloads\\'
f'Relatorio_chamados_{data_formatada}_por_numero_chamado.xlsx', index=False)
self.mensagem.mensagem_gerar_relatorio()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
else:
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_chamado_data_ordenado_por_contrato(data)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
dados.columns = ['Chamado', 'Contrato', 'Cliente', 'Endereço', 'Contato', 'Telefone', 'E-mail',
'Problema', 'Observação', 'Status', 'Tipo de Chamado', 'Solução',
'Data Abertura', 'Data Fechamamento']
data_formatada = data.replace('/', '_')
dados.to_excel(f'c:\\Users\\{user_windows}\\Downloads\\'
f'Relatorio_chamados_{data_formatada}_por_contrato.xlsx', index=False)
self.mensagem.mensagem_gerar_relatorio()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
def gerar_relatorio_solucao(self):
"""Gerar relatório por solução.
Gera um relatório tendo como parametro a solução e salva em .xlsx.
:return: Arquivo .xlsx
"""
user_windows = getpass.getuser()
solucao = self.combo_solucao.currentText()
if self.combo_solucao.currentText() == "Selecione uma opção":
self.mensagem.mensagem_combo('SOLUÇÃO')
else:
if self.radio_numero_chamado.isChecked():
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_solucao_ordenado_numero_chamado(solucao)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados = pd.DataFrame(resultado)
dados.columns = ['Chamado', 'Contrato', 'Cliente', 'Endereço', 'Contato', 'Telefone', 'E-mail',
'Problema', 'Observação', 'Status', 'Tipo de Chamado', 'Solução',
'Data Abertura',
'Data Fechamamento']
dados.to_excel(f'c:\\Users\\{user_windows}\\Downloads\\'
f'Relatorio_chamados_{solucao}_por_numero_chamado.xlsx', index=False)
self.mensagem.mensagem_gerar_relatorio()
except ConnectionError as con_erro:
print(con_erro)
self.mensagem.mensagem_de_erro()
else:
try:
relatorio_dao = RelatorioDao()
resultado = relatorio_dao.relatorio_solucao_ordenado_contrato_chamado(solucao)
if len(resultado) == 0:
msg = QMessageBox()
msg.setWindowIcon(QtGui.QIcon("_img/logo_janela.ico"))
msg.setIcon(QMessageBox.Information)
msg.setWindowTitle("Relatório de Chamados")
msg.setText('Não há dados para gerar este relatório.')
msg.exec_()
else:
dados =
|
pd.DataFrame(resultado)
|
pandas.DataFrame
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_preprocessing
----------------------------------
Tests for `preprocessing` module.
"""
import pytest
from sktutor.preprocessing import (GroupByImputer, MissingValueFiller,
OverMissingThresholdDropper,
ValueReplacer, FactorLimiter,
SingleValueAboveThresholdDropper,
SingleValueDropper, ColumnExtractor,
ColumnDropper, DummyCreator,
ColumnValidator, TextContainsDummyExtractor,
BitwiseOperator, BoxCoxTransformer,
InteractionCreator, StandardScaler,
PolynomialFeatures, ContinuousFeatureBinner,
TypeExtractor, GenericTransformer,
MissingColumnsReplacer)
from sktutor.pipeline import make_union
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from random import shuffle
from sklearn.pipeline import make_pipeline
@pytest.mark.usefixtures("missing_data")
@pytest.mark.usefixtures("missing_data2")
class TestGroupByImputer(object):
def test_groups_most_frequent(self, missing_data):
# Test imputing most frequent value per group.
prep = GroupByImputer('most_frequent', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.0, 4.0, 4.0, 4.0, 7.0, 9.0, 9.0, 9.0],
'd': ['a', 'a', 'a', None, 'e', 'f', 'j', 'h', 'j', 'j'],
'e': [1, 2, 1, None, None, None, None, None, None, None],
'f': ['a', 'b', 'a', None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_mean(self, missing_data):
# Test imputing mean by group.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7 + 2/3, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_groups_median(self, missing_data):
# Test imputing median by group.
prep = GroupByImputer('median', 'b')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 1.5, 4, 4, 4, 7, 9, 9, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_most_frequent(self, missing_data):
# Test imputing most frequent with no group by.
prep = GroupByImputer('most_frequent')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 2, 2, 4, 4, 7, 8, 2, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 4.0, 4.0, 4.0, 4.0, 7.0, 9.0, 4.0, 9.0],
'd': ['a', 'a', 'a', 'a', 'e', 'f', 'a', 'h', 'j', 'j'],
'e': [1, 2, 1, 1, 1, 1, 1, 1, 1, 1],
'f': ['a', 'b', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a'],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', 'a'],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'a']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_mean(self, missing_data):
# Test imputing mean with no group by.
prep = GroupByImputer('mean')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 5, 5, 4, 4, 7, 8, 5, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 5, 4, 4, 4, 7, 9, 5, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_all_median(self, missing_data):
# Test imputing median with no group by.
prep = GroupByImputer('median')
prep.fit(missing_data)
result = prep.transform(missing_data)
exp_dict = {'a': [2, 2, 4, 4, 4, 4, 7, 8, 4, 8],
'b': ['123', '123', '123',
'234', '456', '456',
'789', '789', '789', '789'],
'c': [1, 2, 4, 4, 4, 4, 7, 9, 4, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5, 1.5],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_value_error(self, missing_data):
# Test limiting options without a group by.
prep = GroupByImputer('stdev')
with pytest.raises(ValueError):
prep.fit(missing_data)
def test_key_error(self, missing_data):
# Test imputing with np.nan when a new group level is introduced in
# Transform.
prep = GroupByImputer('mean', 'b')
prep.fit(missing_data)
new_dict = {'a': [2, 2, None, None, 4, 4, 7, 8, None, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1, 2, None, 4, 4, 4, 7, 9, None, 9],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, None, None, None, None, None, None, None,
None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
new_data = pd.DataFrame(new_dict)
# set equal to the expected for test means group
exp_dict = {'a': [2, 2, 2, None, 4, 4, 7, 8, 7+2/3, 8],
'b': ['123', '123', '123',
'987', '987', '456',
'789', '789', '789', '789'],
'c': [1.0, 2.0, 1.5, 4.0, 4.0, 4.0, 7.0, 9.0, 8+1/3, 9.0],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j', 'j'],
'e': [1, 2, 1.5, None, None, None, None, None, None, None],
'f': ['a', 'b', None, None, None, None, None, None, None,
None],
'g': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', 'b', None],
'h': ['a', 'a', 'a', 'a', 'a', 'a', 'a', 'a', None, None]
}
expected = pd.DataFrame(exp_dict)
result = prep.transform(new_data)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_most_frequent(self, missing_data2):
# Test most frequent with group by with 2 columns.
prep = GroupByImputer('most_frequent', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', 'a', 'e', 'e', 'f', 'f', 'h', 'j', 'j']
}
expected = pd.DataFrame(exp_dict)
tm.assert_frame_equal(result, expected, check_dtype=False)
def test_2groups_mean(self, missing_data2):
# Test mean with group by with 2 columns.
prep = GroupByImputer('mean', ['b', 'c'])
prep.fit(missing_data2)
result = prep.transform(missing_data2)
exp_dict = {'a': [1, 2, 1.5, 4, 4, 4, 7, 8, 8, 8],
'b': ['123', '123', '123',
'123', '123', '789',
'789', '789', '789', '789'],
'c': ['a', 'a', 'a', 'b', 'b', 'c', 'c', 'a', 'a', 'c'],
'd': ['a', 'a', None, None, 'e', 'f', None, 'h', 'j',
'j']
}
expected =
|
pd.DataFrame(exp_dict)
|
pandas.DataFrame
|
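The behaviour these tests pin down can be sketched with plain pandas: group-wise imputation is a groupby/transform fill (toy frame, mean strategy shown).
import pandas as pd

df = pd.DataFrame({"b": ["x", "x", "y", "y"], "c": [1.0, None, 3.0, None]})
df["c"] = df.groupby("b")["c"].transform(lambda s: s.fillna(s.mean()))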
import os
import sys
import xgboost as xgb
import pandas as pd
import numpy as np
import json
import pickle as pkl
# Load model
xgb_model = pkl.load(open('xgb_model.pkl','rb'))
# Load distribution
load_dfc = pd.read_csv('cognitive_health.csv')
load_dfp = pd.read_csv('physical_health.csv')
load_dfl =
|
pd.read_csv('living_health.csv')
|
pandas.read_csv
|
import numpy as np
import os
import pandas as pd
# import eia
from datetime import datetime
import pytz
import json
from os.path import join
import zipfile
import requests
import logging
from electricitylci.globals import data_dir, output_dir
from electricitylci.bulk_eia_data import download_EBA, row_to_df, ba_exchange_to_df
from electricitylci.model_config import model_specs
import electricitylci.eia923_generation as eia923
import electricitylci.eia860_facilities as eia860
from electricitylci.process_dictionary_writer import *
"""
Merge generation and emissions data. Add region designations using either
eGRID or EIA-860. Same for primary fuel by plant (eGRID or 923). Calculate
and merge in the total generation by region. Create the column "Subregion"
to hold regional name info. Remove electricity flows. Rename flows and add
UUIDs according to the federal flow list.
Parameters
----------
year : int
Specified year to pull transaction data between balancing authorities
subregion : str
Description of a group of regions. Options include 'FERC' for all FERC
market regions, 'BA' for all balancing authorities.
Returns
-------
Dictionary of dataframes with import region, export region, transaction amount, total
imports for import region, and fraction of total. The dictionary keys
are the level of aggregation: "BA", "FERC", "US".
Sample output:
ferc_final_trade.head()
import ferc region export ferc region value total fraction
0 CAISO CAISO 2.662827e+08 3.225829e+08 0.825471
1 CAISO Canada 1.119572e+06 3.225829e+08 0.003471
2 CAISO ERCOT 0.000000e+00 3.225829e+08 0.000000
3 CAISO ISO-NE 0.000000e+00 3.225829e+08 0.000000
4 CAISO MISO 0.000000e+00 3.225829e+08 0.000000
"""
def ba_io_trading_model(year=None, subregion=None, regions_to_keep=None):
REGION_NAMES = [
'California', 'Carolinas', 'Central',
'Electric Reliability Council of Texas, Inc.', 'Florida',
'Mid-Atlantic', 'Midwest', 'New England ISO',
'New York Independent System Operator', 'Northwest', 'Southeast',
'Southwest', 'Tennessee Valley Authority'
]
REGION_ACRONYMS = [
'TVA', 'MIDA', 'CAL', 'CAR', 'CENT', 'ERCO', 'FLA',
'MIDW', 'ISNE', 'NYIS', 'NW', 'SE', 'SW',
]
if year is None:
year = model_specs.NETL_IO_trading_year
if subregion is None:
subregion = model_specs.regional_aggregation
if subregion not in ['BA', 'FERC','US']:
raise ValueError(
f'subregion or regional_aggregation must have a value of "BA" or "FERC" '
f'when calculating trading with input-output, not {subregion}'
)
# Read in BAA file which contains the names and abbreviations
df_BA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'US', header = 4)
df_BA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
    BA = np.array(df_BA['BA_Acronym'])  # use numpy directly; pd.np is deprecated/removed
US_BA_acronyms = df_BA['BA_Acronym'].tolist()
# Read in BAA file which contains the names and abbreviations
# Original df_BAA does not include the Canadian balancing authorities
# Import them here, then concatenate to make a single df_BAA_NA (North America)
df_BA_CA = pd.read_excel(data_dir + '/BA_Codes_930.xlsx', sheet_name = 'Canada', header = 4)
df_BA_CA.rename(columns={'etag ID': 'BA_Acronym', 'Entity Name': 'BA_Name','NCR_ID#': 'NRC_ID', 'Region': 'Region'}, inplace=True)
df_BA_NA = pd.concat([df_BA, df_BA_CA])
ferc_list = df_BA_NA['FERC_Region_Abbr'].unique().tolist()
# Read in the bulk data
# download_EBA()
path = join(data_dir, 'bulk_data', 'EBA.zip')
NET_GEN_ROWS = []
BA_TO_BA_ROWS = []
DEMAND_ROWS=[]
TOTAL_INTERCHANGE_ROWS=[]
try:
logging.info("Using existing bulk data download")
z = zipfile.ZipFile(path, 'r')
except FileNotFoundError:
logging.info("Downloading new bulk data")
download_EBA()
z = zipfile.ZipFile(path, 'r')
logging.info("Loading bulk data to json")
with z.open('EBA.txt') as f:
for line in f:
# All but one BA is currently reporting net generation in UTC and local time
# for that one BA (GRMA) only UTC time is reported - so only pulling that
# for now.
if b'EBA.NG.H' in line and b'EBA.NG.HL' not in line:
NET_GEN_ROWS.append(json.loads(line))
# Similarly there are 5 interchanges that report interchange in UTC but not in
# local time.
elif b'EBA.ID.H' in line and b'EBA.ID.HL' not in line:
exchange_line=json.loads(line)
if exchange_line['series_id'].split('-')[0][4:] not in REGION_ACRONYMS:
# try:
# Adding this check here to hopefully save some time down the road.
# dummy_date=datetime.strptime(exchange_line['data'][0][0],'%Y%m%dT%HZ')
BA_TO_BA_ROWS.append(exchange_line)
# good_date_count+=1
# except ValueError:
# bad_date_count+=1
# continue
# Keeping these here just in case
elif b'EBA.D.H' in line and b'EBA.D.HL' not in line:
DEMAND_ROWS.append(json.loads(line))
# elif b'EBA.TI.H' in line:
# TOTAL_INTERCHANGE_ROWS.append(json.loads(line))
logging.info(f"Net gen rows: {len(NET_GEN_ROWS)}; BA to BA rows:{len(BA_TO_BA_ROWS)}; Demand rows:{len(DEMAND_ROWS)}")
eia923_gen=eia923.build_generation_data(generation_years=[year])
eia860_df=eia860.eia860_balancing_authority(year)
eia860_df["Plant Id"]=eia860_df["Plant Id"].astype(int)
eia_combined_df=eia923_gen.merge(eia860_df,
left_on=["FacilityID"],
right_on=["Plant Id"],
how="left")
eia_gen_ba=eia_combined_df.groupby(by=["Balancing Authority Code"],as_index=False)["Electricity"].sum()
# Subset for specified eia_gen_year
start_datetime = '{}-01-01 00:00:00+00:00'.format(year)
end_datetime = '{}-12-31 23:00:00+00:00'.format(year)
start_datetime = datetime.strptime(start_datetime, '%Y-%m-%d %H:%M:%S%z')
end_datetime = datetime.strptime(end_datetime, '%Y-%m-%d %H:%M:%S%z')
# Net Generation Data Import
logging.info("Generating df with datetime")
df_net_gen = row_to_df(NET_GEN_ROWS, 'net_gen')
del(NET_GEN_ROWS)
logging.info("Pivoting")
df_net_gen = df_net_gen.pivot(index = 'datetime', columns = 'region', values = 'net_gen')
ba_cols = US_BA_acronyms
gen_cols = list(df_net_gen.columns.values)
gen_cols_set = set(gen_cols)
ba_ref_set = set(ba_cols)
col_diff = list(ba_ref_set - gen_cols_set)
col_diff.sort(key = str.upper)
logging.info("Cleaning net_gen dataframe")
# Add in missing columns, then sort in alphabetical order
for i in col_diff:
df_net_gen[i] = 0
# Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
# that represent states (e.g., TEX, NY, FL) and other areas (US48)
df_net_gen = df_net_gen[ba_cols]
# Resort columns so the headers are in alpha order
df_net_gen = df_net_gen.sort_index(axis=1)
df_net_gen = df_net_gen.fillna(value = 0)
df_net_gen = df_net_gen.loc[start_datetime:end_datetime]
# Sum values in each column
df_net_gen_sum = df_net_gen.sum(axis = 0).to_frame()
logging.info("Reading canadian import data")
# Add Canadian import data to the net generation dataset, concatenate and put in alpha order
df_CA_Imports_Gen = pd.read_csv(data_dir + '/CA_Imports_Gen.csv', index_col = 0)
df_CA_Imports_Gen = df_CA_Imports_Gen[str(year)]
logging.info("Combining US and Canadian net gen data")
df_net_gen_sum = pd.concat([df_net_gen_sum,df_CA_Imports_Gen]).sum(axis=1)
df_net_gen_sum = df_net_gen_sum.to_frame()
df_net_gen_sum = df_net_gen_sum.sort_index(axis=0)
# Check the net generation of each Balancing Authority against EIA 923 data.
# If the percent change of a given area is greater than the mean absolute difference
# of all of the areas, it will be treated as an error and replaced with the
# value in EIA923.
logging.info("Checking against EIA 923 generation data")
net_gen_check=df_net_gen_sum.merge(
right=eia_gen_ba,
left_index=True,
right_on=["Balancing Authority Code"],
how="left"
).reset_index()
net_gen_check["diff"]=abs(net_gen_check["Electricity"]-net_gen_check[0])/net_gen_check[0]
diff_mad=net_gen_check["diff"].mad()
net_gen_swap=net_gen_check.loc[net_gen_check["diff"]>diff_mad,["Balancing Authority Code","Electricity"]].set_index("Balancing Authority Code")
df_net_gen_sum.loc[net_gen_swap.index,[0]]=np.nan
net_gen_swap.rename(columns={"Electricity":0},inplace=True)
df_net_gen_sum=df_net_gen_sum.combine_first(net_gen_swap)
# First work on the trading data from the 'df_trade_all_stack_2016' frame
# This cell does the following:
# 1. reformats the data to an annual basis
# 2. formats the BA names in the corresponding columns
    # 3. evaluates the trade values from both BA perspectives
    #    (e.g. BA1 as exporter and importer in a transaction with BA2)
    # 4. evaluates the trading data for any results that don't make sense
    #    a. both BAs designate as importers (negative value)
    #    b. both BAs designate as exporters (positive value)
    #    c. one of the BAs in the transaction reports a zero value and the other is nonzero
    # 5. calculates the percent difference in the transaction values reported by the BAs
# 6. final exchange value based on logic;
# a. if percent diff is less than 20%, take mean,
# b. if not use the value as reported by the exporting BAA
# c. designate each BA in the transaction either as the importer or exporter
# Output is a pivot with index (rows) representing exporting BAs,
# columns representing importing BAs, and values for the traded amount
# Group and resample trading data so that it is on an annual basis
logging.info("Creating trading dataframe")
df_ba_trade = ba_exchange_to_df(BA_TO_BA_ROWS, data_type='ba_to_ba')
del(BA_TO_BA_ROWS)
df_ba_trade = df_ba_trade.set_index('datetime')
df_ba_trade['transacting regions'] = df_ba_trade['from_region'] + '-' + df_ba_trade['to_region']
logging.info("Filtering trading dataframe")
# Keep only the columns that match the balancing authority names, there are several other columns included in the dataset
# that represent states (e.g., TEX, NY, FL) and other areas (US48)
filt1 = df_ba_trade['from_region'].isin(ba_cols)
filt2 = df_ba_trade['to_region'].isin(ba_cols)
filt = filt1 & filt2
df_ba_trade = df_ba_trade[filt]
# Subset for eia_gen_year, need to pivot first because of non-unique datetime index
df_ba_trade_pivot = df_ba_trade.pivot(columns = 'transacting regions', values = 'ba_to_ba')
df_ba_trade_pivot = df_ba_trade_pivot.loc[start_datetime:end_datetime]
# Sum columns - represents the net transactced amount between the two BAs
df_ba_trade_sum = df_ba_trade_pivot.sum(axis = 0).to_frame()
df_ba_trade_sum = df_ba_trade_sum.reset_index()
df_ba_trade_sum.columns = ['BAAs','Exchange']
# Split BAA string into exporting and importing BAA columns
df_ba_trade_sum['BAA1'], df_ba_trade_sum['BAA2'] = df_ba_trade_sum['BAAs'].str.split('-', 1).str
df_ba_trade_sum = df_ba_trade_sum.rename(columns={'BAAs': 'Transacting BAAs'})
# Create two perspectives - import and export to use for comparison in selection of the final exchange value between the BAAs
df_trade_sum_1_2 = df_ba_trade_sum.groupby(['BAA1', 'BAA2','Transacting BAAs'], as_index=False)[['Exchange']].sum()
df_trade_sum_2_1 = df_ba_trade_sum.groupby(['BAA2', 'BAA1', 'Transacting BAAs'], as_index=False)[['Exchange']].sum()
df_trade_sum_1_2.columns = ['BAA1_1_2', 'BAA2_1_2','Transacting BAAs_1_2', 'Exchange_1_2']
df_trade_sum_2_1.columns = ['BAA2_2_1', 'BAA1_2_1','Transacting BAAs_2_1', 'Exchange_2_1']
# Combine two grouped tables for comparison for exchange values
df_concat_trade = pd.concat([df_trade_sum_1_2,df_trade_sum_2_1], axis = 1)
df_concat_trade['Exchange_1_2_abs'] = df_concat_trade['Exchange_1_2'].abs()
df_concat_trade['Exchange_2_1_abs'] = df_concat_trade['Exchange_2_1'].abs()
# Create new column to check if BAAs designate as either both exporters or both importers
# or if one of the entities in the transaction reports a zero value
# Drop combinations where any of these conditions are true, keep everything else
df_concat_trade['Status_Check'] = np.where(((df_concat_trade['Exchange_1_2'] > 0) & (df_concat_trade['Exchange_2_1'] > 0)) \
|((df_concat_trade['Exchange_1_2'] < 0) & (df_concat_trade['Exchange_2_1'] < 0)) \
| ((df_concat_trade['Exchange_1_2'] == 0) | (df_concat_trade['Exchange_2_1'] == 0)), 'drop', 'keep')
# Calculate the difference in exchange values
df_concat_trade['Delta'] = df_concat_trade['Exchange_1_2_abs'] - df_concat_trade['Exchange_2_1_abs']
    # Calculate percent diff of exchange_abs values - this can be done two ways:
# relative to 1_2 exchange or relative to 2_1 exchange - perform the calc both ways
# and take the average
df_concat_trade['Percent_Diff_Avg']= ((abs((df_concat_trade['Exchange_1_2_abs']/df_concat_trade['Exchange_2_1_abs'])-1)) \
+ (abs((df_concat_trade['Exchange_2_1_abs']/df_concat_trade['Exchange_1_2_abs'])-1)))/2
# Mean exchange value
df_concat_trade['Exchange_mean'] = df_concat_trade[['Exchange_1_2_abs', 'Exchange_2_1_abs']].mean(axis=1)
    # The percent diff equation creates NaN where both values are 0; fill with 0
df_concat_trade['Percent_Diff_Avg'].fillna(0, inplace = True)
# Final exchange value based on logic; if percent diff is less than 20%, take mean,
# if not use the value as reported by the exporting BAA. First figure out which BAA is the exporter
    # by checking the value of Exchange_1_2.
    # If that value is positive, it indicates that BAA1 exported to BAA2; if negative, use the
# value from Exchange_2_1
df_concat_trade['Final_Exchange'] = np.where((df_concat_trade['Percent_Diff_Avg'].abs() < 0.2),
df_concat_trade['Exchange_mean'],np.where((df_concat_trade['Exchange_1_2'] > 0),
df_concat_trade['Exchange_1_2'],df_concat_trade['Exchange_2_1']))
# Assign final designation of BAA as exporter or importer based on logical assignment
df_concat_trade['Export_BAA'] = np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA1_1_2'],
np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA2_1_2'],''))
df_concat_trade['Import_BAA'] = np.where((df_concat_trade['Exchange_1_2'] < 0), df_concat_trade['BAA1_1_2'],
np.where((df_concat_trade['Exchange_1_2'] > 0), df_concat_trade['BAA2_1_2'],''))
df_concat_trade = df_concat_trade[df_concat_trade['Status_Check'] == 'keep']
# Create the final trading matrix; first grab the necessary columns, rename the columns and then pivot
df_concat_trade_subset = df_concat_trade[['Export_BAA', 'Import_BAA', 'Final_Exchange']]
df_concat_trade_subset.columns = ['Exporting_BAA', 'Importing_BAA', 'Amount']
df_trade_pivot = df_concat_trade_subset.pivot_table(index = 'Exporting_BAA', columns = 'Importing_BAA', values = 'Amount').fillna(0)
# This cell continues formatting the df_trade
# Find missing BAs - need to add them in so that we have a square matrix
# Not all BAs are involved in transactions
trade_cols = list(df_trade_pivot.columns.values)
trade_rows = list(df_trade_pivot.index.values)
trade_cols_set = set(trade_cols)
trade_rows_set = set(trade_rows)
trade_ba_ref_set = set(ba_cols)
trade_col_diff = list(trade_ba_ref_set - trade_cols_set)
trade_col_diff.sort(key = str.upper)
trade_row_diff = list(trade_ba_ref_set - trade_rows_set)
trade_row_diff.sort(key=str.upper)
# Add in missing columns, then sort in alphabetical order
for i in trade_col_diff:
df_trade_pivot[i] = 0
df_trade_pivot = df_trade_pivot.sort_index(axis=1)
# Add in missing rows, then sort in alphabetical order
for i in trade_row_diff:
df_trade_pivot.loc[i,:] = 0
df_trade_pivot = df_trade_pivot.sort_index(axis=0)
# Add Canadian Imports to the trading matrix
# CA imports are specified in an external file
df_CA_Imports_Cols = pd.read_csv(data_dir + '/CA_Imports_Cols.csv', index_col = 0)
df_CA_Imports_Rows = pd.read_csv(data_dir + '/CA_Imports_Rows.csv', index_col = 0)
df_CA_Imports_Rows = df_CA_Imports_Rows[['us_ba', str(year)]]
df_CA_Imports_Rows = df_CA_Imports_Rows.pivot(columns = 'us_ba', values = str(year))
df_concat_trade_CA = pd.concat([df_trade_pivot, df_CA_Imports_Rows])
df_concat_trade_CA = pd.concat([df_concat_trade_CA, df_CA_Imports_Cols], axis = 1)
df_concat_trade_CA.fillna(0, inplace = True)
df_trade_pivot = df_concat_trade_CA
df_trade_pivot = df_trade_pivot.sort_index(axis=0)
df_trade_pivot = df_trade_pivot.sort_index(axis=1)
# Perform trading calculations as provided in Qu et al (2018) to
# determine the composition of a BA consumption mix
# Create total inflow vector x and then convert to a diagonal matrix x-hat
logging.info("Inflow vector")
x = []
for i in range (len(df_net_gen_sum)):
x.append(df_net_gen_sum.iloc[i] + df_trade_pivot.sum(axis = 0).iloc[i])
x_np = np.array(x)
# If values are zero, x_hat matrix will be singular, set BAAs with 0 to small value (1)
df_x = pd.DataFrame(data = x_np, index = df_trade_pivot.index)
df_x = df_x.rename(columns = {0:'inflow'})
df_x.loc[df_x['inflow'] == 0] = 1
x_np = df_x.values
x_hat = np.diagflat(x_np)
    # Create consumption vector c and then convert to a diagonal matrix c-hat
# Calculate c based on x and T
logging.info("consumption vector")
c = []
for i in range(len(df_net_gen_sum)):
c.append(x[i] - df_trade_pivot.sum(axis = 1).iloc[i])
c_np = np.array(c)
c_hat = np.diagflat(c_np)
# Convert df_trade_pivot to matrix
T = df_trade_pivot.values
# Create matrix to split T into distinct interconnections - i.e., prevent trading between eastern and western interconnects
# Connections between the western and eastern interconnects are through SWPP and WAUE
logging.info("Matrix operations")
interconnect = df_trade_pivot.copy()
interconnect[:] = 1
interconnect.loc['SWPP',['EPE', 'PNM', 'PSCO', 'WACM']] = 0
interconnect.loc['WAUE',['WAUW', 'WACM']] = 0
interconnect_mat = interconnect.values
T_split = np.multiply(T, interconnect_mat)
# Matrix trading math (see Qu et al. 2018 ES&T paper)
x_hat_inv = np.linalg.inv(x_hat)
B = np.matmul(T_split, x_hat_inv)
I = np.identity(len(df_net_gen_sum))
diff_I_B = I - B
G = np.linalg.inv(diff_I_B)
c_hat_x_hat_inv = np.matmul(c_hat, x_hat_inv)
G_c = np.matmul(G, c_hat)
H = np.matmul(G,c_hat, x_hat_inv)
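    # Recap of the algebra implemented above (following Qu et al. 2018): x is the total
    # inflow per BA (own net generation plus imports), c = x - exports is consumption,
    # B = T_split @ inv(x_hat) normalizes trade by inflow, and G = inv(I - B) accumulates
    # direct and indirect trade contributions used below to build the consumption-mix frame.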
df_G =
|
pd.DataFrame(G)
|
pandas.DataFrame
|
"""Base class for modeling portfolio and measuring its performance.
The job of the `Portfolio` class is to create a series of positions allocated
against a cash component, produce an equity curve, incorporate basic transaction costs
and produce a set of statistics about its performance. In particular it outputs
position/profit metrics and drawdown information.
## Workflow
The workflow of `Portfolio` is simple:
1. Receives a set of inputs, such as entry and exit signals
2. Uses them to generate and fill orders in form of records (simulation part)
3. Calculates a broad range of risk & performance metrics based on these records (analysis part)
It basically builds upon the `vectorbt.portfolio.orders.Orders` class. To simplify creation of order
records and keep track of balances, it exposes several convenience methods with prefix `from_`.
For example, you can use `Portfolio.from_signals` method to generate orders from entry and exit signals.
Alternatively, you can use `Portfolio.from_order_func` to run a custom order function on each tick.
The results are then automatically passed to the constructor method of `Portfolio` and you will
receive a portfolio instance ready to be used for performance analysis.
This way, one can simulate and analyze a strategy in a couple of lines.
### Example
The following example does something crazy: it checks candlestick data of 6 major cryptocurrencies
in 2020 against every single pattern found in TA-Lib, and translates them into signals:
```python-repl
>>> import numpy as np
>>> import pandas as pd
>>> from datetime import datetime
>>> import talib
>>> import vectorbt as vbt
>>> # Fetch price history
>>> symbols = ['BTC-USD', 'ETH-USD', 'XRP-USD', 'BNB-USD', 'BCH-USD', 'LTC-USD']
>>> start = datetime(2020, 1, 1)
>>> end = datetime(2020, 9, 1)
>>> ohlcv_by_symbol = vbt.utils.data.download(symbols, start=start, end=end)
>>> # Put assets into a single dataframe by price type
>>> ohlcv = vbt.utils.data.concat_symbols(ohlcv_by_symbol)
>>> ohlcv['Open'].head()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD \
Date
2019-12-31 7294.438965 132.612274 0.194518 13.952087 209.301987
2020-01-01 7194.892090 129.630661 0.192912 13.730962 204.671295
2020-01-02 7202.551270 130.820038 0.192708 13.698126 204.354538
2020-01-03 6984.428711 127.411263 0.187948 13.035329 196.007690
2020-01-04 7345.375488 134.168518 0.193521 13.667442 222.536560
symbol LTC-USD
Date
2019-12-31 42.766113
2020-01-01 41.326534
2020-01-02 42.018085
2020-01-03 39.863129
2020-01-04 42.383526
>>> # Run every single pattern recognition indicator and combine results
>>> result = pd.DataFrame.vbt.empty_like(ohlcv['Open'], fill_value=0.)
>>> for pattern in talib.get_function_groups()['Pattern Recognition']:
... PRecognizer = vbt.IndicatorFactory.from_talib(pattern)
... pr = PRecognizer.run(ohlcv['Open'], ohlcv['High'], ohlcv['Low'], ohlcv['Close'])
... result = result + pr.integer
>>> # Don't look into future
>>> result = result.vbt.fshift(1)
>>> # Treat each number as order value in USD
>>> size = result / ohlcv['Open']
>>> # Simulate portfolio
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001)
>>> # Visualize portfolio value
>>> portfolio.value().vbt.plot()
```

## Broadcasting
`Portfolio` is very flexible towards inputs (see the sketch after this list):
* Accepts both Series and DataFrames as inputs
* Broadcasts inputs to the same shape using vectorbt's own broadcasting rules
* Many inputs (such as `fees`) can be passed as a single value, value per column/row, or as a matrix
* Implements flexible indexing wherever possible to save memory
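For instance, a minimal sketch (reusing `ohlcv` and `size` from the example above, and
assuming six columns) where `fees` is passed once as a scalar and once per column:
```python-repl
>>> # Scalar: the same fee for every element
>>> pf_flat = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001)
>>> # One-row nested list: one fee per column
>>> pf_per_col = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'], init_cash='autoalign',
... fees=[[0.001, 0.001, 0.001, 0.002, 0.002, 0.002]])
```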
## Grouping
One of the key features of `Portfolio` is the ability to group columns. Groups can be specified by
`group_by`, which can be anything from positions or names of column levels, to a NumPy array with
actual groups. Groups can be formed to share capital between columns or to compute metrics
for a combined portfolio of multiple independent columns.
For example, let's divide our portfolio into two groups sharing the same cash:
```python-repl
>>> # Simulate combined portfolio
>>> group_by = pd.Index([
... 'first', 'first', 'first',
... 'second', 'second', 'second'
... ], name='group')
>>> comb_portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001,
... group_by=group_by, cash_sharing=True)
>>> # Get total profit per group
>>> comb_portfolio.total_profit()
group
first 21891.431061
second 7575.676246
dtype: float64
```
Not only can you analyze each group, but also each column in the group:
```python-repl
>>> # Get total profit per column
>>> comb_portfolio.total_profit(group_by=False)
symbol
BTC-USD 5163.844396
ETH-USD 13368.521326
XRP-USD 3359.065339
BNB-USD 4724.565229
BCH-USD -259.592709
LTC-USD 3110.703726
dtype: float64
```
In the same way, you can introduce new grouping to the method itself:
```python-repl
>>> # Get total profit per group
>>> portfolio.total_profit(group_by=group_by)
group
first 21891.431061
second 7575.676246
dtype: float64
```
!!! note
If cash sharing is enabled, grouping can be disabled but cannot be modified.
## Indexing
In addition, you can use pandas indexing on the `Portfolio` class itself, which forwards
indexing operation to each argument with index:
```python-repl
>>> portfolio['BTC-USD']
<vectorbt.portfolio.base.Portfolio at 0x7fac7517ac88>
>>> portfolio['BTC-USD'].total_profit()
5163.844396244112
```
Combined portfolio is indexed by group:
```python-repl
>>> comb_portfolio['first']
<vectorbt.portfolio.base.Portfolio at 0x7fac5756b828>
>>> comb_portfolio['first'].total_profit()
21891.43106080097
```
!!! note
Changing index (time axis) is not supported. The object should be treated as a Series
rather than a DataFrame; for example, use `portfolio.iloc[0]` instead of `portfolio.iloc[:, 0]`.
Indexing behavior depends solely upon `vectorbt.base.array_wrapper.ArrayWrapper`.
For example, if `group_select` is enabled indexing will be performed on groups,
otherwise on single columns. You can pass wrapper arguments with `wrapper_kwargs`.
## Logging
To collect more information on how a specific order was processed or to be able to track the whole
simulation from the beginning to the end, you can turn on logging.
```python-repl
>>> # Simulate portfolio with logging
>>> portfolio = vbt.Portfolio.from_orders(
... ohlcv['Close'], size, price=ohlcv['Open'],
... init_cash='autoalign', fees=0.001, slippage=0.001, log=True)
>>> portfolio.logs.records
id idx col group cash_now shares_now val_price_now value_now \
0 0 0 0 0 inf 0.000000 7294.438965 inf
... ... ... ... ... ... ... ... ...
1463 1463 243 5 5 inf 271.629075 62.844059 inf
size size_type ... log new_cash new_shares res_size \
0 NaN 0 ... True inf 0.000000 NaN
... ... ... ... ... ... ... ...
1463 7.956202 0 ... True inf 279.585277 7.956202
res_price res_fees res_side res_status res_status_info order_id
0 NaN NaN -1 1 0 -1
... ... ... ... ... ... ...
1463 62.906903 0.5005 0 0 -1 1075
[1464 rows x 30 columns]
```
Just as orders, logs are also records and thus can be easily analyzed:
```python-repl
>>> from vectorbt.portfolio.enums import OrderStatus
>>> portfolio.logs.map_field('res_status', value_map=OrderStatus).value_counts()
symbol BTC-USD ETH-USD XRP-USD BNB-USD BCH-USD LTC-USD
Ignored 59 72 66 66 66 59
Filled 185 172 178 178 178 185
```
Logging can also be turned on just for one order, row, or column; like many other
variables, it is specified per order and can broadcast automatically.
!!! note
Logging can slow down simulation.
## Caching
`Portfolio` heavily relies upon caching. If a method or a property requires heavy computation,
it's wrapped with `vectorbt.utils.decorators.cached_method` and `vectorbt.utils.decorators.cached_property`
respectively. Caching can be disabled globally via `vectorbt.settings`.
!!! note
Because of caching, the class is meant to be immutable and all properties are read-only.
To change any attribute, use the `copy` method and pass the attribute as keyword argument.
If you're running out of memory when working with large arrays, make sure to disable caching
and then store most important time series manually. For example, if you're interested in Sharpe
ratio or other metrics based on returns, run and save `Portfolio.returns` and then use the
`vectorbt.returns.accessors.ReturnsAccessor` to analyze them. Do not use methods akin to
`Portfolio.sharpe_ratio` because they will re-calculate returns each time.
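For example, a minimal sketch of that pattern (assuming the daily data from the examples above):
```python-repl
>>> # Compute returns once and reuse them through the accessor
>>> returns = portfolio.returns()
>>> returns_acc = returns.vbt.returns(freq='1D')
>>> sharpe = returns_acc.sharpe_ratio()
```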
Alternatively, you can precisely point at attributes and methods that should or shouldn't
be cached. For example, you can blacklist the entire `Portfolio` class except a few most called
methods such as `Portfolio.cash_flow` and `Portfolio.share_flow`:
```python-repl
>>> vbt.settings.caching['blacklist'].append('Portfolio')
>>> vbt.settings.caching['whitelist'].extend([
... 'Portfolio.cash_flow',
... 'Portfolio.share_flow'
... ])
```
Define rules for one instance of `Portfolio`:
```python-repl
>>> vbt.settings.caching['blacklist'].append(portfolio)
>>> vbt.settings.caching['whitelist'].extend([
... portfolio.cash_flow,
... portfolio.share_flow
... ])
```
!!! note
Note that the above approach doesn't work for cached properties.
Use tuples of the instance and the property name instead, such as `(portfolio, 'orders')`.
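For example, a sketch reusing the instance from above:
```python-repl
>>> vbt.settings.caching['blacklist'].append((portfolio, 'orders'))
```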
To reset caching:
```python-repl
>>> vbt.settings.caching.reset()
```
"""
import numpy as np
import pandas as pd
from inspect import signature
from collections import OrderedDict
import warnings
from vectorbt.utils import checks
from vectorbt.utils.decorators import cached_property, cached_method
from vectorbt.utils.enum import convert_str_enum_value
from vectorbt.utils.config import merge_dicts
from vectorbt.utils.random import set_seed
from vectorbt.utils.colors import adjust_opacity
from vectorbt.utils.widgets import make_subplots
from vectorbt.base.reshape_fns import to_1d, to_2d, broadcast, broadcast_to
from vectorbt.base.array_wrapper import ArrayWrapper, Wrapping
from vectorbt.generic import nb as generic_nb
from vectorbt.generic.drawdowns import Drawdowns
from vectorbt.signals.generators import RAND, RPROB
from vectorbt.portfolio import nb
from vectorbt.portfolio.orders import Orders
from vectorbt.portfolio.trades import Trades, Positions
from vectorbt.portfolio.logs import Logs
from vectorbt.portfolio.enums import (
InitCashMode,
CallSeqType,
SizeType,
ConflictMode,
Direction
)
def _mean_agg_func(df):
"""Compute mean for `Portfolio.stats`."""
return df.mean(axis=0)
def add_returns_methods(func_names):
"""Class decorator to add `vectorbt.returns.accessors.ReturnsAccessor` methods to `Portfolio`."""
def wrapper(cls):
for func_name in func_names:
if isinstance(func_name, tuple):
ret_func_name = func_name[0]
else:
ret_func_name = func_name
def returns_method(
self,
*args,
group_by=None,
year_freq=None,
ret_func_name=ret_func_name,
active_returns=False,
in_sim_order=False,
reuse_returns=None,
**kwargs):
if reuse_returns is not None:
returns = reuse_returns
else:
if active_returns:
returns = self.active_returns(group_by=group_by)
else:
returns = self.returns(group_by=group_by, in_sim_order=in_sim_order)
returns_acc = returns.vbt.returns(freq=self.wrapper.freq, year_freq=year_freq)
# Select only those arguments in kwargs that are also in the method's signature
# This is done for Portfolio.stats which passes the same kwargs to multiple methods
method = getattr(returns_acc, ret_func_name)
sig = signature(method)
arg_names = [p.name for p in sig.parameters.values() if p.kind == p.POSITIONAL_OR_KEYWORD]
new_kwargs = {}
for arg_name in arg_names:
if arg_name in kwargs:
new_kwargs[arg_name] = kwargs[arg_name]
return method(*args, **new_kwargs)
if isinstance(func_name, tuple):
func_name = func_name[1]
returns_method.__name__ = func_name
returns_method.__qualname__ = f"Portfolio.{func_name}"
returns_method.__doc__ = f"See `vectorbt.returns.accessors.ReturnsAccessor.{ret_func_name}`."
setattr(cls, func_name, cached_method(returns_method))
return cls
return wrapper
@add_returns_methods([
('daily', 'daily_returns'),
('annual', 'annual_returns'),
('cumulative', 'cumulative_returns'),
('annualized', 'annualized_return'),
'annualized_volatility',
'calmar_ratio',
'omega_ratio',
'sharpe_ratio',
'deflated_sharpe_ratio',
'downside_risk',
'sortino_ratio',
'information_ratio',
'beta',
'alpha',
'tail_ratio',
'value_at_risk',
'conditional_value_at_risk',
'capture',
'up_capture',
'down_capture',
'drawdown',
'max_drawdown'
])
class Portfolio(Wrapping):
"""Class for modeling portfolio and measuring its performance.
Args:
wrapper (ArrayWrapper): Array wrapper.
See `vectorbt.base.array_wrapper.ArrayWrapper`.
close (array_like): Reference price, such as close.
order_records (array_like): A structured NumPy array of order records.
log_records (array_like): A structured NumPy array of log records.
init_cash (InitCashMode, float or array_like of float): Initial capital.
cash_sharing (bool): Whether to share cash within the same group.
call_seq (array_like of int): Sequence of calls per row and group.
incl_unrealized (bool): Whether to include unrealized P&L in statistics.
use_filled_close (bool): Whether to forward-backward fill NaN values in `close`.
Doesn't affect simulation; used only for total profit and market value.
See `Portfolio.fill_close`.
!!! note
Use class methods with `from_` prefix to build a portfolio.
The `__init__` method is reserved for indexing purposes.
!!! note
This class is meant to be immutable. To change any attribute, use `Portfolio.copy`."""
def __init__(self, wrapper, close, order_records, log_records, init_cash,
cash_sharing, call_seq, incl_unrealized=None, use_filled_close=None):
Wrapping.__init__(
self,
wrapper,
close=close,
order_records=order_records,
log_records=log_records,
init_cash=init_cash,
cash_sharing=cash_sharing,
call_seq=call_seq,
incl_unrealized=incl_unrealized,
use_filled_close=use_filled_close
)
# Get defaults
from vectorbt import settings
if incl_unrealized is None:
incl_unrealized = settings.portfolio['incl_unrealized']
if use_filled_close is None:
use_filled_close = settings.portfolio['use_filled_close']
# Store passed arguments
self._close = broadcast_to(close, wrapper.dummy(group_by=False))
self._order_records = order_records
self._log_records = log_records
self._init_cash = init_cash
self._cash_sharing = cash_sharing
self._call_seq = call_seq
self._incl_unrealized = incl_unrealized
self._use_filled_close = use_filled_close
def _indexing_func(self, pd_indexing_func):
"""Perform indexing on `Portfolio`."""
new_wrapper, _, group_idxs, col_idxs = \
self.wrapper._indexing_func_meta(pd_indexing_func, column_only_select=True)
new_close = new_wrapper.wrap(to_2d(self.close, raw=True)[:, col_idxs], group_by=False)
new_order_records = self.orders._col_idxs_records(col_idxs)
new_log_records = self.logs._col_idxs_records(col_idxs)
if isinstance(self._init_cash, int):
new_init_cash = self._init_cash
else:
new_init_cash = to_1d(self._init_cash, raw=True)[group_idxs if self.cash_sharing else col_idxs]
new_call_seq = self.call_seq.values[:, col_idxs]
return self.copy(
wrapper=new_wrapper,
close=new_close,
order_records=new_order_records,
log_records=new_log_records,
init_cash=new_init_cash,
call_seq=new_call_seq
)
# ############# Class methods ############# #
@classmethod
def from_holding(cls, close, **kwargs):
"""Simulate portfolio from holding.
Based on `Portfolio.from_signals`."""
return cls.from_signals(close, True, False, accumulate=False, **kwargs)
@classmethod
def from_random(cls, close, n=None, prob=None, entry_prob=None, exit_prob=None,
param_product=False, seed=None, **kwargs):
"""Simulate portfolio from random entry and exit signals.
Generates signals based either on the number of signals `n` or the probability
of encountering a signal `prob`.
If `n` is set, see `vectorbt.signals.generators.RAND`.
If `prob` is set, see `vectorbt.signals.generators.RPROB`.
Based on `Portfolio.from_signals`."""
from vectorbt import settings
if entry_prob is None:
entry_prob = prob
if exit_prob is None:
exit_prob = prob
if seed is None:
seed = settings.portfolio['seed']
if n is not None and (entry_prob is not None or exit_prob is not None):
raise ValueError("Either n or entry_prob and exit_prob should be set")
if n is not None:
rand = RAND.run(
n=n,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rand.entries
exits = rand.exits
elif entry_prob is not None and exit_prob is not None:
rprob = RPROB.run(
entry_prob=entry_prob,
exit_prob=exit_prob,
param_product=param_product,
input_shape=close.shape,
input_index=close.vbt.wrapper.index,
input_columns=close.vbt.wrapper.columns,
seed=seed
)
entries = rprob.entries
exits = rprob.exits
else:
raise ValueError("At least n or entry_prob and exit_prob should be set")
return cls.from_signals(close, entries, exits, seed=seed, **kwargs)
@classmethod
def from_signals(cls, close, entries, exits, size=None, size_type=None, direction=None, price=None,
fees=None, fixed_fees=None, slippage=None, min_size=None, max_size=None,
reject_prob=None, allow_partial=None, raise_reject=None, accumulate=None, log=None,
conflict_mode=None, close_first=None, val_price=None, init_cash=None, cash_sharing=None,
call_seq=None, max_orders=None, max_logs=None, seed=None, group_by=None,
broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from entry and exit signals.
Starting with initial cash `init_cash`, for each signal in `entries`, enters a long/short position
by buying/selling `size` of shares. For each signal in `exits`, closes the position by
selling/buying shares. Depending upon accumulation, each entry signal may increase
the position and each exit signal may decrease the position. When both entry and exit signals
are present, ignores them by default. When grouping is enabled with `group_by`, will compute
the performance of the entire group. When `cash_sharing` is enabled, will share the cash among
all columns in the group.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
entries (array_like of bool): Boolean array of entry signals.
Will broadcast.
Becomes a long signal if `direction` is `all` or `longonly`, otherwise short.
exits (array_like of bool): Boolean array of exit signals.
Will broadcast.
Becomes a short signal if `direction` is `all` or `longonly`, otherwise long.
size (float or array_like): Size to order.
Will broadcast.
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
!!! note
Sign will be ignored.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
Only `SizeType.Shares` and `SizeType.Percent` are supported.
Other modes such as target percentage are not compatible with signals since
their logic may contradict the direction of the signal.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single
direction or use `close_first`.
See warning on `size_type` in `Portfolio.from_orders`.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded. You might not be able to properly close
the position if accumulation is enabled and `max_size` is too low.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
accumulate (bool or array_like): Whether to accumulate signals.
Will broadcast.
Behaves similarly to `Portfolio.from_orders`.
conflict_mode (ConflictMode or array_like): See `vectorbt.portfolio.enums.ConflictMode`.
Will broadcast.
close_first (bool or array_like): Whether to close the position first before reversal.
Will broadcast.
Otherwise reverses the position with a single order and within the same tick.
Takes effect only under `Direction.All`. Requires a second signal to enter
the opposite position. This allows parameters such as `fixed_fees` to be defined
for long and short positions separately.
val_price (array_like of float): Asset valuation price.
Defaults to `price` if set, otherwise to previous `close`.
See `val_price` in `Portfolio.from_orders`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
See `cash_sharing` in `Portfolio.from_orders`.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
See `call_seq` in `Portfolio.from_orders`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! hint
If you generated signals using close price, don't forget to shift your signals by one tick
forward, for example, with `signals.vbt.fshift(1)`. In general, make sure to use a price
that comes after the signal.
Also see notes and hints for `Portfolio.from_orders`.
## Example
Some of the ways of how signals are interpreted:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> entries = pd.Series([True, True, True, False, False])
>>> exits = pd.Series([False, False, True, True, True])
>>> # Entry opens long, exit closes long
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='longonly')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry opens short, exit closes short
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='shortonly')
>>> portfolio.share_flow()
0 -1.0
1 0.0
2 0.0
3 1.0
4 0.0
dtype: float64
>>> # Entry opens long and closes short, exit closes long and opens short
>>> # Reversal within one tick
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -2.0
4 0.0
dtype: float64
>>> # Reversal within two ticks
>>> # First signal closes position, second signal opens the opposite one
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True)
>>> portfolio.share_flow()
0 1.0
1 0.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # If entry and exit, chooses exit
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... close_first=True, conflict_mode='exit')
>>> portfolio.share_flow()
0 1.0
1 0.0
2 -1.0
3 -1.0
4 0.0
dtype: float64
>>> # Entry means long order, exit means short order
>>> # Acts similar to `from_orders`
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, size=1., direction='all',
... accumulate=True)
>>> portfolio.share_flow()
0 1.0
1 1.0
2 0.0
3 -1.0
4 -1.0
dtype: float64
>>> # Testing multiple parameters (via broadcasting)
>>> from vectorbt.portfolio.enums import Direction
>>> portfolio = vbt.Portfolio.from_signals(
... close, entries, exits, direction=[list(Direction)],
... broadcast_kwargs=dict(columns_from=Direction._fields))
>>> portfolio.share_flow()
Long Short All
0 100.0 -100.0 100.0
1 0.0 0.0 0.0
2 0.0 0.0 0.0
3 -100.0 50.0 -200.0
4 0.0 0.0 0.0
```
"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['signal_size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['signal_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if accumulate is None:
accumulate = settings.portfolio['accumulate']
if conflict_mode is None:
conflict_mode = settings.portfolio['conflict_mode']
conflict_mode = convert_str_enum_value(ConflictMode, conflict_mode)
if close_first is None:
close_first = settings.portfolio['close_first']
if val_price is None:
if price is None:
if checks.is_pandas(close):
val_price = close.vbt.fshift(1)
else:
val_price = np.require(close, dtype=np.float_)
val_price = np.roll(val_price, 1, axis=0)
val_price[0] = np.nan
else:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
entries,
exits,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
accumulate,
log,
conflict_mode,
close_first,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_signals_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_orders(cls, close, size, size_type=None, direction=None, price=None, fees=None,
fixed_fees=None, slippage=None, min_size=None, max_size=None, reject_prob=None,
allow_partial=None, raise_reject=None, log=None, val_price=None, init_cash=None,
cash_sharing=None, call_seq=None, max_orders=None, max_logs=None, seed=None,
group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Simulate portfolio from orders.
Starting with initial cash `init_cash`, orders the number of shares specified in `size`
for `price`.
Args:
close (array_like): Reference price, such as close.
Will broadcast.
Will be used for calculating unrealized P&L and portfolio value.
size (float or array_like): Size to order.
Will broadcast.
Behavior depends upon `size_type` and `direction`. For `SizeType.Shares`:
* Set to any number to buy/sell some fixed amount of shares.
Longs are limited by cash in the account, while shorts are unlimited.
* Set to `np.inf` to buy shares for all cash, or `-np.inf` to sell shares for
initial margin of 100%. If `direction` is not `all`, `-np.inf` will close the position.
* Set to `np.nan` or 0 to skip.
For any target size:
* Set to any number to buy/sell amount of shares relative to current holdings or value.
* Set to 0 to close the current position.
* Set to `np.nan` to skip.
size_type (SizeType or array_like): See `vectorbt.portfolio.enums.SizeType`.
Will broadcast.
!!! note
`SizeType.Percent` does not support position reversal. Switch to a single direction.
!!! warning
Be cautious using `SizeType.Percent` with `call_seq` set to 'auto'.
To execute sell orders before buy orders, the value of each order in the group
needs to be approximated in advance. But since `SizeType.Percent` depends
upon cash balance, which cannot be calculated in advance, the latest cash balance
is used. This can yield wrong call sequence for buy orders.
direction (Direction or array_like): See `vectorbt.portfolio.enums.Direction`.
Will broadcast.
price (array_like of float): Order price.
Defaults to `close`. Will broadcast.
fees (float or array_like): Fees in percentage of the order value.
Will broadcast. Note that 0.01 = 1%.
fixed_fees (float or array_like): Fixed amount of fees to pay per order.
Will broadcast.
slippage (float or array_like): Slippage in percentage of price.
Will broadcast. Note that 0.01 = 1%.
min_size (float or array_like): Minimum size for an order to be accepted.
Will broadcast.
max_size (float or array_like): Maximum size for an order.
Will broadcast.
Will be partially filled if exceeded.
reject_prob (float or array_like): Order rejection probability.
Will broadcast.
allow_partial (bool or array_like): Whether to allow partial fills.
Will broadcast.
Does not apply when size is `np.inf`.
raise_reject (bool or array_like): Whether to raise an exception if order gets rejected.
Will broadcast.
log (bool or array_like): Whether to log orders.
Will broadcast.
val_price (array_like of float): Asset valuation price.
Defaults to `price`. Will broadcast.
Used at the time of decision making to calculate value of each asset in the group,
for example, to convert target value into target shares.
!!! note
Make sure to use timestamp for `val_price` that comes before timestamps of
all orders in the group with cash sharing (previous `close` for example),
otherwise you're cheating yourself.
init_cash (InitCashMode, float or array_like of float): Initial capital.
See `init_cash` in `Portfolio.from_order_func`.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
This method presumes that in a group of assets that share the same capital all
orders will be executed within the same tick and retain their price regardless
of their position in the queue, even though they depend upon each other and thus
cannot be executed in parallel.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
Each value in this sequence should indicate the position of column in the group to
call next. Processing of `call_seq` goes always from left to right.
For example, `[2, 0, 1]` would first call column 'c', then 'a', and finally 'b'.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
If `CallSeqType.Auto` selected, rearranges calls dynamically based on order value.
Calculates value of all orders per row and group, and sorts them by this value.
Sell orders will be executed first to release funds for buy orders.
!!! warning
`CallSeqType.Auto` should be used with caution:
* It not only presumes that order prices are known beforehand, but also that
orders can be executed in arbitrary order and still retain their price.
In reality, this is hardly the case: after processing one asset, some time
has passed and the price for other assets might have already changed.
* Even if you're able to specify a slippage large enough to compensate for
this behavior, slippage itself should depend upon execution order.
This method doesn't let you do that.
* If one order is rejected, the next orders may still be executed and possibly
left without the required funds.
For more control, use `Portfolio.from_order_func`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape if any of the `log` is True,
otherwise to 1.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
All broadcastable arguments will be broadcast using `vectorbt.base.reshape_fns.broadcast`
but keep original shape to utilize flexible indexing and to save memory.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
When `call_seq` is not `CallSeqType.Auto`, at each timestamp, processing of the assets in
a group goes strictly in order defined in `call_seq`. This order can't be changed dynamically.
This has one big implication for this particular method: the last asset in the call stack
cannot be processed until other assets are processed. This is the reason why rebalancing
cannot work properly in this setting: one has to specify percentages for all assets beforehand
and then tweak the processing order to sell to-be-sold assets first in order to release funds
for to-be-bought assets. This can be automatically done by using `CallSeqType.Auto`.
!!! hint
All broadcastable arguments can be set per frame, series, row, column, or element.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> import vectorbt as vbt
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_orders(close, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it:
```python-repl
>>> size = [1, 0, -1, 0, 1]
>>> portfolio = vbt.Portfolio.from_orders(close, size, size_type='targetpercent')
>>> portfolio.shares()
0 100.000000
1 0.000000
2 -66.666667
3 0.000000
4 26.666667
dtype: float64
>>> portfolio.cash()
0 0.000000
1 200.000000
2 400.000000
3 133.333333
4 0.000000
dtype: float64
```
Equal-weighted portfolio as in the `vectorbt.portfolio.nb.simulate_nb` example (more compact, but with less control over execution):
```python-repl
>>> import numpy as np
>>> np.random.seed(42)
>>> close = pd.DataFrame(np.random.uniform(1, 10, size=(5, 3)))
>>> size = pd.Series(np.full(5, 1/3)) # each column 33.3%
>>> size[1::2] = np.nan # skip every second tick
>>> portfolio = vbt.Portfolio.from_orders(
... close, # acts both as reference and order price here
... size,
... size_type='targetpercent',
... call_seq='auto', # first sell then buy
... group_by=True, # one group
... cash_sharing=True, # assets share the same cash
... fees=0.001, fixed_fees=1., slippage=0.001 # costs
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if size is None:
size = settings.portfolio['size']
if size_type is None:
size_type = settings.portfolio['size_type']
size_type = convert_str_enum_value(SizeType, size_type)
if direction is None:
direction = settings.portfolio['order_direction']
direction = convert_str_enum_value(Direction, direction)
if price is None:
price = close
if fees is None:
fees = settings.portfolio['fees']
if fixed_fees is None:
fixed_fees = settings.portfolio['fixed_fees']
if slippage is None:
slippage = settings.portfolio['slippage']
if min_size is None:
min_size = settings.portfolio['min_size']
if max_size is None:
max_size = settings.portfolio['max_size']
if reject_prob is None:
reject_prob = settings.portfolio['reject_prob']
if allow_partial is None:
allow_partial = settings.portfolio['allow_partial']
if raise_reject is None:
raise_reject = settings.portfolio['raise_reject']
if log is None:
log = settings.portfolio['log']
if val_price is None:
val_price = price
if init_cash is None:
init_cash = settings.portfolio['init_cash']
init_cash = convert_str_enum_value(InitCashMode, init_cash)
if isinstance(init_cash, int) and init_cash in InitCashMode:
init_cash_mode = init_cash
init_cash = np.inf
else:
init_cash_mode = None
if cash_sharing is None:
cash_sharing = settings.portfolio['cash_sharing']
if call_seq is None:
call_seq = settings.portfolio['call_seq']
call_seq = convert_str_enum_value(CallSeqType, call_seq)
auto_call_seq = False
if isinstance(call_seq, int):
if call_seq == CallSeqType.Auto:
call_seq = CallSeqType.Default
auto_call_seq = True
if seed is None:
seed = settings.portfolio['seed']
if seed is not None:
set_seed(seed)
if freq is None:
freq = settings.portfolio['freq']
if broadcast_kwargs is None:
broadcast_kwargs = {}
if wrapper_kwargs is None:
wrapper_kwargs = {}
if not wrapper_kwargs.get('group_select', True) and cash_sharing:
raise ValueError("group_select cannot be disabled if cash_sharing=True")
# Broadcast inputs
# Only close is broadcast, others can remain unchanged thanks to flexible indexing
broadcastable_args = (
close,
size,
size_type,
direction,
price,
fees,
fixed_fees,
slippage,
min_size,
max_size,
reject_prob,
allow_partial,
raise_reject,
log,
val_price
)
keep_raw = [False] + [True] * (len(broadcastable_args) - 1)
broadcast_kwargs = merge_dicts(dict(require_kwargs=dict(requirements='W')), broadcast_kwargs)
broadcasted_args = broadcast(*broadcastable_args, **broadcast_kwargs, keep_raw=keep_raw)
close = broadcasted_args[0]
if not checks.is_pandas(close):
close = pd.Series(close) if close.ndim == 1 else pd.DataFrame(close)
target_shape_2d = (close.shape[0], close.shape[1] if close.ndim > 1 else 1)
wrapper = ArrayWrapper.from_obj(close, freq=freq, group_by=group_by, **wrapper_kwargs)
cs_group_lens = wrapper.grouper.get_group_lens(group_by=None if cash_sharing else False)
init_cash = np.require(np.broadcast_to(init_cash, (len(cs_group_lens),)), dtype=np.float_)
group_lens = wrapper.grouper.get_group_lens(group_by=group_by)
if checks.is_array(call_seq):
call_seq = nb.require_call_seq(broadcast(call_seq, to_shape=target_shape_2d, to_pd=False))
else:
call_seq = nb.build_call_seq(target_shape_2d, group_lens, call_seq_type=call_seq)
if max_orders is None:
max_orders = target_shape_2d[0] * target_shape_2d[1]
if max_logs is None:
max_logs = target_shape_2d[0] * target_shape_2d[1]
if not np.any(log):
max_logs = 1
# Perform calculation
order_records, log_records = nb.simulate_from_orders_nb(
target_shape_2d,
cs_group_lens, # group only if cash sharing is enabled to speed up
init_cash,
call_seq,
auto_call_seq,
*broadcasted_args[1:],
max_orders,
max_logs,
close.ndim == 2
)
# Create an instance
return cls(
wrapper,
close,
order_records,
log_records,
init_cash if init_cash_mode is None else init_cash_mode,
cash_sharing,
call_seq,
**kwargs
)
@classmethod
def from_order_func(cls, close, order_func_nb, *order_args, target_shape=None, keys=None,
init_cash=None, cash_sharing=None, call_seq=None, active_mask=None,
prep_func_nb=None, prep_args=None, group_prep_func_nb=None, group_prep_args=None,
row_prep_func_nb=None, row_prep_args=None, segment_prep_func_nb=None,
segment_prep_args=None, row_wise=None, max_orders=None, max_logs=None,
seed=None, group_by=None, broadcast_kwargs=None, wrapper_kwargs=None, freq=None, **kwargs):
"""Build portfolio from a custom order function.
For details, see `vectorbt.portfolio.nb.simulate_nb`.
If `row_wise` is True, also see `vectorbt.portfolio.nb.simulate_row_wise_nb`.
Args:
close (array_like): Reference price, such as close.
Will broadcast to `target_shape`.
Will be used for calculating unrealized P&L and portfolio value.
order_func_nb (callable): Order generation function.
*order_args: Arguments passed to `order_func_nb`.
target_shape (tuple): Target shape to iterate over. Defaults to `close.shape`.
keys (sequence): Outermost column level.
Each element should correspond to one iteration over columns in `close`.
Should be set only if `target_shape` is bigger than `close.shape`.
init_cash (InitCashMode, float or array_like of float): Initial capital.
By default, will broadcast to the number of columns.
If cash sharing is enabled, will broadcast to the number of groups.
See `vectorbt.portfolio.enums.InitCashMode` to find optimal initial cash.
!!! note
Mode `InitCashMode.AutoAlign` is applied after the portfolio is initialized
to set the same initial cash for all columns/groups. Changing grouping
will change the initial cash, so be aware when indexing.
cash_sharing (bool): Whether to share cash within the same group.
!!! warning
Introduces cross-asset dependencies.
call_seq (CallSeqType or array_like of int): Default sequence of calls per row and group.
* Use `vectorbt.portfolio.enums.CallSeqType` to select a sequence type.
* Set to array to specify custom sequence. Will not broadcast.
!!! note
CallSeqType.Auto should be implemented manually.
Use `auto_call_seq_ctx_nb` in `segment_prep_func_nb`.
active_mask (int or array_like of bool): Mask of whether a particular segment should be executed.
Supplying an integer will activate every n-th row (just for convenience).
Supplying a boolean will broadcast to the number of rows and groups.
prep_func_nb (callable): Simulation preparation function.
prep_args (tuple): Packed arguments passed to `prep_func_nb`.
Defaults to `()`.
group_prep_func_nb (callable): Group preparation function.
Called only if `row_wise` is False.
group_prep_args (tuple): Packed arguments passed to `group_prep_func_nb`.
Defaults to `()`.
row_prep_func_nb (callable): Row preparation function.
Called only if `row_wise` is True.
row_prep_args (tuple): Packed arguments passed to `row_prep_func_nb`.
Defaults to `()`.
segment_prep_func_nb (callable): Segment preparation function.
segment_prep_args (tuple): Packed arguments passed to `segment_prep_func_nb`.
Defaults to `()`.
row_wise (bool): Whether to iterate over rows rather than columns/groups.
See `vectorbt.portfolio.nb.simulate_row_wise_nb`.
max_orders (int): Size of the order records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
max_logs (int): Size of the log records array.
Defaults to the number of elements in the broadcasted shape.
Set to a lower number if you run out of memory.
seed (int): Seed to be set for both `call_seq` and at the beginning of the simulation.
group_by (any): Group columns. See `vectorbt.base.column_grouper.ColumnGrouper`.
broadcast_kwargs (dict): Keyword arguments passed to `vectorbt.base.reshape_fns.broadcast`.
wrapper_kwargs (dict): Keyword arguments passed to `vectorbt.base.array_wrapper.ArrayWrapper`.
freq (any): Index frequency in case `close.index` is not datetime-like.
**kwargs: Keyword arguments passed to the `__init__` method.
For defaults, see `vectorbt.settings.portfolio`.
!!! note
All passed functions should be Numba-compiled.
Objects passed as arguments to both functions will not broadcast to `target_shape`
as their purpose is unknown. You should broadcast manually or use flexible indexing.
Also see notes on `Portfolio.from_orders`.
!!! note
In contrast to other methods, the valuation price is previous `close`
instead of order price, since the price of an order is unknown before call.
You can still set valuation price explicitly in `segment_prep_func_nb`.
## Example
Buy 10 shares each tick:
```python-repl
>>> import pandas as pd
>>> from numba import njit
>>> import vectorbt as vbt
>>> from vectorbt.portfolio.nb import create_order_nb
>>> @njit
... def order_func_nb(oc, size):
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> close = pd.Series([1, 2, 3, 4, 5])
>>> portfolio = vbt.Portfolio.from_order_func(close, order_func_nb, 10)
>>> portfolio.shares()
0 10.0
1 20.0
2 30.0
3 40.0
4 40.0
dtype: float64
>>> portfolio.cash()
0 90.0
1 70.0
2 40.0
3 0.0
4 0.0
dtype: float64
```
Reverse each position by first closing it. Keep state of last position to determine
which position to open next (just as an example, there are easier ways to do this):
```python-repl
>>> import numpy as np
>>> @njit
... def group_prep_func_nb(gc):
... last_pos_state = np.array([-1])
... return (last_pos_state,)
>>> @njit
... def order_func_nb(oc, last_pos_state):
... if oc.shares_now > 0:
... size = -oc.shares_now # close long
... elif oc.shares_now < 0:
... size = -oc.shares_now # close short
... else:
... if last_pos_state[0] == 1:
... size = -np.inf # open short
... last_pos_state[0] = -1
... else:
... size = np.inf # open long
... last_pos_state[0] = 1
...
... return create_order_nb(size=size, price=oc.close[oc.i, oc.col])
>>> portfolio = vbt.Portfolio.from_order_func(
... close, order_func_nb, group_prep_func_nb=group_prep_func_nb)
>>> portfolio.shares()
0 100.0
1 0.0
2 -100.0
3 0.0
4 20.0
dtype: float64
>>> portfolio.cash()
0 0.0
1 200.0
2 500.0
3 100.0
4 0.0
dtype: float64
```
Equal-weighted portfolio as in `vectorbt.portfolio.nb.simulate_nb` example:
```python-repl
>>> from vectorbt.portfolio.nb import auto_call_seq_ctx_nb
>>> from vectorbt.portfolio.enums import SizeType, Direction
>>> @njit
... def group_prep_func_nb(gc):
... '''Define empty arrays for each group.'''
... size = np.empty(gc.group_len, dtype=np.float_)
... size_type = np.empty(gc.group_len, dtype=np.int_)
... direction = np.empty(gc.group_len, dtype=np.int_)
... temp_float_arr = np.empty(gc.group_len, dtype=np.float_)
... return size, size_type, direction, temp_float_arr
>>> @njit
... def segment_prep_func_nb(sc, size, size_type, direction, temp_float_arr):
... '''Perform rebalancing at each segment.'''
... for k in range(sc.group_len):
... col = sc.from_col + k
... size[k] = 1 / sc.group_len
... size_type[k] = SizeType.TargetPercent
... direction[k] = Direction.LongOnly
... sc.last_val_price[col] = sc.close[sc.i, col]
... auto_call_seq_ctx_nb(sc, size, size_type, direction, temp_float_arr)
... return size, size_type, direction
>>> @njit
... def order_func_nb(oc, size, size_type, direction, fees, fixed_fees, slippage):
... '''Place an order.'''
... col_i = oc.call_seq_now[oc.call_idx]
... return create_order_nb(
... size=size[col_i],
... size_type=size_type[col_i],
... price=oc.close[oc.i, oc.col],
... fees=fees, fixed_fees=fixed_fees, slippage=slippage,
... direction=direction[col_i]
... )
>>> np.random.seed(42)
>>> close = np.random.uniform(1, 10, size=(5, 3))
>>> fees = 0.001
>>> fixed_fees = 1.
>>> slippage = 0.001
>>> portfolio = vbt.Portfolio.from_order_func(
... close, # acts both as reference and order price here
... order_func_nb, fees, fixed_fees, slippage, # order_args as *args
... active_mask=2, # rebalance every second tick
... group_prep_func_nb=group_prep_func_nb,
... segment_prep_func_nb=segment_prep_func_nb,
... cash_sharing=True, group_by=True, # one group with cash sharing
... )
>>> portfolio.holding_value(group_by=False).vbt.plot()
```

"""
# Get defaults
from vectorbt import settings
if not checks.is_pandas(close):
if not checks.is_array(close):
close = np.asarray(close)
close = pd.Series(close) if close.ndim == 1 else
|
pd.DataFrame(close)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
import os.path as path
import seaborn as sns; sns.set(style="white")
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import itertools
# define directory where the python file resides
dir_path = path.dirname(path.realpath(__file__))
data_path = dir_path + "/data/"
def _load_base_turnout():
base_turnout_time = pd.read_csv(data_path + "turnout_time_master.csv")
return base_turnout_time
def _create_oa_areas():
output_areas = pd.read_csv(data_path + "oa_master.csv")
cas_rate = pd.read_csv(data_path + "base_cas_master.csv")
m = pd.merge(output_areas, cas_rate, how='left', left_on=['oa_code'], right_on = ['oa_code'])
m.columns = ['oa_code', 'hour', 'dwelling_cas_rate', 'rtc_cas_rate']
return m
def _create_drive_time():
drive_time = pd.read_csv(data_path + "drive_time_master.csv")
hours = pd.DataFrame({'hour': list(range(0, 24)), 'key': 0})
drive_time['key'] = 0
drive_time = drive_time.merge(hours, how='outer', on='key').drop(columns=['key'])
return drive_time
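# Scenario codes map each appliance to a 24-hour turnout-time profile:
# 'off' marks it unavailable (999), 'wt' uses a turnout time of 2, 'rds' a turnout
# time of 5, and 'daycrewed'/'nightonly' switch profile for a 10-hour daytime block
# (assumed interpretation of the crewing labels).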
def _create_turnout_scenarios(scenario_dict):
for k, v in scenario_dict.items():
if v == 'off':
scenario_dict[k] = list(itertools.repeat(999,24))
if v == 'wt':
scenario_dict[k] = list(itertools.repeat(2,24))
if v == 'rds':
scenario_dict[k] = list(itertools.repeat(5,24))
# Assumption: for the day-crewed and night-only profiles the 24 hours are split as
# 7 + 10 + 7 (the original used np.nan as a repeat count, which itertools.repeat cannot accept).
if v == 'daycrewed':
scenario_dict[k] = list(itertools.repeat(5, 7)) + list(itertools.repeat(2, 10)) + list(itertools.repeat(5, 7))
if v == 'nightonly':
scenario_dict[k] = list(itertools.repeat(5, 7)) + list(itertools.repeat(999, 10)) + list(itertools.repeat(5, 7))
return scenario_dict
def _create_final_turnout(turnout_times_df, drive_time_df, scenario_name = None):
r = pd.merge(drive_time_df, turnout_times_df, how='left', left_on=['appliance_callsign', 'hour'], right_on = ['appliance_callsign', 'hour'])
r['total_time'] = r.drive_time + r.turnout_time
#if scenario != None:
# r = r[~r.appliance_callsign.isin(scenario)].copy()
r['rank'] = r.groupby(['oa_code','hour'])['total_time'].rank('first', ascending=True)
r = r[r['rank'] <= 2.0]
r = pd.pivot_table(r, values = 'total_time', index=['oa_code', 'hour'], columns = 'rank').reset_index()
r.columns = ['oa_code', 'hour', 'tt1', 'tt2']
return r
def _create_plot(df):
fig, ax = plt.subplots()
ax = sns.scatterplot(x="Increase in dwl risk %", y="Increase in rtc risk %", hue="scenario", data=df, alpha=.4, palette="muted", edgecolors='none', s=100)
handles, labels = ax.get_legend_handles_labels()
ax.legend(handles=handles[1:], labels=labels[1:], loc='upper right', frameon=False)
ax.set(ylim=(-20, 20))
ax.set(xlim=(-20, 20))
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=100))
ax.yaxis.set_major_formatter(ticker.PercentFormatter(xmax=100))
plt.show()
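# The two fatality models below follow the structure of the code as written: the
# casualty rate is split between the first (72%) and second (28%) arriving appliances
# and scaled by a quadratic function of response time; the RTC model additionally
# scales by the ratio of second- to first-appliance response times.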
def _calc_dwelling_fatalities(cas_rate, first_appliance_time, second_appliance_time):
ratio_first_app = 0.72
ratio_second_app = 0.28
def _response_factor(appliance_response_time):
return (0.0002 * (appliance_response_time ** 2)) - (0.0006 * appliance_response_time) + 0.0218
r1 = cas_rate * ratio_first_app * _response_factor(first_appliance_time)
r2 = cas_rate * ratio_second_app * _response_factor(second_appliance_time)
return r1 + r2
def _calc_rtc_fatalities(cas_rate, first_appliance_time, second_appliance_time):
a = first_appliance_time * 0.0024
b = a + 0.0202
c = b * 0.93
d = second_appliance_time / first_appliance_time
e = (0.026 * d) + 0.93
f = c * e
return cas_rate * f
def _calculate_scores(oa_areas_df, turnout_times_df, drive_time_df, scenario_name = None):
r = _create_final_turnout(turnout_times_df, drive_time_df, scenario_name)
df = oa_areas_df.merge(r, how='left', on=('oa_code','hour'))
df['dwelling_score'] = _calc_dwelling_fatalities(df['dwelling_cas_rate'].values, df['tt1'].values, df['tt2'].values)
df['rtc_score'] = _calc_rtc_fatalities(df['rtc_cas_rate'].values, df['tt1'].values, df['tt2'].values)
df = df.aggregate({"dwelling_score":['sum'],"rtc_score":['sum']}).reset_index(drop=True)
df['scenario'] = scenario_name if scenario_name is not None else 'Base Case'
df = df[['scenario','dwelling_score','rtc_score']]
return df
def _create_final_df(df):
df['Add dwl fatals per 10 yrs'] = 10 * (df.dwelling_score - df.dwelling_score.iloc[0])
df['Add rtc fatals per 10 yrs'] = 10 * (df.rtc_score - df.rtc_score.iloc[0])
df['Years per addition dwl fatal'] = 10 / df['Add dwl fatals per 10 yrs']
df['Years per addition rtc fatal'] = 10 / df['Add rtc fatals per 10 yrs']
df['Increase in dwl risk %'] = ((df.dwelling_score / df.dwelling_score.iloc[0])-1)*100
df['Increase in rtc risk %'] = ((df.rtc_score / df.rtc_score.iloc[0])-1)*100
return df
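# Expected input (inferred from the code below): scenario_list is a list of
# (scenario_name, scenario_dict) tuples, where scenario_dict maps an
# appliance_callsign to one of 'off', 'wt', 'rds', 'daycrewed' or 'nightonly'.
# Hypothetical usage: run_batch_scenarios([('Station X off', {'CALLSIGN_1': 'off'})])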
def run_batch_scenarios(scenario_list = None):
scenario_count = len([s[0] for s in scenario_list])
print(f'...................... Running modeller for {scenario_count} scenarios ......................')
o = _create_oa_areas()
t = _load_base_turnout()
d = _create_drive_time()
b = _calculate_scores(o, t, d)
print('Running base model')
for scenario_name, scenario_dict in scenario_list:
print(f'Running scenario: {scenario_name}')
ts = pd.DataFrame.from_dict(_create_turnout_scenarios(scenario_dict), orient='index')
ts.reset_index(level=0, inplace=True)
ts.rename(columns={"index": "appliance_callsign"}, inplace=True)
ts = ts.melt(id_vars=['appliance_callsign'], var_name='hour', value_name='turnout_time')
new_t = pd.merge(t, ts, how='left', left_on=['appliance_callsign', 'hour'], right_on = ['appliance_callsign', 'hour'])
new_t['turnout_time'] = new_t['turnout_time_y'].mask(pd.isnull, new_t['turnout_time_x'])
new_t.drop(['turnout_time_x', 'turnout_time_y'], axis=1, inplace=True)
s = _calculate_scores(o, new_t, d, scenario_name)
b =
|
pd.concat([b, s])
|
pandas.concat
|
"""
"""
import io
import os
import pandas as pd
import numpy as np
from datetime import datetime
import yaml
import tethys_utils as tu
import logging
from time import sleep
from pyproj import Proj, CRS, Transformer
pd.options.display.max_columns = 10
#############################################
### Parameters
base_path = os.path.realpath(os.path.dirname(__file__))
permit_csv = os.path.join(base_path, 'es_water_permit_data_v02.csv')
sd_csv = os.path.join(base_path, 'es_stream_depletion_details.csv')
with open(os.path.join(base_path, 'parameters-permits.yml')) as param:
param = yaml.safe_load(param)
conn_config = param['remote']['connection_config']
bucket = param['remote']['bucket']
base_key = 'es/{name}.csv'
run_date = pd.Timestamp.today(tz='utc').round('s')
# run_date_local = run_date.tz_convert(ts_local_tz).tz_localize(None).strftime('%Y-%m-%d %H:%M:%S')
run_date_key = run_date.strftime('%Y%m%dT%H%M%SZ')
def read_s3_csv(s3, bucket, key):
"""
"""
resp = s3.get_object(Bucket=bucket, Key=key)
body1 = resp['Body'].read().decode()
s_io = io.StringIO(body1)
csv1 = pd.read_csv(s_io)
return csv1
use_type_mapping = {'Dairying - Cows': 'irrigation', 'Water Supply - Rural': 'water_supply', 'Pasture Irrigation': 'irrigation', 'Crop Irrigation': 'irrigation', 'Stock Yard': 'stockwater', 'Water Supply - Town': 'water_supply', 'Quarrying': 'other', 'Recreational': 'other', 'Gravel extraction': 'other', 'Hydro-electric power generation': 'hydro_electric', 'Food Processing': 'other', 'Meat works': 'other', 'Tourism': 'other', 'Mining works': 'other', 'Industrial': 'other', 'Domestic': 'water_supply', 'Timber Processing incl Sawmills': 'other', 'Peat Harvesting/Processing': 'other', 'Milk and dairy industries': 'other', 'Gravel Wash': 'other'}
###########################################
#### Process csv files
### Permit file
permit1 = pd.read_csv(permit_csv)
permit2 = permit1[['AuthIRISID', 'CurrentStatus', 'CommencementDate', 'ExpiryDate', 'SubType', 'PrimaryIndustry', 'AuthExercised', 'AbstractionSiteName', 'IRISNorthing', 'IRISEasting', 'MaxAuthVol_LperSec', 'MaxVolm3perday', 'MaxVolm3peryear']].copy()
permit2.rename(columns={'AuthIRISID': 'permit_id', 'CurrentStatus': 'permit_status', 'CommencementDate': 'from_date', 'ExpiryDate': 'to_date', 'SubType': 'activity', 'PrimaryIndustry': 'use_type', 'AuthExercised': 'exercised', 'MaxAuthVol_LperSec': 'max_rate', 'MaxVolm3perday': 'max_daily_volume', 'MaxVolm3peryear': 'max_annual_volume'}, inplace=True)
# permit2 = permit2[permit2.max_rate.notnull()].copy()
# permit2['use_type'] = permit2['use_type'].replace(use_type_mapping)
permit2['permit_id'] = permit2['permit_id'].apply(lambda x: x.split('AUTH-')[1]).str.strip()
permit2['from_date'] = pd.to_datetime(permit2['from_date'])
permit2['to_date'] = pd.to_datetime(permit2['to_date'])
activity1 = permit2.activity.str.split('(')
consump1 = activity1.apply(lambda x: x[1].split(')')[0].strip())
consump2 = consump1 == 'Consumptive'
hydro_group = activity1.apply(lambda x: x[0].strip().split(' Take')[0])
permit2['activity'] = 'take'
permit2['hydro_group'] = hydro_group
permit2['consumptive'] = consump2
permit2['exercised'] = permit2['exercised'] == 'Yes'
limit_cols = ['max_rate', 'max_daily_volume', 'max_annual_volume']
for c in limit_cols:
permit2[c] = pd.to_numeric(permit2[c], errors='coerce')
permit3 = permit2.dropna(subset=limit_cols, how='all').dropna(subset=['AbstractionSiteName'])
permit4 = permit3.drop(['AbstractionSiteName', 'IRISNorthing', 'IRISEasting'], axis=1)
grp1 = permit4.groupby(['permit_id', 'hydro_group'])
limits_max = grp1[limit_cols].max()
others = grp1[['permit_status', 'from_date', 'to_date', 'activity', 'use_type', 'exercised', 'consumptive']].first()
permit5 = pd.concat([others, limits_max], axis=1).reset_index()
# permit4 = permit3.dropna(subset=['wap_name'])
### Stations
def split_sites(permit_id, AbstractionSiteName, IRISNorthing, IRISEasting):
"""
"""
sites = str(AbstractionSiteName).split('_')
lats = [int(float(lat)) for lat in IRISNorthing.split(' ,')]
lons = [int(float(lon)) for lon in IRISEasting.split(' ,')]
permit = [permit_id] * len(sites)
sites1 = list(zip(permit, sites, lons, lats))
return sites1
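# Hedged illustration with made-up values: two site names separated by '_' and two
# coordinates per field (split on ' ,') expand to two records for the same permit.
#
#   split_sites('200123', 'F45/0001_F45/0002', '4901234.0 ,4901300.0', '1234567.0 ,1234600.0')
#   -> [('200123', 'F45/0001', 1234567, 4901234), ('200123', 'F45/0002', 1234600, 4901300)]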
sites_list = []
for i, row in permit3.iterrows():
sites_list.extend(split_sites(row.permit_id, row.AbstractionSiteName, row.IRISNorthing, row.IRISEasting))
sites_df = pd.DataFrame(sites_list, columns=['permit_id', 'wap', 'NZTMY', 'NZTMX'])
sites_df1 = sites_df[sites_df.wap.str.contains(r'[A-Z]+\d*/\d+')].groupby(['permit_id', 'wap']).first().copy()
## Convert to lat and lon
from_crs = 2193
to_crs = 4326
from_crs1 = Proj(CRS.from_user_input(from_crs))
to_crs1 = Proj(CRS.from_user_input(to_crs))
trans1 = Transformer.from_proj(from_crs1, to_crs1)
points = np.array(trans1.transform(*sites_df1[['NZTMX', 'NZTMY']].values.T))
sites_df1['lon'] = points[1].round(5)
sites_df1['lat'] = points[0].round(5)
## Add altitude
# k_key = param['source']['koordinates_key']
#
# sites_u = sites_df1[['wap', 'lon', 'lat']].drop_duplicates(subset=['wap'])
#
# alt1 = sites_u.apply(lambda x: tu.altitude_io.koordinates_raster_query('https://data.linz.govt.nz', k_key, '51768', x.lon, x.lat), axis=1)
# alt2 = []
# for a in alt1:
# try:
# alt2.extend([round(a[0]['value'], 3)])
# except:
# print('No altitude found, using -9999')
# alt2.extend([-9999])
#
# sites_u['altitude'] = alt2
# sites_df2 = pd.merge(sites_df1.drop(['NZTMX', 'NZTMY', 'lat', 'lon'], axis=1), sites_u, on='wap')
sites_df2 = sites_df1.drop(['NZTMX', 'NZTMY'], axis=1).reset_index()
### SD file
sd1 = pd.read_csv(sd_csv)
sd_cols = {'Consent Number': 'permit_id', 'Well Number': 'wap', 'Depth': 'wap_depth', 'Bore Specific Rate as Proportion of Whole Take (L/s)': 'wap_max_rate', 'q/Q Total\nNo Flow Restriction': 'sd_ratio'}
sd2 = sd1[list(sd_cols.keys())].dropna(subset=['Consent Number']).rename(columns=sd_cols).copy()
sd2['permit_id'] = sd2['permit_id'].str.strip()
numeric_cols = ['wap_depth', 'wap_max_rate', 'sd_ratio']
for c in numeric_cols:
    sd2[c] = pd.to_numeric(sd2[c], errors='coerce')  # api: pandas.to_numeric
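# For reference, errors='coerce' converts unparseable entries to NaN instead of
# raising, e.g. pd.to_numeric(pd.Series(['1.5', 'n/a']), errors='coerce') gives [1.5, NaN].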
|
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with tm.assert_produces_warning(FutureWarning):
empty_series = pd.Series([])
tm.assert_series_equal(s, s.replace(empty_series))
def test_replace_string_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_replacer_equals_replacement(self):
# GH 20656
# make sure all replacers are matching against original values
s = pd.Series(["a", "b"])
expected = pd.Series(["b", "a"])
result = s.replace({"a": "b", "b": "a"})
tm.assert_series_equal(expected, result)
def test_replace_unicode_with_number(self):
# GH 15743
s = pd.Series([1, 2, 3])
result = s.replace("2", np.nan)
expected = pd.Series([1, 2, 3])
tm.assert_series_equal(expected, result)
def test_replace_mixed_types_with_string(self):
# Testing mixed
s = pd.Series([1, 2, 3, "4", 4, 5])
result = s.replace([2, "4"], np.nan)
expected = pd.Series([1, np.nan, 3, np.nan, 4, 5])
tm.assert_series_equal(expected, result)
@pytest.mark.parametrize(
"categorical, numeric",
[
(pd.Categorical(["A"], categories=["A", "B"]), [1]),
(pd.Categorical(["A", "B"], categories=["A", "B"]), [1, 2]),
],
)
def test_replace_categorical(self, categorical, numeric):
# GH 24971, GH#23305
ser = pd.Series(categorical)
result = ser.replace({"A": 1, "B": 2})
expected = pd.Series(numeric).astype("category")
if 2 not in expected.cat.categories:
# i.e. categories should be [1, 2] even if there are no "B"s present
# GH#44940
expected = expected.cat.add_categories(2)
tm.assert_series_equal(expected, result)
def test_replace_categorical_single(self):
# GH 26988
dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
s = pd.Series(dti)
c = s.astype("category")
expected = c.copy()
expected = expected.cat.add_categories("foo")
expected[2] = "foo"
expected = expected.cat.remove_unused_categories()
assert c[2] != "foo"
result = c.replace(c[2], "foo")
tm.assert_series_equal(expected, result)
assert c[2] != "foo" # ensure non-inplace call does not alter original
return_value = c.replace(c[2], "foo", inplace=True)
assert return_value is None
tm.assert_series_equal(expected, c)
first_value = c[0]
return_value = c.replace(c[1], c[0], inplace=True)
assert return_value is None
assert c[0] == c[1] == first_value # test replacing with existing value
def test_replace_with_no_overflowerror(self):
# GH 25616
# casts to object without Exception from OverflowError
s = pd.Series([0, 1, 2, 3, 4])
result = s.replace([3], ["100000000000000000000"])
expected = pd.Series([0, 1, 2, "100000000000000000000", 4])
tm.assert_series_equal(result, expected)
s = pd.Series([0, "100000000000000000000", "100000000000000000001"])
result = s.replace(["100000000000000000000"], [1])
expected = pd.Series([0, 1, "100000000000000000001"])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, to_replace, exp",
[
([1, 2, 3], {1: 2, 2: 3, 3: 4}, [2, 3, 4]),
(["1", "2", "3"], {"1": "2", "2": "3", "3": "4"}, ["2", "3", "4"]),
],
)
def test_replace_commutative(self, ser, to_replace, exp):
# GH 16051
# DataFrame.replace() overwrites when values are non-numeric
series = pd.Series(ser)
expected = pd.Series(exp)
result = series.replace(to_replace)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ser, exp", [([1, 2, 3], [1, True, 3]), (["x", 2, 3], ["x", True, 3])]
)
def test_replace_no_cast(self, ser, exp):
# GH 9113
# BUG: replace int64 dtype with bool coerces to int64
series = pd.Series(ser)
result = series.replace(2, True)
expected = pd.Series(exp)
tm.assert_series_equal(result, expected)
def test_replace_invalid_to_replace(self):
# GH 18634
# API: replace() should raise an exception if invalid argument is given
series = pd.Series(["a", "b", "c "])
msg = (
r"Expecting 'to_replace' to be either a scalar, array-like, "
r"dict or None, got invalid type.*"
)
with pytest.raises(TypeError, match=msg):
series.replace(lambda x: x.strip())
@pytest.mark.parametrize("frame", [False, True])
def test_replace_nonbool_regex(self, frame):
obj = pd.Series(["a", "b", "c "])
if frame:
obj = obj.to_frame()
msg = "'to_replace' must be 'None' if 'regex' is not a bool"
with pytest.raises(ValueError, match=msg):
obj.replace(to_replace=["a"], regex="foo")
@pytest.mark.parametrize("frame", [False, True])
def test_replace_empty_copy(self, frame):
obj = pd.Series([], dtype=np.float64)
if frame:
obj = obj.to_frame()
res = obj.replace(4, 5, inplace=True)
assert res is None
res = obj.replace(4, 5, inplace=False)
tm.assert_equal(res, obj)
assert res is not obj
def test_replace_only_one_dictlike_arg(self, fixed_now_ts):
# GH#33340
ser = pd.Series([1, 2, "A", fixed_now_ts, True])
to_replace = {0: 1, 2: "A"}
value = "foo"
msg = "Series.replace cannot use dict-like to_replace and non-None value"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
to_replace = 1
value = {0: "foo", 2: "bar"}
msg = "Series.replace cannot use dict-value and non-None to_replace"
with pytest.raises(ValueError, match=msg):
ser.replace(to_replace, value)
def test_replace_extension_other(self, frame_or_series):
# https://github.com/pandas-dev/pandas/issues/34530
obj = frame_or_series(pd.array([1, 2, 3], dtype="Int64"))
result = obj.replace("", "") # no exception
# should not have changed dtype
tm.assert_equal(obj, result)
def _check_replace_with_method(self, ser: pd.Series):
df = ser.to_frame()
res = ser.replace(ser[1], method="pad")
expected = pd.Series([ser[0], ser[0]] + list(ser[2:]), dtype=ser.dtype)
tm.assert_series_equal(res, expected)
res_df = df.replace(ser[1], method="pad")
tm.assert_frame_equal(res_df, expected.to_frame())
ser2 = ser.copy()
res2 = ser2.replace(ser[1], method="pad", inplace=True)
assert res2 is None
tm.assert_series_equal(ser2, expected)
res_df2 = df.replace(ser[1], method="pad", inplace=True)
assert res_df2 is None
tm.assert_frame_equal(df, expected.to_frame())
def test_replace_ea_dtype_with_method(self, any_numeric_ea_dtype):
arr = pd.array([1, 2, pd.NA, 4], dtype=any_numeric_ea_dtype)
ser = pd.Series(arr)
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_interval_with_method(self, as_categorical):
# in particular interval that can't hold NA
idx = pd.IntervalIndex.from_breaks(range(4))
ser = pd.Series(idx)
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
@pytest.mark.parametrize("as_period", [True, False])
@pytest.mark.parametrize("as_categorical", [True, False])
def test_replace_datetimelike_with_method(self, as_period, as_categorical):
idx = pd.date_range("2016-01-01", periods=5, tz="US/Pacific")
if as_period:
idx = idx.tz_localize(None).to_period("D")
ser = pd.Series(idx)
ser.iloc[-2] = pd.NaT
if as_categorical:
ser = ser.astype("category")
self._check_replace_with_method(ser)
def test_replace_with_compiled_regex(self):
# https://github.com/pandas-dev/pandas/issues/35680
s = pd.Series(["a", "b", "c"])
regex = re.compile("^a$")
result = s.replace({regex: "z"}, regex=True)
expected = pd.Series(["z", "b", "c"])
tm.assert_series_equal(result, expected)
def test_pandas_replace_na(self):
# GH#43344
ser = pd.Series(["AA", "BB", "CC", "DD", "EE", "", pd.NA], dtype="string")
regex_mapping = {
"AA": "CC",
"BB": "CC",
"EE": "CC",
"CC": "CC-REPL",
}
result = ser.replace(regex_mapping, regex=True)
        exp = pd.Series(["CC", "CC", "CC-REPL", "DD", "CC", "", pd.NA], dtype="string")  # api: pandas.Series
        tm.assert_series_equal(result, exp)
|
import os
import base64
from datetime import date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import dash
import flask
import dash_core_components as dcc
import dash_bootstrap_components as dbc
import dash_extendable_graph as deg
import plotly.graph_objects as go
import dash_html_components as html
import plotly.express as px
import plotly.io as pio
from covid_xprize.nixtamalai.viz_components import get_pareto_data
from covid_xprize.nixtamalai.viz_components import npi_val_to_cost
from covid_xprize.nixtamalai.viz_components import get_overall_data
from covid_xprize.nixtamalai.viz_components import npi_cost_to_val
from covid_xprize.nixtamalai.viz_components import get_sliders
import palettable as pltt
import dash_table
# TEMPLATE = 'plotly_dark'
START_DATE = "2020-08-01"
END_DATE = "2020-09-01"
TEST_COST = "covid_xprize/nixtamalai/viz_data/uniform_random_costs.csv"
INITIAL_COUNTRY = "Mexico"
IP_FILE = "covid_xprize/nixtamalai/viz_data/scenario_all_countries_no_regions.csv"
DEFAULT_COLORS = px.colors.qualitative.Plotly
logo_filename = "./covid_xprize/nixtamalai/img/logo.jpeg"
encoded_logo = base64.b64encode(open(logo_filename, 'rb').read())
HIST_DF = pd.read_csv(IP_FILE,
parse_dates=['Date'],
encoding="ISO-8859-1",
keep_default_na=False,
error_bad_lines=True)
HIST_DF = HIST_DF.replace("", np.NaN)
ALL_COUNTRIES = [{"label":c, "value":c} for c in HIST_DF.CountryName.unique()]
WEIGHTS_DF = pd.read_csv(TEST_COST, keep_default_na=False)
WEIGHTS_DF = WEIGHTS_DF.replace("", np.NaN)
overall_pdf, predictions = get_overall_data(START_DATE, END_DATE, HIST_DF, WEIGHTS_DF,
INITIAL_COUNTRY, "greedy")
# Gráfica inicial de Pareto
pareto = get_pareto_data(list(overall_pdf['Stringency']),
list(overall_pdf['PredictedDailyNewCases']))
pareto_data = {"x": pareto[0],
"y": pareto[1],
"name": "Base (Blind Greedy for Mexico)",
"showlegend": True,
}
npis = (WEIGHTS_DF
.drop(columns=['CountryName', 'RegionName'])
.to_dict(orient='records'))[0]
BASE_COSTS = npi_val_to_cost(npis)
# Gráfica inicial de radar
radar_data = {
"r": [v for _,v in BASE_COSTS.items()],
"theta": [k.split("_")[0] for k,_ in BASE_COSTS.items()],
"name": "Base (Blind Greedy for Mexico)",
'type': 'scatterpolar',
"showlegend": True,
}
# Gráfica inicial de predicciones
predictions = pd.concat(predictions)
predictions['Prescriptor'] = 0
fig = px.line(predictions,
facet_col="Prescriptor",
color="Prescriptor",
line_group="PrescriptionIndex",
x="Date",
y="PredictedDailyNewCases",
facet_col_wrap=3)
data_table = dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in predictions.columns],
data=predictions.to_dict('records'),
data_previous=[dict()],
export_format='xlsx',
export_headers='display',
page_size=10,
sort_action='native'
)
sliders = get_sliders(BASE_COSTS)
server = flask.Flask(__name__) # define flask app.server
app = dash.Dash(__name__,
external_stylesheets=[dbc.themes.FLATLY],
prevent_initial_callbacks=True,
server=server)
app.layout = dbc.Container(
[
dbc.Row(
[dbc.Col(html.Img(src='data:image/png;base64,{}'.format(encoded_logo.decode()),
height="100px",style={'padding-left': '30px'}), width=1),
dbc.Col(html.Div(html.H1(children='Visualizing Intervention Plans')))]
),
dbc.Row(html.Hr()),
dbc.Row(
[
dbc.Col(html.Div(sliders[0:3]), width=2),
dbc.Col(html.Div(sliders[3:6]), width=2),
dbc.Col(html.Div(sliders[6:9]), width=2),
dbc.Col(html.Div(sliders[9:12]), width=2),
dbc.Col(
[
html.Div(dcc.Dropdown(
id='model-selector',
options=[
{'label': 'Blind Greedy', 'value': 'greedy'},
{'label': 'Nixtamal Surrogate', 'value': 'nixtamal'}
],
style={'color': 'black'},
value='greedy'
)),
html.Hr(),
html.Div(dcc.DatePickerRange(
id='date-range',
min_date_allowed=date(2020, 8, 1),
max_date_allowed=date(2021, 12, 31),
initial_visible_month=date(2020, 8, 1),
start_date=date(2020, 8, 1),
end_date=date(2020, 9, 1)
)),
html.Hr(),
html.Div(dcc.Dropdown(
id='country-selector',
options=ALL_COUNTRIES,
style={'color': 'black'},
value=INITIAL_COUNTRY
))
],
width=2),
dbc.Col(
[
html.Div(dbc.Button('Submit', id='submit-val',color="success",
n_clicks=0, block=True)),
html.Hr(),
html.Div(dbc.Button('Reset', id='reset-val', color="warning",
href='/', n_clicks=0, block=True))
],
width=1),
], style={'padding-left': '30px'}
),
html.Hr(),
dbc.Row(
[
dbc.Col(
html.Div(html.H4("NPI Weights"), style={
'text-align': 'center'}),
width={"size": 4, "offset": 1},
),
dbc.Col(
html.Div(html.H4("Pareto Plot"), style={
'text-align': 'center'}),
width={"size": 6},
),
],
align="center",
),
dbc.Row(
[
dbc.Col(
deg.ExtendableGraph(
id='radar-plot',
figure=go.Figure(dict(
data=[radar_data],
layout={
"legend": {"yanchor": "bottom", "y": 0.1, "x": -1.2},
}
))
), width={"size": 4, "offset": 1},
),
dbc.Col(
dcc.Loading(
id="loading-pareto",
children=[deg.ExtendableGraph(
id='pareto-plot',
figure=go.Figure(dict(
data=[pareto_data],
layout={
"xaxis": {"title": "Mean Stringency"},
"yaxis": {"title": "Mean New Cases per Day"},
"legend": {"yanchor": "top", "y": 0.99, "x": 0.35},
}
))
)]), width={"size": 6},
),
],
align="center",
),
dbc.Row(dbc.Col(
dcc.Loading(
id="loadig-predictions",
children=[dcc.Graph(id='predictions-graphs', figure=fig)]
)
)
),
dbc.Row(dbc.Col(
data_table, width="auto"),
align='center', justify="center"),
], fluid=True
)
@app.callback([dash.dependencies.Output('pareto-plot', 'extendData'),
dash.dependencies.Output('table', 'data_previous')],
[dash.dependencies.Input('submit-val', 'n_clicks')],
[dash.dependencies.State('C1-weight', 'value')],
[dash.dependencies.State('C2-weight', 'value')],
[dash.dependencies.State('C3-weight', 'value')],
[dash.dependencies.State('C4-weight', 'value')],
[dash.dependencies.State('C5-weight', 'value')],
[dash.dependencies.State('C6-weight', 'value')],
[dash.dependencies.State('C7-weight', 'value')],
[dash.dependencies.State('C8-weight', 'value')],
[dash.dependencies.State('H1-weight', 'value')],
[dash.dependencies.State('H2-weight', 'value')],
[dash.dependencies.State('H3-weight', 'value')],
[dash.dependencies.State('H4-weight', 'value')],
[dash.dependencies.State('model-selector', 'value')],
[dash.dependencies.State('country-selector', 'value')],
[dash.dependencies.State('date-range', 'start_date')],
[dash.dependencies.State('date-range', 'end_date')],
[dash.dependencies.State('pareto-plot', 'figure')]
)
def update_pareto_plot(n_clicks, value_c1, value_c2, value_c3, value_c4, value_c5, value_c6,
value_c7, value_c8, value_h1, value_h2, value_h3, value_h4, model, country,
start_date, end_date, figure):
if n_clicks > 0:
weights_dict = {
'CountryName': [country],
'RegionName': [""],
'C1_School closing': [value_c1],
'C2_Workplace closing': [value_c2],
'C3_Cancel public events': [value_c3],
'C4_Restrictions on gatherings': [value_c4],
'C5_Close public transport': [value_c5],
'C6_Stay at home requirements': [value_c6],
'C7_Restrictions on internal movement': [value_c7],
'C8_International travel controls': [value_c8],
'H1_Public information campaigns': [value_h1],
'H2_Testing policy': [value_h2],
'H3_Contact tracing': [value_h3],
'H6_Facial Coverings': [value_h4]
}
prescriptor_names = {"greedy": "Blind Greedy",
"nixtamal": "Nixtamal Surrogate"}
weights_dict = npi_cost_to_val(weights_dict)
user_weights = pd.DataFrame.from_dict(weights_dict)
overall_pdf, predictions = get_overall_data(
start_date, end_date, HIST_DF, user_weights, country, model)
pareto = get_pareto_data(list(overall_pdf['Stringency']),
list(overall_pdf['PredictedDailyNewCases']))
new_trace = {"x": pareto[0],
"y": pareto[1],
"name": "{} prescription {} for {}".format(prescriptor_names[model],
n_clicks, country)
}
predictions = pd.concat(predictions)
predictions['Prescriptor'] = n_clicks
prediction_traces = []
for idx in predictions.PrescriptionIndex.unique():
display_legend = True if idx == 0 else False
idf = predictions[predictions.PrescriptionIndex == idx]
trace = {"x": idf["Date"],
"y": idf["PredictedDailyNewCases"],
"mode": "lines",
"line": dict(color=DEFAULT_COLORS[n_clicks]),
"name": "{} prescription {} for {}".format(prescriptor_names[model],
n_clicks, country),
"legendgroup": "group_{}".format(n_clicks),
"showlegend": display_legend
}
prediction_traces.append(trace)
return ([new_trace, []], []), predictions.to_dict('records')
    # predictions is only defined when n_clicks > 0; leave the table untouched otherwise
    return ([], [], []), dash.no_update
@app.callback(dash.dependencies.Output('radar-plot', 'extendData'),
[dash.dependencies.Input('submit-val', 'n_clicks')],
[dash.dependencies.State('C1-weight', 'value')],
[dash.dependencies.State('C2-weight', 'value')],
[dash.dependencies.State('C3-weight', 'value')],
[dash.dependencies.State('C4-weight', 'value')],
[dash.dependencies.State('C5-weight', 'value')],
[dash.dependencies.State('C6-weight', 'value')],
[dash.dependencies.State('C7-weight', 'value')],
[dash.dependencies.State('C8-weight', 'value')],
[dash.dependencies.State('H1-weight', 'value')],
[dash.dependencies.State('H2-weight', 'value')],
[dash.dependencies.State('H3-weight', 'value')],
[dash.dependencies.State('H4-weight', 'value')],
[dash.dependencies.State('model-selector', 'value')],
[dash.dependencies.State('country-selector', 'value')],
[dash.dependencies.State('radar-plot', 'figure')]
)
def update_radar_plot(n_clicks, value_c1, value_c2, value_c3, value_c4, value_c5, value_c6,
value_c7, value_c8, value_h1, value_h2, value_h3, value_h4, model, country, figure):
if n_clicks > 0:
weights_dict = {
'C1': value_c1,
'C2': value_c2,
'C3': value_c3,
'C4': value_c4,
'C5': value_c5,
'C6': value_c6,
'C7': value_c7,
'C8': value_c8,
'H1': value_h1,
'H2': value_h2,
'H3': value_h3,
'H6': value_h4
}
prescriptor_names = {"greedy": "Blind Greedy",
"nixtamal": "Nixtamal Surrogate"}
new_trace = {
"r": [v for _,v in weights_dict.items()],
"theta": [k for k,_ in weights_dict.items()],
'type': 'scatterpolar',
"name": "{} prescription {} for {}".format(prescriptor_names[model],
n_clicks, country),
}
return [new_trace, []], []
# @app.callback(
# dash.dependencies.Output("pareto-plot", "figure"),
# [dash.dependencies.Input("pareto-plot", "hoverData")],
# [dash.dependencies.State('pareto-plot', 'figure')]
# )
# def highlight_trace(hover_data, figure):
# # here you set the default settings
# # for trace in my_pot.data:
# # country["line"]["width"] = 1
# # country["opacity"] = 0.5
# if hover_data:
# trace_index = hover_data["points"][0]["curveNumber"]
# print(figure["data"])
# # figure["data"][trace_index]["line"]["width"] = 5
# # figure["data"][trace_index]["opacity"] = 1
# return figure
@app.callback(dash.dependencies.Output('table', 'data'),
[dash.dependencies.Input('submit-val', 'n_clicks'),
dash.dependencies.Input('table', 'data_previous'),
dash.dependencies.Input('table', 'data')],
[dash.dependencies.State('date-range', 'start_date')],
[dash.dependencies.State('date-range', 'end_date')]
)
def update_table(n_clicks, predictions, data, start_date, end_date):
predictions = pd.DataFrame.from_dict(predictions)
data = pd.DataFrame.from_dict(data)
data = data[(data.Date >= start_date) &
(data.Date < end_date)]
predictions = predictions[(predictions.Date >= start_date) &
(predictions.Date < end_date)]
    return pd.concat([data, predictions]).to_dict('records')
@app.callback(dash.dependencies.Output('predictions-graphs', 'figure'),
dash.dependencies.Input('table', 'data')
)
def update_predictions_graphs(data):
    predictions = pd.DataFrame.from_records(data)  # api: pandas.DataFrame.from_records
    # The original continuation is not shown in this excerpt; rebuild the facet plot
    # the same way as the initial module-level figure (assumed).
    fig = px.line(predictions, facet_col="Prescriptor", color="Prescriptor",
                  line_group="PrescriptionIndex", x="Date", y="PredictedDailyNewCases",
                  facet_col_wrap=3)
    return fig
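# The server start-up is not shown in this excerpt; a typical Dash entry point
# (assumption, not from the original file) would be:
#
#   if __name__ == '__main__':
#       app.run_server(debug=True, host='0.0.0.0', port=8050)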
|
import json
import numpy as np
from numpy.testing import assert_allclose
import pandas as pd
from pandas.util.testing import assert_frame_equal
import unittest
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score, explained_variance_score, mean_absolute_error, max_error
from aix360.algorithms.rbm import FeatureBinarizer, GLRMExplainer, LinearRuleRegression
class TestLinearRuleRegression(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.boston = load_boston()
def test_classification(self):
boston_df = pd.DataFrame(self.boston.data, columns=self.boston.feature_names)
X_train, X_test, Y_train, Y_test = train_test_split(boston_df, self.boston.target, test_size = 0.25, random_state = 31)
fb = FeatureBinarizer(negations=True)
X_train_fb = fb.fit_transform(X_train)
X_test_fb = fb.transform(X_test)
self.assertEqual(len(X_train_fb.columns), 196)
self.assertEqual(len(X_test_fb.columns), 196)
linear_model = LinearRuleRegression()
explainer = GLRMExplainer(linear_model)
explainer.fit(X_train_fb, Y_train)
Y_pred = explainer.predict(X_test_fb)
self.assertGreater(r2_score(Y_test, Y_pred), 0.8)
self.assertGreater(explained_variance_score(Y_test, Y_pred), 0.8)
self.assertLess(mean_absolute_error(Y_test, Y_pred), 3)
self.assertLess(max_error(Y_test, Y_pred), 12)
        explanation = explainer.explain()
expected = pd.DataFrame(columns=["rule", "coefficient"], data=[
['(intercept)', 21.9],
['NOX <= 0.66', 6.3],
['RM <= 7.16 AND DIS > 1.62', -5.8],
['LSTAT <= 4.66', 5.5],
['DIS <= 3.32 AND RAD > 2.00 AND B > 295.98 AND LSTAT <= 22.79', 4.8],
['CHAS not AND PTRATIO > 16.10', -3.9],
['RM <= 7.16 AND RAD <= 6.00', -3.3],
['TAX > 293.00 AND LSTAT > 4.66', -2.9],
['LSTAT <= 15.03', 2.8],
['INDUS > 4.05 AND LSTAT > 4.66', -2.5],
['DIS <= 7.24 AND RAD > 2.00 AND PTRATIO <= 20.90 AND B <= 394.99 AND B > 295.98 AND LSTAT <= 22.79', 2.5],
['LSTAT <= 9.48', 2.5],
['CRIM <= 9.84 AND DIS <= 4.64 AND RAD > 1.00 AND TAX <= 666.00 AND LSTAT <= 22.79', 2.2],
['LSTAT <= 17.60', 1.9],
['TAX > 330.00 AND LSTAT > 4.66', -1.8],
['CRIM <= 9.84 AND CRIM > 0.06 AND PTRATIO <= 20.90', 1.8],
['LSTAT <= 6.25', 1.6],
['RM <= 7.16 AND B > 380.27', -1.6],
['LSTAT <= 11.12', 1.6],
['RAD > 2.00 AND LSTAT <= 22.79', 1.2],
['RM <= 7.16', -1.2],
['CHAS not AND RM <= 7.16', 1.2],
['RM <= 6.51', -1.1],
['CRIM <= 9.84 AND DIS <= 3.95 AND TAX <= 666.00 AND PTRATIO <= 20.90 AND B > 295.98', 1.0],
['CRIM <= 9.84 AND RAD > 1.00 AND LSTAT <= 22.79', 1.0],
['DIS <= 3.95 AND LSTAT <= 22.79', -0.9],
['RM <= 6.74', -0.8],
['PTRATIO <= 19.52', 0.8],
['NOX <= 0.66 AND PTRATIO <= 20.90 AND LSTAT <= 22.79', -0.8],
['RAD > 4.00 AND LSTAT <= 22.79', -0.63],
['B <= 391.27 AND LSTAT <= 22.79', 0.5],
['LSTAT <= 7.58', 0.44],
['LSTAT <= 13.14', 0.17]
])
        assert_frame_equal(explanation, expected, check_dtype=False, check_exact=False, check_less_precise=1)  # api: pandas.util.testing.assert_frame_equal
|
import datetime
import hashlib
import os
import time
from warnings import (
catch_warnings,
simplefilter,
)
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
safe_close,
)
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
from pandas.io.pytables import (
HDFStore,
read_hdf,
)
pytestmark = pytest.mark.single_cpu
def test_context(setup_path):
with tm.ensure_clean(setup_path) as path:
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
with tm.ensure_clean(setup_path) as path:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
def test_no_track_times(setup_path):
# GH 32682
# enables to set track_times (see `pytables` `create_table` documentation)
def checksum(filename, hash_factory=hashlib.md5, chunk_num_blocks=128):
h = hash_factory()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(chunk_num_blocks * h.block_size), b""):
h.update(chunk)
return h.digest()
def create_h5_and_return_checksum(track_times):
with ensure_clean_path(setup_path) as path:
df = DataFrame({"a": [1]})
with HDFStore(path, mode="w") as hdf:
hdf.put(
"table",
df,
format="table",
data_columns=True,
index=None,
track_times=track_times,
)
return checksum(path)
checksum_0_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_0_tt_true = create_h5_and_return_checksum(track_times=True)
# sleep is necessary to create h5 with different creation time
time.sleep(1)
checksum_1_tt_false = create_h5_and_return_checksum(track_times=False)
checksum_1_tt_true = create_h5_and_return_checksum(track_times=True)
# checksums are the same if track_time = False
assert checksum_0_tt_false == checksum_1_tt_false
# checksums are NOT same if track_time = True
assert checksum_0_tt_true != checksum_1_tt_true
def test_iter_empty(setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@pytest.mark.filterwarnings("ignore:object name:tables.exceptions.NaturalNameWarning")
def test_contains(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = tm.makeDataFrame()
assert "node())" in store
def test_versioning(setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df1")
store.append("df1", df[:10])
store.append("df1", df[10:])
assert store.root.a._v_attrs.pandas_version == "0.15.2"
assert store.root.b._v_attrs.pandas_version == "0.15.2"
assert store.root.df1._v_attrs.pandas_version == "0.15.2"
# write a file and wipe its versioning
_maybe_remove(store, "df2")
store.append("df2", df)
# this is an error because its table_type is appendable, but no
# version info
store.get_node("df2")._v_attrs.pandas_version = None
msg = "'NoneType' object has no attribute 'startswith'"
with pytest.raises(Exception, match=msg):
store.select("df2")
@pytest.mark.parametrize(
"where, expected",
[
(
"/",
{
"": ({"first_group", "second_group"}, set()),
"/first_group": (set(), {"df1", "df2"}),
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
(
"/second_group",
{
"/second_group": ({"third_group"}, {"df3", "s1"}),
"/second_group/third_group": (set(), {"df4"}),
},
),
],
)
def test_walk(where, expected):
# GH10143
objs = {
"df1": DataFrame([1, 2, 3]),
"df2": DataFrame([4, 5, 6]),
"df3": DataFrame([6, 7, 8]),
"df4": DataFrame([9, 10, 11]),
"s1": Series([10, 9, 8]),
# Next 3 items aren't pandas objects and should be ignored
"a1": np.array([[1, 2, 3], [4, 5, 6]]),
"tb1": np.array([(1, 2, 3), (4, 5, 6)], dtype="i,i,i"),
"tb2": np.array([(7, 8, 9), (10, 11, 12)], dtype="i,i,i"),
}
with ensure_clean_store("walk_groups.hdf", mode="w") as store:
store.put("/first_group/df1", objs["df1"])
store.put("/first_group/df2", objs["df2"])
store.put("/second_group/df3", objs["df3"])
store.put("/second_group/s1", objs["s1"])
store.put("/second_group/third_group/df4", objs["df4"])
# Create non-pandas objects
store._handle.create_array("/first_group", "a1", objs["a1"])
store._handle.create_table("/first_group", "tb1", obj=objs["tb1"])
store._handle.create_table("/second_group", "tb2", obj=objs["tb2"])
assert len(list(store.walk(where=where))) == len(expected)
for path, groups, leaves in store.walk(where=where):
assert path in expected
expected_groups, expected_frames = expected[path]
assert expected_groups == set(groups)
assert expected_frames == set(leaves)
for leaf in leaves:
frame_path = "/".join([path, leaf])
obj = store.get(frame_path)
if "df" in leaf:
tm.assert_frame_equal(obj, objs[leaf])
else:
tm.assert_series_equal(obj, objs[leaf])
def test_getattr(setup_path):
with ensure_clean_store(setup_path) as store:
s = tm.makeTimeSeries()
store["a"] = s
# test attribute access
result = store.a
tm.assert_series_equal(result, s)
result = getattr(store, "a")
tm.assert_series_equal(result, s)
df = tm.makeTimeDataFrame()
store["df"] = df
result = store.df
tm.assert_frame_equal(result, df)
# errors
for x in ["d", "mode", "path", "handle", "complib"]:
msg = f"'HDFStore' object has no attribute '{x}'"
with pytest.raises(AttributeError, match=msg):
getattr(store, x)
# not stores
for x in ["mode", "path", "handle", "complib"]:
getattr(store, f"_{x}")
def test_store_dropna(setup_path):
df_with_missing = DataFrame(
{"col1": [0.0, np.nan, 2.0], "col2": [1.0, np.nan, np.nan]},
index=list("abc"),
)
df_without_missing = DataFrame(
{"col1": [0.0, 2.0], "col2": [1.0, np.nan]}, index=list("ac")
)
# # Test to make sure defaults are to not drop.
# # Corresponding to Issue 9382
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table")
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=False)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_with_missing, reloaded)
with ensure_clean_path(setup_path) as path:
df_with_missing.to_hdf(path, "df", format="table", dropna=True)
reloaded = read_hdf(path, "df")
tm.assert_frame_equal(df_without_missing, reloaded)
def test_to_hdf_with_min_itemsize(setup_path):
with ensure_clean_path(setup_path) as path:
# min_itemsize in index with to_hdf (GH 10381)
df = tm.makeMixedDataFrame().set_index("C")
df.to_hdf(path, "ss3", format="table", min_itemsize={"index": 6})
# just make sure there is a longer string:
df2 = df.copy().reset_index().assign(C="longer").set_index("C")
df2.to_hdf(path, "ss3", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "ss3"), concat([df, df2]))
# same as above, with a Series
df["B"].to_hdf(path, "ss4", format="table", min_itemsize={"index": 6})
df2["B"].to_hdf(path, "ss4", append=True, format="table")
tm.assert_series_equal(read_hdf(path, "ss4"), concat([df["B"], df2["B"]]))
@pytest.mark.parametrize("format", ["fixed", "table"])
def test_to_hdf_errors(format, setup_path):
data = ["\ud800foo"]
ser = Series(data, index=Index(data))
with ensure_clean_path(setup_path) as path:
# GH 20835
ser.to_hdf(path, "table", format=format, errors="surrogatepass")
result = read_hdf(path, "table", errors="surrogatepass")
tm.assert_series_equal(result, ser)
def test_create_table_index(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string", "string2"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
assert col("f", "string2").is_indexed is True
# specify index=columns
store.append("f2", df, index=["string"], data_columns=["string", "string2"])
assert col("f2", "index").is_indexed is False
assert col("f2", "string").is_indexed is True
assert col("f2", "string2").is_indexed is False
# try to index a non-table
_maybe_remove(store, "f2")
store.put("f2", df)
msg = "cannot create table index on a Fixed format store"
with pytest.raises(TypeError, match=msg):
store.create_table_index("f2")
def test_create_table_index_data_columns_argument(setup_path):
# GH 28156
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
def col(t, column):
return getattr(store.get_storer(t).table.cols, column)
# data columns
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df["string2"] = "bar"
store.append("f", df, data_columns=["string"])
assert col("f", "index").is_indexed is True
assert col("f", "string").is_indexed is True
msg = "'Cols' object has no attribute 'string2'"
with pytest.raises(AttributeError, match=msg):
col("f", "string2").is_indexed
# try to index a col which isn't a data_column
msg = (
"column string2 is not a data_column.\n"
"In order to read column string2 you must reload the dataframe \n"
"into HDFStore and include string2 with the data_columns argument."
)
with pytest.raises(AttributeError, match=msg):
store.create_table_index("f", columns=["string2"])
def test_mi_data_columns(setup_path):
# GH 14435
idx = MultiIndex.from_arrays(
[date_range("2000-01-01", periods=5), range(5)], names=["date", "id"]
)
df = DataFrame({"a": [1.1, 1.2, 1.3, 1.4, 1.5]}, index=idx)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=True)
actual = store.select("df", where="id == 1")
expected = df.iloc[[1], :]
tm.assert_frame_equal(actual, expected)
def test_table_mixed_dtypes(setup_path):
# frame
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[df.index[3:6], ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with ensure_clean_store(setup_path) as store:
store.append("df1_mixed", df)
tm.assert_frame_equal(store.select("df1_mixed"), df)
def test_calendar_roundtrip_issue(setup_path):
# 8591
# doc example from tseries holiday section
weekmask_egypt = "Sun Mon Tue Wed Thu"
holidays = [
"2012-05-01",
datetime.datetime(2013, 5, 1),
np.datetime64("2014-05-01"),
]
bday_egypt = pd.offsets.CustomBusinessDay(
holidays=holidays, weekmask=weekmask_egypt
)
dt = datetime.datetime(2013, 4, 30)
dts = date_range(dt, periods=5, freq=bday_egypt)
s = Series(dts.weekday, dts).map(Series("Mon Tue Wed Thu Fri Sat Sun".split()))
with ensure_clean_store(setup_path) as store:
store.put("fixed", s)
result = store.select("fixed")
        tm.assert_series_equal(result, s)  # api: pandas._testing.assert_series_equal
|
""" Statistics Utilities
T-tests and Power calculations:
* :py:func:`calc_pairwise_power`: Calculate the pair-wise power level to replicate a p-value over categories
* :py:func:`calc_pairwise_significance`: Calculate pair-wise significance over all categories in a dataframe
* :py:func:`calc_pairwise_anova`: Calculate one-way or two-way ANOVA over categories in a dataframe
* :py:func:`calc_pairwise_effect_size`: Calculate the effect size using Cohen's d in a dataframe
* :py:func:`calc_pairwise_batch_effect`: Calculate a batch effect using 2-way ANOVA in a dataframe
* :py:func:`calc_effect_size`: Calculate the effect size using Cohen's d
Filters and signal processing:
* :py:func:`calc_frequency_domain`: Convert a signal from time to frequency
* :py:func:`score_points`: Calculate the score vectors and IRR based on point correspondences
* :py:func:`bin_by_radius`: Bin warped data by radial group
Grouping and Pairing:
* :py:func:`groups_to_dataframe`: Convert group dictionaries to DataFrames
* :py:func:`group_by_contrast`: Group objects using a categorical attribute
* :py:func:`pair_all_tile_data`: Pair tile objects by tile and timepoint
* :py:func:`pair_train_test_data`: Use the cell index to pair off training and test files
Filesystem search and I/O:
* :py:func:`find_all_train_files`: Find all the training data under a directory, indexed by cell number
* :py:func:`load_points_from_maskfile`: Convert a mask or probability field into a point array
* :py:func:`load_training_data`: Transform training data to experiment/tile/timepoint
* :py:func:`load_train_test_split`: Load the train/test split for the data
Score Objects
* :py:class:`PointScore`: Manage ROC, TP/FP and precision/recall data for points
* :py:class:`ROCScore`: Store ROC/PR curves for a given set of scores
API Documentation
-----------------
"""
# Imports
import re
import json
import pathlib
import itertools
from collections import namedtuple
from typing import Dict, Tuple, Union, List, Optional
# 3rd party
import numpy as np
from scipy.stats import ttest_ind, mannwhitneyu, ks_2samp
from scipy.fftpack import fft
from statsmodels.stats.power import tt_ind_solve_power
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.anova import anova_lm
from statsmodels.formula.api import ols
from skimage.feature import peak_local_max, match_descriptors
from sklearn.metrics import roc_curve, precision_recall_curve, auc
import pandas as pd
# Our own imports
from . import load_image, guess_channel_dir, find_tiledirs, parse_image_name, to_json_types
# Constants
reTRAINFILE = re.compile(r'^(?P<index>[0-9]+)[a-z_-]+\.[a-z]+$', re.IGNORECASE)
reCELLNUM = re.compile(r'^(?P<cell_number>[0-9]+)(cell|cell_resp|dots)\.[a-z]+$', re.IGNORECASE)
MAX_DISTANCE = 1.0
CategoryType = Union[str, List[str]]
# Classes
CellPoints = namedtuple('CellPoints', 'x, y')
CellIndex = namedtuple('CellIndex', 'experiment, tile, timepoint, rot90, flip')
class TileMetaData(object):
""" Meta data for individual tiles
:param Path imagefile:
The image file path
:param Path rootdir:
Experiment root directory
:param str experiment:
The name of the experiment
:param int tile:
The tile number
:param int timepoint:
The timepoint
"""
def __init__(self,
imagefile: pathlib.Path,
rootdir: pathlib.Path,
experiment: str,
tile: int,
timepoint: int):
self.imagefile = imagefile
self.rootdir = rootdir
self.cell_num = int(reCELLNUM.match(self.imagefile.name).group('cell_number'))
self.experiment = experiment
self.tile = tile
self.timepoint = timepoint
self.prev_tile = None
self.next_tile = None
self.ref_tile = None
self.points_x = None
self.points_y = None
self.points_v = None
def load_image(self, cutoff=0.01, keep_scale=True):
if all([p is not None for p in (self.points_x, self.points_y, self.points_v)]):
return
self.points_x, self.points_y, self.points_v = load_points_from_maskfile(
self.imagefile, cutoff=cutoff, keep_scale=keep_scale)
def get_points(self, threshold):
points = np.stack([self.points_x, self.points_y], axis=1)
return points[self.points_v >= threshold, :]
def __repr__(self):
return f'TileMetaData({self.experiment},{self.tile},{self.timepoint})'
class PointScore(object):
""" Score object for point data
:param tuple train_points:
The x, y coordinates of the positive reference points (score == 1.0)
:param tuple test_points:
The x, y (potentially v) coordinates of the results from an individual scorer
If passed, v is the probability assigned to the given points
:param float max_distance:
The maximum inter-point distance considered a match
"""
def __init__(self, train_points, test_points, max_distance=MAX_DISTANCE):
# Unpack the train points
if isinstance(train_points, tuple) and len(train_points) == 2:
train_x, train_y = train_points
train_xy = np.stack([train_x, train_y], axis=1)
else:
train_xy = np.array(train_points)
assert train_xy.ndim == 2
assert train_xy.shape[1] == 2
# Unpack the test points
if isinstance(test_points, tuple) and len(test_points) == 2:
test_x, test_y = test_points
test_xyv = np.stack([test_x, test_y], axis=1)
elif isinstance(test_points, tuple) and len(test_points) == 3:
test_x, test_y, test_v = test_points
test_xyv = np.stack([test_x, test_y, test_v], axis=1)
else:
test_xyv = np.array(test_points)
assert test_xyv.ndim == 2
# Pad the test points if they have scores
if test_xyv.shape[1] == 2:
test_xyv = np.concatenate([test_xyv, np.ones((test_xyv.shape[0], 1))], axis=1)
assert test_xyv.shape[1] == 3
test_xy, test_v = test_xyv[:, :2], test_xyv[:, 2]
self.train_xy = train_xy
self.test_xy = test_xy
self.test_v = test_v
self.max_distance = max_distance
def score_points(self):
""" Calculate point matches """
# Match points by distance
if self.train_xy.shape[0] == 0 or self.test_xy.shape[0] == 0:
            matches = np.zeros((0, 2), dtype=int)  # np.int was removed in newer NumPy
else:
matches = match_descriptors(self.train_xy, self.test_xy,
metric='euclidean',
max_distance=self.max_distance,
cross_check=True)
num_matches = matches.shape[0]
        train_mask = np.zeros((self.train_xy.shape[0], ), dtype=bool)
        test_mask = np.zeros((self.test_xy.shape[0], ), dtype=bool)
train_mask[matches[:, 0]] = True
test_mask[matches[:, 1]] = True
num_missed = np.sum(~train_mask)
num_extra = np.sum(~test_mask)
num_total = num_matches + num_missed + num_extra
print('{:.1%} Matched'.format(num_matches/num_total))
print('{} Matches (True Positives)'.format(num_matches))
print('{} Missed (False Negatives)'.format(num_missed))
print('{} Extra (False Positives)'.format(num_extra))
self.num_matches = num_matches
self.num_missed = num_missed
self.num_extra = num_extra
self.num_total = num_total
self.irr = num_matches / num_total
self.precision = num_matches / (num_matches + num_extra)
self.recall = num_matches / (num_matches + num_missed)
print('IRR: {:.1%}'.format(self.irr))
print('Precision: {:.1%}'.format(self.precision))
print('Recall: {:.1%}'.format(self.recall))
# Create the masks for ROC curves
        y_real = np.zeros((num_total, ), dtype=bool)
        y_score = np.zeros((num_total, ), dtype=float)
y_real[:num_matches] = True
y_score[:num_matches] = self.test_v[test_mask]
y_real[num_matches:train_mask.shape[0]] = True
y_score[num_matches:train_mask.shape[0]] = 0.0
y_real[train_mask.shape[0]:num_total] = False
y_score[train_mask.shape[0]:num_total] = self.test_v[~test_mask]
self.y_real = y_real
self.y_score = y_score
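# Hedged, self-contained usage sketch for PointScore; the coordinates below are
# synthetic placeholders, not project data.
def _example_point_score_usage():
    rng = np.random.RandomState(0)
    reference_xy = rng.uniform(0, 64, size=(20, 2))
    # Detections: jittered copies of the reference points plus a few spurious extras
    detected_xy = np.concatenate([
        reference_xy + rng.normal(0.0, 0.2, size=reference_xy.shape),
        rng.uniform(0, 64, size=(5, 2)),
    ], axis=0)
    score = PointScore(reference_xy, detected_xy, max_distance=MAX_DISTANCE)
    score.score_points()
    return score.irr, score.precision, score.recall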
class ROCData(object):
""" ROC and PR curves for data
:param ndarray y_train:
An Nx1 array of the True labels for the dataset
:param ndarray y_test:
An Nx1 array of the output of the net for the dataset
"""
# Nice human readable aliases for some training runs
FINAL_LABELS = {
'countception-r3-50k': 'Count-ception',
'fcrn_a_wide-r3-75k': 'FCRN-A',
'fcrn_b_wide-r3-75k': 'FCRN-B',
'residual_unet-r4-25k': 'Residual U-net',
'unet-r1-50k': 'U-net',
}
def __init__(self, y_train, y_test):
self.y_train = y_train
self.y_test = y_test
self.metadata = {}
self.roc_data = {}
self.precision_recall_data = {}
@property
def roc_auc(self):
return self.roc_data['roc_auc']
@property
def pr_auc(self):
return self.precision_recall_data['pr_auc']
@property
def label(self):
return self.metadata['label']
@property
def detector(self):
return self.metadata['detector']
@property
def data_split(self):
return self.metadata['data_split']
@classmethod
def from_json(cls, datafile):
""" Reload the class from JSON
:param Path datafile:
The data file to load from
"""
with datafile.open('rt') as fp:
stat_data = json.load(fp)
# Load the raw arrays
obj = cls(np.array(stat_data.pop('y_train')),
np.array(stat_data.pop('y_test')))
# Reload the ROC data
roc_data = stat_data.pop('roc_data')
for key in ('true_positive_rate', 'false_positive_rate', 'thresholds'):
if key in roc_data:
roc_data[key] = np.array(roc_data[key])
obj.roc_data = roc_data
# Reload the P/R data
precision_recall_data = stat_data.pop('precision_recall_data')
for key in ('precision_rate', 'recall_rate', 'thresholds'):
if key in precision_recall_data:
precision_recall_data[key] = np.array(precision_recall_data[key])
obj.precision_recall_data = precision_recall_data
# Anything else must have been metadata
obj.add_metadata(**stat_data.pop('metadata'))
assert stat_data == {}
return obj
def to_json(self, datafile: pathlib.Path):
""" Save the current state of the class to JSON
:param Path datafile:
The JSON data file to save to
"""
stat_data = {
'y_train': to_json_types(self.y_train),
'y_test': to_json_types(self.y_test),
'metadata': to_json_types(self.metadata),
'roc_data': to_json_types(self.roc_data),
'precision_recall_data': to_json_types(self.precision_recall_data),
}
with datafile.open('wt') as fp:
json.dump(stat_data, fp)
def to_plotdata(self):
""" Only save the values that we need to re-plot everything
:returns:
A JSON serializable dictionary of the plot data
"""
return {
'metadata': to_json_types(self.metadata),
'roc_data': to_json_types(self.roc_data),
'precision_recall_data': to_json_types(self.precision_recall_data),
}
@classmethod
def from_plotdata(cls, plotdata):
""" Load only the values we need to plot things
:param dict plotdata:
The JSON data to load from
"""
# No raw array data, just load empty values
obj = cls(None, None)
# Reload the ROC data
roc_data = plotdata.pop('roc_data')
for key in ('true_positive_rate', 'false_positive_rate', 'thresholds'):
if key in roc_data:
roc_data[key] = np.array(roc_data[key])
obj.roc_data = roc_data
# Reload the P/R data
precision_recall_data = plotdata.pop('precision_recall_data')
for key in ('precision_rate', 'recall_rate', 'thresholds'):
if key in precision_recall_data:
precision_recall_data[key] = np.array(precision_recall_data[key])
obj.precision_recall_data = precision_recall_data
# Anything else must have been metadata
obj.add_metadata(**plotdata.pop('metadata'))
assert plotdata == {}
return obj
def clear_raw_data(self):
""" Clear raw data """
self.y_train = None
self.y_test = None
def add_metadata(self, **kwargs):
""" Add some metadata to this score object
If present, the key 'label' is used when logging scores for this object
"""
self.metadata.update(kwargs)
def calc_roc_curve(self):
""" Calculate the ROC score """
# Calculate the FP and TP vectors and the thresholds
false_positive_rate, true_positive_rate, thresholds = roc_curve(self.y_train, self.y_test)
roc_auc = auc(false_positive_rate, true_positive_rate)
# Find the knee of the curve
knee_dist = (true_positive_rate - 1.0)**2 + false_positive_rate**2
knee_index = np.argmin(knee_dist)
# Print some debugging so I don't think the process died
label = str(self.metadata.get('label', ''))
prefix = '{}: '.format(label) if label else ''
print('{}ROC AUC {:0.3f}'.format(prefix, roc_auc))
self.roc_data = {
'false_positive_rate': false_positive_rate,
'true_positive_rate': true_positive_rate,
'roc_auc': roc_auc,
'thresholds': thresholds,
'knee_index': knee_index,
'knee_dist': knee_dist,
'knee_fpr': false_positive_rate[knee_index],
'knee_tpr': true_positive_rate[knee_index],
'knee_threshold': thresholds[knee_index],
}
def calc_precision_recall_curve(self):
""" Calculate the P/R curve for this data """
# Calculate the Precision and Recall vectors and the thresholds
precision_rate, recall_rate, thresholds = precision_recall_curve(self.y_train, self.y_test)
pr_auc = auc(recall_rate, precision_rate)
# Find the knee of the curve
knee_dist = precision_rate**2 + recall_rate**2
knee_index = np.argmax(knee_dist)
# Print some debugging so I don't think the process died
label = str(self.metadata.get('label', ''))
prefix = '{}: '.format(label) if label else ''
print('{}P/R AUC {:0.3f}'.format(prefix, pr_auc))
self.precision_recall_data = {
'precision_rate': precision_rate,
'recall_rate': recall_rate,
'pr_auc': pr_auc,
'thresholds': thresholds,
'knee_index': knee_index,
'knee_dist': knee_dist,
'knee_precision': precision_rate[knee_index],
'knee_recall': recall_rate[knee_index],
'knee_threshold': thresholds[knee_index],
}
def plot_roc(self, ax, linewidth=3, color='k', annotate_knee=False):
""" Plot ROC curve for this line
:param Axes ax:
The matplotlib axis object
:param float linewidth:
Width of the line to plot
:param str color:
Color of the line to plot
:param bool annotate_knee:
If True, annotate the knee of the curve
"""
false_positive_rate = self.roc_data['false_positive_rate']
true_positive_rate = self.roc_data['true_positive_rate']
roc_auc = self.roc_data['roc_auc']
knee_fpr = self.roc_data['knee_fpr']
knee_tpr = self.roc_data['knee_tpr']
knee_threshold = self.roc_data['knee_threshold']
label = self.metadata.get('label')
label = self.FINAL_LABELS.get(label, label)
prefix = '{} '.format(label) if label else ''
final_label = '{}(Area: {:0.3f})'.format(prefix, roc_auc)
# Plot the actual ROC curve
ax.plot(false_positive_rate, true_positive_rate,
linewidth=linewidth, color=color,
label=final_label)
# Plot the knee point
ax.plot([knee_fpr], [knee_tpr], 'o', color=color)
if annotate_knee:
ax.text(knee_fpr, knee_tpr-0.02, '{:0.3f}'.format(knee_threshold),
color=color, fontsize=24)
def plot_precision_recall(self, ax, linewidth=3, color='k', annotate_knee=False):
""" Plot P/R curve for this line
:param Axes ax:
The matplotlib axis object
:param float linewidth:
Width of the line to plot
:param str color:
Color of the line to plot
:param bool annotate_knee:
If True, annotate the knee of the curve
"""
precision_rate = self.precision_recall_data['precision_rate']
recall_rate = self.precision_recall_data['recall_rate']
pr_auc = self.precision_recall_data['pr_auc']
knee_precision = self.precision_recall_data['knee_precision']
knee_recall = self.precision_recall_data['knee_recall']
knee_threshold = self.precision_recall_data['knee_threshold']
label = self.metadata.get('label')
label = self.FINAL_LABELS.get(label, label)
prefix = '{} '.format(label) if label else ''
final_label = '{}(area: {:0.3f})'.format(prefix, pr_auc)
# Plot the actual P/R curve
ax.plot(recall_rate, precision_rate,
linewidth=linewidth, color=color,
label=final_label)
# Plot the knee point
ax.plot([knee_recall], [knee_precision], 'o', color=color)
if annotate_knee:
ax.text(knee_recall, knee_precision-0.02, '{:0.3f}'.format(knee_threshold),
color=color, fontsize=24)
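# Minimal usage sketch for ROCData; the helper below and its toy labels/scores are
# illustrative additions, not part of the original module, and are never called here.
def _roc_data_usage_sketch():
    labels = np.array([True, True, True, False, False])
    scores = np.array([0.9, 0.8, 0.4, 0.3, 0.1])
    data = ROCData(labels, scores)
    data.add_metadata(label='toy-example')
    data.calc_roc_curve()               # fills roc_data, including the knee threshold
    data.calc_precision_recall_curve()  # fills precision_recall_data
    return data.roc_auc, data.pr_auc, data.roc_data['knee_threshold']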
# Functions
def groups_to_dataframe(groups, attr=None, column='Value', labels=None):
""" Convert a group dictionary to a two-column dataframe
:param dict[str, data] groups:
The data map for the dataframe
:param str attr:
The attribute to extract (or None to use the groups directly)
:param str column:
The name for the column to create
:param dict labels:
The mapping between group_key and final condition name
:returns:
A DataFrame that can be fed into sns.violinplot
"""
if labels is None:
labels = {}
group_name = []
group_value = []
for group_key, group in groups.items():
group_label = str(labels.get(group_key, group_key))
if attr is None:
group_data = group
else:
group_data = []
for g in group:
val = getattr(g, attr, None)
if val is None:
continue
for v in val:
if isinstance(v, list):
group_data.extend(v)
else:
group_data.append(v)
group_data = np.array(group_data)
group_name.extend(group_label for _ in range(group_data.shape[0]))
group_value.append(group_data)
return pd.DataFrame({'Group': group_name, column: np.concatenate(group_value)})
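# For instance (hypothetical input), groups_to_dataframe({'A': [1, 2], 'B': [3]}, column='Count')
# returns a two-column DataFrame with Group = ['A', 'A', 'B'] and Count = [1, 2, 3].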
def bin_by_radius(radius: np.ndarray,
value: np.ndarray,
num_bins: int = 4,
label: str = 'Value',
bin_type: str = 'uniform',
r_min: float = 0.0,
r_max: float = 1.0,
r_overflow: float = 1.1,
category_type: str = 'index') -> pd.DataFrame:
""" Bin by radius into a dataframe
:param ndarray radius:
The radius vector to bin
:param ndarray value:
The value vector to make bins for
:param int num_bins:
The number of bins to generate
:param str label:
The label for the value vector
:param str bin_type:
One of 'uniform' (alias 'radius') or 'area' - how to generate bin edges
:param float r_min:
The minimum radius to bin
:param float r_max:
The maximum radius to bin
:param float r_overflow:
The overflow radius for the final bin
:param str category_type:
Return value for the radius, one of "index" or "mean"
:returns:
A DataFrame with binned radii and all values detected at those radii
"""
if not label:
label = 'Value'
df = {
'Radius': [],
label: [],
}
if bin_type in ('radius', 'uniform'):
# Equally spaced edges
edges = np.linspace(r_min, r_max, num_bins+1)
elif bin_type == 'area':
# Edges spaced to give annuli with equal area
area = np.pi * (r_max**2 - r_min**2) / num_bins
edges = [r_min]
for i in range(num_bins):
edges.append(np.sqrt(area / np.pi + edges[-1]**2))
edges = np.array(edges)
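# With these edges each annulus has area pi*(e[i+1]**2 - e[i]**2)
# = pi*((area/pi + e[i]**2) - e[i]**2) = area, so every bin covers the same area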
else:
raise KeyError(f'Unknown bin type: "{bin_type}"')
assert edges.shape[0] == num_bins + 1
print(f'Generating bins at: {edges}')
real_mask = ~np.isnan(radius)
radius = radius[real_mask]
value = value[real_mask]
if radius.shape[0] < 1 or value.shape[0] < 1:
raise ValueError('No valid measurements for radius')
for i, (e0, e1) in enumerate(zip(edges[:-1], edges[1:])):
if category_type == 'index':
cat = f'{e0:0.1f}-{e1:0.1f}'
elif category_type == 'mean':
cat = np.mean([e0, e1])
else:
raise ValueError(f'Unknown category type: {category_type}')
if i == num_bins - 1:
e1 = r_overflow
mask = np.logical_and(radius >= e0, radius < e1)
df['Radius'].extend([cat] * int(np.sum(mask)))
df[label].extend(value[mask])
return
|
pd.DataFrame(df)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
# pylint: disable=W0612,E1101
from datetime import datetime
import operator
import nose
from functools import wraps
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Index, isnull, notnull, pivot, MultiIndex
from pandas.core.datetools import bday
from pandas.core.nanops import nanall, nanany
from pandas.core.panel import Panel
from pandas.core.series import remove_na
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict, signature
from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal, assert_frame_equal,
assert_series_equal, assert_almost_equal,
assert_produces_warning, ensure_clean,
assertRaisesRegexp, makeCustomDataframe as
mkdf, makeMixedDataFrame)
import pandas.core.panel as panelm
import pandas.util.testing as tm
def ignore_sparse_panel_future_warning(func):
"""
decorator to ignore FutureWarning if we have a SparsePanel
can be removed when SparsePanel is fully removed
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if isinstance(self.panel, SparsePanel):
with assert_produces_warning(FutureWarning,
check_stacklevel=False):
return func(self, *args, **kwargs)
else:
return func(self, *args, **kwargs)
return wrapper
class PanelTests(object):
panel = None
def test_pickle(self):
unpickled = self.round_trip_pickle(self.panel)
assert_frame_equal(unpickled['ItemA'], self.panel['ItemA'])
def test_rank(self):
self.assertRaises(NotImplementedError, lambda: self.panel.rank())
def test_cumsum(self):
cumsum = self.panel.cumsum()
assert_frame_equal(cumsum['ItemA'], self.panel['ItemA'].cumsum())
def not_hashable(self):
c_empty = Panel()
c = Panel(Panel([[[1]]]))
self.assertRaises(TypeError, hash, c_empty)
self.assertRaises(TypeError, hash, c)
class SafeForLongAndSparse(object):
_multiprocess_can_split_ = True
def test_repr(self):
repr(self.panel)
@ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
cp = self.panel.copy()
getattr(cp, attr).name = 'foo'
self.assertIsNone(getattr(self.panel, attr).name)
def test_iter(self):
tm.equalContents(list(self.panel), self.panel.items)
def test_count(self):
f = lambda s: notnull(s).sum()
self._check_stat_op('count', f, obj=self.panel, has_skipna=False)
def test_sum(self):
self._check_stat_op('sum', np.sum)
def test_mean(self):
self._check_stat_op('mean', np.mean)
def test_prod(self):
self._check_stat_op('prod', np.prod)
def test_median(self):
def wrapper(x):
if isnull(x).any():
return np.nan
return np.median(x)
self._check_stat_op('median', wrapper)
def test_min(self):
self._check_stat_op('min', np.min)
def test_max(self):
self._check_stat_op('max', np.max)
def test_skew(self):
try:
from scipy.stats import skew
except ImportError:
raise nose.SkipTest("no scipy.stats.skew")
def this_skew(x):
if len(x) < 3:
return np.nan
return skew(x, bias=False)
self._check_stat_op('skew', this_skew)
# def test_mad(self):
# f = lambda x: np.abs(x - x.mean()).mean()
# self._check_stat_op('mad', f)
def test_var(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.var(x, ddof=1)
self._check_stat_op('var', alt)
def test_std(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1)
self._check_stat_op('std', alt)
def test_sem(self):
def alt(x):
if len(x) < 2:
return np.nan
return np.std(x, ddof=1) / np.sqrt(len(x))
self._check_stat_op('sem', alt)
# def test_skew(self):
# from scipy.stats import skew
# def alt(x):
# if len(x) < 3:
# return np.nan
# return skew(x, bias=False)
# self._check_stat_op('skew', alt)
def _check_stat_op(self, name, alternative, obj=None, has_skipna=True):
if obj is None:
obj = self.panel
# # set some NAs
# obj.ix[5:10] = np.nan
# obj.ix[15:20, -2:] = np.nan
f = getattr(obj, name)
if has_skipna:
def skipna_wrapper(x):
nona = remove_na(x)
if len(nona) == 0:
return np.nan
return alternative(nona)
def wrapper(x):
return alternative(np.asarray(x))
for i in range(obj.ndim):
result = f(axis=i, skipna=False)
assert_frame_equal(result, obj.apply(wrapper, axis=i))
else:
skipna_wrapper = alternative
wrapper = alternative
for i in range(obj.ndim):
result = f(axis=i)
if not tm._incompat_bottleneck_version(name):
assert_frame_equal(result, obj.apply(skipna_wrapper, axis=i))
self.assertRaises(Exception, f, axis=obj.ndim)
# Unimplemented numeric_only parameter.
if 'numeric_only' in signature(f).args:
self.assertRaisesRegexp(NotImplementedError, name, f,
numeric_only=True)
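# For example (hypothetical values): with name='sum' and x = [1.0, nan, 2.0],
# skipna_wrapper drops the NaN and returns 3.0, matching sum(skipna=True), while
# wrapper applies np.sum to the raw values and propagates the NaN, matching skipna=False.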
class SafeForSparse(object):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def test_get_axis(self):
assert (self.panel._get_axis(0) is self.panel.items)
assert (self.panel._get_axis(1) is self.panel.major_axis)
assert (self.panel._get_axis(2) is self.panel.minor_axis)
def test_set_axis(self):
new_items = Index(np.arange(len(self.panel.items)))
new_major = Index(np.arange(len(self.panel.major_axis)))
new_minor = Index(np.arange(len(self.panel.minor_axis)))
# ensure the change propagates to potentially prior-cached items too
item = self.panel['ItemA']
self.panel.items = new_items
if hasattr(self.panel, '_item_cache'):
self.assertNotIn('ItemA', self.panel._item_cache)
self.assertIs(self.panel.items, new_items)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.major_axis = new_major
self.assertIs(self.panel[0].index, new_major)
self.assertIs(self.panel.major_axis, new_major)
# TODO: unused?
item = self.panel[0] # noqa
self.panel.minor_axis = new_minor
self.assertIs(self.panel[0].columns, new_minor)
self.assertIs(self.panel.minor_axis, new_minor)
def test_get_axis_number(self):
self.assertEqual(self.panel._get_axis_number('items'), 0)
self.assertEqual(self.panel._get_axis_number('major'), 1)
self.assertEqual(self.panel._get_axis_number('minor'), 2)
def test_get_axis_name(self):
self.assertEqual(self.panel._get_axis_name(0), 'items')
self.assertEqual(self.panel._get_axis_name(1), 'major_axis')
self.assertEqual(self.panel._get_axis_name(2), 'minor_axis')
def test_get_plane_axes(self):
# what to do here?
index, columns = self.panel._get_plane_axes('items')
index, columns = self.panel._get_plane_axes('major_axis')
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
@ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
trunced = self.panel.truncate(start, end, axis='major')
expected = self.panel['ItemA'].truncate(start, end)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(before=start, axis='major')
expected = self.panel['ItemA'].truncate(before=start)
assert_frame_equal(trunced['ItemA'], expected)
trunced = self.panel.truncate(after=end, axis='major')
expected = self.panel['ItemA'].truncate(after=end)
assert_frame_equal(trunced['ItemA'], expected)
# XXX test other axes
def test_arith(self):
self._test_op(self.panel, operator.add)
self._test_op(self.panel, operator.sub)
self._test_op(self.panel, operator.mul)
self._test_op(self.panel, operator.truediv)
self._test_op(self.panel, operator.floordiv)
self._test_op(self.panel, operator.pow)
self._test_op(self.panel, lambda x, y: y + x)
self._test_op(self.panel, lambda x, y: y - x)
self._test_op(self.panel, lambda x, y: y * x)
self._test_op(self.panel, lambda x, y: y / x)
self._test_op(self.panel, lambda x, y: y ** x)
self._test_op(self.panel, lambda x, y: x + y) # panel + 1
self._test_op(self.panel, lambda x, y: x - y) # panel - 1
self._test_op(self.panel, lambda x, y: x * y) # panel * 1
self._test_op(self.panel, lambda x, y: x / y) # panel / 1
self._test_op(self.panel, lambda x, y: x ** y) # panel ** 1
self.assertRaises(Exception, self.panel.__add__, self.panel['ItemA'])
@staticmethod
def _test_op(panel, op):
result = op(panel, 1)
assert_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_keys(self):
tm.equalContents(list(self.panel.keys()), self.panel.items)
def test_iteritems(self):
# Test panel.iteritems()
# just test that it works
for k, v in self.panel.iteritems():
pass
self.assertEqual(len(list(self.panel.iteritems())),
len(self.panel.items))
@ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
df = self.panel['ItemA']
func = getattr(self.panel, name)
result = func(df, axis='items')
assert_frame_equal(result['ItemB'], op(self.panel['ItemB'], df))
# major
xs = self.panel.major_xs(self.panel.major_axis[0])
result = func(xs, axis='major')
idx = self.panel.major_axis[1]
assert_frame_equal(result.major_xs(idx),
op(self.panel.major_xs(idx), xs))
# minor
xs = self.panel.minor_xs(self.panel.minor_axis[0])
result = func(xs, axis='minor')
idx = self.panel.minor_axis[1]
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
# pow, mod not supported for SparsePanel as flex ops (for now)
if not isinstance(self.panel, SparsePanel):
ops.extend(['pow', 'mod'])
else:
idx = self.panel.minor_axis[1]
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.pow(self.panel.minor_xs(idx), axis='minor')
with assertRaisesRegexp(ValueError, "Simple arithmetic.*scalar"):
self.panel.mod(self.panel.minor_xs(idx), axis='minor')
for op in ops:
try:
check_op(getattr(operator, op), op)
except:
com.pprint_thing("Failing operation: %r" % op)
raise
if compat.PY3:
try:
check_op(operator.truediv, 'div')
except:
com.pprint_thing("Failing operation: %r" % 'div')
raise
@ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
@ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
p = Panel(np.arange(3 * 4 * 5).reshape(3, 4, 5),
items=['ItemA', 'ItemB', 'ItemC'],
major_axis=pd.date_range('20130101', periods=4),
minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
for op in ops:
with self.assertRaises(NotImplementedError):
getattr(p, op)(d, axis=0)
@ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
# select items
result = p.select(lambda x: x in ('ItemA', 'ItemC'), axis='items')
expected = p.reindex(items=['ItemA', 'ItemC'])
self.assert_panel_equal(result, expected)
# select major_axis
result = p.select(lambda x: x >= datetime(2000, 1, 15), axis='major')
new_major = p.major_axis[p.major_axis >= datetime(2000, 1, 15)]
expected = p.reindex(major=new_major)
self.assert_panel_equal(result, expected)
# select minor_axis
result = p.select(lambda x: x in ('D', 'A'), axis=2)
expected = p.reindex(minor=['A', 'D'])
self.assert_panel_equal(result, expected)
# corner case, empty thing
result = p.select(lambda x: x in ('foo', ), axis='items')
self.assert_panel_equal(result, p.reindex(items=[]))
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
@ignore_sparse_panel_future_warning
def test_abs(self):
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
self.assert_panel_equal(result, expected)
self.assert_panel_equal(result2, expected)
df = self.panel['ItemA']
result = df.abs()
result2 = abs(df)
expected = np.abs(df)
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
s = df['A']
result = s.abs()
result2 = abs(s)
expected = np.abs(s)
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
self.assertEqual(result.name, 'A')
self.assertEqual(result2.name, 'A')
class CheckIndexing(object):
_multiprocess_can_split_ = True
def test_getitem(self):
self.assertRaises(Exception, self.panel.__getitem__, 'ItemQ')
def test_delitem_and_pop(self):
expected = self.panel['ItemA']
result = self.panel.pop('ItemA')
assert_frame_equal(expected, result)
self.assertNotIn('ItemA', self.panel.items)
del self.panel['ItemB']
self.assertNotIn('ItemB', self.panel.items)
self.assertRaises(Exception, self.panel.__delitem__, 'ItemB')
values = np.empty((3, 3, 3))
values[0] = 0
values[1] = 1
values[2] = 2
panel = Panel(values, lrange(3), lrange(3), lrange(3))
# did we delete the right row?
panelc = panel.copy()
del panelc[0]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[1]
assert_frame_equal(panelc[0], panel[0])
assert_frame_equal(panelc[2], panel[2])
panelc = panel.copy()
del panelc[2]
assert_frame_equal(panelc[1], panel[1])
assert_frame_equal(panelc[0], panel[0])
def test_setitem(self):
# LongPanel with one item
lp = self.panel.filter(['ItemA', 'ItemB']).to_frame()
with tm.assertRaises(ValueError):
self.panel['ItemE'] = lp
# DataFrame
df = self.panel['ItemA'][2:].filter(items=['A', 'B'])
self.panel['ItemF'] = df
self.panel['ItemE'] = df
df2 = self.panel['ItemF']
assert_frame_equal(df, df2.reindex(index=df.index, columns=df.columns))
# scalar
self.panel['ItemG'] = 1
self.panel['ItemE'] = True
self.assertEqual(self.panel['ItemG'].values.dtype, np.int64)
self.assertEqual(self.panel['ItemE'].values.dtype, np.bool_)
# object dtype
self.panel['ItemQ'] = 'foo'
self.assertEqual(self.panel['ItemQ'].values.dtype, np.object_)
# boolean dtype
self.panel['ItemP'] = self.panel['ItemA'] > 0
self.assertEqual(self.panel['ItemP'].values.dtype, np.bool_)
self.assertRaises(TypeError, self.panel.__setitem__, 'foo',
self.panel.ix[['ItemP']])
# bad shape
p = Panel(np.random.randn(4, 3, 2))
with tm.assertRaisesRegexp(ValueError,
"shape of value must be \(3, 2\), "
"shape of given object was \(4, 2\)"):
p[0] = np.random.randn(4, 2)
def test_setitem_ndarray(self):
from pandas import date_range, datetools
timeidx = date_range(start=datetime(2009, 1, 1),
end=datetime(2009, 12, 31),
freq=datetools.MonthEnd())
lons_coarse = np.linspace(-177.5, 177.5, 72)
lats_coarse = np.linspace(-87.5, 87.5, 36)
P = Panel(items=timeidx, major_axis=lons_coarse,
minor_axis=lats_coarse)
data = np.random.randn(72 * 36).reshape((72, 36))
key = datetime(2009, 2, 28)
P[key] = data
assert_almost_equal(P[key].values, data)
def test_set_minor_major(self):
# GH 11014
df1 = DataFrame(['a', 'a', 'a', np.nan, 'a', np.nan])
df2 = DataFrame([1.0, np.nan, 1.0, np.nan, 1.0, 1.0])
panel = Panel({'Item1': df1, 'Item2': df2})
newminor = notnull(panel.iloc[:, :, 0])
panel.loc[:, :, 'NewMinor'] = newminor
assert_frame_equal(panel.loc[:, :, 'NewMinor'],
newminor.astype(object))
newmajor = notnull(panel.iloc[:, 0, :])
panel.loc[:, 'NewMajor', :] = newmajor
assert_frame_equal(panel.loc[:, 'NewMajor', :],
newmajor.astype(object))
def test_major_xs(self):
ref = self.panel['ItemA']
idx = self.panel.major_axis[5]
xs = self.panel.major_xs(idx)
result = xs['ItemA']
assert_series_equal(result, ref.xs(idx), check_names=False)
self.assertEqual(result.name, 'ItemA')
# not contained
idx = self.panel.major_axis[0] - bday
self.assertRaises(Exception, self.panel.major_xs, idx)
def test_major_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.major_xs(self.panel.major_axis[0])
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_minor_xs(self):
ref = self.panel['ItemA']
idx = self.panel.minor_axis[1]
xs = self.panel.minor_xs(idx)
assert_series_equal(xs['ItemA'], ref[idx], check_names=False)
# not contained
self.assertRaises(Exception, self.panel.minor_xs, 'E')
def test_minor_xs_mixed(self):
self.panel['ItemD'] = 'foo'
xs = self.panel.minor_xs('D')
self.assertEqual(xs['ItemA'].dtype, np.float64)
self.assertEqual(xs['ItemD'].dtype, np.object_)
def test_xs(self):
itemA = self.panel.xs('ItemA', axis=0)
expected = self.panel['ItemA']
assert_frame_equal(itemA, expected)
# get a view by default
itemA_view = self.panel.xs('ItemA', axis=0)
itemA_view.values[:] = np.nan
self.assertTrue(np.isnan(self.panel['ItemA'].values).all())
# mixed-type yields a copy
self.panel['strings'] = 'foo'
result = self.panel.xs('D', axis=2)
self.assertIsNotNone(result.is_copy)
def test_getitem_fancy_labels(self):
p = self.panel
items = p.items[[1, 0]]
dates = p.major_axis[::2]
cols = ['D', 'C', 'F']
# all 3 specified
assert_panel_equal(p.ix[items, dates, cols],
p.reindex(items=items, major=dates, minor=cols))
# 2 specified
assert_panel_equal(p.ix[:, dates, cols],
p.reindex(major=dates, minor=cols))
assert_panel_equal(p.ix[items, :, cols],
p.reindex(items=items, minor=cols))
assert_panel_equal(p.ix[items, dates, :],
p.reindex(items=items, major=dates))
# only 1
assert_panel_equal(p.ix[items, :, :], p.reindex(items=items))
assert_panel_equal(p.ix[:, dates, :], p.reindex(major=dates))
assert_panel_equal(p.ix[:, :, cols], p.reindex(minor=cols))
def test_getitem_fancy_slice(self):
pass
def test_getitem_fancy_ints(self):
p = self.panel
# #1603
result = p.ix[:, -1, :]
expected = p.ix[:, p.major_axis[-1], :]
assert_frame_equal(result, expected)
def test_getitem_fancy_xs(self):
p = self.panel
item = 'ItemB'
date = p.major_axis[5]
col = 'C'
# get DataFrame
# item
assert_frame_equal(p.ix[item], p[item])
assert_frame_equal(p.ix[item, :], p[item])
assert_frame_equal(p.ix[item, :, :], p[item])
# major axis, axis=1
assert_frame_equal(p.ix[:, date], p.major_xs(date))
assert_frame_equal(p.ix[:, date, :], p.major_xs(date))
# minor axis, axis=2
assert_frame_equal(p.ix[:, :, 'C'], p.minor_xs('C'))
# get Series
assert_series_equal(p.ix[item, date], p[item].ix[date])
assert_series_equal(p.ix[item, date, :], p[item].ix[date])
assert_series_equal(p.ix[item, :, col], p[item][col])
assert_series_equal(p.ix[:, date, col], p.major_xs(date).ix[col])
def test_getitem_fancy_xs_check_view(self):
item = 'ItemB'
date = self.panel.major_axis[5]
# make sure it's always a view
NS = slice(None, None)
# DataFrames
comp = assert_frame_equal
self._check_view(item, comp)
self._check_view((item, NS), comp)
self._check_view((item, NS, NS), comp)
self._check_view((NS, date), comp)
self._check_view((NS, date, NS), comp)
self._check_view((NS, NS, 'C'), comp)
# Series
comp = assert_series_equal
self._check_view((item, date), comp)
self._check_view((item, date, NS), comp)
self._check_view((item, NS, 'C'), comp)
self._check_view((NS, date, 'C'), comp)
def test_ix_setitem_slice_dataframe(self):
a = Panel(items=[1, 2, 3], major_axis=[11, 22, 33],
minor_axis=[111, 222, 333])
b = DataFrame(np.random.randn(2, 3), index=[111, 333],
columns=[1, 2, 3])
a.ix[:, 22, [111, 333]] = b
assert_frame_equal(a.ix[:, 22, [111, 333]], b)
def test_ix_align(self):
from pandas import Series
b = Series(np.random.randn(10), name=0)
b.sort()
df_orig = Panel(np.random.randn(3, 10, 2))
df = df_orig.copy()
df.ix[0, :, 0] = b
assert_series_equal(df.ix[0, :, 0].reindex(b.index), b)
df = df_orig.swapaxes(0, 1)
df.ix[:, 0, 0] = b
assert_series_equal(df.ix[:, 0, 0].reindex(b.index), b)
df = df_orig.swapaxes(1, 2)
df.ix[0, 0, :] = b
assert_series_equal(df.ix[0, 0, :].reindex(b.index), b)
def test_ix_frame_align(self):
p_orig = tm.makePanel()
df = p_orig.ix[0].copy()
assert_frame_equal(p_orig['ItemA'], df)
p = p_orig.copy()
p.ix[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0, :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.iloc[0] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.loc['ItemA', :, :] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p['ItemA'] = df
assert_panel_equal(p, p_orig)
p = p_orig.copy()
p.ix[0, [0, 1, 3, 5], -2:] = df
out = p.ix[0, [0, 1, 3, 5], -2:]
assert_frame_equal(out, df.iloc[[0, 1, 3, 5], [2, 3]])
# GH3830, panel assignment by values/frame
for dtype in ['float64', 'int64']:
panel = Panel(np.arange(40).reshape((2, 4, 5)),
items=['a1', 'a2'], dtype=dtype)
df1 = panel.iloc[0]
df2 = panel.iloc[1]
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by Value Passes for 'a2'
panel.loc['a2'] = df1.values
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df1)
# Assignment by DataFrame Ok w/o loc 'a2'
panel['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
# Assignment by DataFrame Ok w/ loc 'a2'
panel.loc['a2'] = df2
tm.assert_frame_equal(panel.loc['a1'], df1)
tm.assert_frame_equal(panel.loc['a2'], df2)
def _check_view(self, indexer, comp):
cp = self.panel.copy()
obj = cp.ix[indexer]
obj.values[:] = 0
self.assertTrue((obj.values == 0).all())
comp(cp.ix[indexer].reindex_like(obj), obj)
def test_logical_with_nas(self):
d = Panel({'ItemA': {'a': [np.nan, False]},
'ItemB': {'a': [True, True]}})
result = d['ItemA'] | d['ItemB']
expected = DataFrame({'a': [np.nan, True]})
assert_frame_equal(result, expected)
# this is autodowncasted here
result = d['ItemA'].fillna(False) | d['ItemB']
expected = DataFrame({'a': [True, True]})
assert_frame_equal(result, expected)
def test_neg(self):
# what to do?
assert_panel_equal(-self.panel, -1 * self.panel)
def test_invert(self):
assert_panel_equal(-(self.panel < 0), ~(self.panel < 0))
def test_comparisons(self):
p1 = tm.makePanel()
p2 = tm.makePanel()
tp = p1.reindex(items=p1.items + ['foo'])
df = p1[p1.items[0]]
def test_comp(func):
# versus same index
result = func(p1, p2)
self.assert_numpy_array_equal(result.values,
func(p1.values, p2.values))
# versus non-indexed same objs
self.assertRaises(Exception, func, p1, tp)
# versus different objs
self.assertRaises(Exception, func, p1, df)
# versus scalar
result3 = func(self.panel, 0)
self.assert_numpy_array_equal(result3.values,
func(self.panel.values, 0))
test_comp(operator.eq)
test_comp(operator.ne)
test_comp(operator.lt)
test_comp(operator.gt)
test_comp(operator.ge)
test_comp(operator.le)
def test_get_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
result = self.panel.get_value(item, mjr, mnr)
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"):
self.panel.get_value('a')
def test_set_value(self):
for item in self.panel.items:
for mjr in self.panel.major_axis[::2]:
for mnr in self.panel.minor_axis:
self.panel.set_value(item, mjr, mnr, 1.)
assert_almost_equal(self.panel[item][mnr][mjr], 1.)
# resize
res = self.panel.set_value('ItemE', 'foo', 'bar', 1.5)
tm.assertIsInstance(res, Panel)
self.assertIsNot(res, self.panel)
self.assertEqual(res.get_value('ItemE', 'foo', 'bar'), 1.5)
res3 = self.panel.set_value('ItemE', 'foobar', 'baz', 5)
self.assertTrue(com.is_float_dtype(res3['ItemE'].values))
with tm.assertRaisesRegexp(TypeError,
"There must be an argument for each axis"
" plus the value provided"):
self.panel.set_value('a')
_panel = tm.makePanel()
tm.add_nans(_panel)
class TestPanel(tm.TestCase, PanelTests, CheckIndexing, SafeForLongAndSparse,
SafeForSparse):
_multiprocess_can_split_ = True
@classmethod
def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
self.panel.items.name = None
def test_panel_warnings(self):
with tm.assert_produces_warning(FutureWarning):
shifted1 = self.panel.shift(lags=1)
with tm.assert_produces_warning(False):
shifted2 = self.panel.shift(periods=1)
tm.assert_panel_equal(shifted1, shifted2)
with tm.assert_produces_warning(False):
shifted3 = self.panel.shift()
tm.assert_panel_equal(shifted1, shifted3)
def test_constructor(self):
# with BlockManager
wp = Panel(self.panel._data)
self.assertIs(wp._data, self.panel._data)
wp = Panel(self.panel._data, copy=True)
self.assertIsNot(wp._data, self.panel._data)
assert_panel_equal(wp, self.panel)
# strings handled properly
wp = Panel([[['foo', 'foo', 'foo', ], ['foo', 'foo', 'foo']]])
self.assertEqual(wp.values.dtype, np.object_)
vals = self.panel.values
# no copy
wp = Panel(vals)
self.assertIs(wp.values, vals)
# copy
wp = Panel(vals, copy=True)
self.assertIsNot(wp.values, vals)
# GH #8285, test when scalar data is used to construct a Panel
# if dtype is not passed, it should be inferred
value_and_dtype = [(1, 'int64'), (3.14, 'float64'),
('foo', np.object_)]
for (val, dtype) in value_and_dtype:
wp = Panel(val, items=range(2), major_axis=range(3),
minor_axis=range(4))
vals = np.empty((2, 3, 4), dtype=dtype)
vals.fill(val)
assert_panel_equal(wp, Panel(vals, dtype=dtype))
# test the case when dtype is passed
wp = Panel(1, items=range(2), major_axis=range(3), minor_axis=range(4),
dtype='float32')
vals = np.empty((2, 3, 4), dtype='float32')
vals.fill(1)
assert_panel_equal(wp, Panel(vals, dtype='float32'))
def test_constructor_cast(self):
zero_filled = self.panel.fillna(0)
casted = Panel(zero_filled._data, dtype=int)
casted2 = Panel(zero_filled.values, dtype=int)
exp_values = zero_filled.values.astype(int)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
casted = Panel(zero_filled._data, dtype=np.int32)
casted2 = Panel(zero_filled.values, dtype=np.int32)
exp_values = zero_filled.values.astype(np.int32)
assert_almost_equal(casted.values, exp_values)
assert_almost_equal(casted2.values, exp_values)
# can't cast
data = [[['foo', 'bar', 'baz']]]
self.assertRaises(ValueError, Panel, data, dtype=float)
def test_constructor_empty_panel(self):
empty = Panel()
self.assertEqual(len(empty.items), 0)
self.assertEqual(len(empty.major_axis), 0)
self.assertEqual(len(empty.minor_axis), 0)
def test_constructor_observe_dtype(self):
# GH #411
panel = Panel(items=lrange(3), major_axis=lrange(3),
minor_axis=lrange(3), dtype='O')
self.assertEqual(panel.values.dtype, np.object_)
def test_constructor_dtypes(self):
# GH #797
def _check_dtype(panel, dtype):
for i in panel.items:
self.assertEqual(panel[i].values.dtype.name, dtype)
# only nan holding types allowed here
for dtype in ['float64', 'float32', 'object']:
panel = Panel(items=lrange(2), major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype=dtype),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.array(np.random.randn(2, 10, 5), dtype='O'),
items=lrange(2),
major_axis=lrange(10),
minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
panel = Panel(np.random.randn(2, 10, 5), items=lrange(
2), major_axis=lrange(10), minor_axis=lrange(5), dtype=dtype)
_check_dtype(panel, dtype)
for dtype in ['float64', 'float32', 'int64', 'int32', 'object']:
df1 = DataFrame(np.random.randn(2, 5),
index=lrange(2), columns=
|
lrange(5)
|
pandas.compat.lrange
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 9 09:33:40 2019
@author: amc
"""
# -------------------- script for A.I. -----------------------#
import numpy
import pandas
import re
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.metrics.pairwise import cosine_similarity
from nltk.stem.snowball import SnowballStemmer
ps = SnowballStemmer('english')
def preprocess(text):
# Stem and remove stopwords
text = re.sub('[^a-zA-Z]', ' ', text)
text = text.lower()
text = text.split()
text = [ps.stem(word) for word in text if not word in set(stopwords.words('english'))]
return ' '.join(text)
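# Rough illustration (exact output depends on the NLTK stopword list and stemmer version):
# preprocess('The interviews were great!') drops punctuation and stopwords and stems the
# remaining tokens, yielding something like 'interview great'.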
dataset1 = pandas.read_csv('interview1.csv', encoding='ISO-8859-1')
dataset2 = pandas.read_csv('interview2.csv', encoding='ISO-8859-1')
dataset3 =
|
pandas.read_csv('interview3.csv', encoding='ISO-8859-1')
|
pandas.read_csv
|
from sendFiles import *
import os
from time import sleep
from json import dumps
from kafka import KafkaProducer
from PIL import Image
import hashlib
kafkaServer = os.getenv("KAFKA_SERVER", "localhost:9092")
producer = KafkaProducer(
bootstrap_servers=[kafkaServer], value_serializer=lambda x: dumps(x).encode("utf-8")
)
# producer = KafkaProducer(
# bootstrap_servers=[kafkaServer],
# value_serializer=lambda x: dumps(x).encode('utf-8'),
# security_protocol="PLAINTEXT",
# sasl_mechanism="SCRAM-SHA-256"
# )
# cityscape_dir= 'C:/Users/zilly/Downloads/leftImg8bit_trainvaltest/leftImg8bit'
# cityscape_dir= 'C:/Users/zilly/Downloads/leftImg8bit_trainvaltest/leftImg8bit/test/berlin'
directories = {}
directories["ingest_dir"] = os.getenv("KITTI_BASEDIR", "~/kitti")
directories["blobstorage_dir"] = os.getenv(
"DATAPIPE_BLOBSTORAGEDIR", "/home/zilly/blobstorage"
)
directories["thumbnail_dir"] = os.getenv(
"DATAPIPE_THUMBNAILDIR", "/home/zilly/blobstoragethumbnail"
)
ingest_dir = directories["ingest_dir"]
basedirectory = os.getenv("KITTI_BASEDIR", "~/")
dataset_dir = ingest_dir + "/" + "/image_02/data/"
kafkaTopic = os.getenv("KAFKA_TOPIC_SENDFILES", "send_file")
print("kafkaTopic: " + kafkaTopic)
# blobstorage_dir='z:/blobstorage'
# thumbnail_dir='z:/blobstorage/thumbnails'
################# KITTI RELATED START ####################
import pandas as pd
columnnames = [
"lat",
"lon",
"alt",
"roll",
"pitch",
"yaw",
"vn",
"ve",
"vf",
"vl",
"vu",
"ax",
"ay",
"az",
"af",
"al",
"au",
"wx",
"wyw",
"wz",
"wf",
"wl",
"wu",
"pos_accuracy",
"vel_accuracy",
"navstat",
"numsats",
"posmode",
"velmode",
"orimode",
]
df = pd.DataFrame(columns=columnnames)
# the odometry data is in this subfolder
metadatadir = "oxts/data"
# import os
# 'os' is a library which contains functions to interact with the operating system, including the file system
import os
# two strings can easily be concatenated using the '+' operator
directory = basedirectory + metadatadir
# iterate over all files in the directory.
# Nota Bene: os.scandir is for Python 3.5 and above. Use os.listdir() for older python versions
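# A sketch of that os.listdir() fallback for older interpreters (assumes the same layout):
# for filename in os.listdir(directory):
#     entry_path = os.path.join(directory, filename)
#     if entry_path.endswith(".txt") and os.path.isfile(entry_path):
#         ...read the file as below...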
for entry in os.scandir(directory):
if entry.path.endswith(".txt") and entry.is_file():
# print(entry.path)
new_df =
|
pd.read_csv(entry.path, delimiter=" ", names=columnnames)
|
pandas.read_csv
|
# *********************************************************************************
# REopt, Copyright (c) 2019-2020, Alliance for Sustainable Energy, LLC.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this list
# of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# Neither the name of the copyright holder nor the names of its contributors may be
# used to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# *********************************************************************************
import numpy as np
import pandas as pd
from .urdb_logger import log_urdb_errors
from .nested_inputs import nested_input_definitions, list_of_float, list_of_str
#Note: list_of_float is actually needed
import os
import csv
import copy
from reo.src.urdb_rate import Rate
import re
import uuid
from reo.src.techs import Generator
from reo.nested_inputs import max_big_number
from reo.src.emissions_calculator import EmissionsCalculator
hard_problems_csv = os.path.join('reo', 'hard_problems.csv')
hard_problem_labels = [i[0] for i in csv.reader(open(hard_problems_csv, 'r'))]
class URDB_RateValidator:
error_folder = 'urdb_rate_errors'
def __init__(self, _log_errors=True, **kwargs):
"""
Takes a dictionary parsed from a URDB Rate Json response
- See http://en.openei.org/services/doc/rest/util_rates/?version=3
Rates may or may not have the following keys:
label Type: string
utility Type: string
name Type: string
uri Type: URI
approved Type: boolean
startdate Type: integer
enddate Type: integer
supercedes Type: string
sector Type: string
description Type: string
source Type: string
sourceparent Type: URI
basicinformationcomments Type: string
peakkwcapacitymin Type: decimal
peakkwcapacitymax Type: decimal
peakkwcapacityhistory Type: decimal
peakkwhusagemin Type: decimal
peakkwhusagemax Type: decimal
peakkwhusagehistory Type: decimal
voltageminimum Type: decimal
voltagemaximum Type: decimal
voltagecategory Type: string
phasewiring Type: string
flatdemandunit Type: string
flatdemandstructure Type: array
demandrateunit Type: string
demandweekdayschedule Type: array
demandratchetpercentage Type: array
demandwindow Type: decimal
demandreactivepowercharge Type: decimal
coincidentrateunit Type: string
coincidentratestructure Type: array
coincidentrateschedule Type: array
demandattrs Type: array
demandcomments Type: string
usenetmetering Type: boolean
energyratestructure Type: array
energyweekdayschedule Type: array
energyweekendschedule Type: array
energyattrs Type: array
energycomments Type: string
fixedmonthlycharge Type: decimal
minmonthlycharge Type: decimal
annualmincharge Type: decimal
"""
self.errors = [] #Catch Errors - write to output file
self.warnings = [] #Catch Warnings
kwargs.setdefault("label", "custom")
for key in kwargs: #Load in attributes
setattr(self, key, kwargs[key])
self.numbers = [
'fixedmonthlycharge',
'fixedchargefirstmeter',
'mincharge',
'minmonthlycharge',
'annualmincharge',
'peakkwcapacitymin',
]
self.validate() #Validate attributes
if _log_errors:
if len(self.errors + self.warnings) > 0:
log_urdb_errors(self.label, self.errors, self.warnings)
def validate(self):
# Check if in known hard problems
if self.label in hard_problem_labels:
self.errors.append("URDB Rate (label={}) is currently restricted due to performance limitations".format(self.label))
# Validate each attribute with a custom validate function
required_fields = ['energyweekdayschedule','energyweekendschedule','energyratestructure']
for f in required_fields:
if self.isNotNone(f):
self.isNotEmptyList(f)
for key in dir(self):
if key in self.numbers:
self.validate_number(key)
else:
v = 'validate_' + key
if hasattr(self, v):
getattr(self, v)()
@property
def dependencies(self):
# map to tell if a field requires one or more other fields
return {
'demandweekdayschedule': ['demandratestructure'],
'demandweekendschedule': ['demandratestructure'],
'demandratestructure':['demandweekdayschedule','demandweekendschedule'],
'energyweekdayschedule': ['energyratestructure'],
'energyweekendschedule': ['energyratestructure'],
'energyratestructure':['energyweekdayschedule','energyweekendschedule'],
'flatdemandmonths': ['flatdemandstructure'],
'flatdemandstructure': ['flatdemandmonths'],
}
@property
def isValid(self):
#True if no errors found during validation on init
return self.errors == []
# CUSTOM VALIDATION FUNCTIONS FOR EACH URDB ATTRIBUTE name validate_<attribute name>
def validate_demandratestructure(self):
name = 'demandratestructure'
if self.validDependencies(name):
self.validRate(name)
def validate_demandweekdayschedule(self):
name = 'demandweekdayschedule'
self.validCompleteHours(name, [12,24])
if self.validDependencies(name):
self.validSchedule(name, 'demandratestructure')
def validate_demandweekendschedule(self):
name = 'demandweekendschedule'
self.validCompleteHours(name, [12,24])
if self.validDependencies(name):
self.validSchedule(name, 'demandratestructure')
def validate_energyweekendschedule(self):
name = 'energyweekendschedule'
self.validCompleteHours(name, [12,24])
if self.validDependencies(name):
self.validSchedule(name, 'energyratestructure')
def validate_energyweekdayschedule(self):
name = 'energyweekdayschedule'
self.validCompleteHours(name, [12,24])
if self.validDependencies(name):
self.validSchedule(name, 'energyratestructure')
def validate_energyratestructure(self):
name = 'energyratestructure'
if self.validDependencies(name):
self.validRate(name)
def validate_flatdemandstructure(self):
name = 'flatdemandstructure'
if self.validDependencies(name):
self.validRate(name)
def validate_flatdemandmonths(self):
name = 'flatdemandmonths'
self.validCompleteHours(name, [12])
if self.validDependencies(name):
self.validSchedule(name, 'flatdemandstructure')
def validate_coincidentratestructure(self):
name = 'coincidentratestructure'
if self.validDependencies(name):
self.validRate(name)
def validate_coincidentrateschedule(self):
name = 'coincidentrateschedule'
if self.validDependencies(name):
self.validSchedule(name, 'flatdemandstructure')
def validate_demandratchetpercentage(self):
if type(self.demandratchetpercentage) != list:
self.errors.append('Expecting demandratchetpercentage to be a list of 12 values.')
if len(self.demandratchetpercentage) != 12:
self.errors.append('Expecting demandratchetpercentage to be a list of 12 values.')
#### FUNCTIONS TO VALIDATE ATTRIBUTES ####
def validDependencies(self, name):
# check that all dependent attributes exist
# return Boolean if any errors found
all_dependencies = self.dependencies.get(name)
valid = True
if all_dependencies is not None:
for d in all_dependencies:
error = False
if hasattr(self,d):
if getattr(self,d) is None:
error = True
else:
error=True
if error:
self.errors.append("Missing %s a dependency of %s" % (d, name))
valid = False
return valid
def validCompleteHours(self, schedule_name,expected_counts):
# check that each array in a schedule contains the correct number of entries
# return Boolean if any errors found
if hasattr(self,schedule_name):
valid = True
schedule = getattr(self,schedule_name)
def recursive_search(item,level=0, entry=0):
nonlocal valid  # let a failed count inside the nested search update the method's return flag
if type(item) == list:
if len(item) != expected_counts[level]:
msg = 'Entry {} {}{} does not contain {} entries'.format(entry,'in sublevel ' + str(level)+ ' ' if level>0 else '', schedule_name, expected_counts[level])
self.errors.append(msg)
valid = False
for ii,subitem in enumerate(item):
recursive_search(subitem,level=level+1, entry=ii)
recursive_search(schedule)
return valid
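# For example, an 'energyweekdayschedule' must be 12 month arrays of 24 hourly period
# indices, so validCompleteHours('energyweekdayschedule', [12, 24]) (as called by the
# validators above) flags any month that does not contain exactly 24 entries.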
def validate_number(self, name):
try:
float(getattr(self, name, 0))
except:
self.errors.append('Entry for {} ({}) is not a valid number.'.format(name, getattr(self, name)))
def isNotNone(self, name):
if getattr(self,name, None) is None:
self.errors.append('Missing valid entry for {}.'.format(name))
return False
return True
def isNotEmptyList(self, name):
if type(getattr(self,name)) != list:
self.errors.append('Expecting a list for {}.'.format(name))
return False
if len(getattr(self,name)) == 0:
self.errors.append('List is empty for {}.'.format(name))
return False
if None in getattr(self,name):
self.errors.append('List for {} contains null value(s).'.format(name))
return False
return True
def validRate(self, rate):
# check that each tier in the rate structure array has a rate/sell/adj attribute, and that all tiers except one contain a 'max' attribute
# return Boolean if any errors found
if hasattr(self,rate):
valid = True
for i, r in enumerate(getattr(self, rate)):
if len(r) == 0:
self.errors.append('Missing rate information for rate ' + str(i) + ' in ' + rate + '.')
valid = False
num_max_tags = 0
for ii, t in enumerate(r):
if t.get('max') is not None:
num_max_tags +=1
if t.get('rate') is None and t.get('sell') is None and t.get('adj') is None:
self.errors.append('Missing rate/sell/adj attributes for tier ' + str(ii) + " in rate " + str(i) + ' ' + rate + '.')
valid = False
if len(r) > 1:
num_missing_max_tags = len(r) - 1 - num_max_tags
if num_missing_max_tags > 0:
self.errors.append("Missing 'max' tag for {} tiers in rate {} for {}.".format( num_missing_max_tags, i, rate ))
valid = False
return valid
return False
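# Illustrative (hypothetical) tier structure accepted by validRate: [[{'max': 500, 'rate': 0.10},
# {'rate': 0.15}]] passes because every tier carries a rate/sell/adj value and only the final
# tier omits 'max'; removing 'max' from the first tier would add a "Missing 'max' tag" error.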
def validSchedule(self, schedules, rate):
# check that each period referenced in a schedule array has an associated entry in the rate structure attribute
# return Boolean if any errors found
if hasattr(self,schedules):
valid = True
s = getattr(self, schedules)
if isinstance(s[0],list):
s = np.concatenate(s)
periods = list(set(s))
# Loop through all periods and catch an error if one exists
if hasattr(self,rate):
for period in periods:
if period > len(getattr(self, rate)) - 1 or period < 0:
self.errors.append(
'%s contains value %s which has no associated rate in %s.' % (schedules, period, rate))
valid = False
return valid
else:
self.warnings.append('{} does not exist to check {}.'.format(rate,schedules))
return False
class ValidateNestedInput:
# ASSUMPTIONS:
# User only has to specify each attribute once
# User at minimum only needs to pass in required information; failing to input a required attribute renders the whole input invalid
# User can assume default values exist for all non-required attributes
# User can assume null or unspecified attributes will be converted to defaults
# User needs to manually overwrite defaults (with zero's) to negate their effect (pass in a federal itc of 0% to model without federal itc)
# Wind turned off by default, PV and Battery created by default
# LOGIC
# To easily map dictionaries into objects and maintain relationships between objects:
# if a key starts with a capital letter:
# the singular form of the key name must match exactly the name of a src object
# the key maps to a dictionary which will be used as a **kwargs input (along with any other necessary previously created objects) to the key's object creation function
# if the key is not present in the JSON input or it is set to null, the API will create the object(s) with default values, unless required attributes are needed
# if the key does not end in "s" and the value is not null:
# all keys in the value dictionary that start in lower case are attributes of that object
# if an attribute is not required and not defined in the dictionary or defined as null, default values will be used later in the code for these values
# if an attribute is required and not defined or defined as null, the entire input is invalid
# if the key does not end in "s" and the value is null:
# the object will be created with default values (barring required attributes)
# if the key ends in "s" and is not null:
# the value is a list of objects to be created of that singular key type or null
# if the key is not present in the JSON input or if the list is empty or null, the API will create any default objects that REopt needs
# if a key starts with a lowercase letter:
# it is an attribute of an object
# its name ends in the units of that attribute
# if it is a boolean - name is camelCase starting with can
# if it ends in _pct - values must be between -1 and 1 inclusive
# abbreviations in the name are avoided (exceptions include common units/terms like pct, soc...)
# if it is a required attribute and the value is null or the key value pair does not exist, the entire input is invalid
# if it is not a required attribute and the value is null or the key does not exist, default values will be used
# EXAMPLE 1 - BASIC POST
# {
# "Scenario": {
# "Site": {
# "latitude": 40, "longitude": -123, "LoadProfile": {"building_type": "hospital", "load_size": 10000},
# "Utility": {"urdb_rate_json": {}}
# }
# },
# }
#
# # EXAMPLE 2 - BASIC POST - PV ONLY
# {
# "Scenario": {
# "Site": {
# "latitude": 40, "longitude": -123, "LoadProfile": {"building_type": "hospital", "load_size": 10000},
# "Utility": {"urdb_rate_json": {}}, "Storage":{'max_kw':0}
# }
# }
# }
#
# # EXAMPLE 3 - BASIC POST - NO FED ITC FOR PV, run battery and pv, no wind
# {
# "Scenario": {
# "Site": {
# "latitude": 40, "longitude": -123, "LoadProfile": {"building_type": "hospital", "load_size": 10000},
# "Utility": {"urdb_rate_json": {}}, "PV": {"itc_federal_us_dollars_per_kw": 0}}
# }
# }
# }
def __init__(self, input_dict):
self.list_or_dict_objects = ['PV']
self.nested_input_definitions = nested_input_definitions
self.input_data_errors = []
self.urdb_errors = []
self.input_as_none = []
self.invalid_inputs = []
self.resampled_inputs = []
self.emission_warning = []
self.defaults_inserted = []
self.input_dict = dict()
if type(input_dict) is not dict:
self.input_data_errors.append("POST must contain a valid JSON formatted accoring to format described in https://developer.nrel.gov/docs/energy-optimization/reopt-v1/")
else:
self.input_dict['Scenario'] = input_dict.get('Scenario') or {}
for k,v in input_dict.items():
if k != 'Scenario':
self.invalid_inputs.append([k, ["Top Level"]])
self.check_object_types(self.input_dict)
if self.isValid:
self.recursively_check_input_dict(self.nested_input_definitions, self.remove_invalid_keys)
self.recursively_check_input_dict(self.nested_input_definitions, self.remove_nones)
self.recursively_check_input_dict(self.nested_input_definitions, self.convert_data_types)
self.recursively_check_input_dict(self.nested_input_definitions, self.fillin_defaults)
self.recursively_check_input_dict(self.nested_input_definitions, self.check_min_max_restrictions)
self.recursively_check_input_dict(self.nested_input_definitions, self.check_required_attributes)
self.recursively_check_input_dict(self.nested_input_definitions, self.check_special_cases)
self.recursively_check_input_dict(self.nested_input_definitions, self.add_number_to_listed_inputs)
if type(self.input_dict['Scenario']['Site']['PV']) == dict:
self.input_dict['Scenario']['Site']['PV']['pv_number'] = 1
self.input_dict['Scenario']['Site']['PV'] = [self.input_dict['Scenario']['Site']['PV']]
@property
def isValid(self):
if self.input_data_errors or self.urdb_errors:
return False
return True
@property
def messages(self):
output = {}
if self.errors != {}:
output = self.errors
if self.warnings != {}:
output['warnings'] = self.warnings
return output
def warning_message(self, warnings):
"""
Convert a list of lists into a dictionary
:param warnings: list - item 1 argument, item 2 location
:return: message - 'Scenario>Site: latitude and longitude'
"""
output = {}
for arg, path in warnings:
path = ">".join(path)
if path not in output:
output[path] = arg
else:
output[path] += ' AND ' + arg
return output
@property
def errors(self):
output = {}
if self.input_data_errors:
output["error"] = "Invalid inputs. See 'input_errors'."
output["input_errors"] = self.input_data_errors
if self.urdb_errors and self.input_data_errors:
output["input_errors"] += ['URDB Rate: ' + ' '.join(self.urdb_errors)]
elif self.urdb_errors:
output["error"] = "Invalid inputs. See 'input_errors'."
output["input_errors"] = ['URDB Rate: ' + ' '.join(self.urdb_errors)]
return output
@property
def warnings(self):
output = {}
if bool(self.defaults_inserted):
output["Default values used for the following:"] = self.warning_message(self.defaults_inserted)
if bool(self.invalid_inputs):
output["Following inputs are invalid:"] = self.warning_message(self.invalid_inputs)
if bool(self.resampled_inputs):
output["Following inputs were resampled:"] = self.warning_message(self.resampled_inputs)
if bool(self.emission_warning):
output["Emissons Warning"] = {"error":self.emission_warning}
return output
def isSingularKey(self, k):
"""
True if the string `k` starts with an upper case letter and does not end with "s"
:param k: str
:return: True/False
"""
return k[0] == k[0].upper() and k[-1] != 's'
def isPluralKey(self, k):
return k[0] == k[0].upper() and k[-1] == 's'
def isAttribute(self, k):
return k[0] == k[0].lower()
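    # Illustrative sketch of the key-classification helpers above (assumed example keys):
    #   isSingularKey("PV")       -> True   (capitalized, no trailing "s": a single nested object)
    #   isPluralKey("Generators") -> True   (capitalized with trailing "s": a collection of objects)
    #   isAttribute("latitude")   -> True   (lower-case first character: a plain input value)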
def check_object_types(self, nested_dictionary_to_check, object_name_path=[]):
"""
        Checks that all keys (e.g. Scenario, Site) are valid dicts or lists. This function only checks object names
and does not validate attributes
:param nested_dictionary_to_check: data to be validated; default is self.input_dict
:param object_name_path: list of str, used to keep track of keys necessary to access a value to check in the
nested_template / nested_dictionary_to_check
:return: None
"""
# Loop through all keys in the dictionary
for name in nested_dictionary_to_check.keys():
            # If the key is an object name (e.g. Scenario, Wind) continue
if self.isSingularKey(name):
# get the value of the key
real_input_value = nested_dictionary_to_check.get(name)
# assume the value is fine until we catch an error
continue_checking = True
                # Assess all possible value data type scenarios
                # catch the case where the value is ok:
# dicts are always allowed
# Nones are ok too, they will get filled in with default values at first then later checked for required attributes
if type(real_input_value) == dict or real_input_value is None:
pass
# catch list case
elif type(real_input_value) == list:
                    # if the object is not one that supports a list input, flag an error
if name not in self.list_or_dict_objects:
message = "A list of inputs is not allowed for {}".format(">".join(object_name_path + [name]))
self.input_data_errors.append(message)
continue_checking = False
else:
# if the object supports list input, but contains anything other than dicts flag an error
if len(real_input_value) > 0:
if False in [type(x)==dict for x in real_input_value]:
message = "Lists for {} must only contain hashes of key/value pairs. No other data types are allowed (i.e. list, float, int) ".format(">".join(object_name_path + [name]))
self.input_data_errors.append(message)
continue_checking = False
# catch all other data types and flag them as an error
else:
valid_types = "hash of key and value pairs,"
if name not in self.list_or_dict_objects:
valid_types += " or a list"
message = "Invalid data type ({}) for {}. Must be {}".format(type(real_input_value).__name__, ">".join(object_name_path + [name]), valid_types)
self.input_data_errors.append(message)
continue_checking = False
# if no error has been thrown continue recursively checking the nested dict
if continue_checking:
if type(real_input_value) == list:
for rv in real_input_value:
self.check_object_types(rv or {}, object_name_path=object_name_path + [name])
else:
self.check_object_types(real_input_value or {}, object_name_path=object_name_path + [name])
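    # Illustrative sketch (assumed posts, for orientation only): check_object_types accepts
    #   {"Scenario": {"Site": {"PV": [{"max_kw": 10}, {"max_kw": 5}]}}}   # PV is in list_or_dict_objects
    # but records input_data_errors for
    #   {"Scenario": {"Site": {"Wind": [{"max_kw": 10}]}}}                # Wind does not allow a list
    #   {"Scenario": {"Site": {"PV": [1, 2, 3]}}}                         # list entries must be dicts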
def recursively_check_input_dict(self, nested_template, comparison_function, nested_dictionary_to_check=None,
object_name_path=[]):
"""
Recursively perform comparison_function on nested_dictionary_to_check using nested_template as a guide for
the (key: value) pairs to be checked in nested_dictionary_to_check.
comparison_function's include
- remove_invalid_keys
- remove_nones
- convert_data_types
- fillin_defaults
- check_min_max_restrictions
- check_required_attributes
- add_invalid_data (for testing)
:param nested_template: nested dictionary, used as guide for checking nested_dictionary_to_check
:param comparison_function: one of the input data validation tasks listed above
:param nested_dictionary_to_check: data to be validated; default is self.input_dict
:param object_name_path: list of str, used to keep track of keys necessary to access a value to check in the
nested_template / nested_dictionary_to_check
:return: None
"""
# this is a dict of input values from the user
if nested_dictionary_to_check is None:
nested_dictionary_to_check = self.input_dict
# this is a corresponding dict from the input definitions used to validate structure and content
# template is a list to handle cases where the input is a list
if type(nested_template) == dict:
nested_template = [nested_template]
        # catch the case where there is no nested template
if nested_template is None:
nested_template = [{}]
# Loop through template structure so we catch all possible keys even if the user does not provide them
for template in nested_template:
for template_k, template_values in template.items():
                # at a key value pair, get the real value the user input (can be None)
real_input_values = nested_dictionary_to_check.get(template_k)
# start checking assuming that the input is fine and a list
                # for ease of coding we will populate real_values_list with a list
continue_checking = True
input_isDict = False
real_values_list = None
# if the value is a dict make it a list and update the dict or list indicator
if type(real_input_values) == dict:
input_isDict = True
real_values_list = [real_input_values]
# if the value is a list just update the real_values_list variable
if type(real_input_values) == list:
real_values_list = real_input_values
# if the value is a None make it a list with one dict in it
if real_input_values is None:
input_isDict = True
real_values_list = [{}]
# if the key is an object name apply the comparison function to all key/value pairs in each dict in the list of values
if self.isSingularKey(template_k):
                    # number is indexed from 1, currently used only for telling PVs apart
for number, real_values in enumerate(real_values_list):
number += 1
# real values will always be a list per validation above
comparison_function(object_name_path=object_name_path + [template_k],
template_values=template_values, real_values=real_values,
number=number, input_isDict=input_isDict)
# recursively apply this function to the real values dict
self.recursively_check_input_dict(template[template_k], comparison_function,
real_values or {},
object_name_path=object_name_path + [template_k])
                    # if at the end of validation we are left with a list containing one dict, convert the entry for the object back to
# a dict from a list
if len(real_values_list) == 1:
nested_dictionary_to_check[template_k] = real_values_list[0]
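    # Illustrative sketch (assumed flow): with a template like {"Scenario": {"Site": {"latitude": {...}}}} and
    # comparison_function = self.check_min_max_restrictions, the recursion visits "Scenario" then "Site",
    # calling the comparison function once per object (and once per list entry for PV) with that object's
    # user-supplied attribute dict before descending further.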
def update_attribute_value(self, object_name_path, number, attribute, value):
"""
updates an attribute in the user input dictionary
        :param object_name_path: list of str, used to keep track of keys necessary to access a value in a nested dict
        :param number: int, the order of the object in a list input (e.g. PV), defaulted to 1 in recursively_check_input_dict
        :param attribute: str, the name of the key to update
        :param value: any, new value for the attribute
"""
to_update = self.input_dict
for name in object_name_path:
name = name.split(' ')[0]
to_update = to_update[name]
if number == 1 and type(to_update) == dict:
to_update[attribute] = value
else:
to_update[number-1][attribute] = value
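    # Illustrative sketch (assumed values): update_attribute_value(["Scenario", "Site", "PV"], 2, "max_kw", 5.0)
    # walks self.input_dict["Scenario"]["Site"]["PV"] and, since number != 1, sets
    # self.input_dict["Scenario"]["Site"]["PV"][1]["max_kw"] = 5.0, i.e. the second PV in the list.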
def delete_attribute(self, object_name_path, number, attribute):
"""
deletes an attribute in the user input dictionary
        :param object_name_path: list of str, used to keep track of keys necessary to access a value in a nested dict
        :param number: int, the order of the object in a list input (e.g. PV), defaulted to 1 in recursively_check_input_dict
        :param attribute: str, the name of the key to delete
"""
to_update = self.input_dict
for name in object_name_path:
name = name.split(' ')[0]
to_update = to_update[name]
if number == 1 and type(to_update) == dict:
if attribute in to_update.keys():
del to_update[attribute]
else:
if attribute in to_update[number-1].keys():
del to_update[number-1][attribute]
def object_name_string(self, object_name_path):
return '>'.join(object_name_path)
def test_data(self, definition_attribute):
"""
Used only in reo.tests.test_reopt_url. Does not actually validate inputs.
:param definition_attribute: str, key for input parameter validation dict, eg. {'type':'float', ... }
:return: test_data_list is a list of lists, with each sub list a pair of [str, dict],
where the str is an input param, dict is an entire post with a bad value for that input param
"""
test_data_list = []
number = 1
        def swap_logic(object_name_path, name, definition, good_val, validation_attribute, number=1):
"""
append `name` and a nested-dict (post) to test_data_list with a bad value inserted into the post for
the input at object_name_path: name
:param object_name_path: list of str, eg. ["Scenario", "Site", "PV"]
:param name: str, input value to replace with bad value, eg. "latitude"
:param definition: dict with input parameter validation values, eg. {'type':'float', ... }
:param good_val: the good value for the input parameter
:param validation_attribute: str, ['min', 'max', 'restrict_to', 'type']
:return: None
"""
attribute = definition.get(validation_attribute)
if attribute is not None:
bad_val = None
if validation_attribute == 'min':
bad_val = attribute - 1
if isinstance(good_val, list):
                        bad_val = [bad_val]
if validation_attribute == 'max':
bad_val = attribute + 1
if isinstance(good_val, list):
bad_val = [bad_val]
if validation_attribute == 'restrict_to':
bad_val = "OOPS"
if validation_attribute == 'type':
if type(attribute) != list and 'list_of_float' != attribute:
if any(isinstance(good_val, x) for x in [float, int, dict, bool]):
bad_val = "OOPS"
elif 'list_of_float' in attribute or 'list_of_float' == attribute:
if isinstance(good_val, list):
bad_val = "OOPS"
if bad_val is not None:
self.update_attribute_value(object_name_path, number, name, bad_val)
test_data_list.append([name, copy.deepcopy(self.input_dict)])
self.update_attribute_value(object_name_path, number, name, good_val)
def add_invalid_data(object_name_path, template_values=None, real_values=None, number=number, input_isDict=None):
if real_values is not None:
for name, value in template_values.items():
if self.isAttribute(name):
swap_logic(object_name_path, name, value, real_values.get(name),
validation_attribute=definition_attribute, number=number)
self.recursively_check_input_dict(self.nested_input_definitions, add_invalid_data)
return test_data_list
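    # Illustrative sketch (assumed shape): test_data('max') returns entries roughly like
    #   ["latitude", {"Scenario": {"Site": {"latitude": <value above the allowed max>, ...}}}]
    # i.e. the name of the violated input plus a deep copy of the post with that one value pushed out of bounds.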
def add_number_to_listed_inputs(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison function to add a number to each dict in a list (i.e. pv_number to each PV)
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
        if real_values is not None and input_isDict == False:
            self.update_attribute_value(object_name_path, number, object_name_path[-1].lower() + '_number', number)
def remove_nones(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
remove any `None` values from the input_dict.
this step is important to prevent exceptions in later validation steps.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if real_values is not None:
rv = copy.deepcopy(real_values)
for name, value in rv.items():
if self.isAttribute(name):
if value is None:
self.delete_attribute(object_name_path, number, name)
if input_isDict == True or input_isDict==None:
self.input_as_none.append([name, object_name_path[-1]])
if input_isDict == False:
self.input_as_none.append([name, object_name_path[-1] + ' (number {})'.format(number)])
def remove_invalid_keys(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
remove any input values provided by user that are not included in nested_input_definitions.
this step is important to protect against sql injections and other similar cyber-attacks.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if real_values is not None:
rv = copy.deepcopy(real_values)
for name, value in rv.items():
if self.isAttribute(name):
if name not in template_values.keys():
self.delete_attribute(object_name_path, number, name)
if input_isDict == True or input_isDict==None:
self.invalid_inputs.append([name, object_name_path])
if input_isDict == False:
object_name_path[-1] = object_name_path[-1] + ' (number {})'.format(number)
self.invalid_inputs.append([name, object_name_path])
def check_special_cases(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
        checks special input requirements not otherwise programmatically captured by nested input definitions
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if object_name_path[-1] == "Scenario":
if real_values.get('user_uuid') is not None:
self.validate_user_uuid(user_uuid=real_values['user_uuid'], err_msg = "user_uuid must be a valid UUID")
if real_values.get('description') is not None:
self.validate_text_fields(str = real_values['description'],
pattern = r'^[-0-9a-zA-Z. $:;)(*&#_!@]*$',
err_msg = "description can include enlisted special characters: [-0-9a-zA-Z. $:;)(*&#_!@] and can have 0-9, a-z, A-Z, periods, and spaces.")
if object_name_path[-1] == "Site":
if real_values.get('address') is not None:
self.validate_text_fields(str = real_values['address'], pattern = r'^[0-9a-zA-Z. ]*$',
err_msg = "Site address must not include special characters. Restricted to 0-9, a-z, A-Z, periods, and spaces.")
if object_name_path[-1] == "PV":
if real_values.get("prod_factor_series_kw") == []:
del real_values["prod_factor_series_kw"]
if any((isinstance(real_values['max_kw'], x) for x in [float, int])):
if real_values['max_kw'] > 0:
if real_values.get("prod_factor_series_kw"):
self.validate_8760(real_values.get("prod_factor_series_kw"),
"PV", "prod_factor_series_kw", self.input_dict['Scenario']['time_steps_per_hour'])
if object_name_path[-1] == "Wind":
if any((isinstance(real_values['max_kw'], x) for x in [float, int])):
if real_values['max_kw'] > 0:
if real_values.get("prod_factor_series_kw") == []:
del real_values["prod_factor_series_kw"]
if real_values.get("prod_factor_series_kw"):
self.validate_8760(real_values.get("prod_factor_series_kw"),
"Wind", "prod_factor_series_kw", self.input_dict['Scenario']['time_steps_per_hour'])
if real_values.get("wind_meters_per_sec"):
self.validate_8760(real_values.get("wind_meters_per_sec"),
"Wind", "wind_meters_per_sec", self.input_dict['Scenario']['time_steps_per_hour'])
self.validate_8760(real_values.get("wind_direction_degrees"),
"Wind", "wind_direction_degrees", self.input_dict['Scenario']['time_steps_per_hour'])
self.validate_8760(real_values.get("temperature_celsius"),
"Wind", "temperature_celsius", self.input_dict['Scenario']['time_steps_per_hour'])
self.validate_8760(real_values.get("pressure_atmospheres"),
"Wind", "pressure_atmospheres", self.input_dict['Scenario']['time_steps_per_hour'])
else:
from reo.src.wind_resource import get_conic_coords
if self.input_dict['Scenario']['Site']['Wind'].get('size_class') is None:
"""
size_class is determined by average load. If using simulated load, then we have to get the ASHRAE
climate zone from the DeveloperREOapi in order to determine the load profile (done in BuiltInProfile).
In order to avoid redundant external API calls, when using the BuiltInProfile here we save the
BuiltInProfile in the inputs as though a user passed in the profile as their own. This logic used to be
handled in reo.src.load_profile, but due to the need for the average load here, the work-flow has been
modified.
"""
avg_load_kw = 0
if self.input_dict['Scenario']['Site']['LoadProfile'].get('annual_kwh') is not None:
annual_kwh_list = self.input_dict['Scenario']['Site']['LoadProfile'].get('annual_kwh')
percent_share_list = self.input_dict['Scenario']['Site']['LoadProfile'].get('percent_share')
# Find weighted avg for hybrid load profile
avg_load_kw = sum(
[annual_kwh_list[i] * percent_share_list[i] / 100 for i in range(len(annual_kwh_list))]) / 8760
elif self.input_dict['Scenario']['Site']['LoadProfile'].get('annual_kwh') is None and self.input_dict['Scenario']['Site']['LoadProfile'].get('doe_reference_name') is not None:
from reo.src.load_profile import BuiltInProfile
default_annual_kwh_list = []
doe_reference_name_list = self.input_dict['Scenario']['Site']['LoadProfile']['doe_reference_name']
percent_share_list = self.input_dict['Scenario']['Site']['LoadProfile']['percent_share']
for i in range(len(doe_reference_name_list)):
self.input_dict['Scenario']['Site']['LoadProfile']['doe_reference_name'] = doe_reference_name_list[i]
b = BuiltInProfile(latitude=self.input_dict['Scenario']['Site']['latitude'],longitude=self.input_dict['Scenario']['Site']['longitude'], **self.input_dict['Scenario']['Site']['LoadProfile'])
default_annual_kwh_list.append(b.default_annual_kwh)
avg_load_kw = sum([default_annual_kwh_list[i] * percent_share_list[i] / 100 for i in range(len(default_annual_kwh_list))]) / 8760
# resetting the doe_reference_name key to its original list
# form for further processing in loadprofile.py file
self.input_dict['Scenario']['Site']['LoadProfile'][
'doe_reference_name'] = doe_reference_name_list
elif self.input_dict['Scenario']['Site']['LoadProfile'].get('loads_kw') in [None,[]]:
from reo.src.load_profile import BuiltInProfile
b = BuiltInProfile(latitude=self.input_dict['Scenario']['Site']['latitude'],
longitude=self.input_dict['Scenario']['Site']['longitude'],
**self.input_dict['Scenario']['Site']['LoadProfile']
)
self.input_dict['Scenario']['Site']['LoadProfile']['loads_kw'] = b.built_in_profile
avg_load_kw = sum(self.input_dict['Scenario']['Site']['LoadProfile']['loads_kw'])\
/ len(self.input_dict['Scenario']['Site']['LoadProfile']['loads_kw'])
if avg_load_kw <= 12.5:
self.input_dict['Scenario']['Site']['Wind']['size_class'] = 'residential'
elif avg_load_kw <= 100:
self.input_dict['Scenario']['Site']['Wind']['size_class'] = 'commercial'
elif avg_load_kw <= 1000:
self.input_dict['Scenario']['Site']['Wind']['size_class'] = 'medium'
else:
self.input_dict['Scenario']['Site']['Wind']['size_class'] = 'large'
try:
get_conic_coords(
lat=self.input_dict['Scenario']['Site']['latitude'],
lng=self.input_dict['Scenario']['Site']['longitude'])
except Exception as e:
self.input_data_errors.append(e.args[0])
if object_name_path[-1] == "Generator":
if self.isValid:
fuel_conversion_per_gal = {
'diesel_oil': 22.51
}
if self.input_dict['Scenario']['Site']['Generator'].get('emissions_factor_lb_CO2_per_gal') is None:
self.update_attribute_value(object_name_path, number, 'emissions_factor_lb_CO2_per_gal', fuel_conversion_per_gal.get('diesel_oil'))
if (real_values["max_kw"] > 0 or real_values["existing_kw"] > 0):
# then replace zeros in default burn rate and slope, and set min/max kw values appropriately for
# REopt (which need to be in place before data is saved and passed on to celery tasks)
gen = real_values
m, b = Generator.default_fuel_burn_rate(gen["min_kw"] + gen["existing_kw"])
if gen["fuel_slope_gal_per_kwh"] == 0:
gen["fuel_slope_gal_per_kwh"] = m
if gen["fuel_intercept_gal_per_hr"] == 0:
gen["fuel_intercept_gal_per_hr"] = b
if object_name_path[-1] == "LoadProfile":
if self.isValid:
if real_values.get('outage_start_hour') is not None and real_values.get('outage_end_hour') is not None:
if real_values.get('outage_start_hour') == real_values.get('outage_end_hour'):
self.input_data_errors.append('LoadProfile outage_start_hour and outage_end_hour cannot be the same')
if type(real_values.get('percent_share')) in [float, int]:
if real_values.get('percent_share') == 100:
real_values['percent_share'] = [100]
self.update_attribute_value(object_name_path, number, 'percent_share', [100.0])
else:
self.input_data_errors.append(
                            'The percent_share input for a load profile must be 100 or a list of numbers that sums to 100.')
if len(real_values.get('percent_share')) > 0:
percent_share_sum = sum(real_values['percent_share'])
if percent_share_sum != 100.0:
self.input_data_errors.append(
'The sum of elements of percent share list for hybrid load profile should be 100.')
if real_values.get('annual_kwh') is not None:
if type(real_values['annual_kwh']) is not list:
self.update_attribute_value(object_name_path, number, 'annual_kwh', [real_values['annual_kwh']])
if real_values.get('doe_reference_name') is not None:
if type(real_values['doe_reference_name']) is not list:
self.update_attribute_value(object_name_path, number, 'doe_reference_name',[real_values['doe_reference_name']])
if len(real_values.get('doe_reference_name')) > 1:
if len(real_values.get('doe_reference_name')) != len(real_values.get('percent_share')):
self.input_data_errors.append(
'The length of doe_reference_name and percent_share lists should be equal for constructing hybrid load profile')
if real_values.get('annual_kwh') is not None:
if len(real_values.get('doe_reference_name')) != len(real_values.get('annual_kwh')):
self.input_data_errors.append('The length of doe_reference_name and annual_kwh lists should be equal for constructing hybrid load profile')
if object_name_path[-1] == "ElectricTariff":
electric_tariff = real_values
if (len(electric_tariff.get('emissions_factor_series_lb_CO2_per_kwh') or []) == 0):
if (self.input_dict['Scenario']['Site'].get('latitude') is not None) and \
(self.input_dict['Scenario']['Site'].get('longitude') is not None):
ec = EmissionsCalculator( latitude=self.input_dict['Scenario']['Site']['latitude'],
longitude=self.input_dict['Scenario']['Site']['longitude'],
time_steps_per_hour = self.input_dict['Scenario']['time_steps_per_hour'])
emissions_series = None
try:
emissions_series = ec.emissions_series
emissions_region = ec.region
except AttributeError as e:
                        # Emissions warning is a specific type of warning that we check for and display to the user
                        # when it occurs. At this point emissions are not required to do a run, so this simply records
                        # why we could not get an emissions series; emissions are then not calculated, but the run is
                        # not prevented from optimizing.
self.emission_warning = str(e.args[0])
if emissions_series is not None:
self.update_attribute_value(object_name_path, number, 'emissions_factor_series_lb_CO2_per_kwh',
emissions_series)
self.update_attribute_value(object_name_path, number, 'emissions_region',
emissions_region)
else:
self.validate_8760(electric_tariff['emissions_factor_series_lb_CO2_per_kwh'],
"ElectricTariff",
'emissions_factor_series_lb_CO2_per_kwh',
self.input_dict['Scenario']['time_steps_per_hour'])
if electric_tariff.get('urdb_response') is not None:
self.validate_urdb_response()
elif electric_tariff.get('urdb_label','') != '':
rate = Rate(rate=electric_tariff.get('urdb_label'))
if rate.urdb_dict is None:
self.urdb_errors.append(
"Unable to download {} from URDB. Please check the input value for 'urdb_label'."
.format(electric_tariff.get('urdb_label'))
)
else:
self.update_attribute_value(object_name_path, number, 'urdb_response', rate.urdb_dict)
electric_tariff['urdb_response'] = rate.urdb_dict
self.validate_urdb_response()
elif electric_tariff.get('urdb_utility_name','') != '' and electric_tariff.get('urdb_rate_name','') != '':
rate = Rate(util=electric_tariff.get('urdb_utility_name'), rate=electric_tariff.get('urdb_rate_name'))
if rate.urdb_dict is None:
self.urdb_errors.append(
"Unable to download {} from URDB. Please check the input values for 'urdb_utility_name' and 'urdb_rate_name'."
.format(electric_tariff.get('urdb_rate_name'))
)
else:
self.update_attribute_value(object_name_path, number, 'urdb_response', rate.urdb_dict)
electric_tariff['urdb_response'] = rate.urdb_dict
self.validate_urdb_response()
if electric_tariff['add_blended_rates_to_urdb_rate']:
monthly_energy = electric_tariff.get('blended_monthly_rates_us_dollars_per_kwh', True)
monthly_demand = electric_tariff.get('blended_monthly_demand_charges_us_dollars_per_kw', True)
urdb_rate = electric_tariff.get('urdb_response', True)
if monthly_demand==True or monthly_energy==True or urdb_rate==True:
missing_keys = []
if monthly_demand==True:
missing_keys.append('blended_monthly_demand_charges_us_dollars_per_kw')
if monthly_energy==True:
missing_keys.append('blended_monthly_rates_us_dollars_per_kwh')
if urdb_rate==True:
missing_keys.append("urdb_response OR urdb_label OR urdb_utility_name and urdb_rate_name")
self.input_data_errors.append('add_blended_rates_to_urdb_rate is set to "true" yet missing valid entries for the following inputs: {}'.format(', '.join(missing_keys)))
for blended in ['blended_monthly_demand_charges_us_dollars_per_kw','blended_monthly_rates_us_dollars_per_kwh']:
if electric_tariff.get(blended) is not None:
if type(electric_tariff.get(blended)) is not list:
self.input_data_errors.append('{} needs to be an array that contains 12 valid numbers.'.format(blended) )
elif len(electric_tariff.get(blended)) != 12:
self.input_data_errors.append('{} array needs to contain 12 valid numbers.'.format(blended) )
if electric_tariff.get('tou_energy_rates_us_dollars_per_kwh') is not None:
self.validate_8760(electric_tariff.get('tou_energy_rates_us_dollars_per_kwh'), "ElectricTariff",
'tou_energy_rates_us_dollars_per_kwh',
self.input_dict['Scenario']['time_steps_per_hour'])
if len(electric_tariff.get('tou_energy_rates_us_dollars_per_kwh')) not in [8760, 35040]:
self.input_data_errors.append("length of tou_energy_rates_us_dollars_per_kwh must be 8760 or 35040")
if len(electric_tariff.get('tou_energy_rates_us_dollars_per_kwh')) == 35040 \
and self.input_dict['Scenario']['time_steps_per_hour'] != 4:
self.input_data_errors.append(("tou_energy_rates_us_dollars_per_kwh has 35040 time steps but "
"Scenario.time_steps_per_hour is not 4. These values must be aligned."))
if electric_tariff['add_tou_energy_rates_to_urdb_rate']:
tou_energy = electric_tariff.get('tou_energy_rates_us_dollars_per_kwh', True)
urdb_rate = electric_tariff.get('urdb_response', True)
if tou_energy is True or urdb_rate is True:
missing_keys = []
if tou_energy is True:
missing_keys.append('tou_energy_rates_us_dollars_per_kwh')
if urdb_rate is True:
missing_keys.append("urdb_response OR urdb_label OR urdb_utility_name and urdb_rate_name")
self.input_data_errors.append((
'add_blended_rates_to_urdb_rate is set to "true" yet missing valid entries for the '
'following inputs: {}').format(', '.join(missing_keys)))
for key_name in ['wholesale_rate_us_dollars_per_kwh',
'wholesale_rate_above_site_load_us_dollars_per_kwh']:
if type(electric_tariff.get(key_name)) == list:
ts_per_hour = self.input_dict['Scenario'].get('time_steps_per_hour') or \
self.nested_input_definitions['Scenario']['time_steps_per_hour']['default']
if len(electric_tariff.get(key_name)) == 1:
self.update_attribute_value(object_name_path, number, key_name,
electric_tariff.get(key_name) * 8760 * ts_per_hour)
else:
self.validate_8760(attr=electric_tariff.get(key_name), obj_name=object_name_path[-1],
attr_name=key_name,
time_steps_per_hour=ts_per_hour, number=number,
input_isDict=input_isDict)
if object_name_path[-1] == "LoadProfile":
for lp in ['critical_loads_kw', 'loads_kw']:
if real_values.get(lp) not in [None, []]:
self.validate_8760(real_values.get(lp), "LoadProfile", lp, self.input_dict['Scenario']['time_steps_per_hour'])
isnet = real_values.get(lp + '_is_net')
if isnet is None:
isnet = True
if not isnet:
# next line can fail if non-numeric values are passed in for (critical_)loads_kw
if self.isValid:
if min(real_values.get(lp)) < 0:
self.input_data_errors.append("{} must contain loads greater than or equal to zero.".format(lp))
if real_values.get('doe_reference_name') is not None:
real_values['year'] = 2017
# Use 2017 b/c it is most recent year that starts on a Sunday and all reference profiles start on
# Sunday
def check_min_max_restrictions(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
check all min/max constraints for input values defined in nested_input_definitions.
create error message if user provided inputs are outside of allowable bounds.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if real_values is not None:
for name, value in real_values.items():
if self.isAttribute(name):
data_validators = template_values[name]
if "list_of_float" in data_validators['type'] and isinstance(value, list):
if data_validators.get('min') is not None:
if any([v < data_validators['min'] for v in value]):
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'At least one value in %s (from %s) exceeds allowable min of %s' % (
name, self.object_name_string(object_name_path), data_validators['min']))
if input_isDict is False:
self.input_data_errors.append(
'At least one value in %s (from %s number %s) exceeds allowable min of %s' % (
name, self.object_name_string(object_name_path), number, data_validators['min']))
if data_validators.get('max') is not None:
if any([v > data_validators['max'] for v in value]):
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'At least one value in %s (from %s) exceeds allowable max of %s' % (
name, self.object_name_string(object_name_path), data_validators['max']))
if input_isDict is False:
self.input_data_errors.append(
'At least one value in %s (from %s number %s) exceeds allowable max of %s' % (
name, self.object_name_string(object_name_path), number, data_validators['max']))
continue
elif "list_of_str" in data_validators['type'] and isinstance(value, list):
data_type = list
elif isinstance(data_validators['type'], list) and 'float' in data_validators['type']:
data_type = float
elif isinstance(data_validators['type'], list) and 'str' in data_validators['type']:
data_type = str
else:
data_type = eval(data_validators['type'])
try: # to convert input value to restricted type
value = data_type(value)
except:
self.input_data_errors.append('Could not check min/max on %s (%s) in %s' % (
name, value, self.object_name_string(object_name_path)))
else:
if data_validators.get('min') is not None:
if value < data_validators['min']:
if input_isDict or input_isDict is None:
self.input_data_errors.append('%s value (%s) in %s exceeds allowable min %s' % (
name, value, self.object_name_string(object_name_path), data_validators['min']))
if input_isDict is False:
self.input_data_errors.append('%s value (%s) in %s (number %s) exceeds allowable min %s' % (
name, value, self.object_name_string(object_name_path), number, data_validators['min']))
if data_validators.get('max') is not None:
if value > data_validators['max']:
if input_isDict or input_isDict is None:
self.input_data_errors.append('%s value (%s) in %s exceeds allowable max %s' % (
name, value, self.object_name_string(object_name_path), data_validators['max']))
if input_isDict is False:
self.input_data_errors.append('%s value (%s) in %s (number %s) exceeds allowable max %s' % (
name, value, self.object_name_string(object_name_path), number, data_validators['max']))
if data_validators.get('restrict_to') is not None:
# Handle both cases: 1. val is of 'type' 2. List('type')
# Approach: Convert case 1 into case 2
value = [value] if not isinstance(value, list) else value
for val in value:
if val not in data_validators['restrict_to']:
if input_isDict == True or input_isDict == None:
self.input_data_errors.append(
'%s value (%s) in %s not in allowable inputs - %s' % (
name, value, self.object_name_string(object_name_path),
data_validators['restrict_to']))
if input_isDict == False:
self.input_data_errors.append(
                                            '%s value (%s) in %s (number %s) not in allowable inputs - %s' % (
                                                name, value, self.object_name_string(object_name_path), number,
                                                data_validators['restrict_to']))
def convert_data_types(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
try to convert input values to the expected python data type, create error message if conversion fails.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if real_values is not None:
for name, value in real_values.items():
if self.isAttribute(name):
make_array = False
attribute_type = template_values[name]['type'] # attribute_type's include list_of_float
if isinstance(attribute_type, list) and \
all([x in attribute_type for x in ['float', 'list_of_float']]):
if isinstance(value, list):
try:
series = pd.Series(value)
if series.isnull().values.any():
raise NotImplementedError
new_value = list_of_float(value)
except ValueError:
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'Could not convert %s (%s) in %s to list of floats' % (name, value,
self.object_name_string(object_name_path))
)
if input_isDict is False:
self.input_data_errors.append(
'Could not convert %s (%s) in %s (number %s) to list of floats' % (name, value,
self.object_name_string(object_name_path), number)
)
continue # both continue statements should be in a finally clause, ...
except NotImplementedError:
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'%s in %s contains at least one NaN value.' % (name,
self.object_name_string(object_name_path))
)
if input_isDict is False:
self.input_data_errors.append(
'%s in %s (number %s) contains at least one NaN value.' % (name,
self.object_name_string(object_name_path), number)
)
continue # both continue statements should be in a finally clause, ...
else:
self.update_attribute_value(object_name_path, number, name, new_value)
continue # ... but python 2.7 does not support continue in finally clauses
else:
attribute_type = 'float'
make_array = True
if isinstance(attribute_type, list) and \
all([x in attribute_type for x in ['str', 'list_of_str']]):
if isinstance(value, list):
try:
series = pd.Series(value)
if series.isnull().values.any():
raise NotImplementedError
new_value = list_of_str(value)
except ValueError:
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'Could not convert %s (%s) in %s to list of strings' % (name, value,
self.object_name_string(
object_name_path))
)
if input_isDict is False:
self.input_data_errors.append(
'Could not convert %s (%s) in %s (number %s) to list of strings' % (
name, value,
self.object_name_string(object_name_path), number)
)
continue # both continue statements should be in a finally clause, ...
except NotImplementedError:
if input_isDict or input_isDict is None:
self.input_data_errors.append(
'%s in %s contains at least one NaN value.' % (name,
self.object_name_string(
object_name_path))
)
if input_isDict is False:
self.input_data_errors.append(
'%s in %s (number %s) contains at least one NaN value.' % (name,
self.object_name_string(
object_name_path),
number)
)
continue # both continue statements should be in a finally clause, ...
else:
self.update_attribute_value(object_name_path, number, name, new_value)
continue # ... but python 2.7 does not support continue in finally clauses
else:
attribute_type = 'str'
make_array = True
attribute_type = eval(attribute_type) # convert string to python type
try: # to convert input value to type defined in nested_input_definitions
new_value = attribute_type(value)
except: # if fails for any reason record that the conversion failed
if input_isDict or input_isDict is None:
self.input_data_errors.append('Could not convert %s (%s) in %s to %s' % (name, value,
self.object_name_string(object_name_path), str(attribute_type).split(' ')[1]))
if input_isDict is False:
self.input_data_errors.append('Could not convert %s (%s) in %s (number %s) to %s' % (name, value,
self.object_name_string(object_name_path), number , str(attribute_type).split(' ')[1]))
else:
if not isinstance(new_value, bool):
if make_array:
new_value = [new_value]
self.update_attribute_value(object_name_path, number, name, new_value)
else:
if value not in [True, False, 1, 0]:
if input_isDict or input_isDict is None:
self.input_data_errors.append('Could not convert %s (%s) in %s to %s' % (
name, value, self.object_name_string(object_name_path),
str(attribute_type).split(' ')[1]))
if input_isDict is False:
self.input_data_errors.append('Could not convert %s (%s) in %s (number %s) to %s' % (
name, value, self.object_name_string(object_name_path), number,
str(attribute_type).split(' ')[1]))
def fillin_defaults(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
fills in default values for inputs that user does not provide.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
if real_values is None:
real_values = {}
self.update_attribute_value(object_name_path[:-1], number, object_name_path[-1], real_values)
for template_key, template_value in template_values.items():
if self.isAttribute(template_key):
default = template_value.get('default')
if default is not None and real_values.get(template_key) is None:
if isinstance(default, str): # special case for PV.tilt.default = "Site latitude"
if " " in default:
d = self.input_dict['Scenario']
for key in default.split(' '):
d = d.get(key)
default = d
if isinstance(template_value.get('type'), list) and "list_of_float" in template_value.get('type'):
# then input can be float or list_of_float, but for database we have to use only one type
default = [default]
self.update_attribute_value(object_name_path, number, template_key, default)
if input_isDict or input_isDict is None:
self.defaults_inserted.append([template_key, object_name_path])
if input_isDict is False:
object_name_path[-1] = object_name_path[-1] + ' (number {})'.format(number)
self.defaults_inserted.append([template_key, object_name_path])
if self.isSingularKey(template_key):
if template_key not in real_values.keys():
self.update_attribute_value(object_name_path, number, template_key, {})
if input_isDict or input_isDict is None:
self.defaults_inserted.append([template_key, object_name_path])
if input_isDict is False:
object_name_path[-1] = object_name_path[-1] + ' (number {})'.format(number)
self.defaults_inserted.append([template_key, object_name_path])
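    # Illustrative sketch (based on the "Site latitude" special case above): a default given as a
    # space-separated string is resolved by walking self.input_dict['Scenario'] key by key, so an assumed
    # PV tilt default of "Site latitude" would inherit whatever latitude the user supplied for the Site.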
def check_required_attributes(self, object_name_path, template_values=None, real_values=None, number=1, input_isDict=None):
"""
comparison_function for recursively_check_input_dict.
confirm that required inputs were provided by user. If not, create message to provide to user.
:param object_name_path: list of str, location of an object in self.input_dict being validated,
eg. ["Scenario", "Site", "PV"]
:param template_values: reference dictionary for checking real_values, for example
{'latitude':{'type':'float',...}...}, which comes from nested_input_definitions
:param real_values: dict, the attributes corresponding to the object at object_name_path within the
input_dict to check and/or modify. For example, with a object_name_path of ["Scenario", "Site", "PV"]
the real_values would look like: {'latitude': 39.345678, 'longitude': -90.3, ... }
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
final_message = ''
        # conditional check for complex cases where replacements are available for attributes and there are dependent attributes (annual_kwh and doe_reference_name)
all_missing_attribute_sets = []
for key,value in template_values.items():
if self.isAttribute(key):
missing_attribute_sets = []
replacements = value.get('replacement_sets')
depends_on = value.get('depends_on') or []
if replacements is not None:
current_set = [key] + depends_on
if list(set(current_set)-set(real_values.keys())) != []:
for replace in replacements:
missing = list(set(replace)-set(real_values.keys()))
if missing == []:
missing_attribute_sets = []
break
else:
replace = sorted(replace)
if replace not in missing_attribute_sets:
missing_attribute_sets.append(replace)
else:
if real_values.get(key) is not None:
missing = []
for dependent_key in depends_on:
if real_values.get(dependent_key) is None:
missing.append(dependent_key)
if missing !=[]:
missing_attribute_sets.append(missing)
if len(missing_attribute_sets) > 0:
missing_attribute_sets = sorted(missing_attribute_sets)
message = '(' + ' OR '.join([' and '.join(missing_set) for missing_set in missing_attribute_sets]) + ')'
if message not in all_missing_attribute_sets:
all_missing_attribute_sets.append(message)
if len(all_missing_attribute_sets) > 0:
final_message = " AND ".join(all_missing_attribute_sets)
# check simple required attributes
missing = []
for template_key, template_value in template_values.items():
if self.isAttribute(template_key):
if template_value.get('required') == True:
if real_values.get(template_key) is None:
missing.append(template_key)
if len(missing) > 0:
message = ' and '.join(missing)
if final_message != '':
final_message += ' and ' + message
else:
final_message = message
if final_message != '':
if input_isDict or input_isDict is None:
self.input_data_errors.append('Missing Required for %s: %s' % (self.object_name_string(object_name_path), final_message))
if input_isDict is False:
self.input_data_errors.append('Missing Required for %s (number %s): %s' % (self.object_name_string(object_name_path), number, final_message))
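    # Illustrative sketch (assumed definitions): if an object requires either doe_reference_name or loads_kw
    # and the user supplies neither, the appended error takes roughly the form
    # "Missing Required for Scenario>Site>LoadProfile: (doe_reference_name OR loads_kw)".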
def validate_urdb_response(self, number=1):
urdb_response = self.input_dict['Scenario']['Site']['ElectricTariff'].get('urdb_response')
if type(urdb_response) == dict:
if self.input_dict['Scenario']['Site']['ElectricTariff'].get('urdb_utility_name') is None:
self.update_attribute_value(["Scenario", "Site", "ElectricTariff"], number, 'urdb_utility_name', urdb_response.get('utility'))
if self.input_dict['Scenario']['Site']['ElectricTariff'].get('urdb_rate_name') is None:
self.update_attribute_value(["Scenario", "Site", "ElectricTariff"], number, 'urdb_rate_name', urdb_response.get('name'))
try:
rate_checker = URDB_RateValidator(**urdb_response)
if rate_checker.errors:
self.urdb_errors += rate_checker.errors
except:
self.urdb_errors.append('Error parsing urdb rate in %s ' % (["Scenario", "Site", "ElectricTariff"]))
def validate_8760(self, attr, obj_name, attr_name, time_steps_per_hour, number=1, input_isDict=None):
"""
This method is for the case that a user uploads a time-series that has either 30 minute or 15 minute
resolution, but wants to run an hourly REopt model. If time_steps_per_hour = 1 then we downsample the user's
time-series to an 8760. If time_steps_per_hour != 1 then we do nothing since the resolution of time-series
relative to time_steps_per_hour is handled within each time-series' implementation.
:param attr: list of floats
:param obj_name: str, parent object name from nested_inputs (eg. "LoadProfile")
:param attr_name: str, name of time-series (eg. "critical_loads_kw")
:param time_steps_per_hour: int, [1, 2, 4]
:param number: int, order of the dict in the list
:param input_isDict: bool, indicates if the object input came in as a dict or list
:return: None
"""
n = len(attr)
length_list = [8760, 17520, 35040]
if time_steps_per_hour != 1:
if n not in length_list:
self.input_data_errors.append((
"Invalid length for {}. Samples must be hourly (8,760 samples), 30 minute (17,520 samples), "
"or 15 minute (35,040 samples)").format(attr_name)
)
elif attr_name in ["wholesale_rate_us_dollars_per_kwh", "wholesale_rate_above_site_load_us_dollars_per_kwh"]:
if time_steps_per_hour != n/8760:
if time_steps_per_hour == 2 and n/8760 == 4:
if input_isDict or input_isDict is None:
self.resampled_inputs.append(
["Downsampled {} from 15 minute resolution to 30 minute resolution to match time_steps_per_hour via average.".format(attr_name), [obj_name]])
if input_isDict is False:
self.resampled_inputs.append(
["Downsampled {} from 15 minute resolution to 30 minute resolution to match time_steps_per_hour via average.".format(attr_name), [obj_name + ' (number %s)'.format(number)]])
index =
|
pd.date_range('1/1/2000', periods=n, freq='15T')
|
pandas.date_range
|
import datetime as dt
from itertools import product
import sys
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from arch.data import sp500
from arch.tests.univariate.test_variance_forecasting import preserved_state
from arch.univariate import (
APARCH,
ARX,
EGARCH,
FIGARCH,
GARCH,
HARCH,
HARX,
ConstantMean,
ConstantVariance,
EWMAVariance,
MIDASHyperbolic,
RiskMetrics2006,
ZeroMean,
arch_model,
)
from arch.univariate.mean import _ar_forecast, _ar_to_impulse
SP500 = 100 * sp500.load()["Adj Close"].pct_change().dropna()
MEAN_MODELS = [
HARX(SP500, lags=[1, 5]),
ARX(SP500, lags=2),
ConstantMean(SP500),
ZeroMean(SP500),
]
VOLATILITIES = [
ConstantVariance(),
GARCH(),
FIGARCH(),
EWMAVariance(lam=0.94),
MIDASHyperbolic(),
HARCH(lags=[1, 5, 22]),
RiskMetrics2006(),
APARCH(),
EGARCH(),
]
MODEL_SPECS = list(product(MEAN_MODELS, VOLATILITIES))
IDS = [
f"{str(mean).split('(')[0]}-{str(vol).split('(')[0]}" for mean, vol in MODEL_SPECS
]
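# Descriptive note (sketch): MODEL_SPECS pairs every mean model with every volatility process, and IDS labels
# each pair as e.g. "HARX-GARCH" so the model_spec fixture below yields one named parametrization per combination.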
@pytest.fixture(params=MODEL_SPECS, ids=IDS)
def model_spec(request):
mean, vol = request.param
mean.volatility = vol
return mean
class TestForecasting(object):
@classmethod
def setup_class(cls):
cls.rng = RandomState(12345)
am = arch_model(None, mean="Constant", vol="Constant")
data = am.simulate(np.array([0.0, 10.0]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])
cls.zero_mean = data.data
am = arch_model(None, mean="AR", vol="Constant", lags=[1])
data = am.simulate(np.array([1.0, 0.9, 2]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])
cls.ar1 = data.data
am = arch_model(None, mean="AR", vol="Constant", lags=[1, 2])
data = am.simulate(np.array([1.0, 1.9, -0.95, 2]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])
cls.ar2 = data.data
am = arch_model(None, mean="HAR", vol="Constant", lags=[1, 5, 22])
data = am.simulate(np.array([1.0, 0.4, 0.3, 0.2, 2]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])
cls.har3 = data.data
am = arch_model(None, mean="AR", vol="GARCH", lags=[1, 2], p=1, q=1)
data = am.simulate(np.array([1.0, 1.9, -0.95, 0.05, 0.1, 0.88]), 1000)
data.index = pd.date_range("2000-01-01", periods=data.index.shape[0])
cls.ar2_garch = data.data
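        # Descriptive note (assumed parameter ordering): arch's simulate expects mean parameters followed by
        # volatility parameters, so e.g. np.array([1.0, 0.9, 2]) above reads as (constant, AR(1) coefficient,
        # error variance) and np.array([1.0, 1.9, -0.95, 0.05, 0.1, 0.88]) as (constant, two AR coefficients,
        # omega, alpha, beta) for the AR-GARCH series.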
def test_ar_forecasting(self):
params = np.array([0.9])
forecasts = _ar_forecast(
self.zero_mean, 5, 0, 0.0, params, np.empty(0), np.empty(0)
)
expected = np.zeros((1000, 5))
expected[:, 0] = 0.9 * self.zero_mean.values
for i in range(1, 5):
expected[:, i] = 0.9 * expected[:, i - 1]
assert_allclose(forecasts, expected)
params = np.array([0.5, -0.3, 0.2])
forecasts = _ar_forecast(
self.zero_mean, 5, 2, 0.0, params, np.empty(0), np.empty(0)
)
expected = np.zeros((998, 8))
expected[:, 0] = self.zero_mean.iloc[0:-2]
expected[:, 1] = self.zero_mean.iloc[1:-1]
expected[:, 2] = self.zero_mean.iloc[2:]
for i in range(3, 8):
expected[:, i] = (
0.5 * expected[:, i - 1]
- 0.3 * expected[:, i - 2]
+ 0.2 * expected[:, i - 3]
)
fill = np.empty((2, 5))
fill.fill(np.nan)
expected = np.concatenate((fill, expected[:, 3:]))
assert_allclose(forecasts, expected[2:])
def test_ar_to_impulse(self):
arp = np.array([0.9])
impulses = _ar_to_impulse(20, arp)
expected = 0.9 ** np.arange(20)
assert_allclose(impulses, expected)
arp = np.array([0.5, 0.3])
impulses = _ar_to_impulse(20, arp)
comp = np.array([arp, [1, 0]])
a = comp.copy()
expected = np.ones(20)
for i in range(1, 20):
expected[i] = a[0, 0]
a = a.dot(comp)
assert_allclose(impulses, expected)
arp = np.array([1.5, 0.0, -0.7])
impulses = _ar_to_impulse(20, arp)
comp = np.array([arp, [1, 0, 0], [0, 1, 0]])
a = comp.copy()
expected = np.ones(20)
for i in range(1, 20):
expected[i] = a[0, 0]
a = a.dot(comp)
assert_allclose(impulses, expected)
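    # Worked note (sketch, not part of the original test): the impulse responses checked above follow the AR
    # recursion psi_0 = 1, psi_i = a_1*psi_{i-1} + ... + a_p*psi_{i-p}, which the repeated companion-matrix
    # products reproduce; e.g. for arp = [0.5, 0.3] the first terms are 1, 0.5, 0.55 (= 0.5*0.5 + 0.3*1),
    # 0.425 (= 0.5*0.55 + 0.3*0.5), ...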
def test_zero_mean_forecast(self):
am = arch_model(self.zero_mean, mean="Zero", vol="Constant")
res = am.fit()
fcast = res.forecast(res.params, horizon=3, reindex=False)
alt_fcast = res.forecast(horizon=3, reindex=False)
assert_frame_equal(fcast.mean, alt_fcast.mean)
assert_frame_equal(fcast.variance, alt_fcast.variance)
assert_frame_equal(fcast.residual_variance, alt_fcast.residual_variance)
fcast_reindex = res.forecast(res.params, horizon=3, reindex=True)
assert_frame_equal(fcast.mean, fcast_reindex.mean.iloc[-1:])
assert_frame_equal(fcast.variance, fcast_reindex.variance.iloc[-1:])
assert_frame_equal(
fcast.residual_variance, fcast_reindex.residual_variance.iloc[-1:]
)
assert fcast_reindex.mean.shape[0] == self.zero_mean.shape[0]
assert np.all(np.asarray(np.isnan(fcast.mean[:-1])))
assert np.all(np.asarray(np.isnan(fcast.variance[:-1])))
assert np.all(np.asarray(np.isnan(fcast.residual_variance[:-1])))
params = np.asarray(res.params)
assert np.all(0.0 == fcast.mean.iloc[-1])
assert_allclose(fcast.variance.iloc[-1], np.ones(3) * params[0])
assert_allclose(fcast.residual_variance.iloc[-1], np.ones(3) * params[0])
res = am.fit(last_obs=500)
params = np.asarray(res.params)
fcast = res.forecast(horizon=3, reindex=False)
assert fcast.mean.shape == (501, 3)
assert fcast.variance.shape == (501, 3)
assert fcast.residual_variance.shape == (501, 3)
assert np.all(np.asarray(np.isfinite(fcast.mean)))
assert np.all(np.asarray(np.isfinite(fcast.variance)))
assert np.all(np.asarray(np.isfinite(fcast.residual_variance)))
assert np.all(np.asarray(0.0 == fcast.mean))
assert_allclose(fcast.variance, np.ones((501, 3)) * params[0])
assert_allclose(fcast.residual_variance, np.ones((501, 3)) * params[0])
with pytest.raises(ValueError, match="horizon must be an integer >= 1"):
res.forecast(horizon=0, reindex=False)
def test_frame_labels(self):
am = arch_model(self.zero_mean, mean="Zero", vol="Constant")
res = am.fit()
fcast = res.forecast(horizon=12, reindex=False)
assert fcast.mean.shape[1] == 12
assert fcast.variance.shape[1] == 12
assert fcast.residual_variance.shape[1] == 12
for i in range(1, 13):
if i < 10:
col = "h.0" + str(i)
else:
col = "h." + str(i)
assert col in fcast.mean.columns
assert col in fcast.variance.columns
assert col in fcast.residual_variance.columns
def test_ar1_forecast(self):
am = arch_model(self.ar1, mean="AR", vol="Constant", lags=[1])
res = am.fit()
fcast = res.forecast(horizon=5, start=0, reindex=False)
params = np.asarray(res.params)
direct = self.ar1.values
for i in range(5):
direct = params[0] + params[1] * direct
assert_allclose(direct, fcast.mean.iloc[:, i])
scale = np.sum((params[1] ** np.arange(i + 1)) ** 2.0)
var = fcast.variance.iloc[1:, i]
assert_allclose(var, scale * params[2] * np.ones_like(var))
assert np.all(np.asarray(fcast.residual_variance[1:] == params[2]))
fcast = res.forecast(horizon=5, reindex=False)
params = np.asarray(res.params)
assert np.all(np.asarray(np.isnan(fcast.mean[:-1])))
assert np.all(np.asarray(np.isnan(fcast.variance[:-1])))
assert np.all(np.asarray(np.isnan(fcast.residual_variance[:-1])))
assert np.all(np.asarray(fcast.residual_variance.iloc[-1] == params[-1]))
means = np.zeros(5)
means[0] = params[0] + params[1] * self.ar1.iloc[-1]
for i in range(1, 5):
means[i] = params[0] + params[1] * means[i - 1]
assert_allclose(means, fcast.mean.iloc[-1].values)
def test_constant_mean_forecast(self):
am = arch_model(self.zero_mean, mean="Constant", vol="Constant")
res = am.fit()
fcast = res.forecast(horizon=5, reindex=False)
assert np.all(np.asarray(np.isnan(fcast.mean[:-1])))
assert np.all(np.asarray(np.isnan(fcast.variance[:-1])))
assert np.all(np.asarray(np.isnan(fcast.residual_variance[:-1])))
params = np.asarray(res.params)
assert_allclose(params[0] * np.ones(5), fcast.mean.iloc[-1])
assert_allclose(params[1] * np.ones(5), fcast.variance.iloc[-1])
assert_allclose(params[1] * np.ones(5), fcast.residual_variance.iloc[-1])
assert fcast.mean.shape == (1, 5)
assert fcast.variance.shape == (1, 5)
assert fcast.residual_variance.shape == (1, 5)
def test_ar2_forecast(self):
am = arch_model(self.ar2, mean="AR", vol="Constant", lags=[1, 2])
res = am.fit()
fcast = res.forecast(horizon=5, reindex=False)
params = np.asarray(res.params)
expected = np.zeros(7)
expected[:2] = self.ar2.iloc[-2:]
for i in range(2, 7):
expected[i] = (
params[0] + params[1] * expected[i - 1] + params[2] * expected[i - 2]
)
expected = expected[2:]
assert np.all(np.asarray(np.isnan(fcast.mean.iloc[:-1])))
assert_allclose(fcast.mean.iloc[-1], expected)
expected = np.zeros(5)
comp = np.array([res.params.iloc[1:3], [1, 0]])
a = np.eye(2)
for i in range(5):
expected[i] = a[0, 0]
a = a.dot(comp)
expected = res.params.iloc[-1] * np.cumsum(expected ** 2)
assert_allclose(fcast.variance.iloc[-1], expected)
expected = np.empty((1000, 5))
expected[:2] = np.nan
expected[2:] = res.params.iloc[-1]
fcast = res.forecast(horizon=5, start=1, reindex=False)
expected = np.zeros((999, 7))
expected[:, 0] = self.ar2.iloc[0:-1]
expected[:, 1] = self.ar2.iloc[1:]
for i in range(2, 7):
expected[:, i] = (
params[0]
+ params[1] * expected[:, i - 1]
+ params[2] * expected[:, i - 2]
)
fill = np.empty((1, 5))
fill.fill(np.nan)
expected = np.concatenate((fill, expected[:, 2:]))
assert_allclose(np.asarray(fcast.mean), expected[1:])
expected = np.empty((1000, 5))
expected[:2] = np.nan
expected[2:] = res.params.iloc[-1]
assert_allclose(np.asarray(fcast.residual_variance), expected[1:])
with pytest.raises(ValueError):
res.forecast(horizon=5, start=0, reindex=False)
def test_har_forecast(self):
am = arch_model(self.har3, mean="HAR", vol="Constant", lags=[1, 5, 22])
res = am.fit()
fcast_1 = res.forecast(horizon=1, reindex=False)
fcast_5 = res.forecast(horizon=5, reindex=False)
assert_allclose(fcast_1.mean, fcast_5.mean.iloc[:, :1])
with pytest.raises(ValueError):
res.forecast(horizon=1, start=0, reindex=False)
with pytest.raises(ValueError):
res.forecast(horizon=1, start=20, reindex=False)
fcast_66 = res.forecast(horizon=66, start=21, reindex=False)
expected = np.empty((1000, 66 + 22))
expected.fill(np.nan)
for i in range(22):
if i < 21:
expected[21:, i] = self.har3.iloc[i : (-21 + i)]
else:
expected[21:, i] = self.har3.iloc[i:]
params = np.asarray(res.params)
const = params[0]
arp = np.zeros(22)
arp[0] = params[1]
arp[:5] += params[2] / 5
arp[:22] += params[3] / 22
arp_rev = arp[::-1]
for i in range(22, 88):
expected[:, i] = const + expected[:, i - 22 : i].dot(arp_rev)
expected = expected[:, 22:]
assert_allclose(fcast_66.mean, expected[21:])
expected[:22] = np.nan
expected[22:] = res.params.iloc[-1]
assert_allclose(fcast_66.residual_variance, expected[21:])
impulse = _ar_to_impulse(66, arp)
expected = expected * np.cumsum(impulse ** 2)
assert_allclose(fcast_66.variance, expected[21:])
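# Note: the HAR(1, 5, 22) mean equation is rewritten above as an AR(22) whose weights
# spread the 5-day and 22-day averages evenly (params[2]/5 and params[3]/22) before
# the forecast recursion and the impulse responses are applied.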
def test_forecast_start_alternatives(self):
am = arch_model(self.har3, mean="HAR", vol="Constant", lags=[1, 5, 22])
res = am.fit()
date = self.har3.index[21]
fcast_1 = res.forecast(start=21, reindex=False)
fcast_2 = res.forecast(start=date, reindex=False)
for field in ("mean", "variance", "residual_variance"):
assert_frame_equal(getattr(fcast_1, field), getattr(fcast_2, field))
pydt = dt.datetime(date.year, date.month, date.day)
fcast_2 = res.forecast(start=pydt, reindex=False)
for field in ("mean", "variance", "residual_variance"):
assert_frame_equal(getattr(fcast_1, field), getattr(fcast_2, field))
strdt = pydt.strftime("%Y-%m-%d")
fcast_2 = res.forecast(start=strdt, reindex=False)
for field in ("mean", "variance", "residual_variance"):
assert_frame_equal(getattr(fcast_1, field), getattr(fcast_2, field))
npydt = np.datetime64(pydt).astype("M8[ns]")
fcast_2 = res.forecast(start=npydt, reindex=False)
for field in ("mean", "variance", "residual_variance"):
assert_frame_equal(getattr(fcast_1, field), getattr(fcast_2, field))
with pytest.raises(ValueError):
date = self.har3.index[20]
res.forecast(start=date, reindex=False)
with pytest.raises(ValueError):
date = self.har3.index[0]
res.forecast(start=date, reindex=False)
fcast_0 = res.forecast(reindex=False)
fcast_1 = res.forecast(start=999, reindex=False)
fcast_2 = res.forecast(start=self.har3.index[999], reindex=False)
for field in ("mean", "variance", "residual_variance"):
assert_frame_equal(getattr(fcast_0, field), getattr(fcast_1, field))
assert_frame_equal(getattr(fcast_0, field), getattr(fcast_2, field))
def test_fit_options(self):
am = arch_model(self.zero_mean, mean="Constant", vol="Constant")
res = am.fit(first_obs=100)
res.forecast(reindex=False)
res = am.fit(last_obs=900)
res.forecast(reindex=False)
res = am.fit(first_obs=100, last_obs=900)
res.forecast(reindex=False)
res.forecast(start=100, reindex=False)
res.forecast(start=200, reindex=False)
am = arch_model(self.zero_mean, mean="Constant", vol="Constant", hold_back=20)
res = am.fit(first_obs=100)
res.forecast(reindex=False)
def test_ar1_forecast_simulation_one(self):
# Bug found when simulation=1
am = arch_model(self.ar1, mean="AR", vol="GARCH", lags=[1])
res = am.fit(disp="off")
forecast = res.forecast(
horizon=10, method="simulation", reindex=False, simulations=1
)
assert forecast.simulations.variances.shape == (1, 1, 10)
def test_ar1_forecast_simulation(self):
am = arch_model(self.ar1, mean="AR", vol="GARCH", lags=[1])
res = am.fit(disp="off")
with preserved_state(self.rng):
forecast = res.forecast(
horizon=5, start=0, method="simulation", reindex=False
)
forecast_reindex = res.forecast(
horizon=5, start=10, method="simulation", reindex=True
)
assert forecast.simulations.index.shape[0] == self.ar1.shape[0]
assert (
forecast.simulations.index.shape[0] == forecast.simulations.values.shape[0]
)
with preserved_state(self.rng):
forecast_reindex = res.forecast(
horizon=5, start=10, method="simulation", reindex=True
)
assert forecast_reindex.mean.shape[0] == self.ar1.shape[0]
assert forecast_reindex.simulations.index.shape[0] == self.ar1.shape[0]
y = np.asarray(self.ar1)
index = self.ar1.index
t = y.shape[0]
params = np.array(res.params)
resids = np.asarray(y[1:] - params[0] - params[1] * y[:-1])
vol = am.volatility
params = np.array(res.params)
backcast = vol.backcast(resids)
var_bounds = vol.variance_bounds(resids)
rng = am.distribution.simulate([])
vfcast = vol.forecast(
params[2:],
resids,
backcast,
var_bounds,
start=0,
method="simulation",
rng=rng,
horizon=5,
)
const, ar = params[0], params[1]
means = np.zeros((t, 5))
means[:, 0] = const + ar * y
for i in range(1, 5):
means[:, i] = const + ar * means[:, i - 1]
means = pd.DataFrame(
means, index=index, columns=["h.{0}".format(j) for j in range(1, 6)]
)
assert_frame_equal(means, forecast.mean)
var = np.concatenate([[[np.nan] * 5], vfcast.forecasts])
rv = pd.DataFrame(
var, index=index, columns=["h.{0}".format(j) for j in range(1, 6)]
)
assert_frame_equal(rv, forecast.residual_variance)
lrv = rv.copy()
for i in range(5):
weights = (ar ** np.arange(i + 1)) ** 2
weights = weights[:, None]
lrv.iloc[:, i : i + 1] = rv.values[:, : i + 1].dot(weights[::-1])
assert_frame_equal(lrv, forecast.variance)
def test_ar1_forecast_bootstrap(self):
am = arch_model(self.ar1, mean="AR", vol="GARCH", lags=[1])
res = am.fit(disp="off")
rs = np.random.RandomState(98765432)
state = rs.get_state()
forecast = res.forecast(
horizon=5, start=900, method="bootstrap", random_state=rs, reindex=False
)
rs.set_state(state)
repeat = res.forecast(
horizon=5, start=900, method="bootstrap", random_state=rs, reindex=False
)
assert_frame_equal(forecast.mean, repeat.mean)
|
assert_frame_equal(forecast.variance, repeat.variance)
|
pandas.testing.assert_frame_equal
|
from pm4py.algo.discovery.dfg.adapters.pandas import df_statistics
from pm4py.algo.filtering.pandas.paths import paths_filter
from pm4pymdl.algo.mvp.utils import succint_mdl_to_exploded_mdl
from pm4py.util.constants import PARAMETER_CONSTANT_CASEID_KEY, PARAMETER_CONSTANT_ATTRIBUTE_KEY
import pandas as pd
def filter_paths(df, paths, parameters=None):
"""
Apply a filter on traces containing / not containing a path
Parameters
----------
df
Dataframe
paths
Paths to filter on
parameters
Possible parameters of the algorithm, including:
case_id_glue -> Case ID column in the dataframe
attribute_key -> Attribute we want to filter
positive -> Specifies if the filter should be applied including traces (positive=True)
or excluding traces (positive=False)
Returns
----------
df
Filtered dataframe
"""
try:
if df.type == "succint":
df = succint_mdl_to_exploded_mdl.apply(df)
except:
pass
if parameters is None:
parameters = {}
paths = [path[0] + "," + path[1] for path in paths]
case_id_glue = parameters[
PARAMETER_CONSTANT_CASEID_KEY] if PARAMETER_CONSTANT_CASEID_KEY in parameters else CASE_CONCEPT_NAME
attribute_key = parameters[
PARAMETER_CONSTANT_ATTRIBUTE_KEY] if PARAMETER_CONSTANT_ATTRIBUTE_KEY in parameters else DEFAULT_NAME_KEY
df = df.sort_values([case_id_glue, "event_timestamp"])
positive = parameters["positive"] if "positive" in parameters else True
filt_df = df[[case_id_glue, attribute_key, "event_id"]]
filt_dif_shifted = filt_df.shift(-1)
filt_dif_shifted.columns = [str(col) + '_2' for col in filt_dif_shifted.columns]
stacked_df = pd.concat([filt_df, filt_dif_shifted], axis=1)
stacked_df["@@path"] = stacked_df[attribute_key] + "," + stacked_df[attribute_key + "_2"]
stacked_df = stacked_df[stacked_df["@@path"].isin(paths)]
i1 = df.set_index("event_id").index
i2 = stacked_df.set_index("event_id").index
i3 = stacked_df.set_index("event_id_2").index
if positive:
return df[i1.isin(i2) | i1.isin(i3)]
else:
return df[~i1.isin(i2) & ~i1.isin(i3)]
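# A minimal usage sketch (not part of the original module). The "case"/"activity"
# column names are assumptions made only for this example; they are passed explicitly
# through the parameters dict, so the module-level default constants are not needed.
def _filter_paths_demo():
    demo = pd.DataFrame({
        "case": ["c1", "c1", "c2", "c2"],
        "activity": ["A", "B", "A", "C"],
        "event_timestamp": pd.to_datetime(
            ["2021-01-01", "2021-01-02", "2021-01-01", "2021-01-02"]),
        "event_id": [0, 1, 2, 3],
    })
    demo_params = {PARAMETER_CONSTANT_CASEID_KEY: "case",
                   PARAMETER_CONSTANT_ATTRIBUTE_KEY: "activity",
                   "positive": True}
    # Keeps only the events of case "c1", the only case containing the direct path A -> B.
    return filter_paths(demo, [("A", "B")], parameters=demo_params)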
def apply(df, min_freq=0):
if min_freq > 0:
persps = [x for x in df.columns if not x.startswith("event_")]
collation = []
for persp in persps:
red_df = df.dropna(subset=[persp])
prevlen = len(df)
while True:
dfg = df_statistics.get_dfg_graph(red_df, activity_key="event_activity", timestamp_key="event_timestamp", case_id_glue=persp)
dfg = [x for x in dfg if dfg[x] >= min_freq]
param = {}
param[PARAMETER_CONSTANT_CASEID_KEY] = persp
param[PARAMETER_CONSTANT_ATTRIBUTE_KEY] = "event_activity"
red_df = filter_paths(red_df, dfg, parameters=param)
thislen = len(red_df)
dfg = df_statistics.get_dfg_graph(red_df, activity_key="event_activity", timestamp_key="event_timestamp", case_id_glue=persp)
if len(dfg) == 0 or min(dfg.values()) >= min_freq or prevlen == thislen:
collation.append(red_df)
break
prevlen = thislen
return
|
pd.concat(collation)
|
pandas.concat
|
import sys, os, io, time, datetime, requests, warnings, configparser
import pandas as pd
import numpy as np
import pandas_datareader as pdr
from pandas.tseries.holiday import USFederalHolidayCalendar
import concurrent.futures
from tqdm import tqdm
cur_path = os.path.dirname(os.path.abspath(__file__))
for _ in range(2):
root_path = cur_path[0:cur_path.rfind('/', 0, len(cur_path))]
cur_path = root_path
sys.path.append(root_path + "/" + 'Source/DataBase/')
sys.path.append(root_path + "/" + 'Source/Utility/')
from Fetch_Data_Stock_US_StockList import getStocksList_US
from DB_API import queryStock, storeStock, queryStockList, storeStockList, queryStockPublishDay, storePublishDay
import fix_yahoo_finance as yf
def getSingleStock(symbol, from_date, till_date):
repeat_times = 1
message = ""
df = pd.DataFrame()
if len(symbol) == 0: return df, message
for _ in range(repeat_times):
try:
data = yf.download(symbol, start=from_date, end=till_date, interval='1wk')
#data = pdr.get_data_yahoo(symbol, start=from_date, end=till_date, interval='d')
data = data.rename(columns = {'Date':'date', 'Open':'open', 'High':'high', 'Low':'low', 'Close':'close', "Adj Close":'adj_close', 'Volume':'volume'})
data.index.name = 'date'
data = data.sort_index()
return data, ""
except Exception as e:
message = symbol + " fetch exception: " + str(e)
continue
return df, message
def judgeOpenDaysInRange(from_date, to_date):
cal = USFederalHolidayCalendar()
holidays = cal.holidays(from_date, to_date)
duedays =
|
pd.bdate_range(from_date, to_date)
|
pandas.bdate_range
|
from __future__ import division
import os
import time
import logging
import numpy as np
import pandas as pd
# Specify logging settings
logging.basicConfig(
format='%(levelname)s: %(name)s - %(message)s')
logging_level_dict = {0: logging.ERROR, 1: logging.INFO, 2: logging.DEBUG}
logger = logging.getLogger(__name__)
def validate_filter_input(user_input):
if user_input is None:
return None
if isinstance(user_input, str):
user_input = [user_input]
if not isinstance(user_input, (list, tuple, set, np.ndarray)):
raise TypeError('Input must be array-like, '
'got {}'.format(type(user_input)))
return user_input
def filter_dataframe(df, metadata_key, desired_values=None):
if not isinstance(df, pd.DataFrame):
raise TypeError('df must be a pandas.DataFrame, '
'got {}'.format(type(df)))
if desired_values and not isinstance(desired_values,
(list, tuple, set, np.ndarray)):
raise TypeError('desired_values must be array-like')
if desired_values is not None:
return df[df[metadata_key].isin(desired_values)]
else:
return df
@np.vectorize
def db_path_to_image_file(path, data_dir='/net/deco/deco_data'):
'''Function to convert paths stored in android db to image file paths
Parameters
----------
path : str, array-like
Path from android database.
data_dir : str, optional
Path to directory containing images (default is /net/deco/deco_data).
Returns
-------
image_files : str, array-like
Image file paths.
'''
date, basename = os.path.split(path)
image_id = basename.split('_')[0]
image_file = os.path.join(data_dir, date + 'Z', image_id+'.jpg')
# Check that image_file exists. If not, return nan (to be dropped later)
if not os.path.exists(image_file):
logger.debug('Image file {} doesn\'t exist'.format(image_file))
image_file = np.nan
return image_file
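# Illustration with a hypothetical path (the real android-db layout may differ):
# '2017.06.01/abc123_raw.png' splits into date '2017.06.01' and image id 'abc123',
# giving '<data_dir>/2017.06.01Z/abc123.jpg' if that file exists, otherwise NaN.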
@np.vectorize
def db_path_to_date(path, data_dir='/net/deco/deco_data'):
'''Function to extract date from paths stored in android db
Parameters
----------
path : str, array-like
Path from android database.
data_dir : str, optional
Path to directory containing images (default is /net/deco/deco_data).
Returns
-------
dates : str, array-like
Dates corresponding to path.
'''
date, basename = os.path.split(path)
return
|
pd.to_datetime(date, utc=True)
|
pandas.to_datetime
|
#Author : <NAME> (<EMAIL>)
from sklearn.base import clone
import numpy as np
import pandas as pd
from Utilils import *
import shap
delta=0.000000000000000000001
class RectifiedClassiferChain:
def __init__(self,basemodel,type=0, optimized=False, optimizedmethod='CrossEntropy'):
'''
:param basemodel: Basemodel to be used in ClassiferChain
'''
self.ClassifierList = []
self.model=basemodel
self.updatedlabelorder=[]
self.originallabelorder = []
self.optimized=optimized
self.optimizedmethod=optimizedmethod
self.X=None
self.Y=None
self.type = type
def trainRCC(self,X,Y):
'''
:param X: Pandas Data Frame Features
:param Y: Pandas Data Frame Labels
:return: List of fitted models (one per label)
'''
self.X = X
self.Y=Y
Categories = list(Y.columns.values)
for categories in Categories:
self.ClassifierList.append(clone(self.model))
self.originallabelorder.append(categories)
if(self.optimized==True):
Y=self.OptimizeLabelOrder(Y)
Categories = list(Y.columns.values)
else:
self.updatedlabelorder=self.originallabelorder
k = 0
for category in list(Categories): ###Updated if needed
#print("Category: " + category)
modelc = self.ClassifierList[k]
Xtrain = X
Ytrain = Y
Ytrain = Ytrain.dropna(subset=[category]) # , inplace=True)
droppedindex = Y[~Y.index.isin(Ytrain.index)].index.values
Xtrain = Xtrain.drop(droppedindex, axis=0)
Ytrain = Ytrain[category]
Ytrain = Ytrain.astype(int)
modelc.fit(Xtrain, Ytrain.values.ravel())
if (len(droppedindex) > 0):
prediction = modelc.predict(X.iloc[droppedindex])
i = 0
for value in prediction:
Y.iloc[droppedindex[i]][category] = value
i = i + 1
X = X.join(Y[category])
k = k + 1
return self.ClassifierList
def predictRCC(self, X):
'''
:param X:
:return:
'''
i = 0
Y = pd.DataFrame(index=X.index)
global TotalSumvalue
TotalSumvalue = [[0 for x in range(X.shape[0])] for y in range(X.shape[1])]
for classifer in self.ClassifierList:
if (self.type==3):
KErnalExplnanier = shap.TreeExplainer(classifer)
class_shap_values = KErnalExplnanier.shap_values(X)
if (i == 0):
TotalSumvalue = np.absolute(class_shap_values)
else:
TotalSumvalue = TotalSumvalue + np.absolute(class_shap_values[:, :-i])
prediction = classifer.predict(X)
YPredict = pd.DataFrame(prediction, columns=[self.updatedlabelorder[i]])
X = X.join(YPredict)
Y = Y.join(YPredict)
i = i + 1
if (self.optimized == True):
Y_rearranged = pd.DataFrame(index=Y.index)
for index, element in enumerate(self.originallabelorder):
Y_rearranged = Y_rearranged.join(Y[element])
Y=Y_rearranged
return Y
def predictProbRCC(self, X):
'''
:param X:
:return:
'''
i = 0
Y = pd.DataFrame(index=X.index)
for classifer in self.ClassifierList:
prediction = classifer.predict(X)
predprob=classifer.predict_proba(X)
predprobValue=(predprob[:,1]-predprob[:,0])
#print(predprobValue)
YPredict = pd.DataFrame(prediction, columns=[self.updatedlabelorder[i]])
Ypredprob=pd.DataFrame(predprobValue, columns=[self.updatedlabelorder[i]])
X = X.join(YPredict)
Y = Y.join(Ypredprob)
i = i + 1
if (self.optimized == True):
Y_rearranged = pd.DataFrame(index=Y.index)
for index, element in enumerate(self.originallabelorder):
Y_rearranged = Y_rearranged.join(Y[element])
Y=Y_rearranged
return Y
def ModifiedHammingAccuracyscore(self, y_pred, y_true):
'''
:param y_pred:
:param y_true:
:return:
'''
scorelist=[]
i = 0
for value in ((y_true.values)):
match = 0.0
total = 0.0
predictedlabel = y_pred.iloc[i]
j = 0
for singlelabel in value:
if (singlelabel == predictedlabel[j]):
match = match + 1
total = total + 1
elif (not (np.isnan(singlelabel))):
total = total + 1
j=j+1
i = i + 1
scorelist.append(match/total)
return np.mean(scorelist)
def ModifiedF1score(self, y_pred, y_true):
'''
:param y_pred:
:param y_true:
:return:
'''
Fscorelist=[]
for i, value in enumerate((y_true.values)):
TP, TN, FP, FN = 0.0000000001, 0.0000000001, 0.0000000001, 0.0000000001 ##Delta to avoid Zero division
predictedlabel = y_pred.iloc[i]
for j, singlelabel in enumerate(value):
if ((np.isnan(singlelabel))):
continue
elif (singlelabel ==1 and predictedlabel[j]==1):
TP = TP + 1
elif (singlelabel ==1 and predictedlabel[j]==0):
FN = FN + 1
elif (singlelabel ==0 and predictedlabel[j]==0):
TN = TN + 1
elif (singlelabel ==0 and predictedlabel[j]==1):
FP = FP + 1
precision = TP/(TP + FP)
recall = TP/(TP + FN)
fscore = (2 * precision * recall)/(precision + recall)
Fscorelist.append(fscore)
return np.mean(Fscorelist)
def Evaluate(self, y_true, y_predict):
'''
:param y_true:
:param y_predict:
:return:
'''
return self.ModifiedHammingAccuracyscore(y_predict,y_true),self.ModifiedF1score(y_predict,y_true)
def getOptimizedLabelOrder(self):
'''
:return: the label order used by the chain (re-ordered when label-order optimization is enabled)
'''
return self.updatedlabelorder
def OptimizeLabelOrder(self, Y):
'''
:param Y:
:return:
'''
Valuelist = []
Yoptimized =
|
pd.DataFrame(index=Y.index)
|
pandas.DataFrame
|
# # **********************************************************************************************************
# # Important (task is often ignored when doing data science)
# # !!! Clean up project by removing any assets that are no longer needed !!!
# # Remove zip file which has downloaded and the directory to which the files were unzipped
# # System utilities
# import os
# from pathlib import Path
# import shutil
# import wget
# from zipfile import ZipFile
# # Remove the zip file downloaded
# os.remove('names.zip')
# # Remove the directory data-us
# shutil.rmtree('data-us')
# # **********************************************************************************************************
# **********************************************************************************************************
# Download Data
# Start by downloading the data and saving it in an easy-to-read format.
# The raw data of babynames is available to download at https://www.ssa.gov/oact/babynames/names.zip
# as a zip file consisting of a set of comma separated text files for each year.
# Let us download the zip file and extract the files into a directory so we can inspect the files.
# Import modules and functions
import os
import numpy as np
import pandas as pd
from wquantiles import quantile
# Plotting libraries
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = (14, 6)
plt.style.use('seaborn-darkgrid')
import plotly.express as px
# Import modules and functions
import numpy as np
import pandas as pd
from wquantiles import quantile
from pathlib import Path
import shutil
import wget
from zipfile import ZipFile
cwd = os.getcwd()
# **********************************************************************************************************
# !!! Important !!!
# !!! Clean up project by removing any assets that are no longer needed !!!
# ? in sub directory /babynames/
# ? sub directory > data-us
# ? zip file > names.zip
# ? file > names.csv.gz
# ? file > lifetables.csv
# **********************************************************************************************************
pathSubDirDataUs = cwd + '\\babynames\\data-us'
if os.path.exists(pathSubDirDataUs):
shutil.rmtree(pathSubDirDataUs, ignore_errors=True)
pathFileNamesCsvGz = cwd + '\\babynames\\names.csv.gz'
if os.path.exists(pathFileNamesCsvGz):
os.remove(pathFileNamesCsvGz)
# Download the zip file from "https://www.ssa.gov/oact/babynames/names.zip"
wget.download("https://www.ssa.gov/oact/babynames/names.zip")
# Unzip data files to a directory named 'data-us'
zipName = 'names.zip'
zip_names = ZipFile(zipName)
zip_names.extractall(cwd + '\\babynames\\' + 'data-us')
zip_names.close()
# Remove zip file after it has been downloaded and the content has been unzipped
pathFileNamesZip = cwd + '\\' + zipName
if os.path.exists(pathFileNamesZip):
os.remove(pathFileNamesZip)
# Read the data for each year and combine them into a single data frame.
babynames = []
for file in Path(cwd + '\\babynames\\' + 'data-us').iterdir():
if file.name.endswith('txt'):
df = pd.read_csv(file, names=['name', 'sex', 'births'])
df['year'] = int(file.name[3:7])
babynames.append(df)
# Combine dataframes into a single dataframe
babynames =
|
pd.concat(babynames)
|
pandas.concat
|
## Basketball Reference Game Log Scraping ####################################################################################
# Georgia Tech: Daily Fantasy Sports Project
# authors: <NAME> & <NAME>
#### Process Outline #########################################################################################################
# note: conduct the following for a) minutes b) fantasy points per minute
#_________________________________________________________#
# ingest game log data, pivot players --> columns
# cycle through each player:
# check if stationary (ADF Test)
# if not, take log and update data accordingly
# run Arima with custom CV (AIC Based) based on q & p thresholds for that specific player
# predict next player value & store to
##############################################################################################################################
##### Notes ######
# Changing the Adfuller summary from a terminal print to a csv append output
# work on custom date filter for retroactive analysis
# complete run time: 310 seconds
# Package Import #
from datetime import date
import itertools
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.stattools import acf, pacf
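# A minimal sketch of the stationarity/ARIMA step outlined above (an illustration,
# not the project's actual implementation): ADF-test one player's series (a pandas
# Series), log it if the unit-root null is not rejected, then keep the (p, q) order
# with the lowest AIC. The 0.05 threshold, the clipping floor and the (3, 3) search
# grid are choices made only for this sketch.
def _fit_player_series_sketch(series, max_p=3, max_q=3):
    if adfuller(series.dropna())[1] > 0.05:           # cannot reject a unit root
        series = np.log(series.clip(lower=1e-6))      # log-transform before modelling
    best_fit = None
    for p in range(max_p + 1):
        for q in range(max_q + 1):
            try:
                candidate = ARIMA(series, order=(p, 0, q)).fit()
            except Exception:
                continue
            if best_fit is None or candidate.aic < best_fit.aic:
                best_fit = candidate
    return best_fit.forecast(steps=1)                 # next-game prediction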
# Functions #
def gameLog_ingestion(csv_path):
gameLog_dt =
|
pd.read_csv(csv_path)
|
pandas.read_csv
|
import argparse
import os
import sys
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.pool import NullPool
from . import exceptions, query, settings, utils
from .models import Base, Bulletin
from .version import __version__
def parse_args():
parser = argparse.ArgumentParser(
description='Query seismic bulletin database (v{})'.format(__version__))
parser.add_argument(
'-s', '--start',
help="Start time of query in 'YYYY-mm-dd HH:MM:SS' format. "
"Time part is optional. By default start time is in Asia/Jakarta "
"time zone.")
parser.add_argument(
'-e', '--end',
help="End time of query in 'YYYY-mm-dd HH:MM:SS' format. "
"Time part is optional. By default end time is in Asia/Jakarta "
"time zone.")
parser.add_argument(
'-u', '--eventid',
help="Event ID, e.g. 2021-07#2355.")
parser.add_argument(
'-t', '--eventtype',
nargs='+',
help='Event type to query, e.g. VTA, VTB, MP. If not provided, '
'the script will query all event types. You can also add more '
'than one event types.')
parser.add_argument(
'-o', '--output',
help='Path to store query result to the CSV file. If not provided or '
'equal to "stdout", the script will output the results '
'to the standard output. ')
parser.add_argument(
'-l', '--long-format',
action='store_true',
help='If provided, all bulletin fields will be printed.')
parser.add_argument(
'-d', '--delimiter',
default=',',
help='CSV delimiter. Default to comma (,).')
parser.add_argument(
'-c', '--config',
default=settings.CONFIG_PATH,
help='Path to the querybulletin JSON config file.')
parser.add_argument(
'-m', '--modified',
action='store_true',
help='If provided, query events in the bulletin that was modified '
'since start and before end time.')
return parser.parse_args()
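# Example invocation (the console-script name "querybulletin" is an assumption based
# on the config help text above; the event type codes come from the -t help string):
#   querybulletin -s "2021-07-01" -e "2021-08-01" -t VTA VTB -o bulletin_july.csv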
def validate_args(args):
if args.start:
if not utils.is_valid_datetime(args.start):
raise ValueError(
"Start time value '{}' is not a valid datetime."
"".format(args.start))
if args.end:
if not utils.is_valid_datetime(args.end):
raise ValueError(
"End time value '{}' is not a valid datetime."
"".format(args.end))
if not os.path.isfile(args.config):
raise exceptions.ImproperlyConfigured(
"JSON config file for querybulletin is not found. ")
def main():
args = parse_args()
validate_args(args)
config = utils.load_config(args.config)
engine_url = config.get('dburl')
if engine_url is None:
raise exceptions.ImproperlyConfigured(
'dburl field in the config.json is not set.')
engine = create_engine(engine_url, poolclass=NullPool)
Base.prepare(engine, reflect=True)
eventtype = args.eventtype
events = []
if args.start and args.end:
start = utils.parse_datetime_naive(args.start)
end = utils.parse_datetime_naive(args.end)
if args.modified:
events = query.get_bulletin_modified_by_range(
engine,
Bulletin,
start,
end,
eventtype=eventtype,
)
else:
events = query.get_bulletin_by_range(
engine,
Bulletin,
start,
end,
eventtype=eventtype,
)
if args.eventid:
event = query.get_bulletin_by_id(engine, Bulletin, args.eventid)
if event is not None:
events = [event, ]
else:
events = []
if events:
pd.set_option('display.max_rows', None)
|
pd.set_option('display.max_columns', None)
|
pandas.set_option
|
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 12 20:05:48 2017
@author: jercas
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sn
import calendar
from datetime import datetime
from sklearn import preprocessing
from sklearn import cross_validation
from sklearn import linear_model
from sklearn import svm
from sklearn.ensemble import RandomForestRegressor
from sklearn.learning_curve import learning_curve
from sklearn.metrics import explained_variance_score
from sklearn.grid_search import GridSearchCV
def visualization(data):
"""
visualization()
data set visualization
Parameters
----------
data: data set to be shown
Returns
-------
Null
"""
# preview top 5 row of data
print("\n--------Data preview--------\n{0}"
.format(data.head()))
print("\nNull value status as follow:\n{0}".format(data.isnull().sum()))
cols = [col for col in data.columns]
print("\nNumber of original features: {0}".format(len(cols)))
print("\nFeatures types:\n{0}".format(data[cols].dtypes.value_counts()))
counts = [[], [], []]
for col in cols:
# the data type of each feature
typ = data[col].dtype
# the number of differents value in each feature
uniq = len(np.unique(data[col]))
# constant value feature
if uniq == 1:
counts[0].append(col)
# binary value feature
elif uniq == 2 and typ == np.int64:
counts[1].append(col)
# multiple value feature
else:
counts[2].append(col)
print('\nConstant features: {}\nBinary features: {} \nCategorical features: {}\n'.format(*[len(c) for c in counts]))
print('Constant features:', counts[0])
print('Binary features:', counts[1])
print('Categorical features:', counts[2])
fig, axes = plt.subplots(2,2)
fig.set_size_inches(12, 10)
sn.boxplot(data=data,y="count",orient="v",ax=axes[0][0])
sn.boxplot(data=data,y="count",x="season",orient="v",ax=axes[0][1])
sn.boxplot(data=data,y="count",x="hour",orient="v",ax=axes[1][0])
sn.boxplot(data=data,y="count",x="workingday",orient="v",ax=axes[1][1])
axes[0][0].set(ylabel='Count',title="Box Plot On Count")
axes[0][1].set(xlabel='Season', ylabel='Count',title="Box Plot On Count Across Season")
axes[1][0].set(xlabel='Hour Of The Day', ylabel='Count',title="Box Plot On Count Across Hour Of The Day")
axes[1][1].set(xlabel='Working Day', ylabel='Count',title="Box Plot On Count Across Working Day")
plt.show()
fig,(ax1,ax2,ax3,ax4)= plt.subplots(nrows=4)
fig.set_size_inches(12,20)
sortOrder = [1,2,3,4,5,6,7,8,9,10,11,12]
hueOrder = ["Sunday","Monday","Tuesday","Wednesday","Thursday","Friday","Saturday"]
monthAggregated = pd.DataFrame(data.groupby("month")["count"].mean()).reset_index()
monthSorted = monthAggregated.sort_values(by="count",ascending=False)
sn.barplot(data=monthSorted,x="month",y="count",ax=ax1,order=sortOrder)
ax1.set(xlabel='Month', ylabel='Average Count',title="Average Count By Month")
hourAggregated = pd.DataFrame(data.groupby(["hour","season"],sort=True)["count"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["count"],hue=hourAggregated["season"],
data=hourAggregated, join=True,ax=ax2)
ax2.set(xlabel='Hour Of The Day', ylabel='Users Count',
title="Average Users Count By Hour Of The Day Across Season",label='big')
hourAggregated = pd.DataFrame(data.groupby(["hour","weekday"],sort=True)["count"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["count"],hue=hourAggregated["weekday"],hue_order=hueOrder,
data=hourAggregated, join=True,ax=ax3)
ax3.set(xlabel='Hour Of The Day', ylabel='Users Count',
title="Average Users Count By Hour Of The Day Across Weekdays",label='big')
hourTransformed = pd.melt(data[["hour","casual","registered"]], id_vars=['hour'], value_vars=['casual', 'registered'])
hourAggregated = pd.DataFrame(hourTransformed.groupby(["hour","variable"],sort=True)["value"].mean()).reset_index()
sn.pointplot(x=hourAggregated["hour"], y=hourAggregated["value"],hue=hourAggregated["variable"],
hue_order=["casual","registered"], data=hourAggregated, join=True,ax=ax4)
ax4.set(xlabel='Hour Of The Day', ylabel='Users Count',
title="Average Users Count By Hour Of The Day Across User Type",label='big')
plt.show()
def preprocess(data):
"""
preprocess(data)
data preprocess for extract features of training data
Parameters
----------
data: data set to be processed
Returns
-------
data: already had processed data
"""
print("\n--------Data preview--------\n{0}".format(data.head()))
# transform the datetime column into four columns: year, month, day and hour
data['year'] = pd.DatetimeIndex(data['datetime']).year
data['month'] = pd.DatetimeIndex(data['datetime']).month
data['day'] =
|
pd.DatetimeIndex(data['datetime'])
|
pandas.DatetimeIndex
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 12 16:53:59 2018
@author: xavier.qiu
"""
import pandas as pd
import datetime
import gc
import numpy as np
import featuretools as ft
import os
from util.util import compress_int, send_msg
class DataSet(object):
def __init__(self, data_dir='/content/EloMerchantKaggle/data/'):
self.data_dir = data_dir
self.train_x_path = os.path.join(self.data_dir, 'x_train_agg')
self.test_x_path = os.path.join(self.data_dir, 'x_test_agg')
self.train_y_path = os.path.join(self.data_dir, 'y_train')
pass
def get_train_dataset(self, reset=False, load=True):
if load and os.path.isfile(self.train_x_path) and os.path.isfile(self.train_y_path):
return pd.read_csv(self.train_x_path), pd.read_csv(self.train_y_path)
train_df, hist_df_train, new_trans_df_train = split_trans_into_train_test(data_dir=self.data_dir,
reset=reset)
return agg(train_df, hist_df_train, new_trans_df_train, True, self.train_x_path, self.train_y_path)
def get_test_dataset(self, load=True):
if load and os.path.isfile(self.test_x_path):
return
|
pd.read_csv(self.test_x_path)
|
pandas.read_csv
|
#!/usr/bin/env python
# coding: utf-8
# #<NAME>
# ## <b> Problem Description </b>
#
# ### This project aims to build a classification model to predict the sentiment of COVID-19 tweets. The tweets were pulled from Twitter and then manually tagged. Sentiment analysis is performed on the dataset using Natural Language Processing, and machine learning algorithms are incorporated to evaluate the trained model's accuracy and classification predictions.
#
# ### The following information is used:
# 1. Location
# 2. Tweet At
# 3. Original Tweet
# 4. Label
# ##Importing necessary libraries to build model
# In[63]:
import pandas as pd
import numpy as np
from numpy import percentile
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib
import tweepy
from textblob import TextBlob
import re # for regular expressions
import pandas as pd
pd.set_option("display.max_colwidth", 200)
import string
import branca.colormap as cm
import requests
import folium
from folium import plugins
from folium.plugins import HeatMap
import branca.colormap
import nltk # for text manipulation
from nltk.stem.porter import *
from nltk.tokenize import TweetTokenizer
from nltk.corpus import stopwords
from nltk import pos_tag, ne_chunk
from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid
from wordcloud import WordCloud
from tqdm import tqdm, notebook
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from tqdm import tqdm
from gensim.models.doc2vec import LabeledSentence
import gensim
from sklearn.linear_model import LogisticRegression
from scipy import stats
from sklearn import metrics
from sklearn.metrics import mean_squared_error,mean_absolute_error, make_scorer,classification_report,confusion_matrix,accuracy_score,roc_auc_score,roc_curve
from sklearn.model_selection import train_test_split,cross_val_score,KFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
# ##Extracting dataset and Reviewing Our Dataset
# In[4]:
df=pd.read_csv("https://raw.githubusercontent.com/gabrielpreda/covid-19-tweets/master/covid19_tweets.csv")
df.head()
# In[5]:
df.info()
# In[6]:
df.shape
# In[7]:
df.columns
# In[8]:
# There are 12220 unique locations from where the tweets came.
df['user_location'].value_counts()
# # Looking For Null Values
# In[9]:
missing_values =
|
pd.DataFrame()
|
pandas.DataFrame
|
from .linear_signal import LinearDatasetAccessor, LinearSignalGenerator, SignalFeatureGenerator
import unittest
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
class LinearDatasetAccessorTest(unittest.TestCase):
def test_continuos_blocks_merge_one(self):
df = pd.DataFrame()
accessor = LinearDatasetAccessor(df, 10, [1, 3, 4, 6, 9])
expected = [(1, 2), (3, 5), (6, 7), (9, 10)]
self.assertListEqual(accessor.get_contiguos_blocks(), expected)
def test_continuos_blocks_single_element(self):
df = pd.DataFrame()
accessor = LinearDatasetAccessor(df, 10, [4])
expected = [(4, 5)]
self.assertListEqual(accessor.get_contiguos_blocks(), expected)
def test_continuos_blocks_merge_multiple(self):
df = pd.DataFrame()
accessor = LinearDatasetAccessor(df, 10, [0, 1, 2, 3, 4, 5, 6, 8, 9])
expected = [(0, 7), (8, 10)]
self.assertListEqual(accessor.get_contiguos_blocks(), expected)
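# Note: the expectations above document the accessor's contract: every run of
# consecutive block indices is collapsed into one half-open (start, stop) pair.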
class MockFeatureGenerator(SignalFeatureGenerator):
def generate(self, df: pd.DataFrame, predict=False):
return df.values.reshape(-1), np.array([0])
class LinearSignalGeneratorTest(unittest.TestCase):
def test_basic(self):
signal = np.random.rand(100)
df = pd.DataFrame({'signal': signal})
accessor = LinearDatasetAccessor(df, 1, [0])
feature_gen = MockFeatureGenerator()
generator = LinearSignalGenerator(
accessor, 10, feature_gen, 5, batch_size=10)
self.assertEqual(len(generator), 2)
b0 = generator[0]
b1 = generator[1]
self.assertEqual(b0[0].shape, (10, 10))
self.assertEqual(b1[0].shape, (9, 10))
def test_no_overlap(self):
"""Ensure that there is no overlap between the data that a generator is
allowed to access and data outside of its range.
"""
signal = np.arange(1000)
df = pd.DataFrame({'signal': signal})
# Define 2 blocks of 300 and 200 data points respectivly
accessor = LinearDatasetAccessor(df, 10, [1, 2, 3, 7, 8])
feature_gen = MockFeatureGenerator()
generator = LinearSignalGenerator(
accessor, 10, feature_gen, 5, batch_size=10)
self.assertEqual(len(generator), (59 + 39 - 1) // 10 + 1)
r1_count = 0
r2_count = 0
for index in range(len(generator)):
b = generator[index]
for example in b[0]:
self.assertTrue(np.all(np.diff(example) == 1))
r1 = np.all(np.where((example >= 100) &
(example < 400), True, False))
r2 = np.all(np.where((example >= 700) &
(example < 900), True, False))
self.assertTrue(r1 or r2, example)
if r1:
r1_count += 1
if r2:
r2_count += 1
self.assertEqual(r1_count, 59)
self.assertEqual(r2_count, 39)
def test_non_divisible_sizes(self):
"""Repeat the non_overlap test with sizes such that the number of
datapoints is not divisible by the number of blocks.
"""
signal = np.arange(1000)
df =
|
pd.DataFrame({'signal': signal})
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
# ## IMT 563
# ### Group 7 | Covid -19 Vaccination Info
# ### Authors - <NAME>, <NAME> and <NAME>
# In[1]:
# Importing useful packages and libraries
import pandas as pd
import numpy as np
import datetime
from datetime import datetime
import snowflake.connector
from snowflake import sqlalchemy
from snowflake.sqlalchemy import URL
from sqlalchemy import create_engine,inspect
import pytz
from calendar import monthrange
import re
from tqdm import tqdm
# In[2]:
# Establishing Snowflake Connection Parameters
engine = create_engine(URL(
account = 'tca69088',
role = 'SYSADMIN',
user = 'Group7',
password = '<PASSWORD>!',
database = 'IMT_DB',
schema = 'PUBLIC',
))
# ### Importing Files
# #### Comment - HERE I HAVE NOT REMOVED UNASSIGNED VALUES
# In[3]:
### Writing a function to do the same
### - So the function should take in file_path, sheet_name, list_columns, list_group and agg_value
def load_wa_chd(file_path,sheet_name,list_columns,list_group,agg_value):
df = pd.ExcelFile(file_path)
df_s = pd.read_excel(df, sheet_name = sheet_name)
df_s = df_s[list_columns]
df_s = df_s.groupby(list_group,as_index=False)[agg_value].sum()
df_s = df_s[df_s['County'] != 'Unassigned']
df_s = df_s.reset_index(drop=True)
return df_s
# #### 7th MARCH CASES
# In[4]:
file_path_wa_c_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_c_7 = 'Cases'
list_columns_wa_c_7 = ['County','TotalCases']
list_group_wa_c_7 = ['County']
agg_value_c = 'TotalCases'
wa_chd_7_cases = load_wa_chd(file_path_wa_c_7,sheet_name_wa_c_7,list_columns_wa_c_7,list_group_wa_c_7
,agg_value_c)
# #### 7th MARCH HOSPITALIZATIONS
# In[5]:
file_path_wa_h_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_h_7 = 'Hospitalizations'
list_columns_wa_h_7 = ['County','Hospitalizations']
list_group_wa_h_7 = ['County']
agg_value_h = 'Hospitalizations'
wa_chd_7_hospitalizations = load_wa_chd(file_path_wa_h_7,sheet_name_wa_h_7,list_columns_wa_h_7,list_group_wa_h_7
,agg_value_h)
# #### 7th MARCH DEATHS
# In[6]:
file_path_wa_d_7 = '/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_WA_COVID19_Cases_Hospitalizations_Deaths (2).xlsx'
sheet_name_wa_d_7 = 'Deaths'
list_columns_wa_d_7 = ['County','Deaths']
list_group_wa_d_7 = ['County']
agg_value_d = 'Deaths'
wa_chd_7_deaths = load_wa_chd(file_path_wa_d_7,sheet_name_wa_d_7,list_columns_wa_d_7,list_group_wa_d_7
,agg_value_d)
# #### 21st FEB CASES
# In[7]:
file_path_wa_c_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_c_21 = 'Cases'
list_columns_wa_c_21 = ['County','TotalCases']
list_group_wa_c_21 = ['County']
agg_value_c = 'TotalCases'
wa_chd_21_cases = load_wa_chd(file_path_wa_c_21,sheet_name_wa_c_21,list_columns_wa_c_21,list_group_wa_c_21
,agg_value_c)
# #### 21st FEB HOSPITALIZATIONS
# In[8]:
file_path_wa_h_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_h_21 = 'Hospitalizations'
list_columns_wa_h_21 = ['County','Hospitalizations']
list_group_wa_h_21 = ['County']
agg_value_h = 'Hospitalizations'
wa_chd_21_hospitalizations = load_wa_chd(file_path_wa_h_21,sheet_name_wa_h_21,list_columns_wa_h_21,list_group_wa_h_21
,agg_value_h)
# #### 21st FEB DEATHS
# In[9]:
file_path_wa_d_21 = '/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_WA_COVID19_Cases_Hospitalizations_Deaths.xlsx'
sheet_name_wa_d_21 = 'Deaths'
list_columns_wa_d_21 = ['County','Deaths']
list_group_wa_d_21 = ['County']
agg_value_d = 'Deaths'
wa_chd_21_deaths = load_wa_chd(file_path_wa_d_21,sheet_name_wa_d_21,list_columns_wa_d_21,list_group_wa_d_21
,agg_value_d)
# ### Zip Level Data
# In[10]:
def load_wa_zip_chd(file_path,sheet_name,list_columns):
df = pd.ExcelFile(file_path)
df_s = pd.read_excel(df, sheet_name = sheet_name)
df_s = df_s[list_columns]
df_s = df_s.drop(0)
df_s.reset_index(drop=True,inplace=True)
return df_s
# #### 7th March
# In[11]:
file_path_wa_zip_c_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_c_7 = 'ZIP'
list_columns_wa_zip_c_7 =['Location_Name','Positives']
wa_chd_zip_7_cases = load_wa_zip_chd(file_path_wa_zip_c_7,sheet_name_wa_zip_c_7,list_columns_wa_zip_c_7)
# In[12]:
file_path_wa_zip_d_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_d_7 = 'ZIP'
list_columns_wa_zip_d_7 =['Location_Name','Deaths']
wa_chd_zip_7_deaths = load_wa_zip_chd(file_path_wa_zip_d_7,sheet_name_wa_zip_d_7,list_columns_wa_zip_d_7)
# In[13]:
file_path_wa_zip_h_7 = "/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_overall-counts-rates-geography-mar-3.xlsx"
sheet_name_wa_zip_h_7 = 'ZIP'
list_columns_wa_zip_h_7 =['Location_Name','Hospitalizations']
wa_chd_zip_7_hospitalizations = load_wa_zip_chd(file_path_wa_zip_h_7,sheet_name_wa_zip_h_7,list_columns_wa_zip_h_7)
# #### 21st Feb
# In[14]:
file_path_wa_zip_c_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_c_21 = 'ZIP'
list_columns_wa_zip_c_21 =['Location_Name','Positives']
wa_chd_zip_21_cases = load_wa_zip_chd(file_path_wa_zip_c_21,sheet_name_wa_zip_c_21,list_columns_wa_zip_c_21)
# In[15]:
file_path_wa_zip_d_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_d_21 = 'ZIP'
list_columns_wa_zip_d_21 =['Location_Name','Deaths']
wa_chd_zip_21_deaths = load_wa_zip_chd(file_path_wa_zip_d_21,sheet_name_wa_zip_d_21,list_columns_wa_zip_d_21)
# In[16]:
file_path_wa_zip_h_21 = "/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_overall-counts-rates-geography-feb-17 (1).xlsx"
sheet_name_wa_zip_h_21 = 'ZIP'
list_columns_wa_zip_h_21 =['Location_Name','Hospitalizations']
wa_chd_zip_21_hospitalizations = load_wa_zip_chd(file_path_wa_zip_h_21,sheet_name_wa_zip_h_21,list_columns_wa_zip_h_21)
# ### Covid County Vaccinations
# ### 7th March
# In[17]:
wa_vacc_7 = pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/7th Mar 2021/7_Mar_Vaccination_County_Level_Counts.xlsx')
wa_vacc_7 = wa_vacc_7[['County','People Initiating Vaccination']]
# In[18]:
wa_vacc_7 = wa_vacc_7.drop([39,40])
# In[19]:
wa_vacc_7['County'] = wa_vacc_7['County'].apply(lambda x: x+' County')
# ### 21st February
# In[20]:
wa_vacc_21 =
|
pd.read_excel(r'/Users/rohan20k/Desktop/data_imt_563/21st Feb 2021/21_Feb_Vaccination_County_Level_Counts.xlsx')
|
pandas.read_excel
|
from collections import Counter
from functools import partial
from math import sqrt
from pathlib import Path
import lightgbm as lgb
import numpy as np
import pandas as pd
import scipy as sp
from sklearn.decomposition import TruncatedSVD, NMF
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import KFold
from sklearn.model_selection import StratifiedKFold
from kaggle_petfinder.utils import is_script_running
ON_KAGGLE: bool = is_script_running()
DATA_ROOT = Path(
"../input/petfinder-adoption-prediction"
if ON_KAGGLE
else "../resources/petfinder-adoption-prediction"
)
EXTRA_DATA_ROOT = Path(
"../input/extract-image-features-from-pretrained-nn"
if ON_KAGGLE
else "../resources/extract-image-features-from-pretrained-nn"
)
# basic datasets
train = pd.read_csv(DATA_ROOT / "train/train.csv")
test = pd.read_csv(DATA_ROOT / "test/test.csv")
sample_submission = pd.read_csv(DATA_ROOT / "test/sample_submission.csv")
labels_breed = pd.read_csv(DATA_ROOT / "breed_labels.csv")
labels_state = pd.read_csv(DATA_ROOT / "state_labels.csv")
labels_color = pd.read_csv(DATA_ROOT / "color_labels.csv")
# extract datasets
# https://www.kaggle.com/christofhenkel/extract-image-features-from-pretrained-nn
train_img_features = pd.read_csv(EXTRA_DATA_ROOT / "train_img_features.csv")
test_img_features = pd.read_csv(EXTRA_DATA_ROOT / "test_img_features.csv")
# img_features columns set names
col_names = ["PetID"] + ["{}_img_feature".format(_) for _ in range(256)]
train_img_features.columns = col_names
test_img_features.columns = col_names
def agg_features(df_metadata, df_sentiment):
# Extend aggregates and improve column naming
aggregates = ["mean", "median", "sum", "var", "std", "min", "max", "nunique"]
metadata_desc = df_metadata.groupby(["PetID"])["metadata_annots_top_desc"].unique()
metadata_desc = metadata_desc.reset_index()
metadata_desc["metadata_annots_top_desc"] = metadata_desc[
"metadata_annots_top_desc"
].apply(lambda x: " ".join(x))
prefix = "metadata"
metadata_gr = df_metadata.drop(["metadata_annots_top_desc"], axis=1)
for i in metadata_gr.columns:
if "PetID" not in i:
metadata_gr[i] = metadata_gr[i].astype(float)
metadata_gr = metadata_gr.groupby(["PetID"]).agg(aggregates)
metadata_gr.columns = pd.Index(
[
"{}_{}_{}".format(prefix, c[0], c[1].upper())
for c in metadata_gr.columns.tolist()
]
)
metadata_gr = metadata_gr.reset_index()
sentiment_desc = df_sentiment.groupby(["PetID"])["sentiment_entities"].unique()
sentiment_desc = sentiment_desc.reset_index()
sentiment_desc["sentiment_entities"] = sentiment_desc["sentiment_entities"].apply(
lambda x: " ".join(x)
)
prefix = "sentiment"
sentiment_gr = df_sentiment.drop(["sentiment_entities"], axis=1)
for i in sentiment_gr.columns:
if "PetID" not in i:
sentiment_gr[i] = sentiment_gr[i].astype(float)
sentiment_gr = sentiment_gr.groupby(["PetID"]).agg(aggregates)
sentiment_gr.columns = pd.Index(
[
"{}_{}_{}".format(prefix, c[0], c[1].upper())
for c in sentiment_gr.columns.tolist()
]
)
sentiment_gr = sentiment_gr.reset_index()
return sentiment_gr, metadata_gr, metadata_desc, sentiment_desc
def breed_features(df, _labels_breed):
breed_main = df[["Breed1"]].merge(
_labels_breed,
how="left",
left_on="Breed1",
right_on="BreedID",
suffixes=("", "_main_breed"),
)
breed_main = breed_main.iloc[:, 2:]
breed_main = breed_main.add_prefix("main_breed_")
breed_second = df[["Breed2"]].merge(
_labels_breed,
how="left",
left_on="Breed2",
right_on="BreedID",
suffixes=("", "_second_breed"),
)
breed_second = breed_second.iloc[:, 2:]
breed_second = breed_second.add_prefix("second_breed_")
return breed_main, breed_second
def impact_coding(data, feature, target="y"):
"""
In this implementation we get the values and the dictionary as two different steps.
This is just because initially we were ignoring the dictionary as a result variable.
In this implementation the KFolds use shuffling. If you want reproducibility the cv
could be moved to a parameter.
"""
n_folds = 20
n_inner_folds = 10
impact_coded = pd.Series()
oof_default_mean = data[
target
].mean() # Gobal mean to use by default (you could further tune this)
kf = KFold(n_splits=n_folds, shuffle=True)
oof_mean_cv = pd.DataFrame()
split = 0
for infold, oof in kf.split(data[feature]):
impact_coded_cv = pd.Series()
kf_inner = KFold(n_splits=n_inner_folds, shuffle=True)
inner_split = 0
inner_oof_mean_cv = pd.DataFrame()
oof_default_inner_mean = data.iloc[infold][target].mean()
for infold_inner, oof_inner in kf_inner.split(data.iloc[infold]):
# The mean to apply to the inner oof split (a 1/n_folds % based on the rest)
oof_mean = data.iloc[infold_inner].groupby(by=feature)[target].mean()
impact_coded_cv = impact_coded_cv.append(
data.iloc[infold].apply(
lambda x: oof_mean[x[feature]]
if x[feature] in oof_mean.index
else oof_default_inner_mean,
axis=1,
)
)
# Also populate mapping (this has all group -> mean for all inner CV folds)
inner_oof_mean_cv = inner_oof_mean_cv.join(
pd.DataFrame(oof_mean), rsuffix=inner_split, how="outer"
)
inner_oof_mean_cv.fillna(value=oof_default_inner_mean, inplace=True)
inner_split += 1
# Also populate mapping
oof_mean_cv = oof_mean_cv.join(
pd.DataFrame(inner_oof_mean_cv), rsuffix=split, how="outer"
)
oof_mean_cv.fillna(value=oof_default_mean, inplace=True)
split += 1
impact_coded = impact_coded.append(
data.iloc[oof].apply(
lambda x: inner_oof_mean_cv.loc[x[feature]].mean()
if x[feature] in inner_oof_mean_cv.index
else oof_default_mean,
axis=1,
)
)
return impact_coded, oof_mean_cv.mean(axis=1), oof_default_mean
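# Hypothetical usage sketch (the column names are placeholders, and the function
# relies on the pre-2.0 pandas Series.append API used above):
# data["Breed1_impact"], mapping, default_mean = impact_coding(data, "Breed1", target="AdoptionSpeed")
# test["Breed1_impact"] = test["Breed1"].map(mapping).fillna(default_mean)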
def frequency_encoding(df, col_name):
new_name = "{}_counts".format(col_name)
new_col_name = "{}_freq".format(col_name)
grouped = df.groupby(col_name).size().reset_index(name=new_name)
df = df.merge(grouped, how="left", on=col_name)
df[new_col_name] = df[new_name] / df[new_name].count()
del df[new_name]
return df
# FROM: https://www.kaggle.com/myltykritik/simple-lgbm-image-features
# The following 3 functions have been taken from <NAME>'s github repository
# https://github.com/benhamner/Metrics
def confusion_matrix(rater_a, rater_b, min_rating=None, max_rating=None):
"""
Returns the confusion matrix between rater's ratings
"""
assert len(rater_a) == len(rater_b)
if min_rating is None:
min_rating = min(rater_a + rater_b)
if max_rating is None:
max_rating = max(rater_a + rater_b)
num_ratings = int(max_rating - min_rating + 1)
conf_mat = [[0 for i in range(num_ratings)] for j in range(num_ratings)]
for a, b in zip(rater_a, rater_b):
conf_mat[a - min_rating][b - min_rating] += 1
return conf_mat
def histogram(ratings, min_rating=None, max_rating=None):
"""
Returns the counts of each type of rating that a rater made
"""
if min_rating is None:
min_rating = min(ratings)
if max_rating is None:
max_rating = max(ratings)
num_ratings = int(max_rating - min_rating + 1)
hist_ratings = [0 for x in range(num_ratings)]
for r in ratings:
hist_ratings[r - min_rating] += 1
return hist_ratings
def quadratic_weighted_kappa(y, y_pred):
"""
Calculates the quadratic weighted kappa
quadratic_weighted_kappa calculates the quadratic weighted kappa
value, which is a measure of inter-rater agreement between two raters
that provide discrete numeric ratings. Potential values range from -1
(representing complete disagreement) to 1 (representing complete
agreement). A kappa value of 0 is expected if all agreement is due to
chance.
quadratic_weighted_kappa(rater_a, rater_b), where rater_a and rater_b
each correspond to a list of integer ratings. These lists must have the
same length.
The ratings should be integers, and it is assumed that they contain
the complete range of possible ratings.
quadratic_weighted_kappa(X, min_rating, max_rating), where min_rating
is the minimum possible rating, and max_rating is the maximum possible
rating
"""
rater_a = y
rater_b = y_pred
min_rating = None
max_rating = None
rater_a = np.array(rater_a, dtype=int)
rater_b = np.array(rater_b, dtype=int)
assert len(rater_a) == len(rater_b)
if min_rating is None:
min_rating = min(min(rater_a), min(rater_b))
if max_rating is None:
max_rating = max(max(rater_a), max(rater_b))
conf_mat = confusion_matrix(rater_a, rater_b, min_rating, max_rating)
num_ratings = len(conf_mat)
num_scored_items = float(len(rater_a))
hist_rater_a = histogram(rater_a, min_rating, max_rating)
hist_rater_b = histogram(rater_b, min_rating, max_rating)
numerator = 0.0
denominator = 0.0
for i in range(num_ratings):
for j in range(num_ratings):
expected_count = hist_rater_a[i] * hist_rater_b[j] / num_scored_items
d = pow(i - j, 2.0) / pow(num_ratings - 1, 2.0)
numerator += d * conf_mat[i][j] / num_scored_items
denominator += d * expected_count / num_scored_items
return 1.0 - numerator / denominator
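# Sanity checks for the metric above (illustrative, not part of the original kernel):
# identical ratings score 1.0 and fully opposed ratings on the 0-4 scale score -1.0.
def _qwk_sanity_check():
    assert quadratic_weighted_kappa([0, 1, 2, 3, 4], [0, 1, 2, 3, 4]) == 1.0
    assert quadratic_weighted_kappa([0, 4], [4, 0]) == -1.0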
class OptimizedRounder(object):
def __init__(self):
self.coef_ = 0
def _kappa_loss(self, coef, X, y):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
ll = quadratic_weighted_kappa(y, X_p)
return -ll
def fit(self, X, y):
loss_partial = partial(self._kappa_loss, X=X, y=y)
initial_coef = [0.5, 1.5, 2.5, 3.5]
self.coef_ = sp.optimize.minimize(
loss_partial, initial_coef, method="nelder-mead"
)
def predict(self, X, coef):
X_p = np.copy(X)
for i, pred in enumerate(X_p):
if pred < coef[0]:
X_p[i] = 0
elif pred >= coef[0] and pred < coef[1]:
X_p[i] = 1
elif pred >= coef[1] and pred < coef[2]:
X_p[i] = 2
elif pred >= coef[2] and pred < coef[3]:
X_p[i] = 3
else:
X_p[i] = 4
return X_p
def coefficients(self):
return self.coef_["x"]
def rmse(actual, predicted):
return sqrt(mean_squared_error(actual, predicted))
def train_lightgbm(
X_train, X_test, params, n_splits, num_rounds, verbose_eval, early_stop
):
kfold = StratifiedKFold(n_splits=n_splits, random_state=1337)
oof_train = np.zeros((X_train.shape[0]))
oof_test = np.zeros((X_test.shape[0], n_splits))
i = 0
for train_index, valid_index in kfold.split(
X_train, X_train["AdoptionSpeed"].values
):
X_tr = X_train.iloc[train_index, :]
X_val = X_train.iloc[valid_index, :]
y_tr = X_tr["AdoptionSpeed"].values
X_tr = X_tr.drop(["AdoptionSpeed"], axis=1)
y_val = X_val["AdoptionSpeed"].values
X_val = X_val.drop(["AdoptionSpeed"], axis=1)
print("\ny_tr distribution: {}".format(Counter(y_tr)))
d_train = lgb.Dataset(X_tr, label=y_tr)
d_valid = lgb.Dataset(X_val, label=y_val)
watchlist = [d_train, d_valid]
print("training LGB:")
model = lgb.train(
params,
train_set=d_train,
num_boost_round=num_rounds,
valid_sets=watchlist,
verbose_eval=verbose_eval,
early_stopping_rounds=early_stop,
)
val_pred = model.predict(X_val, num_iteration=model.best_iteration)
test_pred = model.predict(X_test, num_iteration=model.best_iteration)
oof_train[valid_index] = val_pred
oof_test[:, i] = test_pred
i += 1
return oof_train, oof_test
def main():
train_pet_ids = train.PetID.unique()
test_pet_ids = test.PetID.unique()
train_proc = train.copy()
test_proc = test.copy()
train_proc =
|
pd.merge(train_proc, train_img_features, on="PetID")
|
pandas.merge
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from seglearn.transform import Segment
import plotly.express as px
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from keras import backend as K
import holidays
de_holidays = holidays.DE()
RANDOM_SEED = 42
data_path = '../data/'
def train_test_valid_split(df, window_size, feature_len,
split_pct_1=0.3, split_pct_2=0.33,
test_set=True):
"""Splits data into training, validation and test sets.
If you do not want a test set, set the `test_set` param
to False. """
X_train, X_valid, y_train, y_valid = train_test_split(df.iloc[:,:(window_size*feature_len)],
df.iloc[:,(window_size*feature_len):-1],
test_size=split_pct_1, shuffle=True,
random_state=RANDOM_SEED)
# print(y_valid.shape, type(y_valid),'\n' , X_train.shape, type(X_train))
if test_set:
X_valid, X_test, y_valid, y_test = train_test_split(X_valid, y_valid,
test_size=split_pct_2, shuffle=True,
random_state=RANDOM_SEED)
return X_train, X_valid, X_test, y_train.iloc[:,0].values, y_valid.iloc[:,0].values, y_test.iloc[:,0].values
return X_train, X_valid, y_train.iloc[:,0].values, y_valid.iloc[:,0].values
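# Note: with the default fractions this is roughly a 70/20/10 train/validation/test
# split (30% held out, then a third of that held-out part used for the test set).
# Hypothetical call (window_size and feature_len must match how the windows were built):
# X_tr, X_val, X_te, y_tr, y_val, y_te = train_test_valid_split(windows_df, 24, 5)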
def calc_reg_metrics(y_true, y_pred):
"""Calculates a set of regression
metrics"""
mse = mean_squared_error(y_true, y_pred)
rmse = np.sqrt(mse)
mae = mean_absolute_error(y_true, y_pred)
try:
mape = mean_absolute_percentage_error(y_true, y_pred)
except:
pass
r2 = r2_score(y_true, y_pred)
results = pd.DataFrame([mse, rmse, mae, r2],
index=['MSE', 'RMSE', 'MAE', 'R2'],
columns=['value'])
return results
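# Note: mean_absolute_percentage_error is not imported in this module, so the bare
# try/except above silently skips MAPE and it never reaches the results frame.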
def create_column_features(features, window_size):
"""Create column names from list
of features and window size"""
columns = []
for i in list(range(window_size)) + ['y']:
for f in features:
columns.append(f+'_'+str(i))
return columns
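# For example, create_column_features(['px', 'qty'], 2) returns
# ['px_0', 'qty_0', 'px_1', 'qty_1', 'px_y', 'qty_y'].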
def create_features(temp, features: list, ohlc: bool=True):
"""Creates features based on list. """
if ohlc:
y = temp.px.close.values
else:
y = temp.px.values
feature_list = []
if 'weekday' in features:
weekday = np.array(temp.index.dayofweek)
feature_list.append(weekday)
if 'weekday_sin' in features:
weekday_sin = np.sin(2*np.pi*temp.index.dayofweek/6)
feature_list.append(weekday_sin)
if 'weekday_cos' in features:
weekday_cos = np.cos(2*np.pi*temp.index.dayofweek/6)
feature_list.append(weekday_cos)
if 'run_hour' in features:
feature_list.append(temp.hour)
if 'hours_to_4' in features:
# hour = temp.index.hour
hours_to_4 = np.array([40-hour if hour>16 else 16-hour for hour in temp.index.hour])/23
feature_list.append(hours_to_4)
if 'n_prev_hour_contracts' in features:
feature_list.append(temp.n_prev_hour_contracts/41)
if 'hour' in features:
hour = np.array(temp.index.hour)
feature_list.append(hour)
if 'hour_sin' in features:
hour_sin = np.sin(2*np.pi*temp.index.hour/23)
feature_list.append(hour_sin)
# 16 - temp.index.hour
if 'hour_cos' in features:
hour_cos = np.cos(2*np.pi*temp.index.hour/23)
feature_list.append(hour_cos)
if 'air_temp' in features:
feature_list.append(temp.air_temp)
if 'rel_humidity' in features:
feature_list.append(temp.rel_humidity)
if 'wind_speed' in features:
feature_list.append(temp.wind_speed)
if 'wind_dir' in features:
feature_list.append(temp.wind_dir)
    if 'holidays' in features:
        # local renamed to is_holiday so it does not shadow the imported `holidays` package
        is_holiday = np.array([x in de_holidays for x in temp.index.strftime("%Y-%m-%d")])
        feature_list.append(is_holiday)
if 'qty_open' in features:
qty_open = np.array(temp.qty.open.values)
feature_list.append(qty_open)
if 'qty_high' in features:
qty_high = np.array(temp.qty.high.values)
feature_list.append(qty_high)
if 'qty_low' in features:
qty_low = np.array(temp.qty.low.values)
feature_list.append(qty_low)
if 'qty_close' in features:
qty_close = np.array(temp.qty.close.values)
feature_list.append(qty_close)
if 'qty_var' in features:
try:
qty_var = np.array(temp.qty['var'].values)
        except (KeyError, AttributeError):
qty_var = np.array(temp.qty.qty.values)
feature_list.append(qty_var)
if 'qty_sum' in features:
try:
qty_sum = np.array(temp.qty['sum'].values)
        except (KeyError, AttributeError):
qty_sum = np.array(temp.qty.qty.values)
feature_list.append(qty_sum)
if 'act_px_open' in features:
act_px_open = np.array(temp.act_px.open.values)
feature_list.append(act_px_open)
if 'act_px_high' in features:
act_px_high = np.array(temp.act_px.high.values)
feature_list.append(act_px_high)
if 'act_px_low' in features:
act_px_low = np.array(temp.act_px.low.values)
feature_list.append(act_px_low)
if 'act_px_close' in features:
act_px_close = np.array(temp.act_px.close.values)
feature_list.append(act_px_close)
if 'px_open' in features:
px_open = np.array(temp.px.open.values)
feature_list.append(px_open)
if 'px_high' in features:
px_high = np.array(temp.px.high.values)
feature_list.append(px_high)
if 'px_low' in features:
px_low = np.array(temp.px.low.values)
feature_list.append(px_low)
if 'px_var' in features:
px_var = np.array(temp.px['var'].values)
feature_list.append(px_var)
if 'act_px_absdif' in features:
act_px_absdif = np.array(temp.act_px_absdif.values)
feature_list.append(act_px_absdif)
if 'px_absdif' in features:
px_absdif = np.array(temp.px_absdif.values)
feature_list.append(px_absdif)
return np.stack([y, *feature_list], axis=1), y
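# Note (added for clarity): the matrix returned above has shape
# (len(temp), 1 + n_selected_features) with the target price in column 0, so the
# Segment transformer below can slice fixed-length windows out of it; the second
# return value is the raw target series used for the segment labels.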
def create_rolling_windows(resampled_df: pd.DataFrame, window_size: int,
features: list, save_to_pickle: bool=True,
ohlc: bool=True) -> pd.DataFrame:
"""Creates rolling windows from the data. You need to specify
a window size and a list of feature names you have."""
if ohlc:
contracts = resampled_df['contractId']['contractId'].value_counts()\
[resampled_df['contractId']['contractId'].value_counts() > window_size].index
else:
contracts = resampled_df['contractId'].value_counts()\
[resampled_df['contractId'].value_counts() > window_size].index
columns = create_column_features(features, window_size)
segmenter = Segment(width=window_size+1, step=1)
forecast_df = pd.DataFrame()
for c in contracts:
if ohlc:
temp = resampled_df[resampled_df['contractId']['contractId']==c]
save_str = 'ohlc'
date = '27102020'
else:
temp = resampled_df[resampled_df['contractId']==c]
save_str = 'last'
date = '25102020'
X, y = create_features(temp, features, ohlc)
X_train, y_train, _ = segmenter.fit_transform([X], [y])
assert X_train.shape[0] == len(temp) - window_size
temp_rolling = pd.DataFrame(X_train.reshape(X_train.shape[0], -1), columns=columns)
temp_rolling['contractId'] = c
forecast_df =
|
pd.concat([forecast_df, temp_rolling])
|
pandas.concat
|
from __future__ import division # brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
print("Python version: " + sys.version)
print("Numpy version: " + np.__version__)
from ..ted_exe import Ted
test = {}
class TestTed(unittest.TestCase):
"""
Unit tests for TED model.
"""
print("ted unittests conducted at " + str(datetime.datetime.today()))
def setUp(self):
"""
Setup routine for ted unit tests.
:return:
"""
pass
def tearDown(self):
"""
Teardown routine for ted unit tests.
:return:
"""
pass
# teardown called after each test
# e.g. maybe write test results to some text file
def create_ted_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty ted object
ted_empty = Ted(df_empty, df_empty)
return ted_empty
def test_daily_app_flag(self):
"""
        :description generates a daily flag to denote whether a pesticide is applied that day or not (1 - applied, 0 - not applied)
        :param num_apps; number of applications
        :param app_interval; number of days between applications
        :NOTE in TED model there are two application scenarios per simulation (one each for the min and max exposure scenarios)
(this is why the parameters are passed in)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='bool')
        result = pd.Series([[]], dtype='object')  # object dtype: the entries hold list/Series results
expected_results = [[True, False, False, True, False, False, True, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, True, False, False,
False, False, False, False, True, False, False, False, False, False,
False, True, False, False, False, False, False, False, True, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False],
[True, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False, False, False, False, False,
False, False, False, False, False, False]]
try:
# internal model constants
ted_empty.num_simulation_days = 366
            # input variables that change per simulation
            ted_empty.num_apps_min = pd.Series([3, 5, 1])
            ted_empty.app_interval_min = pd.Series([3, 7, 1])
            for i in range(3):
                result[i] = ted_empty.daily_app_flag(ted_empty.num_apps_min[i], ted_empty.app_interval_min[i])
                npt.assert_array_equal(result[i], expected_results[i])
finally:
for i in range(3):
tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
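    # Hedged sketch (added for illustration, not part of the original test suite):
    # judging from the expected_results above, daily_app_flag presumably returns a
    # boolean Series of length num_simulation_days that is True on application days
    # 0, app_interval, 2*app_interval, ... for num_apps applications in total.
    @staticmethod
    def _daily_app_flag_sketch(num_apps, app_interval, num_days=366):
        """Assumed reference behaviour only; the model's own implementation lives in ted_exe."""
        flags = np.full(num_days, False)
        flags[[day * app_interval for day in range(num_apps)]] = True
        return pd.Series(flags, dtype='bool')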
def test_set_drift_parameters(self):
"""
        :description provides parameter values to use when calculating distances from edge of application source area to
concentration of interest
:param app_method; application method (aerial/ground/airblast)
:param boom_hgt; height of boom (low/high) - 'NA' if not ground application
:param drop_size; droplet spectrum for application (see list below for aerial/ground - 'NA' if airblast)
        :param param_a (result[i][0]); parameter a for spray drift distance calculation
        :param param_b (result[i][1]); parameter b for spray drift distance calculation
        :param param_c (result[i][2]); parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
        result = pd.Series([[0., 0., 0.] for _ in range(9)], dtype='object')  # independent lists; a float Series cannot hold list entries
expected_results = [[0.0292,0.822,0.6539],[0.043,1.03,0.5],[0.0721,1.0977,0.4999],[0.1014,1.1344,0.4999],
[1.0063,0.9998,1.0193],[5.5513,0.8523,1.0079],[0.1913,1.2366,1.0552],
[2.4154,0.9077,1.0128],[0.0351,2.4586,0.4763]]
try:
# input variable that change per simulation
ted_empty.app_method_min = pd.Series(['aerial','aerial','aerial','aerial','ground','ground','ground','ground','airblast'])
ted_empty.boom_hgt_min = pd.Series(['','','','','low','low','high','high',''])
ted_empty.droplet_spec_min = pd.Series(['very_fine_to_fine','fine_to_medium','medium_to_coarse','coarse_to_very_coarse',
'very_fine_to_fine','fine_to_medium-coarse','very_fine_to_fine','fine_to_medium-coarse',''])
for i in range (9): # test that the nine combinations are accessed
result[i][0], result[i][1], result[i][2] = ted_empty.set_drift_parameters(ted_empty.app_method_min[i], ted_empty.boom_hgt_min[i], ted_empty.droplet_spec_min[i])
npt.assert_allclose(result[i],expected_results[i],rtol=1e-4, atol=0, err_msg='', verbose=True)
finally:
for i in range (9):
                tab = [result[i], expected_results[i]]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_drift_distance_calc(self):
"""
        :description calculates the distance from the edge of the application source area to the
                     concentration of interest, given the spray drift parameters
:param app_rate_frac; fraction of active ingredient application rate equivalent to the health threshold of concern
:param param_a; parameter a for spray drift distance calculation
:param param_b; parameter b for spray drift distance calculation
:param param_c; parameter c for spray drift distance calculation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
ted_empty = self.create_ted_object()
expected_results = pd.Series([], dtype='float')
result = pd.Series([], dtype='float')
expected_results = [302.050738, 11.484378, 0.0]
try:
# internal model constants
ted_empty.max_distance_from_source = 1000.
# input variable that is internally specified from among options
param_a = pd.Series([0.0292, 0.1913, 0.0351], dtype='float')
param_b = pd.Series([0.822, 1.2366, 2.4586], dtype='float')
param_c =
|
pd.Series([0.6539, 1.0522, 0.4763], dtype='float')
|
pandas.Series
|
import pandas as pd
import numpy as np
import os
from collections import Counter
from scipy.cluster.hierarchy import linkage, fcluster
from scipy.spatial.distance import squareform
def find_correlation_clusters(corr,corr_thresh):
dissimilarity = 1.0 - corr
hierarchy = linkage(squareform(dissimilarity), method='single')
diss_thresh = 1.0 - corr_thresh
labels = fcluster(hierarchy, diss_thresh, criterion='distance')
return labels
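# Hedged example (added for illustration; the 3x3 matrix is made up): two metrics
# correlated at 0.9 fall into one cluster, while a weakly correlated third metric
# stays on its own when corr_thresh = 0.5.
def _example_correlation_clusters():
    corr = np.array([[1.0, 0.9, 0.1],
                     [0.9, 1.0, 0.2],
                     [0.1, 0.2, 1.0]])
    return find_correlation_clusters(corr, corr_thresh=0.5)  # e.g. array([1, 1, 2])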
def relabel_clusters(labels,metric_columns):
cluster_count = Counter(labels)
cluster_order = {cluster[0]: idx for idx, cluster in enumerate(cluster_count.most_common())}
relabeled_clusters = [cluster_order[l] for l in labels]
relabled_count = Counter(relabeled_clusters)
labeled_column_df = pd.DataFrame({'group': relabeled_clusters, 'column': metric_columns}).sort_values(
['group', 'column'], ascending=[True, True])
return labeled_column_df, relabled_count
def make_load_matrix(labeled_column_df,metric_columns,relabled_count,corr):
load_mat = np.zeros((len(metric_columns), len(relabled_count)))
for row in labeled_column_df.iterrows():
orig_col = metric_columns.index(row[1][1])
if relabled_count[row[1][0]]>1:
load_mat[orig_col, row[1][0]] = 1.0/ (np.sqrt(corr) * float(relabled_count[row[1][0]]) )
else:
load_mat[orig_col, row[1][0]] = 1.0
is_group = load_mat.astype(bool).sum(axis=0) > 1
column_names=['metric_group_{}'.format(d + 1) if is_group[d]
else labeled_column_df.loc[labeled_column_df['group']==d,'column'].iloc[0]
for d in range(0, load_mat.shape[1])]
loadmat_df = pd.DataFrame(load_mat, index=metric_columns, columns=column_names)
loadmat_df['name'] = loadmat_df.index
sort_cols = list(loadmat_df.columns.values)
sort_order = [False] * loadmat_df.shape[1]
sort_order[-1] = True
loadmat_df = loadmat_df.sort_values(sort_cols, ascending=sort_order)
loadmat_df = loadmat_df.drop('name', axis=1)
return loadmat_df
def save_load_matrix(data_set_path,loadmat_df, labeled_column_df):
save_path = data_set_path.replace('.csv', '_load_mat.csv')
print('saving loadings to ' + save_path)
loadmat_df.to_csv(save_path)
save_path = data_set_path.replace('.csv', '_groupmets.csv')
print('saving metric groups to ' + save_path)
group_lists=['|'.join(labeled_column_df[labeled_column_df['group']==g]['column'])
for g in set(labeled_column_df['group'])]
|
pd.DataFrame(group_lists,index=loadmat_df.columns.values,columns=['metrics'])
|
pandas.DataFrame
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from Eir.DTMC.spatialModel.HubModel import Hub
from Eir.DTMC.spatialModel.simul_details import Simul_Details
from Eir.utility import Person, dist, randEvent
class HubSEIR(Hub):
"""
Object that represents the Hub Model with compartments S, E, I, and R. In this model, E is assumed to not be
able to spread the virus.
Parameters
----------
S0: int
Initial amount of susceptibles at the start of the simulation.
E0: int
Initial amount of exposed at the start of the simulation.
I0: int
Initial amount of infected at the start of the simulation.
R0: int
Initial amount of recovered at the start of the simulation.
pss: float
The probability that the randomly generated person at the start of the simulation is a super spreader.
rho: float
Rho is the probability of someone moving from E to I compartment. Rho is in [0, 1].
gamma: float
The probability of someone going from I to R.
rstart: float
The spreading radius of a normal spreader.
days: int
The nubmer of days being simulated.
w0: float optional
The probability of a susceptible getting infected if the distance between the infectious person and susceptible is 0. Default is 1.0.
hubConstant: float optional
The scale by which the spreading radius of a super spreader increases. Default is sqrt(6).
alpha: float optional
Constant used in the infect probability generator. Default is 2.0.
Attributes
----------
S: ndarray
A numpy array that stores the number of people in the susceptible state on each given day of the simulation.
E: ndarray
A numpy array that stores the number of people in the exposed state on each given day of the simulation.
I: ndarray
A numpy array that stores the number of people in the infected state on each given day of the simulation.
R: ndarray
A numpy array that stores the number of people in the recovered state on each given day of the simulation.
popsize: int
        The total size of the population in the simulation. Given by S0 + E0 + I0 + R0.
Scollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is SUSCEPTIBLE. Has a total of popsize Person objects,
with numbers [0, popsize).
Ecollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is EXPOSED. Has a total of popsize Person objects,
with numbers [0, popsize).
Icollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is INFECTED. Has a total of popsize Person objects,
with numbers [0, popsize).
Rcollect: list
Used to keep track of the states each Person object is in. If the copy of a Person object has
isIncluded == True, then the person is RECOVERED. Has a total of popsize Person objects,
with numbers [0, popsize).
details: Simul_Details
An object that can be returned to give a more in-depth look into the simulation. With this object,
        one can see transmission chains, state changes, the movement history of each individual, the state
history of each person, and more.
"""
def __init__(self, S0: int, E0: int, I0: int, R0: int, pss: float, rho: float,
gamma: float, side: float, rstart:float, days: int, w0=1.0, hubConstant=6**0.5, alpha=2.0):
#error checking
self.intCheck([S0, E0, I0, R0, days])
self.floatCheck([pss, rho, gamma, side, rstart, w0, alpha, hubConstant])
self.negValCheck([S0, E0, I0, R0, pss, rho, gamma, side, rstart, days, w0, hubConstant, alpha])
self.probValCheck([pss, rho, gamma, w0])
super(HubSEIR, self).__init__(popsize=S0+I0+R0, pss=pss, rstart=rstart, alpha=alpha, side=side, S0=S0, I0=I0,
days=days, w0=w0,hubConstant=hubConstant)
# adjust the popsize
self.popsize += E0
# locations in the plane
self.locx, self.locy = np.random.random(self.popsize)*self.side, np.random.random(self.popsize)*self.side
# probability of going from I to R
self.gamma = gamma
# initialize the probability of leaving E
self.rho = rho
# make the initial R class variable
self.R0 = R0
# create the R collect datastructure
self.Rcollect = []
# create the E collect datastructure
self.Ecollect = []
self.E0 = E0
# create numpy arrays to store number of people in each compartment
self.E = np.zeros(days+1)
self.E[0] = E0
self.R = np.zeros(days+1)
# put the initial removed values into the array
self.R[0] = R0
# create a Simul_Details object
self.details = Simul_Details(days=days, popsize=self.popsize, static=True)
for i in range(self.popsize):
# event is whether person is a super spreader
event = randEvent(self.pss)
# susceptible version
p1 = Person(self.locx[i], self.locy[i], event)
# exposed version
p2 = Person(self.locx[i], self.locy[i], event)
# infectious version
p3 = Person(self.locx[i], self.locy[i], event)
# removed version
p4 = Person(self.locx[i], self.locy[i], event)
            # depending on the index, mark the person as S, I, E, or R and add that state to the Simul_Details object
if i < S0:
p1.isIncluded = True
self.details.addStateChange(i, "S", 0)
elif i < S0 + I0:
p3.isIncluded = True
self.details.addStateChange(i, "I", 0)
elif i < S0 + E0 + I0:
p2.isIncluded=True
self.details.addStateChange(i, "E", 0)
else:
p4.isIncluded = True
self.details.addStateChange(i, "R", 0)
# add the locations to the Simul_Details object
self.details.addLocation(0, (self.locx[i], self.locy[i]))
# append the Person objects to the collections
self.Scollect.append(p1)
self.Ecollect.append(p2)
self.Icollect.append(p3)
self.Rcollect.append(p4)
# run state changes from S to E
def _StoE(self, day: int):
"""
Deals with the transfers from S compartment to E compartment.
Parameters
----------
day: int
feed in the current day the state transfer is taking place on.
Return
------
set:
            returns the set containing the indices of those whose self.Ecollect[index].isIncluded must be set to True
"""
# set that keeps track of the indices of people that changed states
transfers = set()
for count, inf in enumerate(self.Icollect):
if not inf.isIncluded:
continue
for count2, sus in enumerate(self.Scollect):
#print("Susceptible Person ", count2)
if not sus.isIncluded:
continue
# generate the probability of infection
prob = self._infect(inf, sus)
# generate a random event based on the P(infection)
event = randEvent(prob)
# if an infection doesn't occur
if not event:
continue
# remove the person from the susceptible state
self.Scollect[count2].isIncluded = False
self.details.addTransmission(day, count, count2)
# put the person in the transfer set to be made an exposed person
transfers.add(count2)
return transfers
# run state changes from E to I
def _EtoI(self):
"""
Deals with transferring those from E compartment to I compartment.
Return
------
set:
the indices of people who will be transferred from E compartment to I compartment
"""
# set that keeps track of the indices of people that changed states
transfers = set()
for count, per in enumerate(self.Ecollect):
if not per.isIncluded:
continue
event = randEvent(self.rho)
if not event:
continue
self.Ecollect[count].isIncluded = False
transfers.add(count)
return transfers
def _ItoR(self):
# set that keeps track of the indices of people that changed states
"""
        Deals with transferring those from I compartment to R compartment.
Return
------
set:
the indices of people who will be transferred from I compartment to R compartment
"""
transfers = set()
for count, inf in enumerate(self.Icollect):
if not inf.isIncluded:
continue
event = randEvent(self.gamma)
if not event:
continue
self.Icollect[count].isIncluded = False
transfers.add(count)
return transfers
# run the simulation using
def run(self, getDetails=True):
for i in range(1, self.days + 1):
#print("Day: ", i)
# run the transfers from different compartments
transferSE = self._StoE(i)
transferEI = self._EtoI()
transferIR = self._ItoR()
# go after and change the indices in the collection data structure thing
for index in transferSE:
self.Ecollect[index].isIncluded = True
self.details.addStateChange(index, "E", i)
for index in transferEI:
self.Icollect[index].isIncluded = True
self.details.addStateChange(index, "I", i)
for index in transferIR:
self.Rcollect[index].isIncluded = True
self.details.addStateChange(index, "R", i)
# change the number of people in each state on the day i by adjusting the previous day's count
self.S[i] = self.S[i - 1] - len(transferSE)
self.E[i] = self.E[i-1] +len(transferSE) - len(transferEI)
self.I[i] = self.I[i - 1] + len(transferEI) - len(transferIR)
self.R[i] = self.R[i-1] + len(transferIR)
if getDetails:
return self.details
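    # Hedged usage sketch (added for illustration; all parameter values are arbitrary):
    #     model = HubSEIR(S0=999, E0=0, I0=1, R0=0, pss=0.2, rho=0.3, gamma=0.1,
    #                     side=25.0, rstart=3.0, days=31)
    #     details = model.run()
    #     model.plot()
    #     df = model.toDataFrame()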
def plot(self):
"""
Plots all variables on subplots
Return
-------
pyplot.Figure:
            return a fig object that will contain the graphs
"""
t = np.linspace(0, self.days, self.days + 1)
fig, (ax1, ax2, ax3, ax4) = plt.subplots(nrows=4, sharex='all')
ax1.plot(t, self.S, label="Susceptible", color='r')
ax1.set_ylabel("Number of Susceptible People")
ax1.set_title("Hub SEIR Simulation")
ax3.plot(t, self.I, label="Active Cases", color='b')
ax3.set_ylabel("Active Cases")
ax2.plot(t, self.E, label="Exposed", color='c')
ax2.set_ylabel("# of Exposed")
ax4.plot(t, self.R, label="Recovered", color='m')
ax4.set_xlabel("Days")
ax4.set_ylabel('Number of Recovered')
ax1.legend()
ax2.legend()
ax3.legend()
ax4.legend()
plt.show()
return fig
# convert the arrays to dataframe
def toDataFrame(self):
"""
Converts the arrays to a pandas DataFrame.
Return
------
pd.DataFrame:
a dataframe containing the people in S, E, I, and R compartments per day.
"""
# create the linspaced numpy array
t = np.linspace(0, self.days, self.days + 1)
# create a 2D array with the days and susceptible and infected arrays
# do it over axis one so that it creates columns days, susceptible, infected
arr = np.stack([t, self.S, self.E, self.I, self.R], axis=1)
df =
|
pd.DataFrame(arr, columns=["Days", "Susceptible", "Exposed", "Infected", "Recovered"])
|
pandas.DataFrame
|
from mesa import Agent, Model
from mesa.time import RandomActivation
from random import random, randint,choice
class Firm(Agent):
def __init__(self,unique_id, alpha,model):
super().__init__(unique_id, model)
self.agents = []
self.utility=0
self.alpha = random()
        self.beta = 1 - self.alpha  # keep alpha + beta = 1 with the randomly drawn alpha above
self.dead=False
def step(self):
effort = sum([a.effort for a in self.agents])
self.utility = self.alpha * effort + self.beta * effort**2
if len(self.agents) == 0:
self.dead = True
else:
self.income = self.utility/float(len(self.agents))
class FirmAgent(Agent):
"""An agent with fixed initial wealth."""
def __init__(self, unique_id, exp, model):
super().__init__(unique_id, model)
self.utility = 0
self.effort = 1
self.exp=exp
self.job = None
def step(self):
# The agent's step will go here.
if not self.job:
self.job = Firm(0-self.unique_id,1,self.model)
self.job.agents.append(self)
self.model.schedule.add(self.job)
self.effort =self.exp
self.utility = (self.job.utility**self.exp) * ((1-self.effort)**(1-self.exp))
### am I happy?
doo = choice(['stay','leave','startup'])
if doo =='leave':
### join a random firm
                firms = [f for f in self.model.schedule.agents if isinstance(f, Firm) and not f.dead]
self.job.agents.remove(self) ### quit my job
self.job = choice(firms) ### find a new job
                self.job.agents.append(self)  ### add myself to payroll
elif doo == 'startup':
self.job = Firm(1000-self.unique_id,1,self.model)
self.job.agents.append(self)
self.model.schedule.add(self.job)
class FirmModel(Model):
"""A model with some number of agents."""
def __init__(self, N):
self.alpha=0.5
self.beta = 1-self.alpha
self.num_agents = N
# Create agents
self.schedule = RandomActivation(self)
# Create agents
for i in range(self.num_agents):
exp = random()
a = FirmAgent(i, exp, self)
self.schedule.add(a)
def step(self):
'''Advance the model by one step.'''
self.schedule.step()
model = FirmModel(100)
for i in range(1000):
if i % 100 ==0: print(i)
model.step()
import pandas as pd
agent_wealth =
|
pd.DataFrame([{'id':a.unique_id, 'w':a.utility} for a in model.schedule.agents])
|
pandas.DataFrame
|
'''
This module is used to download the data from several feeds in the public KoDa API.
Supported companies:
- dintur - Västernorrlands län: Only GTFSStatic
- dt - Dalatrafik
- klt - Kalmar länstrafik
- krono - Kronobergs Länstrafik: Only GTFSStatic
- otraf - Östgötatrafiken
- sj - SJ + Snälltåget + Tågab: Only GTFSStatic
- skane - Skånetrafiken
- sl - Stockholm län: All feeds without VehiclePositions
- ul - Uppsala län
- varm - Värmlandstrafik+Karlstadbuss
- vt - Västtrafik: Only GTFSStatic
- xt - X-trafik
Supported feeds:
- VehiclePositions
- TripUpdates
- ServiceAlerts
Supported date format: YYYY-MM-DD and YYYY_MM_DD
'''
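# Hedged usage sketch (added for illustration; company, feed, date and hour are
# arbitrary examples of the supported values listed above):
#     get_data(date='2021-03-15', hour=7, feed='TripUpdates', company='otraf')
# downloads the archive for that hour, merges the protobuf files and caches the
# result as a .feather file under config.CACHE_DIR (get_data is defined below).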
import gzip
import json
import tarfile
import warnings
import datetime
import contextlib
import requests
import numpy as np
import ey
from google.protobuf import json_format
import pandas as pd
from joblib import Parallel, delayed
import tqdm
from .. import config
from . import gtfs_realtime_pb2
def _is_json(series: pd.Series) -> bool:
try:
return isinstance(series[0][0], dict)
except TypeError:
return False
def unpack_jsons(df: pd.DataFrame) -> pd.DataFrame:
keys_to_sanitise = []
for k in list(df.keys()):
# If the content is a json, unpack and remove
if df[k].dtype == np.dtype('O') and _is_json(df[k]):
keys_to_sanitise.append(k)
if keys_to_sanitise:
indexes = []
unpacked = {k: [] for k in keys_to_sanitise}
for ix in df.index:
for k in keys_to_sanitise:
this_unpack = pd.json_normalize(df[k][ix])
unpacked[k].append(this_unpack)
indexes.extend(ix for _ in range(len(this_unpack)))
df.drop(keys_to_sanitise, axis='columns', inplace=True)
unpacked_series = []
for k in keys_to_sanitise:
this_df = pd.concat(unpacked[k], axis='index').reset_index(drop=True)
this_df.rename(columns={curr_name: '_'.join((k, curr_name)) for curr_name in this_df.keys()},
inplace=True)
unpacked_series.append(this_df)
repeated = df.iloc[indexes].reset_index(drop=True)
df = pd.concat([repeated] + unpacked_series, axis='columns')
for k in df.keys():
if df[k].dtype == np.dtype('O') and _is_json(df[k]):
warnings.warn(RuntimeWarning(f'There are extra json in column {k}'))
return df
def _get_data_path(company: str, feed: str, date: str, hour: (int, str)) -> str:
return f'{config.CACHE_DIR}/{company}_{feed}_{date.replace("-", "_")}_{hour}.feather'
def _parse_gtfs(gtfsrt: bytes) -> pd.DataFrame:
# Read in to a FeedMessage class, of the GTFS-RT format
msg = gtfs_realtime_pb2.FeedMessage()
pbfile = gzip.decompress(gtfsrt)
msg.ParseFromString(pbfile)
msg_json = json_format.MessageToJson(msg)
msg_dict = json.loads(msg_json)
df = pd.json_normalize(msg_dict.get('entity', dict()), sep='_')
df = unpack_jsons(df)
df.reset_index(drop=True, inplace=True)
return df
def normalize_keys(df: pd.DataFrame) -> None:
"""Reformat the name of the keys to a consistent format, according to GTFS"""
renames = {'tripUpdate_trip_tripId': 'trip_id', 'tripUpdate_trip_startDate': 'start_date',
'tripUpdate_trip_directionId': 'direction_id', 'tripUpdate_trip_routeId': 'route_id',
'tripUpdate_trip_scheduleRelationship': 'schedule_relationship',
'tripUpdate_trip_startTime': 'start_time',
'tripUpdate_timestamp': 'timestamp', 'tripUpdate_vehicle_id': 'vehicle_id',
'stopSequence': 'stop_sequence', 'stopId': 'stop_id',
'scheduleRelationship': 'schedule_relationship2',
'vehicle_trip_tripId': 'trip_id', 'vehicle_trip_scheduleRelationship': 'schedule_relationship',
'vehicle_timestamp': 'timestamp', 'vehicle_vehicle_id': 'vehicle_id',
'vehicle_trip_startTime': 'start_time', 'vehicle_trip_startDate': 'start_date',
'vehicle_trip_routeId': 'route_id', 'vehicle_trip_directionId': 'direction_id',
'tripUpdate_stopTimeUpdate_stopSequence': 'stop_sequence',
'tripUpdate_stopTimeUpdate_stopId': 'stop_id',
'tripUpdate_stopTimeUpdate_arrival_delay': 'arrival_delay',
'tripUpdate_stopTimeUpdate_arrival_time': 'arrival_time',
'tripUpdate_stopTimeUpdate_departure_delay': 'departure_delay',
'tripUpdate_stopTimeUpdate_departure_time': 'departure_time',
'tripUpdate_stopTimeUpdate_arrival_uncertainty': 'arrival_uncertainty',
'tripUpdate_stopTimeUpdate_departure_uncertainty': 'departure_uncertainty',
'alert_activePeriod_start': 'period_start', 'alert_activePeriod_end': 'period_end',
'alert_informedEntity_routeId': 'route_id', 'alert_informedEntity_stopId': 'stop_id',
'alert_informedEntity_trip_tripId': 'trip_id',
'alert_informedEntity_trip_scheduleRelationship': 'schedule_relationship',
'alert_headerText_translation_text': 'header_text',
'alert_descriptionText_translation_text': 'description_text',
}
df.rename(columns=renames, inplace=True)
def sanitise_array(df: pd.DataFrame) -> None:
normalize_keys(df)
# Remove columns and rows with all NaNs
df.dropna(axis=0, how='all', inplace=True)
df.dropna(axis=1, how='all', inplace=True)
# Remove old indexes
df.drop(columns='level_0', inplace=True, errors='ignore')
    # Remove duplicated entries, ignoring timestamps and index
keys = list(df.keys())
    # suppress each removal separately so one missing column does not skip the rest
    for key in ('timestamp', 'index',
                # these may be updated in the database, so ignore as well
                'arrival_delay', 'arrival_time', 'departure_delay', 'departure_time',
                'arrival_uncertainty', 'departure_uncertainty'):
        with contextlib.suppress(ValueError):
            keys.remove(key)
df.drop_duplicates(subset=keys, inplace=True, keep='last')
def download_file(task):
url = task.inputs['url']
output = task.outputs['file']
with open(output, 'wb') as f_out:
with requests.get(url, stream=True) as req:
for chunk in req.iter_content(chunk_size=128):
f_out.write(chunk)
def get_data(date: str, hour: (int, str), feed: str, company: str, output_file: (str, None) = None) -> None:
if output_file is None:
output_file = _get_data_path(company, feed, date, hour)
print('Getting', output_file)
    # accept both _ and - as date separators
date = date.replace('_', '-')
data_date = datetime.date.fromisoformat(date)
# ------------------------------------------------------------------------
# Create data dir
# ------------------------------------------------------------------------
ey.shell('mkdir [o:datafolder:data]')
# ------------------------------------------------------------------------
# Download data
# ------------------------------------------------------------------------
if config.API_VERSION == 1:
koda_url = f"https://koda.linkoping-ri.se/KoDa/api/v0.1?company={company}&feed={feed}&date={date}"
else:
koda_url = f'https://koda.linkoping-ri.se/KoDa/api/v2/gtfs-rt/{company}/{feed}?date={date}&hour={hour}&key={config.API_KEY}'
out_path = f'{config.CACHE_DIR}/' + f'{company}-{feed}-{date}.bz2'.lower()
download = ey.func(download_file, inputs={'url': koda_url}, outputs={'file': out_path})
# Check the file:
with open(download.outputs['file'], 'rb') as f:
start = f.read(10)
if b'error' in start:
msg = start + f.read(70)
msg = msg.strip(b'{}" ')
raise ValueError('API returned the following error message:', msg)
# Select the list of files to extract:
tar_file_name = download.outputs['file']
# ------------------------------------------------------------------------
# GTFS to file
# ------------------------------------------------------------------------
def merge_files(task):
tar = tarfile.open(tar_file_name)
_prefix = f'mnt/kodashare/KoDa_NiFi_data/{company}/{feed}/{data_date.year}/' \
f'{str(data_date.month).zfill(2)}/{str(data_date.day).zfill(2)}/{str(hour).zfill(2)}/'
gzfiles = [name for name in tar.getnames() if name.startswith(_prefix) and 'Duplicate' not in name]
if len(gzfiles) == 0:
# File does not contain data. Save an empty file.
pd.DataFrame().reset_index().to_feather(task.outputs['outfile'])
return
# Extract each file and pass it to the parsing function
parsed_files = Parallel(n_jobs=config.N_CPU, verbose=0)(
delayed(_parse_gtfs)(tar.extractfile(gtfsfile).read()) for gtfsfile in gzfiles)
tar.close()
merged_df = pd.concat(parsed_files)
# Force casts:
castings = dict()
for k in merged_df.keys():
if 'timestamp' in k: # Timestamps should be ints, not strings
castings[k] = np.int64
elif k == 'id':
castings[k] = np.int64
merged_df.dropna(how='all', inplace=True) # Remove rows of only NaNs
merged_df = merged_df.astype(castings)
# Remove dots from column names
rename = dict((k, k.replace('.', '_')) for k in merged_df.keys() if '.' in k)
merged_df.rename(columns=rename, inplace=True)
# Clean up duplicates, fix keys, etc
sanitise_array(merged_df)
if merged_df.empty: # Feather does not support a DF without columns, so add a dummy one
merged_df['_'] = np.zeros(len(merged_df), dtype=np.bool_)
# Save to file
merged_df.reset_index(inplace=True)
merged_df.to_feather(task.outputs['outfile'], compression='zstd', compression_level=9)
ey.func(merge_files, outputs={'outfile': output_file})
def get_range(start_date, end_date, start_hour, end_hour, feed, company) -> None:
warnings.warn(DeprecationWarning('Use get_data_range instead'))
date_range =
|
pd.date_range(start=start_date, end=end_date)
|
pandas.date_range
|
import pandas as pd
import io
import requests
from bs4 import BeautifulSoup
import warnings
from astropy import time
class AIAEffectiveArea:
def __init__(self, url='https://hesperia.gsfc.nasa.gov/ssw/sdo/aia/response/', filename=None):
'''
:param url: the online location of the response table
:param filename: string optional location of a local response table to read, overrides url
Usage
aia_effective_area = AIAEffectiveArea()
effective_area_ratio = aia_effective_area.effective_area_ratio(171, '2010-10-24 15:00:00')
'''
# Local input possible else fetch response table file from GSFC mirror of SSW
if filename is not None:
response_table = filename
else:
soup = BeautifulSoup(requests.get(url).text, 'html.parser')
all_versions = [node.get('href') for node in soup.find_all('a') if
node.get('href').endswith('_response_table.txt')]
latest_table_url = url + sorted([table_files for table_files in all_versions if
table_files.startswith('aia_V')])[-1]
tbl = requests.get(latest_table_url).content
response_table = io.StringIO(tbl.decode('utf-8'))
# Read in response table
        self.response_table = pd.read_csv(response_table, sep=r'\s+', parse_dates=[1], infer_datetime_format=True, index_col=1)
def effective_area(self, wavelength, time):
'''
:param wavelength: float wavelength of the aia target image
:param time: string in a format to be read by pandas.to_datetime; the time of the aia target image
:return: the effective area of the AIA detector interpolated to the target_time
'''
eff_area_series = self._parse_series(wavelength.value)
if (pd.to_datetime(time) - eff_area_series.index[0]) < pd.Timedelta(0):
warnings.warn('The target time requested is before the beginning of AIA', UserWarning)
return time_interpolate(eff_area_series, time)
def effective_area_ratio(self, wavelength, time):
'''
:param wavelength: float wavelength of the aia target image
:param time: string in a format to be read by pandas.to_datetime; the time of the aia target image
:return: the ratio of the current effective area to the pre-launch effective area
'''
eff_area_series = self._parse_series(wavelength.value)
launch_value = eff_area_series[eff_area_series.index.min()]
if (
|
pd.to_datetime(time)
|
pandas.to_datetime
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 17:28:04 2020
@author: shlomi
"""
from PW_paths import work_yuval
from matplotlib import rcParams
import seaborn as sns
from pathlib import Path
import matplotlib.pyplot as plt
from PW_paths import savefig_path
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from PW_stations import produce_geo_gnss_solved_stations
tela_results_path = work_yuval / 'GNSS_stations/tela/rinex/30hr/results'
tela_solutions = work_yuval / 'GNSS_stations/tela/gipsyx_solutions'
sound_path = work_yuval / 'sounding'
phys_soundings = sound_path / 'bet_dagan_phys_sounding_2007-2019.nc'
ims_path = work_yuval / 'IMS_T'
gis_path = work_yuval / 'gis'
dem_path = work_yuval / 'AW3D30'
era5_path = work_yuval / 'ERA5'
hydro_path = work_yuval / 'hydro'
ceil_path = work_yuval / 'ceilometers'
aero_path = work_yuval / 'AERONET'
climate_path = work_yuval / 'climate'
df_gnss = produce_geo_gnss_solved_stations(
plot=False, add_distance_to_coast=True)
st_order_climate = [x for x in df_gnss.dropna().sort_values(
['groups_climate', 'lat', 'lon'], ascending=[1, 0, 0]).index]
rc = {
'font.family': 'serif',
'xtick.labelsize': 'large',
'ytick.labelsize': 'large'}
for key, val in rc.items():
rcParams[key] = val
# sns.set(rc=rc, style='white')
seasonal_colors = {'DJF': 'tab:blue',
'SON': 'tab:red',
'JJA': 'tab:green',
'MAM': 'tab:orange',
'Annual': 'tab:purple'}
def get_twin(ax, axis):
assert axis in ("x", "y")
siblings = getattr(ax, f"get_shared_{axis}_axes")().get_siblings(ax)
for sibling in siblings:
if sibling.bbox.bounds == ax.bbox.bounds and sibling is not ax:
return sibling
return None
def sci_notation(num, decimal_digits=1, precision=None, exponent=None):
"""
Returns a string representation of the scientific
notation of the given number formatted for use with
LaTeX or Mathtext, with specified number of significant
decimal digits and precision (number of decimal digits
to show). The exponent to be used can also be specified
explicitly.
"""
from math import floor, log10
if exponent is None:
exponent = int(floor(log10(abs(num))))
coeff = round(num / float(10**exponent), decimal_digits)
if precision is None:
precision = decimal_digits
return r"${0:.{2}f}\cdot10^{{{1:d}}}$".format(coeff, exponent, precision)
def utm_from_lon(lon):
"""
utm_from_lon - UTM zone for a longitude
    Not right for some polar regions (Norway, Svalbard, Antarctica)
:param float lon: longitude
:return: UTM zone number
:rtype: int
"""
from math import floor
return floor((lon + 180) / 6) + 1
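# Worked example (added for clarity): utm_from_lon(35.0) returns
# floor((35 + 180) / 6) + 1 = 36, i.e. UTM zone 36, which covers Israel.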
def scale_bar(ax, proj, length, location=(0.5, 0.05), linewidth=3,
units='km', m_per_unit=1000, bounds=None):
"""
http://stackoverflow.com/a/35705477/1072212
ax is the axes to draw the scalebar on.
proj is the projection the axes are in
location is center of the scalebar in axis coordinates ie. 0.5 is the middle of the plot
length is the length of the scalebar in km.
linewidth is the thickness of the scalebar.
units is the name of the unit
m_per_unit is the number of meters in a unit
"""
import cartopy.crs as ccrs
from matplotlib import patheffects
# find lat/lon center to find best UTM zone
try:
x0, x1, y0, y1 = ax.get_extent(proj.as_geodetic())
except AttributeError:
if bounds is not None:
x0, x1, y0, y1 = bounds
# Projection in metres
utm = ccrs.UTM(utm_from_lon((x0+x1)/2))
# Get the extent of the plotted area in coordinates in metres
x0, x1, y0, y1 = ax.get_extent(utm)
# Turn the specified scalebar location into coordinates in metres
sbcx, sbcy = x0 + (x1 - x0) * location[0], y0 + (y1 - y0) * location[1]
# Generate the x coordinate for the ends of the scalebar
bar_xs = [sbcx - length * m_per_unit/2, sbcx + length * m_per_unit/2]
# buffer for scalebar
buffer = [patheffects.withStroke(linewidth=5, foreground="w")]
# Plot the scalebar with buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, path_effects=buffer)
# buffer for text
buffer = [patheffects.withStroke(linewidth=3, foreground="w")]
# Plot the scalebar label
t0 = ax.text(sbcx, sbcy, str(length) + ' ' + units, transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
left = x0+(x1-x0)*0.05
# Plot the N arrow
t1 = ax.text(left, sbcy, u'\u25B2\nN', transform=utm,
horizontalalignment='center', verticalalignment='bottom',
path_effects=buffer, zorder=2)
# Plot the scalebar without buffer, in case covered by text buffer
ax.plot(bar_xs, [sbcy, sbcy], transform=utm, color='k',
linewidth=linewidth, zorder=3)
return
@ticker.FuncFormatter
def lon_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$W'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$E'.format(abs(x))
elif x == 0:
return r'0$\degree$'
@ticker.FuncFormatter
def lat_formatter(x, pos):
if x < 0:
return r'{0:.1f}$\degree$S'.format(abs(x))
elif x > 0:
return r'{0:.1f}$\degree$N'.format(abs(x))
elif x == 0:
return r'0$\degree$'
def align_yaxis_np(ax1, ax2):
"""Align zeros of the two axes, zooming them out by same ratio"""
import numpy as np
axes = np.array([ax1, ax2])
extrema = np.array([ax.get_ylim() for ax in axes])
tops = extrema[:,1] / (extrema[:,1] - extrema[:,0])
# Ensure that plots (intervals) are ordered bottom to top:
if tops[0] > tops[1]:
axes, extrema, tops = [a[::-1] for a in (axes, extrema, tops)]
# How much would the plot overflow if we kept current zoom levels?
tot_span = tops[1] + 1 - tops[0]
extrema[0,1] = extrema[0,0] + tot_span * (extrema[0,1] - extrema[0,0])
extrema[1,0] = extrema[1,1] + tot_span * (extrema[1,0] - extrema[1,1])
[axes[i].set_ylim(*extrema[i]) for i in range(2)]
# def align_yaxis(ax1, v1, ax2, v2):
# """adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
# _, y1 = ax1.transData.transform((0, v1))
# _, y2 = ax2.transData.transform((0, v2))
# inv = ax2.transData.inverted()
# _, dy = inv.transform((0, 0)) - inv.transform((0, y1-y2))
# miny, maxy = ax2.get_ylim()
# ax2.set_ylim(miny+dy, maxy+dy)
def get_legend_labels_handles_title_seaborn_histplot(ax):
old_legend = ax.legend_
handles = old_legend.legendHandles
labels = [t.get_text() for t in old_legend.get_texts()]
title = old_legend.get_title().get_text()
return handles, labels, title
def alignYaxes(axes, align_values=None):
'''Align the ticks of multiple y axes
Args:
axes (list): list of axes objects whose yaxis ticks are to be aligned.
Keyword Args:
align_values (None or list/tuple): if not None, should be a list/tuple
of floats with same length as <axes>. Values in <align_values>
define where the corresponding axes should be aligned up. E.g.
[0, 100, -22.5] means the 0 in axes[0], 100 in axes[1] and -22.5
in axes[2] would be aligned up. If None, align (approximately)
the lowest ticks in all axes.
Returns:
new_ticks (list): a list of new ticks for each axis in <axes>.
A new sets of ticks are computed for each axis in <axes> but with equal
length.
'''
from matplotlib.pyplot import MaxNLocator
import numpy as np
nax = len(axes)
ticks = [aii.get_yticks() for aii in axes]
if align_values is None:
aligns = [ticks[ii][0] for ii in range(nax)]
else:
if len(align_values) != nax:
raise Exception(
"Length of <axes> doesn't equal that of <align_values>.")
aligns = align_values
bounds = [aii.get_ylim() for aii in axes]
# align at some points
ticks_align = [ticks[ii]-aligns[ii] for ii in range(nax)]
# scale the range to 1-100
ranges = [tii[-1]-tii[0] for tii in ticks]
lgs = [-np.log10(rii)+2. for rii in ranges]
igs = [np.floor(ii) for ii in lgs]
log_ticks = [ticks_align[ii]*(10.**igs[ii]) for ii in range(nax)]
# put all axes ticks into a single array, then compute new ticks for all
comb_ticks = np.concatenate(log_ticks)
comb_ticks.sort()
locator = MaxNLocator(nbins='auto', steps=[1, 2, 2.5, 3, 4, 5, 8, 10])
new_ticks = locator.tick_values(comb_ticks[0], comb_ticks[-1])
new_ticks = [new_ticks/10.**igs[ii] for ii in range(nax)]
new_ticks = [new_ticks[ii]+aligns[ii] for ii in range(nax)]
# find the lower bound
idx_l = 0
for i in range(len(new_ticks[0])):
if any([new_ticks[jj][i] > bounds[jj][0] for jj in range(nax)]):
idx_l = i-1
break
# find the upper bound
idx_r = 0
for i in range(len(new_ticks[0])):
if all([new_ticks[jj][i] > bounds[jj][1] for jj in range(nax)]):
idx_r = i
break
# trim tick lists by bounds
new_ticks = [tii[idx_l:idx_r+1] for tii in new_ticks]
# set ticks for each axis
for axii, tii in zip(axes, new_ticks):
axii.set_yticks(tii)
return new_ticks
def align_yaxis(ax1, v1, ax2, v2):
"""adjust ax2 ylimit so that v2 in ax2 is aligned to v1 in ax1"""
_, y1 = ax1.transData.transform((0, v1))
_, y2 = ax2.transData.transform((0, v2))
adjust_yaxis(ax2, (y1 - y2) / 2, v2)
adjust_yaxis(ax1, (y2 - y1) / 2, v1)
def adjust_yaxis(ax, ydif, v):
"""shift axis ax by ydiff, maintaining point v at the same location"""
inv = ax.transData.inverted()
_, dy = inv.transform((0, 0)) - inv.transform((0, ydif))
miny, maxy = ax.get_ylim()
miny, maxy = miny - v, maxy - v
if -miny > maxy or (-miny == maxy and dy > 0):
nminy = miny
nmaxy = miny * (maxy + dy) / (miny + dy)
else:
nmaxy = maxy
nminy = maxy * (miny + dy) / (maxy + dy)
ax.set_ylim(nminy + v, nmaxy + v)
def qualitative_cmap(n=2):
import matplotlib.colors as mcolors
if n == 2:
colorsList = [mcolors.BASE_COLORS['r'], mcolors.BASE_COLORS['g']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 4:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m']]
cmap = mcolors.ListedColormap(colorsList)
elif n == 5:
colorsList = [
mcolors.BASE_COLORS['r'],
mcolors.BASE_COLORS['g'],
mcolors.BASE_COLORS['c'],
mcolors.BASE_COLORS['m'],
mcolors.BASE_COLORS['b']]
cmap = mcolors.ListedColormap(colorsList)
return cmap
def caption(text, color='blue', **kwargs):
from termcolor import colored
print(colored('Caption:', color, attrs=['bold'], **kwargs))
print(colored(text, color, attrs=['bold'], **kwargs))
return
def adjust_lightness(color, amount=0.5):
import matplotlib.colors as mc
import colorsys
try:
c = mc.cnames[color]
    except KeyError:
c = color
c = colorsys.rgb_to_hls(*mc.to_rgb(c))
return colorsys.hls_to_rgb(c[0], max(0, min(1, amount * c[1])), c[2])
def produce_colors_for_pwv_station(scope='annual', zebra=False,
as_dict=False, as_cat_dict=False):
import pandas as pd
stns = group_sites_to_xarray(scope=scope)
cdict = {'coastal': 'tab:blue',
'highland': 'tab:green',
'eastern': 'tab:orange'}
if as_cat_dict:
return cdict
# for grp, color in cdict.copy().items():
# cdict[grp] = to_rgba(get_named_colors_mapping()[
# color], alpha=1)
ds = stns.to_dataset('group')
colors = []
for group in ds:
sts = ds[group].dropna('GNSS').values
for i, st in enumerate(sts):
color = cdict.get(group)
if zebra:
if i % 2 != 0:
# rgba = np.array(rgba)
# rgba[-1] = 0.5
color = adjust_lightness(color, 0.5)
colors.append(color)
# colors = [item for sublist in colors for item in sublist]
stns = stns.T.values.ravel()
stns = stns[~pd.isnull(stns)]
if as_dict:
colors = dict(zip(stns, colors))
return colors
def fix_time_axis_ticks(ax, limits=None, margin=15):
import pandas as pd
import matplotlib.dates as mdates
if limits is not None:
ax.set_xlim(*pd.to_datetime(limits))
years_fmt = mdates.DateFormatter('%Y')
ax.xaxis.set_major_locator(mdates.YearLocator())
ax.xaxis.set_major_formatter(years_fmt)
ax.xaxis.set_minor_locator(mdates.MonthLocator())
# locator = mdates.AutoDateLocator(minticks=3, maxticks=7)
# formatter = mdates.ConciseDateFormatter(locator)
# ax.xaxis.set_major_locator(locator)
# ax.xaxis.set_major_formatter(formatter)
return ax
def plot_qflux_climatotlogy_israel(path=era5_path, save=True, reduce='mean',
plot_type='uv'):
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
ds = xr.load_dataset(path / 'ERA5_UVQ_mm_israel_1979-2020.nc')
ds = ds.sel(expver=1).reset_coords(drop=True)
if plot_type == 'uv':
f1 = ds['q'] * ds['u']
f2 = ds['q'] * ds['v']
elif plot_type == 'md':
qu = ds['q'] * ds['u']
qv = ds['q'] * ds['v']
f1 = np.sqrt(qu**2 + qv**2)
f2 = np.rad2deg(np.arctan2(qv, qu))
if reduce == 'mean':
f1_clim = f1.groupby('time.month').mean().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').mean().mean(
'longitude').mean('latitude')
center = 0
cmap = 'bwr'
elif reduce == 'std':
f1_clim = f1.groupby('time.month').std().mean(
'longitude').mean('latitude')
f2_clim = f2.groupby('time.month').std().mean(
'longitude').mean('latitude')
center = None
cmap = 'viridis'
ds_clim = xr.concat([f1_clim, f2_clim], 'direction')
ds_clim['direction'] = ['zonal', 'meridional']
if plot_type == 'md':
fg, axes = plt.subplots(1, 2, figsize=(14, 7))
f1_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[0])
f2_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(levels=41,
yincrease=False,
cmap=cmap,
center=center, ax=axes[1])
else:
fg = ds_clim.sel(
level=slice(
300,
1000)).T.plot.contourf(
levels=41,
yincrease=False,
cmap=cmap,
center=center,
col='direction',
figsize=(
15,
6))
fg.fig.suptitle('Moisture flux climatology over Israel')
# fig, axes = plt.subplots(1, 2, figsize=(15, 5))
# qu_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[0], cmap='bwr', center=0)
# qv_clim.sel(level=slice(300,1000)).T.plot.contourf(levels=41, yincrease=False, ax=axes[1], cmap='bwr', center=0)
fg.fig.subplots_adjust(top=0.923,
bottom=0.102,
left=0.058,
right=0.818,
hspace=0.2,
wspace=0.045)
if save:
filename = 'moisture_clim_from_ERA5_over_israel.png'
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='landscape')
return fg
def plot_mean_std_count(da_ts, time_reduce='hour', reduce='mean',
count_factor=1):
import xarray as xr
import seaborn as sns
"""plot mean, std and count of Xarray dataarray time-series"""
cmap = sns.color_palette("colorblind", 2)
time_dim = list(set(da_ts.dims))[0]
grp = '{}.{}'.format(time_dim, time_reduce)
if reduce == 'mean':
mean = da_ts.groupby(grp).mean()
elif reduce == 'median':
mean = da_ts.groupby(grp).median()
std = da_ts.groupby(grp).std()
mean_plus_std = mean + std
mean_minus_std = mean - std
count = da_ts.groupby(grp).count()
if isinstance(da_ts, xr.Dataset):
dvars = [x for x in da_ts.data_vars.keys()]
assert len(dvars) == 2
secondary_y = dvars[1]
else:
secondary_y = None
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 15))
mean_df = mean.to_dataframe()
if secondary_y is not None:
axes[0] = mean_df[dvars[0]].plot(
ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
ax2mean = mean_df[secondary_y].plot(
ax=axes[0],
linewidth=2.0,
marker='s',
color=cmap[1],
secondary_y=True)
h1, l1 = axes[0].get_legend_handles_labels()
h2, l2 = axes[0].right_ax.get_legend_handles_labels()
handles = h1 + h2
labels = l1 + l2
axes[0].legend(handles, labels)
axes[0].fill_between(mean_df.index.values,
mean_minus_std[dvars[0]].values,
mean_plus_std[dvars[0]].values,
color=cmap[0],
alpha=0.5)
ax2mean.fill_between(
mean_df.index.values,
mean_minus_std[secondary_y].values,
mean_plus_std[secondary_y].values,
color=cmap[1],
alpha=0.5)
ax2mean.tick_params(axis='y', colors=cmap[1])
else:
mean_df.plot(ax=axes[0], linewidth=2.0, marker='o', color=cmap[0])
axes[0].fill_between(
mean_df.index.values,
mean_minus_std.values,
mean_plus_std.values,
color=cmap[0],
alpha=0.5)
axes[0].grid()
count_df = count.to_dataframe() / count_factor
count_df.plot.bar(ax=axes[1], rot=0)
axes[0].xaxis.set_tick_params(labelbottom=True)
axes[0].tick_params(axis='y', colors=cmap[0])
fig.tight_layout()
if secondary_y is not None:
return axes, ax2mean
else:
return axes
def plot_seasonal_histogram(da, dim='sound_time', xlim=None, xlabel=None,
suptitle=''):
fig_hist, axs = plt.subplots(2, 2, sharex=False, sharey=True,
figsize=(10, 8))
seasons = ['DJF', 'MAM', 'JJA', 'SON']
cmap = sns.color_palette("colorblind", 4)
for i, ax in enumerate(axs.flatten()):
da_season = da.sel(
{dim: da['{}.season'.format(dim)] == seasons[i]}).dropna(dim)
ax = sns.distplot(da_season, ax=ax, norm_hist=False,
color=cmap[i], hist_kws={'edgecolor': 'k'},
axlabel=xlabel,
label=seasons[i])
ax.set_xlim(xlim)
ax.legend()
# axes.set_xlabel('MLH [m]')
ax.set_ylabel('Frequency')
fig_hist.suptitle(suptitle)
fig_hist.tight_layout()
return axs
def plot_two_histograms_comparison(x, y, bins=None, labels=['x', 'y'],
ax=None, colors=['b', 'r']):
import numpy as np
import matplotlib.pyplot as plt
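    # weight each sample by 1/N so both histograms show relative frequencies
    # and are comparable even when x and y have different lengths: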
x_w = np.empty(x.shape)
x_w.fill(1/x.shape[0])
y_w = np.empty(y.shape)
y_w.fill(1/y.shape[0])
if ax is None:
fig, ax = plt.subplots()
ax.hist([x, y], bins=bins, weights=[x_w, y_w], color=colors,
label=labels)
ax.legend()
return ax
def plot_diurnal_wind_hodograph(path=ims_path, station='TEL-AVIV-COAST',
season=None, cmax=None, ax=None):
import xarray as xr
from metpy.plots import Hodograph
# import matplotlib
import numpy as np
colorbar = False
# from_list = matplotlib.colors.LinearSegmentedColormap.from_list
cmap = plt.cm.get_cmap('hsv', 24)
# cmap = from_list(None, plt.cm.jet(range(0,24)), 24)
U = xr.open_dataset(path / 'IMS_U_israeli_10mins.nc')
V = xr.open_dataset(path / 'IMS_V_israeli_10mins.nc')
u_sta = U[station]
v_sta = V[station]
u_sta.load()
v_sta.load()
if season is not None:
print('{} season selected'.format(season))
u_sta = u_sta.sel(time=u_sta['time.season'] == season)
v_sta = v_sta.sel(time=v_sta['time.season'] == season)
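    # hourly means of the wind components give the mean diurnal cycle (24 points)
    # that the hodograph traces as a loop: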
u = u_sta.groupby('time.hour').mean()
v = v_sta.groupby('time.hour').mean()
if ax is None:
colorbar = True
fig, ax = plt.subplots()
    if cmax is None:
        max_uv = max(max(u.values), max(v.values)) + 1
    else:
        max_uv = cmax
h = Hodograph(component_range=max_uv, ax=ax)
h.add_grid(increment=0.5)
# hours = np.arange(0, 25)
lc = h.plot_colormapped(u, v, u.hour, cmap=cmap,
linestyle='-', linewidth=2)
#ticks = np.arange(np.min(hours), np.max(hours))
# cb = fig.colorbar(lc, ticks=range(0,24), label='Time of Day [UTC]')
if colorbar:
cb = ax.figure.colorbar(lc, ticks=range(
0, 24), label='Time of Day [UTC]')
# cb.ax.tick_params(length=0)
if season is None:
ax.figure.suptitle('{} diurnal wind Hodograph'.format(station))
else:
ax.figure.suptitle(
'{} diurnal wind Hodograph {}'.format(station, season))
ax.set_xlabel('North')
ax.set_ylabel('East')
ax.set_title('South')
ax2 = ax.twinx()
ax2.tick_params(axis='y', right=False, labelright=False)
ax2.set_ylabel('West')
# axcb = fig.colorbar(lc)
return ax
def plot_MLR_GNSS_PW_harmonics_facetgrid(path=work_yuval, season='JJA',
n_max=2, ylim=None, scope='diurnal',
save=True, era5=False, leg_size=15):
"""
Parameters
----------
path : TYPE, optional
DESCRIPTION. The default is work_yuval.
season : TYPE, optional
DESCRIPTION. The default is 'JJA'.
n_max : TYPE, optional
DESCRIPTION. The default is 2.
ylim : TYPE, optional
the ylimits of each panel use [-6,8] for annual. The default is None.
scope : TYPE, optional
DESCRIPTION. The default is 'diurnal'.
save : TYPE, optional
DESCRIPTION. The default is True.
era5 : TYPE, optional
DESCRIPTION. The default is False.
leg_size : TYPE, optional
DESCRIPTION. The default is 15.
Returns
-------
None.
"""
import xarray as xr
from aux_gps import run_MLR_harmonics
from matplotlib.ticker import AutoMinorLocator
from PW_stations import produce_geo_gnss_solved_stations
import numpy as np
sns.set_style('whitegrid')
sns.set_style('ticks')
geo = produce_geo_gnss_solved_stations(add_distance_to_coast=True, plot=False)
if scope == 'diurnal':
cunits = 'cpd'
ticks = np.arange(0, 23, 3)
xlabel = 'Hour of day [UTC]'
elif scope == 'annual':
cunits = 'cpy'
ticks = np.arange(1, 13, 1)
xlabel = 'month'
print('producing {} harmonics plot.'.format(scope))
if era5:
harmonics = xr.load_dataset(path / 'GNSS_PW_era5_harmonics_{}.nc'.format(scope))
else:
harmonics = xr.load_dataset(path / 'GNSS_PW_harmonics_{}.nc'.format(scope))
# sites = sorted(list(set([x.split('_')[0] for x in harmonics])))
# da = xr.DataArray([x for x in range(len(sites))], dims='GNSS')
# da['GNSS'] = sites
sites = group_sites_to_xarray(upper=False, scope=scope)
sites_flat = [x for x in sites.values.flatten()]
da = xr.DataArray([x for x in range(len(sites_flat))], dims='GNSS')
da['GNSS'] = [x for x in range(len(da))]
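    # the DataArray above is only a dummy that sets up the facet-grid layout;
    # the actual harmonic fits are plotted per-axes in the loop below: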
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i in range(fg.axes.shape[0]): # i is rows
for j in range(fg.axes.shape[1]): # j is cols
site = sites.values[i, j]
ax = fg.axes[i, j]
try:
harm_site = harmonics[[x for x in harmonics if site in x]]
if site in ['nrif']:
leg_loc = 'upper center'
elif site in ['yrcm', 'ramo']:
leg_loc = 'lower center'
# elif site in ['katz']:
# leg_loc = 'upper right'
else:
leg_loc = None
if scope == 'annual':
leg_loc = 'upper left'
ax, handles, labels = run_MLR_harmonics(harm_site, season=season,
cunits=cunits,
n_max=n_max, plot=True, ax=ax,
legend_loc=leg_loc, ncol=1,
legsize=leg_size, lw=2.5,
legend_S_only=True)
ax.set_xlabel(xlabel, fontsize=16)
if ylim is not None:
ax.set_ylim(*ylim)
ax.tick_params(axis='x', which='major', labelsize=18)
# if scope == 'diurnal':
ax.yaxis.set_major_locator(plt.MaxNLocator(4))
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.tick_params(axis='y', which='major', labelsize=18)
ax.yaxis.tick_left()
ax.xaxis.set_ticks(ticks)
ax.grid()
ax.set_title('')
ax.set_ylabel('')
ax.grid(axis='y', which='minor', linestyle='--')
# get this for upper legend:
# handles, labels = ax.get_legend_handles_labels()
if scope == 'annual':
site_label = '{} ({:.0f})'.format(
site.upper(), geo.loc[site].alt)
label_coord = [0.52, 0.87]
fs = 18
elif scope == 'diurnal':
site_label = site.upper()
label_coord = [0.1, 0.85]
fs = 20
ax.text(*label_coord, site_label,
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes, fontsize=fs)
if j == 0:
ax.set_ylabel('PWV anomalies [mm]', fontsize=16)
# if j == 0:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
# elif j == 1:
# if i>5:
# ax.set_ylabel('PW anomalies [mm]', fontsize=12)
except TypeError:
print('{}, {} axis off'.format(i, j))
ax.set_axis_off()
# for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
# harm_site = harmonics[[x for x in harmonics if sites[i] in x]]
# if site in ['elat', 'nrif']:
# loc = 'upper center'
# text = 0.1
# elif site in ['elro', 'yrcm', 'ramo', 'slom', 'jslm']:
# loc = 'upper right'
# text = 0.1
# else:
# loc = None
# text = 0.1
# ax = run_MLR_diurnal_harmonics(harm_site, season=season, n_max=n_max, plot=True, ax=ax, legend_loc=loc)
# ax.set_title('')
# ax.set_ylabel('PW anomalies [mm]')
# if ylim is not None:
# ax.set_ylim(ylim[0], ylim[1])
# ax.text(text, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
# for i, ax in enumerate(fg.axes.flatten()):
# if i > (da.GNSS.telasize-1):
# ax.set_axis_off()
# pass
# add upper legend for all factes:
S_labels = labels[:-2]
S_labels = [x.split(' ')[0] for x in S_labels]
last_label = 'Mean PWV anomalies'
sum_label = labels[-2].split("'")[1]
S_labels.append(sum_label)
S_labels.append(last_label)
fg.fig.legend(handles=handles, labels=S_labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.032,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
if save:
if era5:
filename = 'pw_era5_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
else:
filename = 'pw_{}_harmonics_{}_{}.png'.format(scope, n_max, season)
# plt.savefig(savefig_path / filename, bbox_inches='tight')
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_gustiness(path=work_yuval, ims_path=ims_path, site='tela',
ims_site='HAIFA-TECHNION', season='JJA', month=None, pts=7,
ax=None):
import xarray as xr
import numpy as np
g = xr.open_dataset(
ims_path / 'IMS_G{}_israeli_10mins_daily_anoms.nc'.format(pts))[ims_site]
g.load()
    if season is not None and month is not None:
        raise ValueError('Please pick either season or month, not both.')
    if season is not None:
        g = g.sel(time=g['time.season'] == season)
        label = 'Gustiness {} IMS station in {} season'.format(
            site, season)
    elif month is not None:
        g = g.sel(time=g['time.month'] == month)
        label = 'Gustiness {} IMS station in {} month'.format(
            site, month)
    else:
        label = 'Gustiness {} IMS station'.format(site)
# date = groupby_date_xr(g)
# # g_anoms = g.groupby('time.month') - g.groupby('time.month').mean('time')
# g_anoms = g.groupby(date) - g.groupby(date).mean('time')
# g_anoms = g_anoms.reset_coords(drop=True)
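    # mean diurnal cycle of the gustiness anomalies; the factor of 100 matches
    # the 10^2 scaling used in the facet-grid y-axis label: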
G = g.groupby('time.hour').mean('time') * 100.0
if ax is None:
fig, ax = plt.subplots(figsize=(16, 8))
Gline = G.plot(ax=ax, color='b', marker='o', label='Gustiness')
ax.set_title(label)
ax.axhline(0, color='b', linestyle='--')
ax.set_ylabel('Gustiness anomalies [dimensionless]', color='b')
ax.set_xlabel('Time of day [UTC]')
# ax.set_xticks(np.arange(0, 24, step=1))
ax.yaxis.label.set_color('b')
ax.tick_params(axis='y', colors='b')
ax.xaxis.set_ticks(np.arange(0, 23, 3))
ax.grid()
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_hourly_anoms_thresh_50_homogenized.nc')[site]
    pw = pw.load().dropna('time')
if season is not None:
pw = pw.sel(time=pw['time.season'] == season)
elif month is not None:
pw = pw.sel(time=pw['time.month'] == month)
# date = groupby_date_xr(pw)
# pw = pw.groupby(date) - pw.groupby(date).mean('time')
# pw = pw.reset_coords(drop=True)
pw = pw.groupby('time.hour').mean()
axpw = ax.twinx()
PWline = pw.plot.line(ax=axpw, color='tab:green',
marker='s', label='PW ({})'.format(season))
axpw.axhline(0, color='k', linestyle='--')
lns = Gline + PWline
axpw.set_ylabel('PW anomalies [mm]')
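    # align the zero of the PWV axis with the zero of the gustiness axis: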
align_yaxis(ax, 0, axpw, 0)
return lns
def plot_gustiness_facetgrid(path=work_yuval, ims_path=ims_path,
season='JJA', month=None, save=True):
import xarray as xr
gnss_ims_dict = {
'alon': 'ASHQELON-PORT', 'bshm': 'HAIFA-TECHNION', 'csar': 'HADERA-PORT',
'tela': 'TEL-AVIV-COAST', 'slom': 'BESOR-FARM', 'kabr': 'SHAVE-ZIYYON',
'nzrt': 'DEIR-HANNA', 'katz': 'GAMLA', 'elro': 'MEROM-GOLAN-PICMAN',
'mrav': 'MAALE-GILBOA', 'yosh': 'ARIEL', 'jslm': 'JERUSALEM-GIVAT-RAM',
'drag': 'METZOKE-DRAGOT', 'dsea': 'SEDOM', 'ramo': 'MIZPE-RAMON-20120927',
'nrif': 'NEOT-SMADAR', 'elat': 'ELAT', 'klhv': 'SHANI',
'yrcm': 'ZOMET-HANEGEV', 'spir': 'PARAN-20060124'}
da = xr.DataArray([x for x in gnss_ims_dict.values()], dims=['GNSS'])
da['GNSS'] = [x for x in gnss_ims_dict.keys()]
to_remove = ['kabr', 'nzrt', 'katz', 'elro', 'klhv', 'yrcm', 'slom']
sites = [x for x in da['GNSS'].values if x not in to_remove]
da = da.sel(GNSS=sites)
gnss_order = ['bshm', 'mrav', 'drag', 'csar', 'yosh', 'dsea', 'tela', 'jslm',
'nrif', 'alon', 'ramo', 'elat']
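    # re-index the stations to the fixed gnss_order via a round-trip through
    # pandas, then rebuild the DataArray that drives the facet grid: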
df = da.to_dataframe('gnss')
da = df.reindex(gnss_order).to_xarray()['gnss']
fg = xr.plot.FacetGrid(
da,
col='GNSS',
col_wrap=3,
sharex=False,
sharey=False, figsize=(20, 20))
for i, (site, ax) in enumerate(zip(da['GNSS'].values, fg.axes.flatten())):
lns = plot_gustiness(path=path, ims_path=ims_path,
ims_site=gnss_ims_dict[site],
site=site, season=season, month=month, ax=ax)
labs = [l.get_label() for l in lns]
if site in ['tela', 'alon', 'dsea', 'csar', 'elat', 'nrif']:
ax.legend(lns, labs, loc='upper center', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
elif site in ['drag']:
ax.legend(lns, labs, loc='upper right', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
else:
ax.legend(lns, labs, loc='best', prop={
'size': 8}, framealpha=0.5, fancybox=True, title=site.upper())
ax.set_title('')
ax.set_ylabel(r'G anomalies $\times$$10^{2}$')
# ax.text(.8, .85, site.upper(),
# horizontalalignment='center', fontweight='bold',
# transform=ax.transAxes)
for i, ax in enumerate(fg.axes.flatten()):
if i > (da.GNSS.size-1):
ax.set_axis_off()
pass
fg.fig.tight_layout()
fg.fig.subplots_adjust(top=0.974,
bottom=0.053,
left=0.041,
right=0.955,
hspace=0.15,
wspace=0.3)
filename = 'gustiness_israeli_gnss_pw_diurnal_{}.png'.format(season)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_fft_diurnal(path=work_yuval, save=True):
import xarray as xr
import numpy as np
import matplotlib.ticker as tck
sns.set_style("whitegrid",
{'axes.grid': True,
'xtick.bottom': True,
'font.family': 'serif',
'ytick.left': True})
sns.set_context('paper')
power = xr.load_dataset(path / 'GNSS_PW_power_spectrum_diurnal.nc')
power = power.to_array('site')
sites = [x for x in power.site.values]
fg = power.plot.line(col='site', col_wrap=4,
sharex=False, figsize=(20, 18))
fg.set_xlabels('Frequency [cpd]')
fg.set_ylabels('PW PSD [dB]')
ticklabels = np.arange(0, 7)
for ax, site in zip(fg.axes.flatten(), sites):
sns.despine()
ax.set_title('')
ax.set_xticklabels(ticklabels)
# ax.tick_params(axis='y', which='minor')
ax.yaxis.set_minor_locator(tck.AutoMinorLocator())
ax.set_xlim(0, 6.5)
ax.set_ylim(70, 125)
ax.grid(True)
ax.grid(which='minor', axis='y')
ax.text(.8, .85, site.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
fg.fig.tight_layout()
filename = 'power_pw_diurnal.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_rinex_availability_with_map(path=work_yuval, gis_path=gis_path,
scope='diurnal', ims=True,
dem_path=dem_path, fontsize=18, save=True):
# TODO: add box around merged stations and removed stations
# TODO: add color map labels to stations removed and merged
from aux_gps import gantt_chart
import xarray as xr
import pandas as pd
import geopandas as gpd
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
from matplotlib.colors import ListedColormap
from aux_gps import path_glob
sns.set_style('whitegrid')
sns.set_style('ticks')
print('{} scope selected.'.format(scope))
fig = plt.figure(figsize=(20, 15))
# grid = plt.GridSpec(1, 2, width_ratios=[
# 5, 2], wspace=0.1)
grid = plt.GridSpec(1, 2, width_ratios=[
5, 3], wspace=0.05)
ax_gantt = fig.add_subplot(grid[0, 0]) # plt.subplot(221)
ax_map = fig.add_subplot(grid[0, 1]) # plt.subplot(122)
# fig, ax = plt.subplots(1, 2, sharex=False, sharey=False, figsize=(20, 6))
# RINEX gantt chart:
if scope == 'diurnal':
file = path_glob(path, 'GNSS_PW_thresh_50_for_diurnal_analysis.nc')[-1]
elif scope == 'annual':
file = path / 'GNSS_PW_monthly_thresh_50.nc'
ds = xr.open_dataset(file)
just_pw = [x for x in ds if 'error' not in x]
ds = ds[just_pw]
da = ds.to_array('station').sel(time=slice(None,'2019'))
da['station'] = [x.upper() for x in da.station.values]
ds = da.to_dataset('station')
# reorder for annual, coastal, highland and eastern:
stns = group_sites_to_xarray(scope='annual', upper=True).T.values.ravel()
stns = stns[~pd.isnull(stns)]
ds = ds[stns]
# colors:
colors = produce_colors_for_pwv_station(scope=scope, zebra=False)
title = 'Daily RINEX files availability for the Israeli GNSS stations'
ax_gantt = gantt_chart(
ds,
ax=ax_gantt,
fw='bold', grid=True,
title='', colors=colors,
pe_dict=None, fontsize=fontsize, linewidth=24, antialiased=False)
years_fmt = mdates.DateFormatter('%Y')
# ax_gantt.xaxis.set_major_locator(mdates.YearLocator())
ax_gantt.xaxis.set_major_locator(mdates.YearLocator(4))
ax_gantt.xaxis.set_minor_locator(mdates.YearLocator(1))
ax_gantt.xaxis.set_major_formatter(years_fmt)
# ax_gantt.xaxis.set_minor_formatter(years_fmt)
ax_gantt.tick_params(axis='x', labelrotation=0)
# Israel gps ims map:
ax_map = plot_israel_map(
gis_path=gis_path, ax=ax_map, ticklabelsize=fontsize)
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax_map, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
# scale_bar(ax_map, 50)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level',
size=fontsize, weight='normal')
cb.ax.tick_params(labelsize=fontsize)
ax_map.set_xlabel('')
ax_map.set_ylabel('')
gps = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
# removed = ['hrmn', 'nizn', 'spir']
# removed = ['hrmn']
if scope == 'diurnal':
removed = ['hrmn', 'gilb', 'lhav']
elif scope == 'annual':
removed = ['hrmn', 'gilb', 'lhav']
print('removing {} stations from map.'.format(removed))
# merged = ['klhv', 'lhav', 'mrav', 'gilb']
merged = []
gps_list = [x for x in gps.index if x not in merged and x not in removed]
gps.loc[gps_list, :].plot(ax=ax_map, edgecolor='black', marker='s',
alpha=1.0, markersize=35, facecolor="None", linewidth=2, zorder=3)
# gps.loc[removed, :].plot(ax=ax_map, color='black', edgecolor='black', marker='s',
# alpha=1.0, markersize=25, facecolor='white')
# gps.loc[merged, :].plot(ax=ax_map, color='black', edgecolor='r', marker='s',
# alpha=0.7, markersize=25)
gps_stations = gps_list # [x for x in gps.index]
# to_plot_offset = ['mrav', 'klhv', 'nzrt', 'katz', 'elro']
to_plot_offset = []
for x, y, label in zip(gps.loc[gps_stations, :].lon, gps.loc[gps_stations,
:].lat, gps.loc[gps_stations, :].index.str.upper()):
if label.lower() in to_plot_offset:
ax_map.annotate(label, xy=(x, y), xytext=(4, -6),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
else:
ax_map.annotate(label, xy=(x, y), xytext=(3, 3),
textcoords="offset points", color='k',
fontweight='bold', fontsize=fontsize - 2)
# geo_annotate(ax_map, gps_normal_anno.lon, gps_normal_anno.lat,
# gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# geo_annotate(ax_map, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='normal', fs=10, colorupdown=False)
# plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax_map, color='black', edgecolor='black',
marker='x', linewidth=2, zorder=2)
geo_annotate(ax_map, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=fontsize - 2, colorupdown=False)
# plt.legend(['GNSS \nreceiver sites',
# 'removed \nGNSS sites',
# 'merged \nGNSS sites',
# 'radiosonde\nstation'],
# loc='upper left', framealpha=0.7, fancybox=True,
# handletextpad=0.2, handlelength=1.5)
if ims:
print('getting IMS temperature stations metadata...')
ims = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims.plot(ax=ax_map, marker='o', edgecolor='tab:orange', alpha=1.0,
markersize=35, facecolor="tab:orange", zorder=1)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
print('getting solved GNSS israeli stations metadata...')
plt.legend(['GNSS \nstations',
'radiosonde\nstation', 'IMS stations'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
else:
plt.legend(['GNSS \nstations',
'radiosonde\nstation'],
loc='upper left', framealpha=0.7, fancybox=True,
handletextpad=0.2, handlelength=1.5, fontsize=fontsize - 2)
fig.subplots_adjust(top=0.95,
bottom=0.11,
left=0.05,
right=0.95,
hspace=0.2,
wspace=0.2)
# plt.legend(['IMS stations', 'GNSS stations'], loc='upper left')
filename = 'rinex_israeli_gnss_map_{}.png'.format(scope)
# caption('Daily RINEX files availability for the Israeli GNSS station network at the SOPAC/GARNER website')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_means_box_plots(path=work_yuval, thresh=50, kind='box',
x='month', col_wrap=5, ylimits=None, twin=None,
twin_attrs=None,
xlimits=None, anoms=True, bins=None,
season=None, attrs_plot=True, save=True, ds_input=None):
import xarray as xr
pw = xr.open_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
attrs = [x.attrs for x in pw.data_vars.values()]
if x == 'month':
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_monthly_thresh_{:.0f}_homogenized.nc'.format(thresh))
# pw = pw.resample(time='MS').mean('time')
elif x == 'hour':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_hourly_anoms_thresh_{:.0f}_homogenized.nc'.format(thresh))
if twin is not None:
twin = twin.groupby('time.month') - \
twin.groupby('time.month').mean('time')
twin = twin.reset_coords(drop=True)
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
elif x == 'day':
# pw = pw.resample(time='1H').mean('time')
# pw = pw.groupby('time.hour').mean('time')
pw = xr.load_dataset(
work_yuval / 'GNSS_PW_daily_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
# first remove long term monthly means:
if anoms:
# pw = pw.groupby('time.month') - pw.groupby('time.month').mean('time')
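            # remove the annual cycle by subtracting the day-of-year climatology: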
            pw = pw.groupby('time.dayofyear') - \
                pw.groupby('time.dayofyear').mean('time')
if season is not None:
if season != 'all':
print('{} season is selected'.format(season))
pw = pw.sel(time=pw['time.season'] == season)
all_seas = False
if twin is not None:
twin = twin.sel(time=twin['time.season'] == season)
else:
print('all seasons selected')
all_seas = True
else:
all_seas = False
for i, da in enumerate(pw.data_vars):
pw[da].attrs = attrs[i]
if not attrs_plot:
attrs = None
if ds_input is not None:
# be carful!:
pw = ds_input
fg = plot_multi_box_xr(pw, kind=kind, x=x, col_wrap=col_wrap,
ylimits=ylimits, xlimits=xlimits, attrs=attrs,
bins=bins, all_seasons=all_seas, twin=twin,
twin_attrs=twin_attrs)
attrs = [x.attrs for x in pw.data_vars.values()]
for i, ax in enumerate(fg.axes.flatten()):
try:
mean_years = float(attrs[i]['mean_years'])
# print(i)
# print(mean_years)
except IndexError:
ax.set_axis_off()
pass
if kind != 'hist':
[fg.axes[x, 0].set_ylabel('PW [mm]')
for x in range(len(fg.axes[:, 0]))]
# [fg.axes[-1, x].set_xlabel('month') for x in range(len(fg.axes[-1, :]))]
fg.fig.subplots_adjust(top=0.98,
bottom=0.05,
left=0.025,
right=0.985,
hspace=0.27,
wspace=0.215)
if season is not None:
filename = 'pw_{}ly_means_{}_seas_{}.png'.format(x, kind, season)
else:
filename = 'pw_{}ly_means_{}.png'.format(x, kind)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_interannual_MLR_results(path=climate_path, fontsize=16, save=True):
import matplotlib.pyplot as plt
from climate_works import run_best_MLR
# rds = xr.load_dataset(path / 'best_MLR_interannual_gnss_pwv.nc')
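    # fit two MLR variants: one keeping the 'lci' predictors (with a trend term)
    # and one keeping the 'eofi' predictors, then compare fits and residuals: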
model_lci, rdf_lci = run_best_MLR(plot=False, heatmap=False, keep='lci',
add_trend=True)
rds_lci = model_lci.results_
model_eofi, rdf_eofi = run_best_MLR(plot=False, heatmap=False, keep='eofi',
add_trend=False)
rds_eofi = model_eofi.results_
fig, axes = plt.subplots(2, 1, sharex=True, sharey=False, figsize=(15, 7))
origln = rds_lci['original'].plot.line('k-.', ax=axes[0], linewidth=1.5)
predln_lci = rds_lci['predict'].plot.line('b-', ax=axes[0], linewidth=1.5)
predln_eofi = rds_eofi['predict'].plot.line(
'g-', ax=axes[0], linewidth=1.5)
r2_lci = rds_lci['r2_adj'].item()
r2_eofi = rds_eofi['r2_adj'].item()
axes[0].legend(origln+predln_lci+predln_eofi, ['mean PWV (12m-mean)', 'MLR with LCI (Adj R$^2$:{:.2f})'.format(
r2_lci), 'MLR with EOFs (Adj R$^2$:{:.2f})'.format(r2_eofi)], fontsize=fontsize-2)
axes[0].grid()
axes[0].set_xlabel('')
axes[0].set_ylabel('PWV anomalies [mm]', fontsize=fontsize)
axes[0].tick_params(labelsize=fontsize)
axes[0].grid(which='minor', color='k', linestyle='--')
residln_lci = rds_lci['resid'].plot.line('b-', ax=axes[1])
residln_eofi = rds_eofi['resid'].plot.line('g-', ax=axes[1])
axes[1].legend(residln_lci+residln_eofi, ['MLR with LCI',
'MLR with EOFs'], fontsize=fontsize-2)
axes[1].grid()
axes[1].set_ylabel('Residuals [mm]', fontsize=fontsize)
axes[1].tick_params(labelsize=fontsize)
axes[1].set_xlabel('')
years_fmt = mdates.DateFormatter('%Y')
# ax.figure.autofmt_xdate()
axes[1].xaxis.set_major_locator(mdates.YearLocator(2))
axes[1].xaxis.set_minor_locator(mdates.YearLocator(1))
axes[1].xaxis.set_major_formatter(years_fmt)
axes[1].grid(which='minor', color='k', linestyle='--')
# ax.xaxis.set_minor_locator(mdates.MonthLocator())
axes[1].figure.autofmt_xdate()
fig.tight_layout()
fig.subplots_adjust()
if save:
filename = 'pw_interannual_MLR_comparison.png'
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fig
def plot_annual_pw(path=work_yuval, fontsize=20, labelsize=18, compare='uerra',
ylim=[7.5, 40], save=True, kind='violin', bins=None, ds=None,
add_temperature=False):
"""kind can be violin or hist, for violin choose ylim=7.5,40 and for hist
choose ylim=0,0.3"""
import xarray as xr
import pandas as pd
import numpy as np
from synoptic_procedures import slice_xr_with_synoptic_class
gnss_filename = 'GNSS_PW_monthly_thresh_50.nc'
# gnss_filename = 'first_climatol_try.nc'
pw = xr.load_dataset(path / gnss_filename)
df_annual = pw.to_dataframe()
hue = None
if compare is not None:
df_annual = prepare_reanalysis_monthly_pwv_to_dataframe(
path, re=compare, ds=ds)
hue = 'source'
if not add_temperature:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind=kind,
fg=None,
ylim=ylim,
fontsize=fontsize,
labelsize=labelsize, hue=hue,
save=False, bins=bins)
fg.fig.subplots_adjust(
top=0.973,
bottom=0.029,
left=0.054,
right=0.995,
hspace=0.15,
wspace=0.12)
filename = 'pw_annual_means_{}.png'.format(kind)
else:
fg = plot_pw_geographical_segments(
df_annual, scope='annual',
kind='mean_month',
fg=None, ticklabelcolor='tab:blue',
ylim=[10, 31], color='tab:blue',
fontsize=fontsize,
labelsize=labelsize, hue=None,
save=False, bins=None)
# tmm = xr.load_dataset(path / 'GNSS_TD_monthly_1996_2020.nc')
tmm = xr.load_dataset(path / 'IMS_T/GNSS_TD_daily.nc')
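        # monthly temperature climatology per GNSS site, for the red twin axis: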
tmm = tmm.groupby('time.month').mean()
dftm = tmm.to_dataframe()
# dftm.columns = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
sites = group_sites_to_xarray(scope='annual')
sites_flat = sites.values.ravel()
# sites = sites[~pd.isnull(sites)]
for i, ax in enumerate(fg.axes.flat):
if pd.isnull(sites_flat[i]):
continue
twinax = ax.twinx()
twinax.plot(dftm.index.values, dftm[sites_flat[i]].values, color='tab:red',
markersize=10, marker='s', lw=1, markerfacecolor="None",
label='Temperature')
# dftm[sites[i]].plot(ax=twinax, color='r', markersize=10,
# marker='s', lw=1, markerfacecolor="None")
twinax.set_ylim(5, 37)
twinax.set_yticks(np.arange(5, 40, 10))
twinax.tick_params(axis='y', which='major', labelcolor='tab:red',
labelsize=labelsize)
if sites_flat[i] in sites.sel(group='eastern'):
twinax.set_ylabel(r'Temperature [$\degree$ C]',
fontsize=labelsize)
# fg.fig.canvas.draw()
# twinax.xaxis.set_ticks(np.arange(1, 13))
# twinax.tick_params(axis='x', which='major', labelsize=labelsize-2)
lines, labels = ax.get_legend_handles_labels()
lines2, labels2 = twinax.get_legend_handles_labels()
labels = ['PWV', 'Surface Temperature']
fg.fig.legend(handles=lines+lines2, labels=labels, prop={'size': 20}, edgecolor='k',
framealpha=0.5, fancybox=True, facecolor='white',
ncol=5, fontsize=20, loc='upper center', bbox_to_anchor=(0.5, 1.005),
bbox_transform=plt.gcf().transFigure)
fg.fig.subplots_adjust(
top=0.97,
bottom=0.029,
left=0.049,
right=0.96,
hspace=0.15,
wspace=0.17)
filename = 'pw_annual_means_temperature.png'
if save:
if compare is not None:
filename = 'pw_annual_means_{}_with_{}.png'.format(kind, compare)
plt.savefig(savefig_path / filename, orientation='portrait')
return fg
def plot_multi_box_xr(pw, kind='violin', x='month', sharex=False, sharey=False,
col_wrap=5, ylimits=None, xlimits=None, attrs=None,
bins=None, all_seasons=False, twin=None, twin_attrs=None):
import xarray as xr
pw = pw.to_array('station')
if twin is not None:
twin = twin.to_array('station')
fg = xr.plot.FacetGrid(pw, col='station', col_wrap=col_wrap, sharex=sharex,
sharey=sharey)
for i, (sta, ax) in enumerate(zip(pw['station'].values, fg.axes.flatten())):
pw_sta = pw.sel(station=sta).reset_coords(drop=True)
if all_seasons:
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'DJF')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='o')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'MAM')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='^')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'JJA')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None, bins=bins,
marker='s')
pw_seas = pw_sta.sel(time=pw_sta['time.season'] == 'SON')
df = pw_seas.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='x')
df = pw_sta.to_dataframe(sta)
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i], bins=bins,
marker='d')
if sta == 'nrif' or sta == 'elat':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper center', framealpha=0.5, fancybox=True)
elif sta == 'yrcm' or sta == 'ramo':
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='upper right', framealpha=0.5, fancybox=True)
else:
ax.legend(['DJF', 'MAM', 'JJA', 'SON', 'Annual'],
prop={'size': 8}, loc='best', framealpha=0.5, fancybox=True)
else:
# if x == 'hour':
# # remove seasonal signal:
# pw_sta = pw_sta.groupby('time.dayofyear') - pw_sta.groupby('time.dayofyear').mean('time')
# elif x == 'month':
# # remove daily signal:
# pw_sta = pw_sta.groupby('time.hour') - pw_sta.groupby('time.hour').mean('time')
df = pw_sta.to_dataframe(sta)
if twin is not None:
twin_sta = twin.sel(station=sta).reset_coords(drop=True)
twin_df = twin_sta.to_dataframe(sta)
else:
twin_df = None
if attrs is not None:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=attrs[i],
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
else:
plot_box_df(df, ax=ax, x=x, title=sta, ylabel='', kind=kind,
ylimits=ylimits, xlimits=xlimits, attrs=None,
bins=bins, twin_df=twin_df, twin_attrs=twin_attrs)
return fg
def plot_box_df(df, x='month', title='TELA', marker='o',
ylabel=r'IWV [kg$\cdot$m$^{-2}$]', ax=None, kind='violin',
ylimits=(5, 40), xlimits=None, attrs=None, bins=None, twin_df=None,
twin_attrs=None):
# x=hour is experimental
import seaborn as sns
from matplotlib.ticker import MultipleLocator
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import kurtosis
from scipy.stats import skew
# df = da_ts.to_dataframe()
if x == 'month':
df[x] = df.index.month
pal = sns.color_palette("Paired", 12)
elif x == 'hour':
df[x] = df.index.hour
if twin_df is not None:
twin_df[x] = twin_df.index.hour
# df[x] = df.index
pal = sns.color_palette("Paired", 12)
y = df.columns[0]
if ax is None:
fig, ax = plt.subplots()
if kind is None:
df = df.groupby(x).mean()
df.plot(ax=ax, legend=False, marker=marker)
if twin_df is not None:
twin_df = twin_df.groupby(x).mean()
twinx = ax.twinx()
twin_df.plot.line(ax=twinx, color='r', marker='s')
ax.axhline(0, color='k', linestyle='--')
if twin_attrs is not None:
twinx.set_ylabel(twin_attrs['ylabel'])
align_yaxis(ax, 0, twinx, 0)
ax.set_xlabel('Time of day [UTC]')
elif kind == 'violin':
sns.violinplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
gridsize=250, inner='quartile', scale='area')
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'box':
kwargs = dict(markerfacecolor='r', marker='o')
sns.boxplot(ax=ax, data=df, x=x, y=y, palette=pal, fliersize=4,
whis=1.0, flierprops=kwargs, showfliers=False)
ax.set_ylabel(ylabel)
ax.set_title(title)
ax.set_xlabel('')
elif kind == 'hist':
if bins is None:
bins = 15
a = df[y].dropna()
sns.distplot(ax=ax, a=a, norm_hist=True, bins=bins, axlabel='PW [mm]')
xmean = df[y].mean()
xmedian = df[y].median()
std = df[y].std()
sk = skew(df[y].dropna().values)
kurt = kurtosis(df[y].dropna().values)
# xmode = df[y].mode().median()
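        # interpolate the KDE curve height at the mean/median so the vertical
        # marker lines stop exactly at the curve: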
data_x, data_y = ax.lines[0].get_data()
ymean = np.interp(xmean, data_x, data_y)
ymed = np.interp(xmedian, data_x, data_y)
# ymode = np.interp(xmode, data_x, data_y)
ax.vlines(x=xmean, ymin=0, ymax=ymean, color='r', linestyle='--')
ax.vlines(x=xmedian, ymin=0, ymax=ymed, color='g', linestyle='-')
# ax.vlines(x=xmode, ymin=0, ymax=ymode, color='k', linestyle='-')
# ax.legend(['Mean:{:.1f}'.format(xmean),'Median:{:.1f}'.format(xmedian),'Mode:{:.1f}'.format(xmode)])
ax.legend(['Mean: {:.1f}'.format(xmean),
'Median: {:.1f}'.format(xmedian)])
ax.text(0.55, 0.45, "Std-Dev: {:.1f}\nSkewness: {:.1f}\nKurtosis: {:.1f}".format(
std, sk, kurt), transform=ax.transAxes)
ax.yaxis.set_minor_locator(MultipleLocator(5))
ax.yaxis.grid(True, which='minor', linestyle='--', linewidth=1, alpha=0.7)
ax.yaxis.grid(True, linestyle='--', linewidth=1, alpha=0.7)
title = ax.get_title().split('=')[-1].strip(' ')
if attrs is not None:
mean_years = float(attrs['mean_years'])
ax.set_title('')
ax.text(.2, .85, y.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
if kind is not None:
if kind != 'hist':
ax.text(.22, .72, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
ax.yaxis.tick_left()
ax.spines["top"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["bottom"].set_visible(False)
if ylimits is not None:
ax.set_ylim(*ylimits)
if twin_attrs is not None:
twinx.set_ylim(*twin_attrs['ylimits'])
align_yaxis(ax, 0, twinx, 0)
if xlimits is not None:
ax.set_xlim(*xlimits)
return ax
def plot_means_pw(load_path=work_yuval, ims_path=ims_path, thresh=50,
col_wrap=5, means='hour', save=True):
import xarray as xr
import numpy as np
pw = xr.load_dataset(
work_yuval /
'GNSS_PW_thresh_{:.0f}_homogenized.nc'.format(thresh))
pw = pw[[x for x in pw.data_vars if '_error' not in x]]
if means == 'hour':
# remove long term monthly means:
pw_clim = pw.groupby('time.month') - \
pw.groupby('time.month').mean('time')
pw_clim = pw_clim.groupby('time.{}'.format(means)).mean('time')
else:
pw_clim = pw.groupby('time.{}'.format(means)).mean('time')
# T = xr.load_dataset(
# ims_path /
# 'GNSS_5mins_TD_ALL_1996_2020.nc')
# T_clim = T.groupby('time.month').mean('time')
attrs = [x.attrs for x in pw.data_vars.values()]
fg = pw_clim.to_array('station').plot(col='station', col_wrap=col_wrap,
color='b', marker='o', alpha=0.7,
sharex=False, sharey=True)
col_arr = np.arange(0, len(pw_clim))
right_side = col_arr[col_wrap-1::col_wrap]
for i, ax in enumerate(fg.axes.flatten()):
title = ax.get_title().split('=')[-1].strip(' ')
try:
mean_years = float(attrs[i]['mean_years'])
ax.set_title('')
ax.text(.2, .85, title.upper(),
horizontalalignment='center', fontweight='bold',
transform=ax.transAxes)
ax.text(.2, .73, '{:.1f} years'.format(mean_years),
horizontalalignment='center',
transform=ax.transAxes)
# ax_t = ax.twinx()
# T_clim['{}'.format(title)].plot(
# color='r', linestyle='dashed', marker='s', alpha=0.7,
# ax=ax_t)
# ax_t.set_ylim(0, 30)
fg.fig.canvas.draw()
# labels = [item.get_text() for item in ax_t.get_yticklabels()]
# ax_t.yaxis.set_ticklabels([])
# ax_t.tick_params(axis='y', color='r')
# ax_t.set_ylabel('')
# if i in right_side:
# ax_t.set_ylabel(r'Surface temperature [$\degree$C]', fontsize=10)
# ax_t.yaxis.set_ticklabels(labels)
# ax_t.tick_params(axis='y', labelcolor='r', color='r')
# show months ticks and grid lines for pw:
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.yaxis.grid()
# ax.legend([ax.lines[0], ax_t.lines[0]], ['PW', 'T'],
# loc='upper right', fontsize=10, prop={'size': 8})
# ax.legend([ax.lines[0]], ['PW'],
# loc='upper right', fontsize=10, prop={'size': 8})
except IndexError:
pass
# change bottom xticks to 1-12 and show them:
# fg.axes[-1, 0].xaxis.set_ticks(np.arange(1, 13))
[fg.axes[x, 0].set_ylabel('PW [mm]') for x in range(len(fg.axes[:, 0]))]
# adjust subplots:
fg.fig.subplots_adjust(top=0.977,
bottom=0.039,
left=0.036,
right=0.959,
hspace=0.185,
wspace=0.125)
filename = 'PW_{}_climatology.png'.format(means)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return fg
def plot_gnss_radiosonde_monthly_means(sound_path=sound_path, path=work_yuval,
times=['2014', '2019'], sample='MS',
gps_station='tela', east_height=5000):
import xarray as xr
from aux_gps import path_glob
import pandas as pd
file = path_glob(sound_path, 'bet_dagan_phys_PW_Tm_Ts_*.nc')
phys = xr.load_dataset(file[0])['PW']
if east_height is not None:
file = path_glob(sound_path, 'bet_dagan_edt_sounding*.nc')
east = xr.load_dataset(file[0])['east_distance']
east = east.resample(sound_time=sample).mean().sel(
Height=east_height, method='nearest')
east_df = east.reset_coords(drop=True).to_dataframe()
if times is not None:
phys = phys.sel(sound_time=slice(*times))
ds = phys.resample(sound_time=sample).mean(
).to_dataset(name='Bet-dagan-radiosonde')
ds = ds.rename({'sound_time': 'time'})
gps = xr.load_dataset(
path / 'GNSS_PW_thresh_50_homogenized.nc')[gps_station]
if times is not None:
gps = gps.sel(time=slice(*times))
ds[gps_station] = gps.resample(time=sample).mean()
df = ds.to_dataframe()
# now plot:
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
# [x.set_xlim([pd.to_datetime(times[0]), pd.to_datetime(times[1])])
# for x in axes]
df.columns = ['Bet dagan soundings', '{} GNSS station'.format(gps_station)]
sns.lineplot(data=df, markers=['o', 's'], linewidth=2.0, ax=axes[0])
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 1] - df.iloc[:, 0]
    df_r.name = 'Residual distribution'
sns.lineplot(data=df_r, color='k', marker='o', linewidth=1.5, ax=axes[1])
if east_height is not None:
ax_east = axes[1].twinx()
sns.lineplot(data=east_df, color='red',
marker='x', linewidth=1.5, ax=ax_east)
ax_east.set_ylabel(
'East drift at {} km altitude [km]'.format(east_height / 1000.0))
axes[1].axhline(y=0, color='r')
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
axes[0].set_ylabel('Precipitable Water [mm]')
axes[1].set_ylabel('Residuals [mm]')
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
return ds
def plot_wetz_example(path=tela_results_path, plot='WetZ', fontsize=16,
save=True):
from aux_gps import path_glob
import matplotlib.pyplot as plt
from gipsyx_post_proc import process_one_day_gipsyx_output
filepath = path_glob(path, 'tela*_smoothFinal.tdp')[3]
if plot is None:
df, meta = process_one_day_gipsyx_output(filepath, True)
return df, meta
else:
df, meta = process_one_day_gipsyx_output(filepath, False)
if not isinstance(plot, str):
            raise ValueError('Please pick only one field to plot, e.g., WetZ')
error_plot = '{}_error'.format(plot)
fig, ax = plt.subplots(1, 1, figsize=(8, 6))
desc = meta['desc'][plot]
unit = meta['units'][plot]
df[plot].plot(ax=ax, legend=False, color='k')
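        # shaded band: the formal error estimate (WetZ +- WetZ_error) from GipsyX: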
ax.fill_between(df.index, df[plot] - df[error_plot],
df[plot] + df[error_plot], alpha=0.5)
ax.grid()
# ax.set_title('{} from station TELA in {}'.format(
# desc, df.index[100].strftime('%Y-%m-%d')))
ax.set_ylabel('WetZ [{}]'.format(unit), fontsize=fontsize)
ax.set_xlabel('Time [UTC]', fontsize=fontsize)
ax.tick_params(which='both', labelsize=fontsize)
ax.grid('on')
fig.tight_layout()
filename = 'wetz_tela_daily.png'
    caption('{} from station TELA in {}. Note the error estimation from the GipsyX software (filled).'.format(
        desc, df.index[100].strftime('%Y-%m-%d')))
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
def plot_figure_3(path=tela_solutions, year=2004, field='WetZ',
middle_date='11-25', zooms=[10, 3, 0.5], save=True):
from gipsyx_post_proc import analyse_results_ds_one_station
import xarray as xr
import matplotlib.pyplot as plt
import pandas as pd
dss = xr.open_dataset(path / 'TELA_ppp_raw_{}.nc'.format(year))
nums = sorted(list(set([int(x.split('-')[1])
for x in dss if x.split('-')[0] == field])))
ds = dss[['{}-{}'.format(field, i) for i in nums]]
da = analyse_results_ds_one_station(dss, field=field, plot=False)
fig, axes = plt.subplots(ncols=1, nrows=3, sharex=False, figsize=(16, 10))
for j, ax in enumerate(axes):
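        # each panel is centered on middle_date with a half-width of zooms[j] days: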
start = pd.to_datetime('{}-{}'.format(year, middle_date)
) - pd.Timedelta(zooms[j], unit='D')
end = pd.to_datetime('{}-{}'.format(year, middle_date)
) + pd.Timedelta(zooms[j], unit='D')
daa = da.sel(time=slice(start, end))
        for ppp in ds:
            ds[ppp].plot(ax=ax, linewidth=3.0)
daa.plot.line(marker='.', linewidth=0., ax=ax, color='k')
axes[j].set_xlim(start, end)
axes[j].set_ylim(daa.min() - 0.5, daa.max() + 0.5)
try:
axes[j - 1].axvline(x=start, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
axes[j - 1].axvline(x=end, color='r', alpha=0.85,
linestyle='--', linewidth=2.0)
except IndexError:
pass
units = ds.attrs['{}>units'.format(field)]
sta = da.attrs['station']
desc = da.attrs['{}>desc'.format(field)]
ax.set_ylabel('{} [{}]'.format(field, units))
ax.set_xlabel('')
ax.grid()
# fig.suptitle(
# '30 hours stitched {} for GNSS station {}'.format(
# desc, sta), fontweight='bold')
fig.tight_layout()
caption('20, 6 and 1 days of zenith wet delay in 2004 from the TELA GNSS station for the top, middle and bottom figures respectively. The colored segments represent daily solutions while the black dots represent smoothed mean solutions.')
filename = 'zwd_tela_discon_panel.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
# fig.subplots_adjust(top=0.95)
return axes
def plot_figure_3_1(path=work_yuval, data='zwd'):
import xarray as xr
from aux_gps import plot_tmseries_xarray
from PW_stations import load_gipsyx_results
if data == 'zwd':
tela = load_gipsyx_results('tela', sample_rate='1H', plot_fields=None)
label = 'ZWD [cm]'
title = 'Zenith wet delay derived from GPS station TELA'
ax = plot_tmseries_xarray(tela, 'WetZ')
elif data == 'pw':
ds = xr.open_dataset(path / 'GNSS_hourly_PW.nc')
tela = ds['tela']
label = 'PW [mm]'
title = 'Precipitable water derived from GPS station TELA'
ax = plot_tmseries_xarray(tela)
ax.set_ylabel(label)
ax.set_xlim('1996-02', '2019-07')
ax.set_title(title)
ax.set_xlabel('')
ax.figure.tight_layout()
return ax
def plot_ts_tm(path=sound_path, model='TSEN',
times=['2007', '2019'], fontsize=14, save=True):
"""plot ts-tm relashonship"""
import xarray as xr
import matplotlib.pyplot as plt
import seaborn as sns
from PW_stations import ML_Switcher
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from sounding_procedures import get_field_from_radiosonde
models_dict = {'LR': 'Linear Regression',
'TSEN': 'Theil–Sen Regression'}
# sns.set_style('whitegrid')
pds = xr.Dataset()
Ts = get_field_from_radiosonde(path=sound_path, field='Ts',
data_type='phys', reduce=None, times=times,
plot=False)
Tm = get_field_from_radiosonde(path=sound_path, field='Tm',
data_type='phys', reduce='min', times=times,
plot=False)
pds['Tm'] = Tm
pds['Ts'] = Ts
pds = pds.dropna('sound_time')
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
pds.plot.scatter(
x='Ts',
y='Tm',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
ax.grid()
ml = ML_Switcher()
fit_model = ml.pick_model(model)
X = pds.Ts.values.reshape(-1, 1)
y = pds.Tm.values
fit_model.fit(X, y)
predict = fit_model.predict(X)
coef = fit_model.coef_[0]
inter = fit_model.intercept_
ax.plot(X, predict, c='r')
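    # Bevis et al. (1992) empirical Ts-Tm relation, for comparison: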
bevis_tm = pds.Ts.values * 0.72 + 70.0
ax.plot(pds.Ts.values, bevis_tm, c='purple')
ax.legend(['{} ({:.2f}, {:.2f})'.format(models_dict.get(model),
coef, inter), 'Bevis 1992 et al. (0.72, 70.0)'], fontsize=fontsize-4)
# ax.set_xlabel('Surface Temperature [K]')
# ax.set_ylabel('Water Vapor Mean Atmospheric Temperature [K]')
ax.set_xlabel('Ts [K]', fontsize=fontsize)
ax.set_ylabel('Tm [K]', fontsize=fontsize)
ax.set_ylim(265, 320)
ax.tick_params(labelsize=fontsize)
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = predict - y
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", 'edgecolor': 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(y, predict))
print(rmean, rmse)
r2 = r2_score(y, predict)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[K]')
textstr = '\n'.join(['n={}'.format(pds.Ts.size),
'RMSE: ', '{:.2f} K'.format(rmse)]) # ,
# r'R$^2$: {:.2f}'.format(r2)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# axin1.text(0.2, 0.9, 'n={}'.format(pds.Ts.size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.78, 0.9, 'RMSE: {:.2f} K'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
axin1.set_xlim(-15, 15)
fig.tight_layout()
filename = 'Bet_dagan_ts_tm_fit_{}-{}.png'.format(times[0], times[1])
    caption('Water vapor mean temperature (Tm) vs. surface temperature (Ts) of the Bet-Dagan radiosonde station. Ordinary least squares linear fit (red) yields the residual distribution with an RMSE of 4 K. The Bevis (1992) model is plotted (purple) for comparison.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_pw_tela_bet_dagan_scatterplot(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
cats=None,
times=['2007', '2019'], wv_name='pw',
r2=False, fontsize=14,
save=True):
"""plot the PW of Bet-Dagan vs. PW of gps station"""
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# sns.set_style('white')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path, sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
tpw = 'tpw_bet_dagan'
ds = ds[[tpw, 'tela_pw']].dropna('time')
ds = ds.sel(time=slice(*times))
fig, ax = plt.subplots(1, 1, figsize=(7, 7))
ds.plot.scatter(x=tpw,
y='tela_pw',
marker='.',
s=100.,
linewidth=0,
alpha=0.5,
ax=ax)
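    # 1:1 reference line (y = x):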
ax.plot(ds[tpw], ds[tpw], c='r')
ax.legend(['y = x'], loc='upper right', fontsize=fontsize)
if wv_name == 'pw':
ax.set_xlabel('PWV from Bet-Dagan [mm]', fontsize=fontsize)
ax.set_ylabel('PWV from TELA GPS station [mm]', fontsize=fontsize)
elif wv_name == 'iwv':
ax.set_xlabel(
r'IWV from Bet-Dagan station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.set_ylabel(
r'IWV from TELA GPS station [kg$\cdot$m$^{-2}$]', fontsize=fontsize)
ax.grid()
axin1 = inset_axes(ax, width="40%", height="40%", loc=2)
resid = ds.tela_pw.values - ds[tpw].values
sns.distplot(resid, bins=50, color='k', label='residuals', ax=axin1,
kde=False,
hist_kws={"linewidth": 1, "alpha": 0.5, "color": "k", "edgecolor": 'k'})
axin1.yaxis.tick_right()
rmean = np.mean(resid)
rmse = np.sqrt(mean_squared_error(ds[tpw].values, ds.tela_pw.values))
r2s = r2_score(ds[tpw].values, ds.tela_pw.values)
axin1.axvline(rmean, color='r', linestyle='dashed', linewidth=1)
# axin1.set_xlabel('Residual distribution[mm]')
ax.tick_params(labelsize=fontsize)
if wv_name == 'pw':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
'bias: {:.2f} mm'.format(rmean),
'RMSE: {:.2f} mm'.format(rmse)])
elif wv_name == 'iwv':
if r2:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmse),
r'R$^2$: {:.2f}'.format(r2s)])
else:
textstr = '\n'.join(['n={}'.format(ds[tpw].size),
r'bias: {:.2f} kg$\cdot$m$^{{-2}}$'.format(
rmean),
r'RMSE: {:.2f} kg$\cdot$m$^{{-2}}$'.format(rmse)])
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
axin1.text(0.05, 0.95, textstr, transform=axin1.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
#
# axin1.text(0.2, 0.95, 'n={}'.format(ds[tpw].size),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.3, 0.85, 'bias: {:.2f} mm'.format(rmean),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# axin1.text(0.35, 0.75, 'RMSE: {:.2f} mm'.format(rmse),
# verticalalignment='top', horizontalalignment='center',
# transform=axin1.transAxes, color='k', fontsize=10)
# fig.suptitle('Precipitable Water comparison for the years {} to {}'.format(*times))
fig.tight_layout()
    caption(
        'PW from TELA GNSS station vs. PW from Bet-Dagan radiosonde station in {}-{}. A 45 degree line is plotted (red) for comparison. Note the skew in the residual distribution with an RMSE of 4.37 mm.'.format(times[0], times[1]))
# fig.subplots_adjust(top=0.95)
filename = 'Bet_dagan_tela_pw_compare_{}-{}.png'.format(times[0], times[1])
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ds
def plot_tela_bet_dagan_comparison(path=work_yuval, sound_path=sound_path,
ims_path=ims_path, station='tela',
times=['2007', '2020'], cats=None,
compare='pwv',
save=True):
from PW_stations import mean_ZWD_over_sound_time_and_fit_tstm
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import matplotlib.dates as mdates
# sns.set_style('whitegrid')
ds, mda = mean_ZWD_over_sound_time_and_fit_tstm(path=path,
sound_path=sound_path,
ims_path=ims_path,
data_type='phys',
gps_station=station,
times=times,
plot=False,
cats=cats)
ds = ds.drop_dims('time')
time_dim = list(set(ds.dims))[0]
ds = ds.rename({time_dim: 'time'})
ds = ds.dropna('time')
ds = ds.sel(time=slice(*times))
if compare == 'zwd':
df = ds[['zwd_bet_dagan', 'tela']].to_dataframe()
elif compare == 'pwv':
df = ds[['tpw_bet_dagan', 'tela_pw']].to_dataframe()
fig, axes = plt.subplots(2, 1, sharex=True, figsize=(12, 8))
df.columns = ['Bet-Dagan soundings', 'TELA GNSS station']
sns.scatterplot(
data=df,
s=20,
ax=axes[0],
style='x',
linewidth=0,
alpha=0.8)
# axes[0].legend(['Bet_Dagan soundings', 'TELA GPS station'])
df_r = df.iloc[:, 0] - df.iloc[:, 1]
    df_r.name = 'Residual distribution'
sns.scatterplot(
data=df_r,
color='k',
s=20,
ax=axes[1],
linewidth=0,
alpha=0.5)
axes[0].grid(b=True, which='major')
axes[1].grid(b=True, which='major')
if compare == 'zwd':
axes[0].set_ylabel('Zenith Wet Delay [cm]')
axes[1].set_ylabel('Residuals [cm]')
elif compare == 'pwv':
axes[0].set_ylabel('Precipitable Water Vapor [mm]')
axes[1].set_ylabel('Residuals [mm]')
# axes[0].set_title('Zenith wet delay from Bet-Dagan radiosonde station and TELA GNSS satation')
sonde_change_x = pd.to_datetime('2013-08-20')
axes[1].axvline(sonde_change_x, color='red')
axes[1].annotate(
'changed sonde type from VIZ MK-II to PTU GPS',
(mdates.date2num(sonde_change_x),
10),
xytext=(
15,
15),
textcoords='offset points',
arrowprops=dict(
arrowstyle='fancy',
color='red'),
color='red')
# axes[1].set_aspect(3)
[x.set_xlim(*[pd.to_datetime(times[0]), pd.to_datetime(times[1])])
for x in axes]
plt.tight_layout()
plt.subplots_adjust(wspace=0, hspace=0.01)
filename = 'Bet_dagan_tela_{}_compare.png'.format(compare)
    caption('Top: zenith wet delay from the Bet-Dagan radiosonde station (blue circles) and from the TELA GNSS station (orange x) in 2007-2019. Bottom: residuals. Note that the residuals become more constrained from 08-2013, probably due to an equipment change.')
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return df
def plot_israel_map_from_shape_file(gis_path=gis_path):
import geopandas as gpd
agr = gpd.read_file(gis_path/'ISR_agriculture_districts.shp')
isr = gpd.GeoSeries(agr.geometry.unary_union)
isr.crs = agr.crs
isr = isr.to_crs(epsg=4326)
return isr
def plot_israel_map(gis_path=gis_path, rc=rc, ticklabelsize=12, ax=None):
"""general nice map for israel, need that to plot stations,
and temperature field on top of it"""
import geopandas as gpd
import contextily as ctx
import seaborn as sns
import cartopy.crs as ccrs
sns.set_style("ticks", rc=rc)
isr_with_yosh = gpd.read_file(gis_path / 'Israel_and_Yosh.shp')
isr_with_yosh.crs = {'init': 'epsg:4326'}
# isr_with_yosh = isr_with_yosh.to_crs(epsg=3857)
crs_epsg = ccrs.epsg('3857')
# crs_epsg = ccrs.epsg('2039')
if ax is None:
# fig, ax = plt.subplots(subplot_kw={'projection': crs_epsg},
# figsize=(6, 15))
bounds = isr_with_yosh.geometry.total_bounds
extent = [bounds[0], bounds[2], bounds[1], bounds[3]]
# ax.set_extent([bounds[0], bounds[2], bounds[1], bounds[3]], crs=crs_epsg)
# ax.add_geometries(isr_with_yosh.geometry, crs=crs_epsg)
ax = isr_with_yosh.plot(alpha=0.0, figsize=(6, 15))
else:
isr_with_yosh.plot(alpha=0.0, ax=ax)
ctx.add_basemap(
ax,
source=ctx.providers.Stamen.TerrainBackground,
crs='epsg:4326')
ax.xaxis.set_major_locator(ticker.MaxNLocator(2))
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.yaxis.set_major_formatter(lat_formatter)
ax.xaxis.set_major_formatter(lon_formatter)
ax.tick_params(top=True, bottom=True, left=True, right=True,
direction='out', labelsize=ticklabelsize)
# scale_bar(ax, ccrs.Mercator(), 50, bounds=bounds)
return ax
def plot_israel_with_stations(gis_path=gis_path, dem_path=dem_path, ims=True,
gps=True, radio=True, terrain=True, alt=False,
ims_names=False, gps_final=False, save=True):
from PW_stations import produce_geo_gnss_solved_stations
from aux_gps import geo_annotate
from ims_procedures import produce_geo_ims
import matplotlib.pyplot as plt
import xarray as xr
import pandas as pd
import geopandas as gpd
ax = plot_israel_map(gis_path)
station_names = []
legend = []
if ims:
print('getting IMS temperature stations metadata...')
ims_t = produce_geo_ims(path=gis_path, freq='10mins', plot=False)
ims_t.plot(ax=ax, color='red', edgecolor='black', alpha=0.5)
station_names.append('ims')
legend.append('IMS stations')
if ims_names:
geo_annotate(ax, ims_t.lon, ims_t.lat,
ims_t['name_english'], xytext=(3, 3), fmt=None,
c='k', fw='normal', fs=7, colorupdown=False)
# ims, gps = produce_geo_df(gis_path=gis_path, plot=False)
if gps:
print('getting solved GNSS israeli stations metadata...')
gps_df = produce_geo_gnss_solved_stations(path=gis_path, plot=False)
if gps_final:
to_drop = ['gilb', 'lhav', 'hrmn', 'nizn', 'spir']
gps_final_stations = [x for x in gps_df.index if x not in to_drop]
gps = gps_df.loc[gps_final_stations, :]
gps.plot(ax=ax, color='k', edgecolor='black', marker='s')
gps_stations = [x for x in gps.index]
to_plot_offset = ['gilb', 'lhav']
# [gps_stations.remove(x) for x in to_plot_offset]
gps_normal_anno = gps.loc[gps_stations, :]
# gps_offset_anno = gps.loc[to_plot_offset, :]
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.index.str.upper(), xytext=(3, 3), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
if alt:
geo_annotate(ax, gps_normal_anno.lon, gps_normal_anno.lat,
gps_normal_anno.alt, xytext=(4, -6), fmt='{:.0f}',
c='k', fw='bold', fs=9, colorupdown=False)
# geo_annotate(ax, gps_offset_anno.lon, gps_offset_anno.lat,
# gps_offset_anno.index.str.upper(), xytext=(4, -6), fmt=None,
# c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('gps')
legend.append('GNSS stations')
if terrain:
# overlay with dem data:
cmap = plt.get_cmap('terrain', 41)
dem = xr.open_dataarray(dem_path / 'israel_dem_250_500.nc')
# dem = xr.open_dataarray(dem_path / 'israel_dem_500_1000.nc')
fg = dem.plot.imshow(ax=ax, alpha=0.5, cmap=cmap,
vmin=dem.min(), vmax=dem.max(), add_colorbar=False)
cbar_kwargs = {'fraction': 0.1, 'aspect': 50, 'pad': 0.03}
cb = plt.colorbar(fg, **cbar_kwargs)
cb.set_label(label='meters above sea level', size=8, weight='normal')
cb.ax.tick_params(labelsize=8)
ax.set_xlabel('')
ax.set_ylabel('')
if radio: # plot bet-dagan:
df = pd.Series([32.00, 34.81]).to_frame().T
df.index = ['Bet-Dagan']
df.columns = ['lat', 'lon']
bet_dagan = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.lon,
df.lat),
crs=gps.crs)
bet_dagan.plot(ax=ax, color='black', edgecolor='black',
marker='+')
geo_annotate(ax, bet_dagan.lon, bet_dagan.lat,
bet_dagan.index, xytext=(4, -6), fmt=None,
c='k', fw='bold', fs=10, colorupdown=False)
station_names.append('radio')
legend.append('radiosonde')
if legend:
plt.legend(legend, loc='upper left')
plt.tight_layout()
plt.subplots_adjust(bottom=0.05)
if station_names:
station_names = '_'.join(station_names)
else:
station_names = 'no_stations'
filename = 'israel_map_{}.png'.format(station_names)
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return ax
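# Hedged usage sketch (arguments as in the signature above; savefig_path must exist when save=True):
# ax = plot_israel_with_stations(ims=True, gps=True, radio=True, terrain=True, save=False)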
def plot_zwd_lapse_rate(path=work_yuval, fontsize=18, model='TSEN', save=True):
from PW_stations import calculate_zwd_altitude_fit
df, zwd_lapse_rate = calculate_zwd_altitude_fit(path=path, model=model,
plot=True, fontsize=fontsize)
if save:
filename = 'zwd_lapse_rate.png'
if save:
plt.savefig(savefig_path / filename, bbox_inches='tight')
return
def plot_ims_T_lapse_rate(ims_path=ims_path, dt='2013-10-19T22:00:00',
fontsize=16, save=True):
from aux_gps import path_glob
import xarray as xr
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from matplotlib import rc
def choose_dt_and_lapse_rate(tdf, dt, T_alts, lapse_rate):
ts = tdf.loc[dt, :]
# dt_col = dt.strftime('%Y-%m-%d %H:%M')
# ts.name = dt_col
# Tloc_df = Tloc_df.join(ts, how='right')
# Tloc_df = Tloc_df.dropna(axis=0)
ts_vs_alt =
|
pd.Series(ts.values, index=T_alts)
|
pandas.Series
|
"""
Makes a figure providing an overview of our dataset with a focus on lineages
laid out as follows:
a - Patient metadata
b - Donut plot of our lineage distributions vs the world
c - Timeline of patient sampling vs lineages identified
d - Choropleth of lineages by region
"""
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from typing import Dict
import logging
import matplotlib
from matplotlib.lines import Line2D
from mpl_toolkits.axes_grid.inset_locator import (inset_axes, InsetPosition,
mark_inset)
from covid_bronx import lineage_colors_dict, lineage_colors_dict_rgb
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
savefile = "figures/figure1_v2"
months = {
1: 'Jan',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'May',
6: 'Jun',
7: 'Jul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Nov',
12: 'Dec',
}
from covid_bronx.metadata import preprocess_metadata
from matplotlib.colors import colorConverter
# a) Timeline of lineages
logger.info("Plotting 1a")
timeline = pd.read_csv("data/external/global_lineages.csv")
from covid_bronx.metadata import get_metadata
metadata = get_metadata()
index = pd.date_range(metadata['collection_date'].min(), metadata['collection_date'].max())
metadata.index = metadata['name']
df = pd.read_csv("data/external/pangolin2.csv")
df.index = df['Sequence name'].apply(lambda x: x.split(" ")[0])
df.index = df.index.map(lambda x: "AECOM-" + str(int(x.split("-")[1])))
metadata[df.columns] = df
lineages_df = pd.read_csv("data/external/Lineages_updated.csv", index_col=0)
lineages = lineages_df['lineage'].dropna()
lineages.index = lineages.index.map(lambda x: x.replace("_", "-"))
metadata['Lineage'] = lineages
ddf = pd.DataFrame([ # Incremental Values
{
l: (metadata[metadata['collection_date'] == d]['Lineage']==l).sum()
for l in lineages
}
for d in index
],
index=index
)
ddf.index = ddf.index.map(lambda x: months[x.month])
ddmf = pd.DataFrame({k: v.sum(0) for k,v in ddf.groupby(ddf.index)})
cdf = pd.DataFrame([ # Cumulative Values
{
l: (metadata[metadata['collection_date'] <= d]['Lineage']==l).sum()
for l in lineages
}
for d in index
],
index=index
)
bronx_sampling = ddmf.sum(0)
sampling = pd.read_csv("data/external/sampling.csv", index_col=0)
sampling['date'] = pd.to_datetime(sampling['date'])
sampling['month'] = sampling['date'].apply(lambda x: months[x.month])
deathsdmf = pd.Series({k:v['Deaths'].sum() for k,v in sampling.groupby('month')})
casesdmf = pd.Series({k:v['Cases'].sum() for k,v in sampling.groupby('month')})
hospitalizationdmf = pd.Series({k:v['Hospitalizations'].sum() for k,v in sampling.groupby('month')})
sampling_df = pd.DataFrame({"Sampling": bronx_sampling, "Cases": casesdmf, "Deaths": deathsdmf, "Hospitalizations": hospitalizationdmf}).fillna(0.)
##########################################################
# Start Plotting
matplotlib.rcParams.update({'font.size': 16})
plt.clf()
plt.close()
fig1 = plt.figure(figsize=(24,24))
gs = fig1.add_gridspec(20,20)
# a) Sampling Timeline
ax_c = fig1.add_subplot(gs[0:8, 10:])
ax_c2 = ax_c.twinx()
sampling_df[['Cases', 'Deaths', 'Hospitalizations']].loc[['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep']].plot(ax=ax_c, label=True, color=['yellowgreen','red','orange'], linewidth=6)
ax_c.grid(linestyle='--', linewidth=1)
ax_c.set_ylim([0,100000])
ax_c2.set_ylim([0,80])
ax_c.set_ylabel("Count of Cases / Hospitalizations / Deaths")
ax_c.legend()
ax_c2.set_ylabel("Count of Genomes Sequenced")
ax_c.set_xlabel("Month")
ax_c.set_xticklabels(['Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep'])
sampling_df['Sampling'][['Feb','Mar','Apr','May','Jun','Jul','Aug','Sep']].plot.bar(ax=ax_c2, alpha=.5)
ax_c2.grid(linestyle='--', color='blue', alpha=.5, linewidth=1)
ax_c2.spines['right'].set_color('blue')
ax_c2.yaxis.label.set_color('blue')
ax_c2.tick_params(axis='y', colors='blue')
# d) Choropleth by Lineage
logger.info("Plotting 1d")
# ax_d = fig1.add_subplot(gs[6:, 8:])
from covid_bronx.geography import gen_points_in_gdf_polys, blank_background_choropleth, get_zip_codes_metadata_geo
import geopandas as gpd
metadata = preprocess_metadata()
coverage_levels =
|
pd.read_csv("data/processed/sequencing/coverage.csv", index_col=0)
|
pandas.read_csv
|
"""test_ulogconv."""
from context import mathpandas as mpd
import pandas as pd
import numpy as np
from numpy.testing import assert_almost_equal
def test_norm_2d():
"""test pythagoras series."""
x = pd.Series([1, 2, 3, 4])
y = pd.Series([2, 3, 4, 5])
r = mpd.get_series_norm_2d(x, y, "test")
assert_almost_equal(r.iloc[0], 2.23606797749979)
assert_almost_equal(r.iloc[1], 3.605551275463989)
assert_almost_equal(r.iloc[2], 5.0)
assert_almost_equal(r.iloc[3], 6.4031242374328485)
assert r.name == "test_norm"
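# Added note (illustrative, not part of the original test): the expected values are the
# element-wise Euclidean norms sqrt(x**2 + y**2), e.g. sqrt(1**2 + 2**2) = sqrt(5) ~= 2.236.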
def test_tilt_from_attitude():
"""test tilt series."""
q0 = pd.Series([1]) # w
q1 = pd.Series([0]) # x
q2 = pd.Series([0]) # y
q3 = pd.Series([0]) # z
tilt = mpd.get_tilt_from_attitude(q0, q1, q2, q3, "angle_z_xy")
assert_almost_equal(tilt, [0])
q0 = pd.Series([0.707])
q1 = pd.Series([0.707])
q2 = pd.Series([0])
q3 = pd.Series([0])
tilt = mpd.get_tilt_from_attitude(q0, q1, q2, q3, "angle_z_xy")
assert_almost_equal(tilt, [np.pi / 2.0]) # should be 90
q0 = pd.Series([0.707])
q1 = pd.Series([0])
q2 = pd.Series([0.707])
q3 = pd.Series([0])
tilt = mpd.get_tilt_from_attitude(q0, q1, q2, q3, "angle_z_xy")
assert_almost_equal(tilt, [np.pi / 2.0]) # should be 90
def test_heading_from_2d():
"""test heading from 2d."""
# point north
n = pd.Series([1])
e = pd.Series([0])
heading = mpd.get_heading_from_2d_vector(n, e)
assert_almost_equal(heading, [0])
# point east
n = pd.Series([0])
e = pd.Series([1])
heading = mpd.get_heading_from_2d_vector(n, e)
assert_almost_equal(heading, [np.pi / 2.0])
# point north west 45
n = pd.Series([1])
e =
|
pd.Series([-1])
|
pandas.Series
|
from pooch import os_cache as _os_cache
from pooch import retrieve as _retrieve
from pooch import HTTPDownloader as _HTTPDownloader
from pooch import Unzip as _Unzip
import pandas as _pd
import geopandas as _gpd
import os as _os
def loadDB(version=2021):
'''
Load the Puetz (2018) database, returning two dataframes:
- the Sample Details
- the Data table
'''
if version==2018:
fname = _retrieve(
url="https://ars.els-cdn.com/content/image/1-s2.0-S1674987117302141-mmc1.xlsx",
known_hash="sha256:8ea19b08d5c8d3c6e7f3239471d27b6da440fcfa40994e712ac9ae95642fe3d9",
downloader=_HTTPDownloader(progressbar=True),
path=_os_cache('gprm'),
)
# load the whole xls file once to avoid duplication
xls = _pd.ExcelFile(fname)
# Get dataframes for the sheets with information per site and per sample
df_Data = _pd.read_excel(xls, sheet_name='Data')
df_SampleDetails = _pd.read_excel(xls, sheet_name='Sample_Details')
# rename some fields for neatness
df_Data.rename(columns = {u'206Pb /\n238U\nAge\n(Ma)': '206Pb_238U_Age_Ma',
u'206Pb /\n238U\n2σ\nPrecis': '206Pb_238U_Precis',
u'207Pb /\n235U\nAge\n(Ma)': '207Pb_235U_Age_Ma',
u'207Pb /\n235U\n2σ\nPrecis': '207Pb_235U_Precis',
u'207Pb /\n206Pb\nAge\n(Ma)': '207Pb_206Pb_Age_Ma',
u'207Pb /\n206Pb\n2σ\nPrecis': '207Pb_206Pb_Precis'},
inplace = True)
df_SampleDetails.rename(columns = {'Est. Depos. Age (Ma)': 'Est_Depos_Age_Ma'}, inplace = True)
df_SampleDetails = df_SampleDetails.dropna(subset=['Sample Key'])
return df_SampleDetails, df_Data
elif version==2021:
fname = _retrieve(
url="https://ars.els-cdn.com/content/image/1-s2.0-S0012825221002464-mmc4.xlsx",
known_hash="sha256:afdcaa26698ba06f113f29fab311af0ec37f290db117fc85847966c28b78ff09",
downloader=_HTTPDownloader(progressbar=True),
path=_os_cache('gprm'),
)
xls = _pd.ExcelFile(fname)
df = _pd.read_excel(xls, sheet_name='UPb_Data')
gdf = _gpd.GeoDataFrame(df, geometry=_gpd.points_from_xy(df.Longitude, df.Latitude), crs=4326)
return gdf
def load_Hf():
fname = _retrieve(
url="https://ars.els-cdn.com/content/image/1-s2.0-S0012825221002464-mmc5.xlsx",
known_hash="sha256:7402470ca7e7319899d949207da3eae8c0ba20bdf21050a85ca70ff6c9be9b8c",
downloader=_HTTPDownloader(progressbar=True),
path=_os_cache('gprm'),
)
xls =
|
_pd.ExcelFile(fname)
|
pandas.ExcelFile
|
import numpy as np
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
def _check_mixed_int(df, dtype=None):
# GH#41672
result = DataFrame([], columns=['lang', 'name'])
result = result.agg({'name': lambda y: y.values})
assert type(result) == Series
result =
|
DataFrame([['a', 'boof']], columns=['lang', 'name'])
|
pandas.DataFrame
|
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.utils import resample
import os
def flattenList(l=None):
"""
flatting a nest list of lists to a list
:param l: a nested list of lists
:return: a list
"""
flat_list = [item for sublist in l for item in sublist]
return flat_list
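# Illustrative usage (a sketch, not part of the original module):
# flattenList([[1, 2], [3, 4]]) -> [1, 2, 3, 4]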
def checkDir(dirName=None):
"""
check if a given directory exists; if not, make a new directory
:param dirName: a given directory
:return:
"""
if not os.path.exists(dirName):
os.makedirs(dirName)
return 0
def indexAll(inputList=None, value=None):
"""
find all indices of a given value in a given list
:param inputList: input as a list
:param value: the value that targeted to index
:return: a list of the indices at which the value occurs
"""
if not isinstance(inputList, list):
raise TypeError('Input list must be a list object.')
return [i for i, x in enumerate(inputList) if x == value]
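# Illustrative usage (a sketch, not part of the original module):
# indexAll(inputList=['a', 'b', 'a'], value='a') -> [0, 2]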
def dataToCsv(data=None,fileName=None, floatPrecision='%.10f'):
"""
write the data to a csv file
:param data: data to write as numpy array
:param fileName: the name of the file
:param floatPrecision: the precision of the float
:return:
"""
df =
|
pd.DataFrame(data=data)
|
pandas.DataFrame
|
import requests
import pandas as pd
class KrxHistoricalDailyPriceDataDownloader:
def __init__(self):
self._headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36',
}
self._stocks = None
self._stocks_delisted = None
self._bld = 'dbms/MDC/STAT/standard/MDCSTAT01701'
def get_stocks(self):
data = {
'mktsel': 'ALL',
'typeNo': '0',
'searchText': '',
'bld': 'dbms/comm/finder/finder_stkisu',
}
url = 'http://data.krx.co.kr/comm/bldAttendant/getJsonData.cmd'
response = requests.post(url, data, headers=self._headers)
df = pd.json_normalize(response.json()['block1'])
df = df.set_index('short_code')
return df
def get_stocks_delisted(self):
data = {
'mktsel': 'ALL',
'searchText': '',
'bld': 'dbms/comm/finder/finder_listdelisu',
}
url = 'http://data.krx.co.kr/comm/bldAttendant/getJsonData.cmd'
response = requests.post(url, data, headers=self._headers)
df = pd.json_normalize(response.json()['block1'])
df = df.set_index('short_code')
return df
@property
def stocks(self):
if self._stocks is None:
self._stocks = self.get_stocks()
return self._stocks
@property
def stocks_delisted(self):
if self._stocks_delisted is None:
self._stocks_delisted = self.get_stocks_delisted()
return self._stocks_delisted
def get_full_code(self, symbol):
if symbol in self.stocks.index:
return self.stocks.loc[symbol]['full_code']
if symbol in self.stocks_delisted.index:
return self.stocks_delisted.loc[symbol]['full_code']
raise ValueError('No full_code found for given symbol %s' % symbol)
def download(self, symbol, start_date=None, end_date=None):
if start_date is None:
start_date = pd.Timestamp(1980, 1, 1)
if end_date is None:
end_date = pd.Timestamp.now().normalize() +
|
pd.Timedelta(1, unit='day')
|
pandas.Timedelta
|
import cantools
import pandas as pd
def dbc2dict(dbc_file):
"""
Convert a dbc file to a dictionary
"""
dbc = cantools.db.load_file(dbc_file)
dictData=[]
for message in dbc.messages: # loop over all messages in the dbc file
candt=dict()
candt['name']=message.name
candt['id']=hex(message.frame_id)
candt['length']=message.length
candt['comments']=message.comment
signals=dict()
for signal in message.signals: # loop through signals in message
signals[signal.name]={}
signals[signal.name]['start']=signal.start
signals[signal.name]['bit_length']=signal.length
signals[signal.name]['is_signed']=signal.is_signed
signals[signal.name]['is_float']=signal.is_float
signals[signal.name]['offset']=signal.offset
signals[signal.name]['scale']=signal.scale
signals[signal.name]['minimum']=signal.minimum
signals[signal.name]['maximum']=signal.maximum
signals[signal.name]['unit']=signal.unit
signals[signal.name]['multiplexer_ids']=signal.multiplexer_ids
signals[signal.name]['unit']=signal.unit
signals[signal.name]['receivers']=signal.receivers
# signals[signal.name]['dbc_specifics']=signal.dbc_specifics
signals[signal.name]['comment']=signal.comment
# signals[signal.name]['decimal']=signal.decimal
signals[signal.name]['byte_order']=signal.byte_order
candt['signals']=signals
dictData.append(candt) # append to json data
return dictData
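# Hedged usage sketch ('example.dbc' is a hypothetical file name, not provided here):
# messages = dbc2dict('example.dbc')
# print(messages[0]['name'], messages[0]['id'], list(messages[0]['signals'].keys()))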
def normalizeDf(js):
"""
Flatten the nested message/signal structure into rows of a dataframe
"""
data=[]
header=[]
for elkey in js[0]: # loop over all keys in json file
header.append(elkey)
if elkey=='signals': # if key is signals
sigs=js[0][elkey]
for sigkey in sigs: # loop over all keys in signals
for sigel in sigs[sigkey]:
header.append(sigel)
break
break
for el in js: # loop over all elements in json file
newDict=dict()
for hd in header:
try:
if hd=="signals":
newDict[hd]=""
continue
newDict[hd]=el[hd]
except:
newDict[hd]=''
data.append(newDict)
for skey in el['signals']: # loop over all keys in signals
newDict=dict()
for hd in header:
if hd=="signals":
newDict[hd]=skey
continue
try:
newDict[hd]=el['signals'][skey][hd]
except:
newDict[hd]=''
data.append(newDict)
df=
|
pd.DataFrame(data)
|
pandas.DataFrame
|
#======================================================
# Model Utility Functions
#======================================================
'''
Info: Utility functions for model building.
Version: 2.0
Author: <NAME>
Created: Saturday, 13 April 2019
'''
# Import modules
import os
import uuid
import copy
import time
import random
import numpy as np
import pandas as pd
from subprocess import call
import itertools
from datetime import datetime
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
from sklearn.utils import resample
from sklearn.tree import export_graphviz
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.metrics import precision_score, recall_score, accuracy_score, f1_score
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import roc_auc_score as roc
from sklearn.metrics import f1_score
#------------------------------
# Utility Functions
#------------------------------
# Set section
def set_section(string):
# Check if string is too long
string_size = len(string)
max_length = 100
if string_size > max_length:
print('TITLE TOO LONG')
else:
full_buffer_len = string_size
print('\n')
print(full_buffer_len * '-')
print(string)
print(full_buffer_len * '-'+'\n')
def downsample_df(df, labels_df, random_seed):
num_of_yt = sum(labels_df)
random.seed(random_seed+1)
downsample_bad_ix = random.sample(np.where(labels_df == 0)[0], num_of_yt)
good_ix = np.where(labels_df == 1)[0]
downsampled_full_ix = np.append(downsample_bad_ix, good_ix)
df_ds = pd.concat([df.iloc[[index]] for index in downsampled_full_ix])
return df_ds
def upsample(df, groupby_cols, random_seed, max_sample_ratio=1.5):
max_sample_size = df.groupby(groupby_cols).agg('count').max().max()
dfs = []
for i, df_ in df.groupby(groupby_cols):
dfs.append(resample(df_, replace=True, n_samples=int(max_sample_size * max_sample_ratio), random_state=random_seed))
upsampled_df = pd.concat(dfs, axis=0)
return upsampled_df
# Binarise
def binarise_labels(actual, pred):
classes, actual_pred_binary = np.unique(list(actual.append(pred)), return_inverse = True)
actual_binary = actual_pred_binary[:len(actual)]
pred_binary = actual_pred_binary[len(actual):]
return actual_binary, pred_binary, classes
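# Worked example (illustrative, assuming pandas Series inputs as used above):
# actual = pd.Series(['cat', 'dog']), pred = pd.Series(['dog', 'dog'])
# -> classes = ['cat', 'dog'], actual_binary = [0, 1], pred_binary = [1, 1]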
# Plot confusion
def plot_cfmt(cfmt, classes,
title='Confusion matrix',
cmap=plt.cm.Blues,
save_path=None,
colorbar=True,
figsize=(6,6),
fontsize=None,
ylabel='True label',
xlabel='Predicted label'):
'''
This function prints and plots the confusion matrix.
'''
plt.figure(figsize=figsize)
plt.imshow(cfmt, interpolation='nearest', cmap=cmap)
plt.title(title)
if colorbar:
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cfmt.max() - (cfmt.max() - cfmt.min())/ 2.
for i, j in itertools.product(range(cfmt.shape[0]), range(cfmt.shape[1])):
plt.text(j, i, cfmt[i, j],
horizontalalignment="center",
color="white" if cfmt[i, j] > thresh else "black", size=fontsize)
plt.ylabel(ylabel)
plt.xlabel(xlabel)
plt.tight_layout()
if save_path:
plt.savefig(save_path, dpi=360)
else:
plt.show()
def feature_importance_rf(model, feature_names, verbose=1):
importances = model.feature_importances_
std = np.std([tree.feature_importances_ for tree in model.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
for f in range(len(feature_names)):
print("%d. %s (%f)" % (f + 1, feature_names[indices[f]], importances[indices[f]]))
# Plot
if verbose:
plt.figure()
plt.title("Feature importances")
plt.bar(range(len(feature_names)), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(len(feature_names)), [feature_names[i] for i in indices], rotation=90)
plt.xlim([-1, len(feature_names)])
plt.show()
def plot_tree(tree, save_path, feature_names, class_names, dpi=300):
# Dot path
dot_save_path = save_path.split('.png')[0] + '.dot'
# Export as dot file
export_graphviz(tree, out_file=dot_save_path,
feature_names = feature_names,
class_names = class_names,
rounded = True, proportion = False,
precision = 2, filled = True)
# export_graphviz(tree, out_file=None, max_depth=None,
# feature_names=None,
# class_names=None, label='all', filled=False, leaves_parallel=False, impurity=True, node_ids=False, proportion=False,
# rotate=False, rounded=False, special_characters=False, precision=3)
# Convert to png using system command (requires Graphviz)
call(['dot', '-Tpng', dot_save_path, '-o', save_path, '-Gdpi='+str(dpi)])
os.remove(dot_save_path)
# Classify binary classification predictions
def map_classify_pred(true, pred):
if (true*1 == 1) & (pred*1 == 1):
return 'TP'
elif (true*1 == 0) & (pred*1 == 0):
return 'TN'
elif (true*1 == 1) & (pred*1 == 0):
return 'FN'
elif (true*1 == 0) & (pred*1 == 1):
return 'FP'
else:
return np.nan
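# Illustrative mapping (follows directly from the branches above):
# map_classify_pred(True, True) -> 'TP', map_classify_pred(True, False) -> 'FN',
# map_classify_pred(False, True) -> 'FP', map_classify_pred(False, False) -> 'TN'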
# Clf report
def clf_report(y_series, p_series, verbose=True):
df = pd.DataFrame(classification_report(y_series, p_series, output_dict=True)).T.reset_index()
df.columns = ['class', 'f1-score', 'precision', 'recall', 'support']
agg = df[df['class'].isin(['micro avg', 'macro avg', 'weighted avg'])]
summary = df[~df['class'].isin(['micro avg', 'macro avg', 'weighted avg'])]
if verbose:
set_section('Accuracy: ' + str(np.round(accuracy_score(y_series, p_series),3)))
print('\n', summary)
print('\n\n', agg, '\n')
return summary
def get_acc(y, p, verbose=1, name=None, save_path=None):
accuracy = accuracy_score(y, p)
support = len(y)
total_target = np.sum(y)
cfmt = confusion_matrix(y, p)
precision = precision_score(y, p, average='weighted')
recall = recall_score(y, p, average='weighted')
f1 = f1_score(y, p, average='weighted')
acc_breakdown = clf_report(y_series=y, p_series=p, verbose=False)
output = [str(uuid.uuid4()), datetime.now(), name, precision, recall, f1, accuracy, support, total_target, cfmt, [acc_breakdown.to_dict()]]
output =
|
pd.DataFrame(output)
|
pandas.DataFrame
|
"""
filename chapter_id speaker_id dataset_id
0 1272/128104/1272-128104-0000.wav 128104 1272 dev-clean
1 1272/128104/1272-128104-0001.wav 128104 1272 dev-clean
2 1272/128104/1272-128104-0002.wav 128104 1272 dev-clean
3 1272/128104/1272-128104-0003.wav 128104 1272 dev-clean
4 1272/128104/1272-128104-0004.wav 128104 1272 dev-clean
5 1272/128104/1272-128104-0005.wav 128104 1272 dev-clean
6 1272/128104/1272-128104-0006.wav 128104 1272 dev-clean
7 1272/128104/1272-128104-0007.wav 128104 1272 dev-clean
8 1272/128104/1272-128104-0008.wav 128104 1272 dev-clean
9 1272/128104/1272-128104-0009.wav 128104 1272 dev-clean
"""
import logging
import numpy as np
import pandas as pd
from python_speech_features import fbank, delta
import ds_constants as c
from ds_constants import SAMPLE_RATE
from voxceleb_wav_reader import read_audio
#def normalize_frames(m):
# return [(v - np.mean(v)) / np.std(v) for v in m]
def normalize_frames(m,epsilon=1e-12):
return [(v - np.mean(v)) / max(np.std(v),epsilon) for v in m]
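# Worked example (illustrative): each frame is shifted to zero mean and scaled by its std,
# e.g. normalize_frames([np.array([1., 2., 3.])]) -> [array([-1.2247, 0., 1.2247])] (approx.).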
def pre_process_inputs(signal=np.random.uniform(size=32000), target_sample_rate=8000):
filter_banks, energies = fbank(signal, samplerate=target_sample_rate, nfilt=64, winlen=0.025)
delta_1 = delta(filter_banks, N=1)
delta_2 = delta(delta_1, N=1)
filter_banks = normalize_frames(filter_banks)
delta_1 = normalize_frames(delta_1)
delta_2 = normalize_frames(delta_2)
frames_features = np.hstack([filter_banks, delta_1, delta_2])
num_frames = len(frames_features)
network_inputs = []
for j in range(8, num_frames - 8):
frames_slice = frames_features[j - 8:j + 8]
network_inputs.append(np.reshape(frames_slice, (32, 32, 3)))
return np.array(network_inputs)
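# Added shape check: with nfilt=64 the stacked features have 64 * 3 = 192 columns, so each
# 16-frame slice holds 16 * 192 = 3072 values, which reshapes exactly to (32, 32, 3).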
class MiniBatch:
def __init__(self, voxceleb, batch_size):
# indices = np.random.choice(len(libri), size=batch_size, replace=False)
# [anc1, anc2, anc3, pos1, pos2, pos3, neg1, neg2, neg3]
# [sp1, sp2, sp3, sp1, sp2, sp3, sp4, sp5, sp6]
unique_speakers = list(voxceleb['speaker_id'].unique())
num_triplets = batch_size
anchor_batch = None
positive_batch = None
negative_batch = None
for ii in range(num_triplets):
two_different_speakers = np.random.choice(unique_speakers, size=2, replace=False)
anchor_positive_speaker = two_different_speakers[0]
negative_speaker = two_different_speakers[1]
anchor_positive_file = voxceleb[voxceleb['speaker_id'] == anchor_positive_speaker].sample(n=2, replace=False)
anchor_df = pd.DataFrame(anchor_positive_file[0:1])
anchor_df['training_type'] = 'anchor'
positive_df = pd.DataFrame(anchor_positive_file[1:2])
positive_df['training_type'] = 'positive'
negative_df = voxceleb[voxceleb['speaker_id'] == negative_speaker].sample(n=1)
negative_df['training_type'] = 'negative'
if anchor_batch is None:
anchor_batch = anchor_df.copy()
else:
anchor_batch = pd.concat([anchor_batch, anchor_df], axis=0)
if positive_batch is None:
positive_batch = positive_df.copy()
else:
positive_batch = pd.concat([positive_batch, positive_df], axis=0)
if negative_batch is None:
negative_batch = negative_df.copy()
else:
negative_batch =
|
pd.concat([negative_batch, negative_df], axis=0)
|
pandas.concat
|
# IMPORTS
import sys
sys.path.append("..")
from preprocessing.temporal_aggregation import TemporalAggregator
import numpy as np
import pandas as pd
from tools.processing import groupwise_normalise, groupwise_expansion
from misc.utils import matchfinder, fano_inequality
from tqdm import tqdm
from structures.trajectory import TrajectoriesFrame
tqdm.pandas()
import concurrent.futures as cf
from math import ceil
from random import sample
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
def num_of_distinct_locations(trajectories_frame):
"""
Returns the number of distinct locations in the trajectory. First looks for a 'labels' column.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the number of unique locations for each user
"""
if isinstance(trajectories_frame, pd.DataFrame):
return trajectories_frame.groupby(level=0).progress_apply(lambda x: len(pd.unique(x['labels'])))
else:
return trajectories_frame.groupby(level=0).progress_apply(lambda x: pd.unique(x).shape[0])
def visitation_frequency(trajectories_frame):
"""
Calculates visitation frequency for each user in the TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the visitation frequency for each user
"""
lat_col = trajectories_frame._geom_cols[0]
lon_col = trajectories_frame._geom_cols[1]
frequencies = trajectories_frame.groupby(level=0).progress_apply(
lambda x: x.groupby([lat_col, lon_col]).count()).iloc[:, 0]
frequencies = frequencies.groupby(level=0).progress_apply(lambda x: x.sort_values(ascending=False))
frequencies = groupwise_normalise(frequencies)
return frequencies
def _filter_distinct_locations(trajectories_frame):
to_concat = []
for ind, vals in trajectories_frame.groupby(level=0):
if len(vals) == 1:
to_concat.append(vals)
continue
else:
uniq = vals.loc[vals['geometry'].drop_duplicates().index]
to_concat.append(uniq)
return pd.concat(to_concat)
def distinct_locations_over_time(trajectories_frame, time_unit='30min', reaggregate=False):
"""
Calculates the number of distinct locations visited in the movement trajectory over time.
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: determines time unit
:param reaggregate: if true, data are first re-aggregated to the given time unit
:return: a Series with the number of unique locations visited up to each time step in the movement trajectory
"""
if reaggregate:
temp_agg = TemporalAggregator(time_unit)
trajectories_frame = temp_agg.aggregate(trajectories_frame)
trajectories_frame = _filter_distinct_locations(trajectories_frame)
distinct_locations = trajectories_frame.dropna().groupby(level=0).resample(time_unit, level=1).count()
distinct_locations = distinct_locations.groupby(level=0).cumsum().iloc[:, 0]
return distinct_locations
def jump_lengths(trajectories_frame):
"""
Calculates jump lengths between each step in the trajectory
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with jump lengths between consecutive records
"""
jumps = trajectories_frame.groupby(level=0).progress_apply(lambda x: x.distance(x.shift()))
return jumps
def nonzero_trips(trajectories_frame):
"""
Counts all trips that had distance larger than 0.
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with a count of nonzero trips for each user
"""
jumps = jump_lengths(trajectories_frame).dropna().droplevel([1, 2])
return jumps[jumps != 0].groupby(by="ID").count()
def self_transitions(trajectories_frame):
"""
Calculates the number of self transitions for each user
:param trajectories_frame: TrajectoriesFrame class object
:return: a Series with the number of self transitions for each user
"""
if isinstance(trajectories_frame, pd.Series):
self_transitions_mask = (trajectories_frame == trajectories_frame.shift())
else:
if not hasattr(trajectories_frame, '_geom_cols'):
trajectories_frame = TrajectoriesFrame(trajectories_frame)
coordinates_frame = trajectories_frame[[trajectories_frame._geom_cols[0], trajectories_frame._geom_cols[1]]]
self_transitions_mask = (coordinates_frame == coordinates_frame.shift()).all(axis=1)
empty_mask = (~self_transitions_mask).groupby(level=0).progress_apply(lambda x: x.all())
empty_mask = empty_mask[empty_mask == True].index
self_transitions_only = trajectories_frame[self_transitions_mask]
empty_self_transitions = pd.DataFrame([0 for x in range(len(empty_mask))], index=empty_mask)
if isinstance(trajectories_frame, pd.Series):
self_transitions_only = self_transitions_only.groupby(level=0).count()
else:
self_transitions_only = self_transitions_only.groupby(level=0).count()[self_transitions_only.columns[0]]
if len(empty_self_transitions) > 0:
self_transitions_only = self_transitions_only.append(empty_self_transitions.iloc[:, 0]).sort_index()
return self_transitions_only
def waiting_times(trajectories_frame, time_unit='h'):
"""
Calculates waiting times for each transition in TrajectoriesFrame
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: time unit in which waiting times will be expressed
:return: A series with waiting times for each transition for each user
"""
transitions_only = trajectories_frame[
trajectories_frame.geometry.groupby(level=0).progress_apply(lambda x: x.shift(-1) != x)]
transitions_only['dt'] = transitions_only.index.get_level_values(1)
times = transitions_only.groupby(level=0).progress_apply(
lambda x: (x['dt'] - x['dt'].shift(1)).astype('timedelta64[%s]' % time_unit))
return times
def center_of_mass(trajectories_frame):
"""
Calculates a center of mass for each user's trajectory
:param trajectories_frame: TrajectoriesFrame class object
:return: a GeoSeries with centers of mass of each user's trajectory
"""
return trajectories_frame.dissolve(by=trajectories_frame.index.get_level_values(0)).centroid
def radius_of_gyration(trajectories_frame, time_evolution=True):
"""
Calculates radii of gyration for each user. Optionally uses time steps to express their growth.
:param trajectories_frame: TrajectoriesFrame class object
:param time_evolution: If true, radii of gyration are calculated over time
:return: a Series with radii of gyration for each user
"""
mean_locs = center_of_mass(trajectories_frame)
to_concat_dict = {}
to_concat_list = []
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
vals = vals.dropna()
rog_ind = vals.distance(mean_locs.loc[ind]) ** 2
if time_evolution:
rog_ind = groupwise_expansion(np.sqrt(rog_ind))
to_concat_list.append(rog_ind)
else:
rog_ind = np.sqrt(rog_ind.mean())
to_concat_dict[ind] = rog_ind
if time_evolution:
radius = pd.concat(to_concat_list)
else:
radius = pd.DataFrame.from_dict(to_concat_dict, orient='index')
return radius
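# Added note (restating the computation above): with time_evolution=False the result per user is
# r_g = sqrt(mean(dist(p_i, center_of_mass)**2)); with time_evolution=True the per-point sqrt
# values are passed through groupwise_expansion (from tools.processing) to express growth over time.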
def mean_square_displacement(trajectories_frame, from_center=False, time_evolution=True, reference_locs=None):
"""
Calculates mean square displacements for each user. Optionally uses time steps to express their growth.
:param trajectories_frame: TrajectoriesFrame class object
:param from_center: If true, displacement is calculated from the trajectory center; if false, from the first point
:param time_evolution: If true, mean square displacements are calculated over time
:param reference_locs: allows reference locations for each trajectory to be given explicitly
:return: a Series with mean square displacements for each user
"""
to_concat_dict = {}
to_concat_list = []
if reference_locs is None:
if from_center:
reference_locs = center_of_mass(trajectories_frame)
else:
reference_locs = trajectories_frame.groupby(level=0).head(1).droplevel(1).geometry
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
vals = vals.dropna()
msd_ind = (vals.distance(reference_locs.loc[ind]) ** 2)
if time_evolution:
msd_ind = groupwise_expansion(msd_ind)
to_concat_list.append(msd_ind)
else:
msd_ind = msd_ind.mean()
to_concat_dict[ind] = msd_ind
if time_evolution:
msd = pd.concat(to_concat_list)
else:
msd = pd.DataFrame.from_dict(to_concat_dict, orient='index')
return msd
def return_time(trajectories_frame, time_unit='h', by_place=False):
"""
Calculates return times for each unique location in each user's trajectory.
:param trajectories_frame: TrajectoriesFrame class object
:param time_unit: time unit in which return times will be expressed
:param by_place: If true, return times are expressed for each place globally
:return: a Series with return times
"""
if not hasattr(trajectories_frame, '_geom_cols'):
trajectories_frame = TrajectoriesFrame(trajectories_frame)
lat_col = trajectories_frame[trajectories_frame._geom_cols[0]]
lon_col = trajectories_frame[trajectories_frame._geom_cols[1]]
trajectories_frame['datetime_temp'] = trajectories_frame.index.get_level_values(1)
to_concat = []
for ind, vals in tqdm(trajectories_frame.groupby(level=0), total=len(trajectories_frame)):
concat_level = {}
for place, vals2 in vals.groupby([lat_col, lon_col]):
shifts = (vals2.datetime_temp - vals2.datetime_temp.shift()).astype('timedelta64[%s]' % time_unit)
concat_level[place] = shifts
to_concat.append(
|
pd.concat(concat_level)
|
pandas.concat
|
"""
July 2021
This code calculates the sand use for concrete and glass production in the building sector in 26 global regions. For the original code & latest updates, see: https://github.com/
The dynamic material model is based on the BUMA model developed by <NAME>, Leiden University, the Netherlands. For the original code & latest updates, see: https://github.com/SPDeetman/BUMA
The dynamic stock model is based on the ODYM model developed by <NAME>, Uni Freiburg, Germany. For the original code & latest updates, see: https://github.com/IndEcol/ODYM
*NOTE: Insert location of GloBus-main folder in 'dir_path' (line 23) before running the code
Software version: Python 3.7
"""
#%% GENERAL SETTING & STATEMENTS
import pandas as pd
import numpy as np
import os
import ctypes
import math
# set current directory
dir_path = ""
os.chdir(dir_path)
# Set general constants
regions = 26 #26 IMAGE regions
building_types = 4 #4 building types: detached, semi-detached, appartments & high-rise
area = 2 #2 areas: rural & urban
materials = 2 #2 materials: Concrete, Glass
inflation = 1.2423 #gdp/cap inflation correction between 2005 (IMAGE data) & 2016 (commercial calibration) according to https://www.bls.gov/data/inflation_calculator.htm
# Set Flags for sensitivity analysis
flag_alpha = 0 # switch for the sensitivity analysis on alpha, if 1 the maximum alpha is 10% above the maximum found in the data
flag_ExpDec = 0 # switch to choose between Gompertz and Exponential Decay function for commercial floorspace demand (0 = Gompertz, 1 = Expdec)
flag_Normal = 0 # switch to choose between Weibull and Normal lifetime distributions (0 = Weibull, 1 = Normal)
flag_Mean = 0 # switch to choose between material intensity settings (0 = regular regional, 1 = mean, 2 = high, 3 = low, 4 = median)
#%%Load files & arrange tables ----------------------------------------------------
if flag_Mean == 0:
file_addition = ''
elif flag_Mean == 1:
file_addition = '_mean'
elif flag_Mean ==2:
file_addition = '_high'
elif flag_Mean ==3:
file_addition = '_low'
else:
file_addition = '_median'
# Load Population, Floor area, and Service value added (SVA) Database csv-files
pop = pd.read_csv('files_population/pop.csv', index_col = [0]) # Pop; unit: million of people; meaning: global population (over time, by region)
rurpop = pd.read_csv('files_population/rurpop.csv', index_col = [0]) # rurpop; unit: %; meaning: the share of people living in rural areas (over time, by region)
housing_type = pd.read_csv('files_population/Housing_type.csv') # Housing_type; unit: %; meaning: the share of the NUMBER OF PEOPLE living in a particular building type (by region & by area)
floorspace = pd.read_csv('files_floor_area/res_Floorspace.csv') # Floorspace; unit: m2/capita; meaning: the average m2 per capita (over time, by region & area)
floorspace = floorspace[floorspace.Region != regions + 1] # Remove empty region 27
avg_m2_cap = pd.read_csv('files_floor_area/Average_m2_per_cap.csv') # Avg_m2_cap; unit: m2/capita; meaning: average square meters per person (by region & area (rural/urban) & building type)
sva_pc_2005 = pd.read_csv('files_GDP/sva_pc.csv', index_col = [0])
sva_pc = sva_pc_2005 * inflation # we use the inflation corrected SVA to adjust for the fact that IMAGE provides gdp/cap in 2005 US$
# load material density data csv-files
building_materials_concrete = pd.read_csv('files_material_density/Building_materials_concrete' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
building_materials_glass = pd.read_csv('files_material_density/Building_materials_glass' + file_addition + '.csv') # Building_materials; unit: kg/m2; meaning: the average material use per square meter (by building type, by region & by area)
materials_commercial_concrete = pd.read_csv('files_material_density/materials_commercial_concrete' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
materials_commercial_glass = pd.read_csv('files_material_density/materials_commercial_glass' + file_addition + '.csv', index_col = [0]) # 7 building materials in 4 commercial building types; unit: kg/m2; meaning: the average material use per square meter (by commercial building type)
# Load fitted regression parameters for comercial floor area estimate
if flag_alpha == 0:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters.csv', index_col = [0])
else:
gompertz = pd.read_csv('files_floor_area//files_commercial/Gompertz_parameters_alpha.csv', index_col = [0])
# Ensure full time series for pop & rurpop (interpolation, some years are missing)
rurpop2 = rurpop.reindex(list(range(1970,2061,1))).interpolate()
pop2 = pop.reindex(list(range(1970,2061,1))).interpolate()
# Remove 1st year, to ensure same Table size as floorspace data (from 1971)
pop2 = pop2.iloc[1:]
rurpop2 = rurpop2.iloc[1:]
#pre-calculate urban population
urbpop = 1 - rurpop2 # urban population is 1 - the fraction of people living in rural areas (rurpop)
# Restructure the tables to regions as columns; for floorspace
floorspace_rur = floorspace.pivot(index="t", columns="Region", values="Rural")
floorspace_urb = floorspace.pivot(index="t", columns="Region", values="Urban")
# Restructuring for square meters (m2/cap)
avg_m2_cap_urb = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_urb.columns = list(map(int,avg_m2_cap_urb.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_urb2 = avg_m2_cap_urb.drop(['Region']) # Remove idle row
avg_m2_cap_rur = avg_m2_cap.loc[avg_m2_cap['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
avg_m2_cap_rur.columns = list(map(int,avg_m2_cap_rur.iloc[0])) # name columns according to the row containing the region-labels
avg_m2_cap_rur2 = avg_m2_cap_rur.drop(['Region']) # Remove idle row
# Restructuring for the Housing types (% of population living in them)
housing_type_urb = housing_type.loc[housing_type['Area'] == 'Urban'].drop('Area', 1).T # Remove area column & Transpose
housing_type_urb.columns = list(map(int,housing_type_urb.iloc[0])) # name columns according to the row containing the region-labels
housing_type_urb2 = housing_type_urb.drop(['Region']) # Remove idle row
housing_type_rur = housing_type.loc[housing_type['Area'] == 'Rural'].drop('Area', 1).T # Remove area column & Transpose
housing_type_rur.columns = list(map(int,housing_type_rur.iloc[0])) # name columns according to the row containing the region-labels
housing_type_rur2 = housing_type_rur.drop(['Region']) # Remove idle row
#%% COMMERCIAL building space demand (stock) calculated from Gompertz curve (fitted, using separate regression model)
# Select Gompertz curve parameters for the total commercial m2 demand (stock)
alpha = gompertz['All']['a'] if flag_ExpDec == 0 else 25.601
beta = gompertz['All']['b'] if flag_ExpDec == 0 else 28.431
gamma = gompertz['All']['c'] if flag_ExpDec == 0 else 0.0415
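# Added note: the Gompertz form applied below is m2/cap = alpha * exp(-beta * exp(-(gamma/1000) * sva_pc));
# the exponential-decay alternative (flag_ExpDec == 1) uses max(0.542, alpha - beta * exp(-(gamma/1000) * sva_pc)).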
# find the total commercial m2 stock (in Millions of m2)
commercial_m2_cap = pd.DataFrame(index=range(1971,2061), columns=range(1,27))
for year in range(1971,2061):
for region in range(1,27):
if flag_ExpDec == 0:
commercial_m2_cap[region][year] = alpha * math.exp(-beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
else:
commercial_m2_cap[region][year] = max(0.542, alpha - beta * math.exp((-gamma/1000) * sva_pc[str(region)][year]))
# Subdivide the total across Offices, Retail+, Govt+ & Hotels+
commercial_m2_cap_office = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Offices
commercial_m2_cap_retail = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Retail & Warehouses
commercial_m2_cap_hotels = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hotels & Restaurants
commercial_m2_cap_govern = pd.DataFrame(index=range(1971,2061), columns=range(1,27)) # Hospitals, Education, Government & Transportation
minimum_com_office = 25
minimum_com_retail = 25
minimum_com_hotels = 25
minimum_com_govern = 25
for year in range(1971,2061):
for region in range(1,27):
# get the square meter per capita floorspace for 4 commercial applications
office = gompertz['Office']['a'] * math.exp(-gompertz['Office']['b'] * math.exp((-gompertz['Office']['c']/1000) * sva_pc[str(region)][year]))
retail = gompertz['Retail+']['a'] * math.exp(-gompertz['Retail+']['b'] * math.exp((-gompertz['Retail+']['c']/1000) * sva_pc[str(region)][year]))
hotels = gompertz['Hotels+']['a'] * math.exp(-gompertz['Hotels+']['b'] * math.exp((-gompertz['Hotels+']['c']/1000) * sva_pc[str(region)][year]))
govern = gompertz['Govt+']['a'] * math.exp(-gompertz['Govt+']['b'] * math.exp((-gompertz['Govt+']['c']/1000) * sva_pc[str(region)][year]))
# calculate minimum values for later use in the historic tail (Region 20: China @ 134 $/cap SVA)
minimum_com_office = office if office < minimum_com_office else minimum_com_office
minimum_com_retail = retail if retail < minimum_com_retail else minimum_com_retail
minimum_com_hotels = hotels if hotels < minimum_com_hotels else minimum_com_hotels
minimum_com_govern = govern if govern < minimum_com_govern else minimum_com_govern
# Then use the ratios to subdivide the total commercial floorspace into 4 categories
commercial_sum = office + retail + hotels + govern
commercial_m2_cap_office[region][year] = commercial_m2_cap[region][year] * (office/commercial_sum)
commercial_m2_cap_retail[region][year] = commercial_m2_cap[region][year] * (retail/commercial_sum)
commercial_m2_cap_hotels[region][year] = commercial_m2_cap[region][year] * (hotels/commercial_sum)
commercial_m2_cap_govern[region][year] = commercial_m2_cap[region][year] * (govern/commercial_sum)
#%% Add historic tail (1720-1970) + 100 yr initial --------------------------------------------
# load historic population development
hist_pop = pd.read_csv('files_initial_stock/hist_pop.csv', index_col = [0]) # initial population as a percentage of the 1970 population; unit: %; according to the Maddison Project Database (MPD) 2018 (Groningen University)
# Determine the historical average global trend in floorspace/cap & the regional rural population share based on the first 10 years of IMAGE data
floorspace_urb_trend_by_region = [0 for j in range(0,26)]
floorspace_rur_trend_by_region = [0 for j in range(0,26)]
rurpop_trend_by_region = [0 for j in range(0,26)]
commercial_m2_cap_office_trend = [0 for j in range(0,26)]
commercial_m2_cap_retail_trend = [0 for j in range(0,26)]
commercial_m2_cap_hotels_trend = [0 for j in range(0,26)]
commercial_m2_cap_govern_trend = [0 for j in range(0,26)]
# For the RESIDENTIAL & COMMERCIAL floorspace: Derive the annual trend (in m2/cap) over the initial 10 years of IMAGE data
for region in range(1,27):
floorspace_urb_trend_by_year = [0 for i in range(0,10)]
floorspace_rur_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_office_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_retail_trend_by_year = [0 for i in range(0,10)]
commercial_m2_cap_hotels_trend_by_year = [0 for j in range(0,10)]
commercial_m2_cap_govern_trend_by_year = [0 for i in range(0,10)]
# Get the growth by year (for the first 10 years)
for year in range(1970,1980):
floorspace_urb_trend_by_year[year-1970] = floorspace_urb[region][year+1]/floorspace_urb[region][year+2]
floorspace_rur_trend_by_year[year-1970] = floorspace_rur[region][year+1]/floorspace_rur[region][year+2]
commercial_m2_cap_office_trend_by_year[year-1970] = commercial_m2_cap_office[region][year+1]/commercial_m2_cap_office[region][year+2]
commercial_m2_cap_retail_trend_by_year[year-1970] = commercial_m2_cap_retail[region][year+1]/commercial_m2_cap_retail[region][year+2]
commercial_m2_cap_hotels_trend_by_year[year-1970] = commercial_m2_cap_hotels[region][year+1]/commercial_m2_cap_hotels[region][year+2]
commercial_m2_cap_govern_trend_by_year[year-1970] = commercial_m2_cap_govern[region][year+1]/commercial_m2_cap_govern[region][year+2]
rurpop_trend_by_region[region-1] = ((1-(rurpop[str(region)][1980]/rurpop[str(region)][1970]))/10)*100
floorspace_urb_trend_by_region[region-1] = sum(floorspace_urb_trend_by_year)/10
floorspace_rur_trend_by_region[region-1] = sum(floorspace_rur_trend_by_year)/10
commercial_m2_cap_office_trend[region-1] = sum(commercial_m2_cap_office_trend_by_year)/10
commercial_m2_cap_retail_trend[region-1] = sum(commercial_m2_cap_retail_trend_by_year)/10
commercial_m2_cap_hotels_trend[region-1] = sum(commercial_m2_cap_hotels_trend_by_year)/10
commercial_m2_cap_govern_trend[region-1] = sum(commercial_m2_cap_govern_trend_by_year)/10
# Average global annual decline in floorspace/cap in %, rural: 1%; urban 1.2%; commercial: 1.26-2.18% /yr
floorspace_urb_trend_global = (1-(sum(floorspace_urb_trend_by_region)/26))*100 # in % decrease per annum
floorspace_rur_trend_global = (1-(sum(floorspace_rur_trend_by_region)/26))*100 # in % decrease per annum
commercial_m2_cap_office_trend_global = (1-(sum(commercial_m2_cap_office_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_retail_trend_global = (1-(sum(commercial_m2_cap_retail_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_hotels_trend_global = (1-(sum(commercial_m2_cap_hotels_trend)/26))*100 # in % decrease per annum
commercial_m2_cap_govern_trend_global = (1-(sum(commercial_m2_cap_govern_trend)/26))*100 # in % decrease per annum
# define historic floorspace (1820-1970) in m2/cap
floorspace_urb_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_urb.columns)
floorspace_rur_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=floorspace_rur.columns)
rurpop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=rurpop.columns)
pop_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=pop2.columns)
commercial_m2_cap_office_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1820_1970 = pd.DataFrame(index=range(1820,1971), columns=commercial_m2_cap_govern.columns)
# Find minimum or maximum values in the original IMAGE data (Just for residential, commercial minimum values have been calculated above)
minimum_urb_fs = floorspace_urb.values.min() # Region 20: China
minimum_rur_fs = floorspace_rur.values.min() # Region 20: China
maximum_rurpop = rurpop.values.max() # Region 9 : Eastern Africa
# Calculate the actual values used between 1820 & 1970, given the trends & the min/max values
for region in range(1,regions+1):
for year in range(1820,1971):
# MAX of 1) the MINimum value & 2) the calculated value
floorspace_urb_1820_1970[region][year] = max(minimum_urb_fs, floorspace_urb[region][1971] * ((100-floorspace_urb_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
floorspace_rur_1820_1970[region][year] = max(minimum_rur_fs, floorspace_rur[region][1971] * ((100-floorspace_rur_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_office_1820_1970[region][year] = max(minimum_com_office, commercial_m2_cap_office[region][1971] * ((100-commercial_m2_cap_office_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_retail_1820_1970[region][year] = max(minimum_com_retail, commercial_m2_cap_retail[region][1971] * ((100-commercial_m2_cap_retail_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_hotels_1820_1970[region][year] = max(minimum_com_hotels, commercial_m2_cap_hotels[region][1971] * ((100-commercial_m2_cap_hotels_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
commercial_m2_cap_govern_1820_1970[region][year] = max(minimum_com_govern, commercial_m2_cap_govern[region][1971] * ((100-commercial_m2_cap_govern_trend_global)/100)**(1971-year)) # single global value for average annual Decrease
# MIN of 1) the MAXimum value & 2) the calculated value
rurpop_1820_1970[str(region)][year] = min(maximum_rurpop, rurpop[str(region)][1970] * ((100+rurpop_trend_by_region[region-1])/100)**(1970-year)) # average annual INcrease by region
# just add the tail to the population (no min/max & trend is pre-calculated in hist_pop)
pop_1820_1970[str(region)][year] = hist_pop[str(region)][year] * pop[str(region)][1970]
urbpop_1820_1970 = 1 - rurpop_1820_1970
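# Worked example (added for clarity): with an x% annual decline, the value in year y is the 1971
# value times ((100 - x) / 100) ** (1971 - y); e.g. a 1% decline over 50 years gives
# 0.99 ** 50 ~= 0.605 of the 1971 level, unless the regional minimum value kicks in first.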
# To avoid full model setup in 1820 (all required stock gets built in yr 1) we assume another tail that linearly increases to the 1820 value over a 100 year time period, so 1720 = 0
floorspace_urb_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_urb.columns)
floorspace_rur_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=floorspace_rur.columns)
rurpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=rurpop.columns)
urbpop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=urbpop.columns)
pop_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=pop2.columns)
commercial_m2_cap_office_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_office.columns)
commercial_m2_cap_retail_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_retail.columns)
commercial_m2_cap_hotels_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_hotels.columns)
commercial_m2_cap_govern_1721_1820 = pd.DataFrame(index=range(1721,1820), columns=commercial_m2_cap_govern.columns)
for region in range(1,27):
for time in range(1721,1820):
# MAX(0,...) Because of floating point deviations, leading to negative stock in some cases
floorspace_urb_1721_1820[int(region)][time] = max(0.0, floorspace_urb_1820_1970[int(region)][1820] - (floorspace_urb_1820_1970[int(region)][1820]/100)*(1820-time))
floorspace_rur_1721_1820[int(region)][time] = max(0.0, floorspace_rur_1820_1970[int(region)][1820] - (floorspace_rur_1820_1970[int(region)][1820]/100)*(1820-time))
rurpop_1721_1820[str(region)][time] = max(0.0, rurpop_1820_1970[str(region)][1820] - (rurpop_1820_1970[str(region)][1820]/100)*(1820-time))
urbpop_1721_1820[str(region)][time] = max(0.0, urbpop_1820_1970[str(region)][1820] - (urbpop_1820_1970[str(region)][1820]/100)*(1820-time))
pop_1721_1820[str(region)][time] = max(0.0, pop_1820_1970[str(region)][1820] - (pop_1820_1970[str(region)][1820]/100)*(1820-time))
commercial_m2_cap_office_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_office_1820_1970[region][1820] - (commercial_m2_cap_office_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_retail_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_retail_1820_1970[region][1820] - (commercial_m2_cap_retail_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_hotels_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_hotels_1820_1970[region][1820] - (commercial_m2_cap_hotels_1820_1970[region][1820]/100)*(1820-time))
commercial_m2_cap_govern_1721_1820[int(region)][time] = max(0.0, commercial_m2_cap_govern_1820_1970[region][1820] - (commercial_m2_cap_govern_1820_1970[region][1820]/100)*(1820-time))
# combine historic with IMAGE data here
rurpop_tail = rurpop_1820_1970.append(rurpop2, ignore_index=False)
urbpop_tail = urbpop_1820_1970.append(urbpop, ignore_index=False)
pop_tail = pop_1820_1970.append(pop2, ignore_index=False)
floorspace_urb_tail = floorspace_urb_1820_1970.append(floorspace_urb, ignore_index=False)
floorspace_rur_tail = floorspace_rur_1820_1970.append(floorspace_rur, ignore_index=False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index=False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index=False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index=False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index=False)
rurpop_tail = rurpop_1721_1820.append(rurpop_1820_1970.append(rurpop2, ignore_index=False), ignore_index=False)
urbpop_tail = urbpop_1721_1820.append(urbpop_1820_1970.append(urbpop, ignore_index=False), ignore_index=False)
pop_tail = pop_1721_1820.append(pop_1820_1970.append(pop2, ignore_index=False), ignore_index=False)
floorspace_urb_tail = floorspace_urb_1721_1820.append(floorspace_urb_1820_1970.append(floorspace_urb, ignore_index=False), ignore_index=False)
floorspace_rur_tail = floorspace_rur_1721_1820.append(floorspace_rur_1820_1970.append(floorspace_rur, ignore_index=False), ignore_index=False)
commercial_m2_cap_office_tail = commercial_m2_cap_office_1721_1820.append(commercial_m2_cap_office_1820_1970.append(commercial_m2_cap_office, ignore_index=False), ignore_index=False)
commercial_m2_cap_retail_tail = commercial_m2_cap_retail_1721_1820.append(commercial_m2_cap_retail_1820_1970.append(commercial_m2_cap_retail, ignore_index=False), ignore_index=False)
commercial_m2_cap_hotels_tail = commercial_m2_cap_hotels_1721_1820.append(commercial_m2_cap_hotels_1820_1970.append(commercial_m2_cap_hotels, ignore_index=False), ignore_index=False)
commercial_m2_cap_govern_tail = commercial_m2_cap_govern_1721_1820.append(commercial_m2_cap_govern_1820_1970.append(commercial_m2_cap_govern, ignore_index=False), ignore_index=False)
#%% SQUARE METER Calculations -----------------------------------------------------------
# adjust the share for urban/rural only (shares in csv are as percentage of the total (Rur + Urb); we needed to adjust the urban shares to add up to 1, same for rural)
housing_type_rur3 = housing_type_rur2/housing_type_rur2.sum()
housing_type_urb3 = housing_type_urb2/housing_type_urb2.sum()
# calculate the total rural/urban population (pop2 = millions of people, rurpop2 = % of people living in rural areas)
people_rur = pd.DataFrame(rurpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
people_urb = pd.DataFrame(urbpop_tail.values*pop_tail.values, columns=pop_tail.columns, index=pop_tail.index)
# calculate the total number of people (urban/rural) BY HOUSING TYPE (the sum of det,sem,app & hig equals the total population e.g. people_rur)
people_det_rur = pd.DataFrame(housing_type_rur3.iloc[0].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_sem_rur = pd.DataFrame(housing_type_rur3.iloc[1].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_app_rur = pd.DataFrame(housing_type_rur3.iloc[2].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_hig_rur = pd.DataFrame(housing_type_rur3.iloc[3].values*people_rur.values, columns=people_rur.columns, index=people_rur.index)
people_det_urb = pd.DataFrame(housing_type_urb3.iloc[0].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_sem_urb = pd.DataFrame(housing_type_urb3.iloc[1].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_app_urb = pd.DataFrame(housing_type_urb3.iloc[2].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
people_hig_urb = pd.DataFrame(housing_type_urb3.iloc[3].values*people_urb.values, columns=people_urb.columns, index=people_urb.index)
# calculate the total m2 (urban/rural) BY HOUSING TYPE (= nr. of people * OWN avg m2, so not based on IMAGE)
m2_unadjusted_det_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[0].values * people_det_rur.values, columns=people_det_rur.columns, index=people_det_rur.index)
m2_unadjusted_sem_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[1].values * people_sem_rur.values, columns=people_sem_rur.columns, index=people_sem_rur.index)
m2_unadjusted_app_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[2].values * people_app_rur.values, columns=people_app_rur.columns, index=people_app_rur.index)
m2_unadjusted_hig_rur = pd.DataFrame(avg_m2_cap_rur2.iloc[3].values * people_hig_rur.values, columns=people_hig_rur.columns, index=people_hig_rur.index)
m2_unadjusted_det_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[0].values * people_det_urb.values, columns=people_det_urb.columns, index=people_det_urb.index)
m2_unadjusted_sem_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[1].values * people_sem_urb.values, columns=people_sem_urb.columns, index=people_sem_urb.index)
m2_unadjusted_app_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[2].values * people_app_urb.values, columns=people_app_urb.columns, index=people_app_urb.index)
m2_unadjusted_hig_urb = pd.DataFrame(avg_m2_cap_urb2.iloc[3].values * people_hig_urb.values, columns=people_hig_urb.columns, index=people_hig_urb.index)
# Define empty dataframes for m2 adjustments
total_m2_adj_rur = pd.DataFrame(index=m2_unadjusted_det_rur.index, columns=m2_unadjusted_det_rur.columns)
total_m2_adj_urb = pd.DataFrame(index=m2_unadjusted_det_urb.index, columns=m2_unadjusted_det_urb.columns)
# Sum all square meters in Rural area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_rur.loc[j,str(i)] = m2_unadjusted_det_rur.loc[j,str(i)] + m2_unadjusted_sem_rur.loc[j,str(i)] + m2_unadjusted_app_rur.loc[j,str(i)] + m2_unadjusted_hig_rur.loc[j,str(i)]
# Sum all square meters in Urban area
for j in range(1721,2061,1):
for i in range(1,27,1):
total_m2_adj_urb.loc[j,str(i)] = m2_unadjusted_det_urb.loc[j,str(i)] + m2_unadjusted_sem_urb.loc[j,str(i)] + m2_unadjusted_app_urb.loc[j,str(i)] + m2_unadjusted_hig_urb.loc[j,str(i)]
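# Editor's sketch (assumption: plain element-wise DataFrame addition reproduces the loops above):
# the nested year/region loops can be replaced by vectorised addition, e.g.
# total_m2_adj_rur = m2_unadjusted_det_rur + m2_unadjusted_sem_rur + m2_unadjusted_app_rur + m2_unadjusted_hig_rur
# total_m2_adj_urb = m2_unadjusted_det_urb + m2_unadjusted_sem_urb + m2_unadjusted_app_urb + m2_unadjusted_hig_urb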
# average square meter per person implied by our OWN data
avg_m2_cap_adj_rur = pd.DataFrame(total_m2_adj_rur.values / people_rur.values, columns=people_rur.columns, index=people_rur.index)
avg_m2_cap_adj_urb = pd.DataFrame(total_m2_adj_urb.values / people_urb.values, columns=people_urb.columns, index=people_urb.index)
# factor to correct square meters per capita so that we respect the IMAGE data in terms of total m2, but we use our own distinction between Building types
m2_cap_adj_fact_rur = pd.DataFrame(floorspace_rur_tail.values / avg_m2_cap_adj_rur.values, columns=floorspace_rur_tail.columns, index=floorspace_rur_tail.index)
m2_cap_adj_fact_urb = pd.DataFrame(floorspace_urb_tail.values / avg_m2_cap_adj_urb.values, columns=floorspace_urb_tail.columns, index=floorspace_urb_tail.index)
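# Editor's worked example of the correction factor: if our own building-type mix implies
# 30 m2/cap for a region-year while IMAGE reports 24 m2/cap, the factor is 24/30 = 0.8,
# and every building-type m2 for that region-year below is scaled by 0.8.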
# All m2 by region (in millions), Building_type & year (using the correction factor, to comply with IMAGE avg m2/cap)
m2_det_rur = pd.DataFrame(m2_unadjusted_det_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_sem_rur = pd.DataFrame(m2_unadjusted_sem_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_app_rur = pd.DataFrame(m2_unadjusted_app_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_hig_rur = pd.DataFrame(m2_unadjusted_hig_rur.values * m2_cap_adj_fact_rur.values, columns=m2_cap_adj_fact_rur.columns, index=m2_cap_adj_fact_rur.index)
m2_det_urb = pd.DataFrame(m2_unadjusted_det_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_sem_urb = pd.DataFrame(m2_unadjusted_sem_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_app_urb = pd.DataFrame(m2_unadjusted_app_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
m2_hig_urb = pd.DataFrame(m2_unadjusted_hig_urb.values * m2_cap_adj_fact_urb.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
# Add a checksum to see if calculations based on adjusted OWN avg m2 (by building type) now match the total m2 according to IMAGE.
m2_sum_rur_OWN = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur
m2_sum_rur_IMAGE = pd.DataFrame(floorspace_rur_tail.values*people_rur.values, columns=m2_sum_rur_OWN.columns, index=m2_sum_rur_OWN.index)
m2_checksum = m2_sum_rur_OWN - m2_sum_rur_IMAGE
if m2_checksum.sum().sum() > 0.0000001 or m2_checksum.sum().sum() < -0.0000001:
ctypes.windll.user32.MessageBoxW(0, "IMAGE & OWN m2 sums do not match", "Warning", 1)
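# Editor's sketch: ctypes.windll is only available on Windows. A portable fallback
# (assumption: a console warning is acceptable instead of a message box) could be:
# def _warn_m2_mismatch(checksum, tol=1e-7):
#     if abs(checksum.sum().sum()) > tol:
#         print("Warning: IMAGE & OWN m2 sums do not match")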
# total RESIDENTIAL square meters by region
m2 = m2_det_rur + m2_sem_rur + m2_app_rur + m2_hig_rur + m2_det_urb + m2_sem_urb + m2_app_urb + m2_hig_urb
# Total m2 for COMMERCIAL Buildings
commercial_m2_office = pd.DataFrame(commercial_m2_cap_office_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_retail = pd.DataFrame(commercial_m2_cap_retail_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_hotels = pd.DataFrame(commercial_m2_cap_hotels_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
commercial_m2_govern = pd.DataFrame(commercial_m2_cap_govern_tail.values * pop_tail.values, columns=m2_cap_adj_fact_urb.columns, index=m2_cap_adj_fact_urb.index)
#%% MATERIAL STOCK CALCULATIONS
#rural concrete stock
material_concrete_det=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Detached')]
material_concrete_det=material_concrete_det.set_index('Region')
material_concrete_det=material_concrete_det.drop(['Building_type'],axis=1)
material_concrete_det=pd.DataFrame(material_concrete_det.values.T, index=material_concrete_det.columns, columns=material_concrete_det.index)
a=m2_det_rur.index
material_concrete_det=material_concrete_det.set_index(a)
kg_det_rur_concrete=m2_det_rur*material_concrete_det
material_concrete_sem=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Semi-detached')]
material_concrete_sem=material_concrete_sem.set_index('Region')
material_concrete_sem=material_concrete_sem.drop(['Building_type'],axis=1)
material_concrete_sem=pd.DataFrame(material_concrete_sem.values.T, index=material_concrete_sem.columns, columns=material_concrete_sem.index)
a=m2_sem_rur.index
material_concrete_sem=material_concrete_sem.set_index(a)
kg_sem_rur_concrete=m2_sem_rur*material_concrete_sem
material_concrete_app=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Appartments')]
material_concrete_app=material_concrete_app.set_index('Region')
material_concrete_app=material_concrete_app.drop(['Building_type'],axis=1)
material_concrete_app=pd.DataFrame(material_concrete_app.values.T, index=material_concrete_app.columns, columns=material_concrete_app.index)
a=m2_app_rur.index
material_concrete_app=material_concrete_app.set_index(a)
kg_app_rur_concrete=m2_app_rur*material_concrete_app
material_concrete_hig=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='High-rise')]
material_concrete_hig=material_concrete_hig.set_index('Region')
material_concrete_hig=material_concrete_hig.drop(['Building_type'],axis=1)
material_concrete_hig=pd.DataFrame(material_concrete_hig.values.T, index=material_concrete_hig.columns, columns=material_concrete_hig.index)
a=m2_hig_rur.index
material_concrete_hig=material_concrete_hig.set_index(a)
kg_hig_rur_concrete=m2_hig_rur*material_concrete_hig
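# Editor's sketch (assumption: every material-intensity block follows the same reshape):
# the repeated set_index/drop/transpose/re-index steps above and below could be wrapped in a small helper, e.g.
# def _intensity_table(materials_df, building_type, target_index):
#     m = materials_df.loc[materials_df['Building_type'] == building_type]
#     m = m.set_index('Region').drop(['Building_type'], axis=1)
#     return pd.DataFrame(m.values.T, index=m.columns, columns=m.index).set_index(target_index)
# kg_det_rur_concrete = m2_det_rur * _intensity_table(building_materials_concrete, 'Detached', m2_det_rur.index)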
#urban concrete stock
material_concrete_det=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Detached')]
material_concrete_det=material_concrete_det.set_index('Region')
material_concrete_det=material_concrete_det.drop(['Building_type'],axis=1)
material_concrete_det=pd.DataFrame(material_concrete_det.values.T, index=material_concrete_det.columns, columns=material_concrete_det.index)
a=m2_det_urb.index
material_concrete_det=material_concrete_det.set_index(a)
kg_det_urb_concrete=m2_det_urb*material_concrete_det
material_concrete_sem=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Semi-detached')]
material_concrete_sem=material_concrete_sem.set_index('Region')
material_concrete_sem=material_concrete_sem.drop(['Building_type'],axis=1)
material_concrete_sem=pd.DataFrame(material_concrete_sem.values.T, index=material_concrete_sem.columns, columns=material_concrete_sem.index)
a=m2_sem_urb.index
material_concrete_sem=material_concrete_sem.set_index(a)
kg_sem_urb_concrete=m2_sem_urb*material_concrete_sem
material_concrete_app=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='Appartments')]
material_concrete_app=material_concrete_app.set_index('Region')
material_concrete_app=material_concrete_app.drop(['Building_type'],axis=1)
material_concrete_app=pd.DataFrame(material_concrete_app.values.T, index=material_concrete_app.columns, columns=material_concrete_app.index)
a=m2_app_urb.index
material_concrete_app=material_concrete_app.set_index(a)
kg_app_urb_concrete=m2_app_urb*material_concrete_app
material_concrete_hig=building_materials_concrete.loc[(building_materials_concrete['Building_type']=='High-rise')]
material_concrete_hig=material_concrete_hig.set_index('Region')
material_concrete_hig=material_concrete_hig.drop(['Building_type'],axis=1)
material_concrete_hig=pd.DataFrame(material_concrete_hig.values.T, index=material_concrete_hig.columns, columns=material_concrete_hig.index)
a=m2_hig_urb.index
material_concrete_hig=material_concrete_hig.set_index(a)
kg_hig_urb_concrete=m2_hig_urb*material_concrete_hig
#rural glass stock
material_glass_det=building_materials_glass.loc[(building_materials_glass['Building_type']=='Detached')]
material_glass_det=material_glass_det.set_index('Region')
material_glass_det=material_glass_det.drop(['Building_type'],axis=1)
material_glass_det=pd.DataFrame(material_glass_det.values.T, index=material_glass_det.columns, columns=material_glass_det.index)
a=m2_det_rur.index
material_glass_det=material_glass_det.set_index(a)
kg_det_rur_glass=m2_det_rur*material_glass_det
material_glass_sem=building_materials_glass.loc[(building_materials_glass['Building_type']=='Semi-detached')]
material_glass_sem=material_glass_sem.set_index('Region')
material_glass_sem=material_glass_sem.drop(['Building_type'],axis=1)
material_glass_sem=pd.DataFrame(material_glass_sem.values.T, index=material_glass_sem.columns, columns=material_glass_sem.index)
a=m2_sem_rur.index
material_glass_sem=material_glass_sem.set_index(a)
kg_sem_rur_glass=m2_sem_rur*material_glass_sem
material_glass_app=building_materials_glass.loc[(building_materials_glass['Building_type']=='Appartments')]
material_glass_app=material_glass_app.set_index('Region')
material_glass_app=material_glass_app.drop(['Building_type'],axis=1)
material_glass_app=pd.DataFrame(material_glass_app.values.T, index=material_glass_app.columns, columns=material_glass_app.index)
a=m2_app_rur.index
material_glass_app=material_glass_app.set_index(a)
kg_app_rur_glass=m2_app_rur*material_glass_app
material_glass_hig=building_materials_glass.loc[(building_materials_glass['Building_type']=='High-rise')]
material_glass_hig=material_glass_hig.set_index('Region')
material_glass_hig=material_glass_hig.drop(['Building_type'],axis=1)
material_glass_hig=pd.DataFrame(material_glass_hig.values.T, index=material_glass_hig.columns, columns=material_glass_hig.index)
a=m2_hig_rur.index
material_glass_hig=material_glass_hig.set_index(a)
kg_hig_rur_glass=m2_hig_rur*material_glass_hig
#urban glass stock
material_glass_det=building_materials_glass.loc[(building_materials_glass['Building_type']=='Detached')]
material_glass_det=material_glass_det.set_index('Region')
material_glass_det=material_glass_det.drop(['Building_type'],axis=1)
material_glass_det=pd.DataFrame(material_glass_det.values.T, index=material_glass_det.columns, columns=material_glass_det.index)
a=m2_det_urb.index
material_glass_det=material_glass_det.set_index(a)
kg_det_urb_glass=m2_det_urb*material_glass_det
material_glass_sem=building_materials_glass.loc[(building_materials_glass['Building_type']=='Semi-detached')]
material_glass_sem=material_glass_sem.set_index('Region')
material_glass_sem=material_glass_sem.drop(['Building_type'],axis=1)
material_glass_sem=pd.DataFrame(material_glass_sem.values.T, index=material_glass_sem.columns, columns=material_glass_sem.index)
a=m2_sem_urb.index
material_glass_sem=material_glass_sem.set_index(a)
kg_sem_urb_glass=m2_sem_urb*material_glass_sem
material_glass_app=building_materials_glass.loc[(building_materials_glass['Building_type']=='Appartments')]
material_glass_app=material_glass_app.set_index('Region')
material_glass_app=material_glass_app.drop(['Building_type'],axis=1)
material_glass_app=pd.DataFrame(material_glass_app.values.T, index=material_glass_app.columns, columns=material_glass_app.index)
a=m2_app_urb.index
material_glass_app=material_glass_app.set_index(a)
kg_app_urb_glass=m2_app_urb*material_glass_app
material_glass_hig=building_materials_glass.loc[(building_materials_glass['Building_type']=='High-rise')]
material_glass_hig=material_glass_hig.set_index('Region')
material_glass_hig=material_glass_hig.drop(['Building_type'],axis=1)
material_glass_hig=pd.DataFrame(material_glass_hig.values.T, index=material_glass_hig.columns, columns=material_glass_hig.index)
a=m2_hig_urb.index
material_glass_hig=material_glass_hig.set_index(a)
kg_hig_urb_glass=m2_hig_urb*material_glass_hig
# Commercial Building materials (in Million kg)
#commercial concrete stock
materials_concrete_office=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Offices')]
materials_concrete_office=materials_concrete_office.drop(['Building_type'],axis=1)
materials_concrete_office=pd.DataFrame(materials_concrete_office.values.T, index=materials_concrete_office.columns, columns=materials_concrete_office.index)
a= commercial_m2_office.index
materials_concrete_office=materials_concrete_office.set_index(a)
kg_office_concrete=commercial_m2_office*materials_concrete_office
materials_concrete_retail=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Retail+')]
materials_concrete_retail=materials_concrete_retail.drop(['Building_type'],axis=1)
materials_concrete_retail=pd.DataFrame(materials_concrete_retail.values.T, index=materials_concrete_retail.columns, columns=materials_concrete_retail.index)
a= commercial_m2_retail.index
materials_concrete_retail=materials_concrete_retail.set_index(a)
kg_retail_concrete=commercial_m2_retail*materials_concrete_retail
materials_concrete_hotels=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Hotels+')]
materials_concrete_hotels=materials_concrete_hotels.drop(['Building_type'],axis=1)
materials_concrete_hotels=pd.DataFrame(materials_concrete_hotels.values.T, index=materials_concrete_hotels.columns, columns=materials_concrete_hotels.index)
a= commercial_m2_hotels.index
materials_concrete_hotels=materials_concrete_hotels.set_index(a)
kg_hotels_concrete=commercial_m2_hotels*materials_concrete_hotels
materials_concrete_govern=materials_commercial_concrete.loc[(materials_commercial_concrete['Building_type']=='Govt+')]
materials_concrete_govern=materials_concrete_govern.drop(['Building_type'],axis=1)
materials_concrete_govern=pd.DataFrame(materials_concrete_govern.values.T, index=materials_concrete_govern.columns, columns=materials_concrete_govern.index)
a= commercial_m2_govern.index
materials_concrete_govern=materials_concrete_govern.set_index(a)
kg_govern_concrete=commercial_m2_govern*materials_concrete_govern
#commercial glass stock
materials_glass_office=materials_commercial_glass.loc[(materials_commercial_glass['Building_type']=='Offices')]
materials_glass_office=materials_glass_office.drop(['Building_type'],axis=1)
materials_glass_office=
|
pd.DataFrame(materials_glass_office.values.T, index=materials_glass_office.columns, columns=materials_glass_office.index)
|
pandas.DataFrame
|
#!/usr/bin/env python
# coding: utf-8
import os
import glob
import pandas as pd
import matplotlib.pyplot as plt
import psycopg2
from sqlalchemy import create_engine
import yaml
import sys, getopt
import logging
import logging.config
import zipfile
# Copyright 2021 National Technology & Engineering Solutions of Sandia, LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights in this software.
# Function to convert a list to a string
def listToString(s):
# initialize an empty string
str1 = ""
# traverse the list and concatenate its elements
for ele in s:
str1 += ele
# return string
return str1
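# Editor's usage note: for a list of strings this is equivalent to str.join, e.g.
# listToString(["ab", "c"]) == "".join(["ab", "c"]) == "abc"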
# unpack the dataframe and calculate quantities used in statistics
def calc_cycle_quantities(df):
logging.info('calculate quantities used in statistics')
tmp_arr = df[["test_time", "i", "v", "ah_c", 'e_c', 'ah_d', 'e_d', 'cycle_time']].to_numpy()
start = 0
last_time = 0
last_i = 0
last_v = 0
last_ah_c = 0
last_e_c = 0
last_ah_d = 0
last_e_d = 0
initial_time = 0
for x in tmp_arr:
if start == 0:
start += 1
initial_time = x[0]
else:
if x[1] >= 0:
x[3] = (x[0]-last_time)*(x[1]+last_i)*0.5+last_ah_c
x[4] = (x[0]-last_time)*(x[1]+last_i)*0.5*(x[2]+last_v)*0.5+last_e_c
elif x[1] <= 0:
x[5] = (x[0] - last_time) * (x[1] + last_i) * 0.5 + last_ah_d
x[6] = (x[0] - last_time) * (x[1] + last_i) * 0.5 * (x[2] + last_v) * 0.5 + last_e_d
x[7] = x[0] - initial_time
last_time = x[0]
last_i = x[1]
last_v = x[2]
last_ah_c = x[3]
last_e_c = x[4]
last_ah_d = x[5]
last_e_d = x[6]
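# Editor's worked example of the trapezoidal update above: two samples 1 s apart at
# 2 A and 4 A add (1 s) * (2 A + 4 A) * 0.5 = 3 A*s of charge, which the /3600.0
# conversion below turns into Ah.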
df_tmp = pd.DataFrame(data=tmp_arr[:, [3]], columns=["ah_c"])
df_tmp.index += df.index[0]
df['ah_c'] = df_tmp['ah_c']/3600.0
df_tmp =
|
pd.DataFrame(data=tmp_arr[:, [4]], columns=["e_c"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 10:28:35 2021
@author: <NAME> - Spatial structure index value distribution of urban streetscape
"""
import pickle
from database import postSQL2gpd,gpd2postSQL
import pandas as pd
xian_epsg=32649 #Xi'an WGS84 / UTM zone 49N
wgs84_epsg=4326
poi_classificationName={
0:"delicacy",
1:"hotel",
2:"shopping",
3:"lifeService",
4:"beauty",
5:"spot",
6:"entertainment",
7:"sports",
8:"education",
9:"media",
10:"medicalTreatment",
11:"carService",
12:"trafficFacilities",
13:"finance",
14:"realEstate",
15:"corporation",
16:"government",
17:"entrance",
18:"naturalFeatures",
}
poi_classificationName_reverse={v:k for k,v in poi_classificationName.items()}
def street_poi_structure(poi,position,distance=300):
from tqdm import tqdm
import pickle,math
import pandas as pd
import numpy as np
import geopandas as gpd
# tqdm.pandas()
poi_num=len(poi_classificationName.keys())
feature_vector=np.zeros(poi_num)
poi_=poi.copy(deep=True)
pos_poi_dict={}
pos_poi_idxes_df=
|
pd.DataFrame(columns=['geometry','frank_e','num'])
|
pandas.DataFrame
|
"""
Run tests on inStrain profile
"""
import glob
import importlib
import logging
import os
import shutil
from subprocess import call
import numpy as np
import pandas as pd
from Bio import SeqIO
import inStrain
import inStrain.SNVprofile
import inStrain.argumentParser
import inStrain.controller
import inStrain.deprecated
import inStrain.deprecated.deprecated_filter_reads
import inStrain.filter_reads
import inStrain.logUtils
import inStrain.profile
import inStrain.profile.fasta
import tests.test_utils as test_utils
from test_utils import BTO
# class test_profile:
# def __init__(BTO):
# BTO.script = test_utils.get_script_loc('inStrain')
# BTO.test_dir = test_utils.load_random_test_dir()
# BTO.bam1 = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sorted.bam'
# BTO.fasta = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa'
# BTO.failure_bam = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_failureScaffold.sorted.bam'
# BTO.single_scaff = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_101.fasta'
# BTO.fasta_extra = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000_extra.fa'
# BTO.small_fasta = test_utils.load_data_loc() + \
# 'SmallScaffold.fa'
# BTO.small_bam = test_utils.load_data_loc() + \
# 'SmallScaffold.fa.sorted.bam'
# BTO.extra_single_scaff = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_101_extra.fasta'
# BTO.failure_fasta = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_failureScaffold.fa'
# BTO.failure_genes = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_failureScaffold.fa.genes.fna.fa'
# BTO.cc_solution = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.bam.CB'
# BTO.pp_snp_solution = test_utils.load_data_loc() + \
# 'strainProfiler_v0.3_results/N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sorted' \
# '.bam_SP_snpLocations.pickle '
# BTO.cc_snp_solution = test_utils.load_data_loc() + \
# 'v0.4_results/test_0.98.freq'
# BTO.v12_solution = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sorted.bam.IS.v1.2.14'
# BTO.sam = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.sam'
# BTO.IS = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa-vs-N5_271_010G1.IS'
# BTO.scafflist = test_utils.load_data_loc() + \
# 'scaffList.txt'
# BTO.genes = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa.genes.fna'
# BTO.stb = test_utils.load_data_loc() + \
# 'GenomeCoverages.stb'
# BTO.genes = test_utils.load_data_loc() + \
# 'N5_271_010G1_scaffold_min1000.fa.genes.fna'
#
# twelve2thirteen, del_thirteen, new_thirteen = test_utils.get_twelve2thriteen()
# BTO.twelve2thirteen = twelve2thirteen
# BTO.del_thirteen = del_thirteen
# BTO.new_thirteen = new_thirteen
#
# def setUp(BTO, destroy=True):
# if destroy:
# if os.path.isdir(BTO.test_dir):
# shutil.rmtree(BTO.test_dir)
# os.mkdir(BTO.test_dir)
#
# importlib.reload(logging)
#
# def tearDown(BTO):
# logging.shutdown()
# if os.path.isdir(BTO.test_dir):
# shutil.rmtree(BTO.test_dir)
#
# def run(BTO, min=0, max=19, tests='all', cleanUp=True):
# # YOU HAVE TO RUN THIS ONE ON ITS OWN, BECAUSE IT MESSES UP FUTURE RUNS
# # BTO.setUp()
# # BTO.test0()
# # BTO.tearDown()
#
# if tests == 'all':
# tests = np.arange(min, max+1)
#
# for test_num in tests:
# BTO.setUp()
# print("\n*** Running {1} test {0} ***\n".format(test_num, BTO.__class__))
# eval('BTO.test{0}()'.format(test_num))
# if cleanUp:
# BTO.tearDown()
#
# # BTO.setUp(destroy=True)
# # BTO.test16()
# # BTO.tearDown()
def test_profile_0(BTO):
"""
Compare Matts to CCs methodology
"""
# Run Matts program
base = BTO.test_dir + 'testMatt'
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --store_everything --min_mapq 2 --skip_genome_wide".format(
True, BTO.bam1,
BTO.fasta, base)
print(cmd)
sys_args = cmd.split(' ')
args = inStrain.argumentParser.parse_args(sys_args[1:])
inStrain.controller.Controller().main(args)
# Load the object
Matt_object = inStrain.SNVprofile.SNVprofile(base)
# Run CCs program
base = BTO.test_dir + 'testCC'
cmd = "{0} {1} {2} -o {3} -l 0.95".format(True, BTO.bam1,
BTO.fasta, base)
print(cmd)
args = inStrain.deprecated.parse_arguments(cmd.split()[1:])
inStrain.deprecated.main(args)
# Load the object
CC_object = inStrain.deprecated.SNVdata()
CC_object.load(name=base + '_0.95')
## COMPARE SNPS
# Parse CCs dumb SNV table
CPdb = CC_object.snv_table
CPdb['var_base'] = [s.split(':')[1] for s in CPdb['SNV']]
CPdb['loc'] = [int(s.split(':')[0].split('_')[-1]) for s in CPdb['SNV']]
CPdb['scaff'] = ['_'.join(s.split(':')[0].split('_')[:-1]) for s in CPdb['SNV']]
CPdb = CPdb.drop_duplicates(subset=['scaff', 'loc'])
CPdb = CPdb.sort_values(['scaff', 'loc'])
# Load Matts beautiful object
MPdb = Matt_object.get_nonredundant_snv_table()
# Allowing for cryptic SNPs, make sure Matt calls everything CC does
MS = set(["{0}-{1}".format(x, y) for x, y in zip(MPdb['scaffold'], MPdb['position'])])
CS = set(["{0}-{1}".format(x, y) for x, y in zip(CPdb['scaff'], CPdb['loc'])])
assert len(MS) > 0
assert len(CS - MS) == 0, CS - MS
# Not allowing for cryptic SNPs, make sure CC calls everything Matt does
MPdb = MPdb[MPdb['cryptic'] == False]
MPdb = MPdb[MPdb['allele_count'] >= 2]
MS = set(["{0}-{1}".format(x, y) for x, y in zip(MPdb['scaffold'], MPdb['position'])])
CS = set(["{0}-{1}".format(x, y) for x, y in zip(CPdb['scaff'], CPdb['loc'])])
assert len(MS - CS) == 0, MS - CS
## COMPARE CLONALITY
# Parse CCs dumb clonality table
CLdb = CC_object.clonality_table
p2c = CLdb.set_index('position')['clonality'].to_dict()
# Load Matt's beautiful table
MCLdb = Matt_object.get_clonality_table()
# print(set(p2c.keys()) - set(["{0}_{1}".format(s, p) for s, p in zip(MCLdb['scaffold'], MCLdb['position'])]))
assert len(MCLdb) == len(CLdb), (len(MCLdb), len(CLdb))
for i, row in MCLdb.dropna().iterrows():
assert (p2c["{0}_{1}".format(row['scaffold'], row['position'])]
- row['clonality']) < .001, (row, p2c["{0}_{1}".format(row['scaffold'], row['position'])])
## COMPARE LINKAGE
CLdb = CC_object.r2linkage_table
CLdb['position_A'] = [eval(str(x))[0].split(':')[0].split('_')[-1] for x in CLdb['total_A']]
CLdb['position_B'] = [eval(str(x))[0].split(':')[0].split('_')[-1] for x in CLdb['total_B']]
CLdb['scaffold'] = [x.split(':')[0] for x in CLdb['Window']]
MLdb = Matt_object.get_nonredundant_linkage_table()
# Mark cryptic SNPs
MPdb = Matt_object.get_nonredundant_snv_table()
dbs = []
for scaff, db in MPdb.groupby('scaffold'):
p2c = db.set_index('position')['cryptic'].to_dict()
mdb = MLdb[MLdb['scaffold'] == scaff]
mdb['cryptic'] = [True if ((p2c[a] == True) | (p2c[b] == True)) else False for a, b in zip(
mdb['position_A'], mdb['position_B'])]
dbs.append(mdb)
MLdb = pd.concat(dbs)
# # Make sure MLdb and MPdb aggree
# MLS = set(["{0}-{1}".format(x, y, z) for x, y, z in zip(MLdb['scaffold'], MLdb['position_A'], MLdb['position_B'])]).union(
# set(["{0}-{2}".format(x, y, z) for x, y, z in zip(MLdb['scaffold'], MLdb['position_A'], MLdb['position_B'])]))
# print([len(MS), len(MLS), len(MS - MLS), len(MLS - MS)])
# assert MS == MLS
# Allowing for cryptic SNPs, make sure Matt calls everything CC does
MS = set(["{0}-{1}-{2}".format(x, y, z) for x, y, z in
zip(MLdb['scaffold'], MLdb['position_A'], MLdb['position_B'])])
CS = set(["{0}-{1}-{2}".format(x, y, z) for x, y, z in
zip(CLdb['scaffold'], CLdb['position_A'], CLdb['position_B'])])
assert len(CS - MS) <= 1, [CS - MS]
# At scaffold N5_271_010G1_scaffold_110 from position 525 to 546 you end up in an edge case
# where you skip it because you have absolutely no minor alleles to counterbalance it. It's fine,
# CC just reports an r2 of np.nan, and this seems like the easiest way to handle it
# Not allowing for cryptic SNPs, make sure CC calls everything Matt does
MLdb = MLdb[MLdb['cryptic'] == False]
MS = set(["{0}-{1}-{2}".format(x, y, z) for x, y, z in
zip(MLdb['scaffold'], MLdb['position_A'], MLdb['position_B'])])
CS = set(["{0}-{1}-{2}".format(x, y, z) for x, y, z in
zip(CLdb['scaffold'], CLdb['position_A'], CLdb['position_B'])])
assert len(MS - CS) == 0, [len(MS), len(CS), len(MS - CS), MS - CS]
def test_profile_1(BTO):
"""
Basic test- Make sure whole version doesn't crash when run from the command line
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.98".format(True, BTO.bam1,
BTO.fasta_extra, base)
print(cmd)
call(cmd, shell=True)
# Make sure it produced output
assert os.path.isdir(base)
assert len(glob.glob(base + '/output/*')) == 5, glob.glob(base + '/output/*')
# Make sure the output makes sense
S1 = inStrain.SNVprofile.SNVprofile(base)
db = S1.get('cumulative_scaffold_table')
assert len(db) > 0
test_utils._internal_verify_Sdb(db)
# Make sure it doesn't mess up at lower-case bases (a lower-case 'c' was placed in scaffold N5_271_010G1_scaffold_0; make sure it does not show up as a reference base)
sdb = S1.get('raw_snp_table')
assert len(sdb) > 0
assert len(sdb[sdb['ref_base'] == 'c']) == 0
def test_profile_2(BTO):
"""
Test filter reads; make sure CCs and Matt's agree
"""
# Set up
positions, total_length = inStrain.deprecated.deprecated_filter_reads.get_fasta(BTO.fasta)
min_read_ani = 0.98
# Run initial filter_reads
subset_reads, Rdb = inStrain.deprecated.deprecated_filter_reads.filter_reads(BTO.bam1, positions,
total_length,
filter_cutoff=min_read_ani,
max_insert_relative=3,
min_insert=50, min_mapq=2)
# Run Matts filter_reads
scaff2sequence = SeqIO.to_dict(SeqIO.parse(BTO.fasta, "fasta")) # set up .fasta file
s2l = {s: len(scaff2sequence[s]) for s in list(scaff2sequence.keys())} # Get scaffold2length
scaffolds = list(s2l.keys())
subset_reads2 = inStrain.deprecated.deprecated_filter_reads.filter_paired_reads(BTO.bam1,
scaffolds,
filter_cutoff=min_read_ani,
max_insert_relative=3,
min_insert=50, min_mapq=2)
# Run Matts filter_reads in a different way
pair2info = inStrain.deprecated.deprecated_filter_reads.get_paired_reads(BTO.bam1, scaffolds)
pair2infoF = inStrain.deprecated.deprecated_filter_reads.filter_paired_reads_dict(pair2info,
filter_cutoff=min_read_ani,
max_insert_relative=3,
min_insert=50, min_mapq=2)
subset_readsF = list(pair2infoF.keys())
# Run Matts filter_reads in a different way still
scaff2pair2infoM, Rdb = inStrain.filter_reads.load_paired_reads(
BTO.bam1, scaffolds, min_read_ani=min_read_ani,
max_insert_relative=3, min_insert=50, min_mapq=2)
# pair2infoMF = inStrain.filter_reads.paired_read_filter(scaff2pair2infoM)
# pair2infoMF = inStrain.filter_reads.filter_scaff2pair2info(pair2infoMF,
# min_read_ani=min_read_ani, max_insert_relative=3,
# min_insert=50, min_mapq=2)
subset_readsMF = set()
for scaff, pair2infoC in scaff2pair2infoM.items():
subset_readsMF = subset_readsMF.union(pair2infoC.keys())
# subset_readsMF = list(pair2infoMF.keys())
assert (set(subset_reads2) == set(subset_reads) == set(subset_readsF) == set(subset_readsMF)), \
[len(subset_reads2), len(subset_reads), len(subset_readsF), len(subset_readsMF)]
# Make sure the filter report is accurate
# Rdb = inStrain.filter_reads.makeFilterReport2(scaff2pair2infoM, pairTOinfo=pair2infoMF, min_read_ani=min_read_ani, max_insert_relative=3,
# min_insert=50, min_mapq=2)
assert int(Rdb[Rdb['scaffold'] == 'all_scaffolds'] \
['unfiltered_pairs'].tolist()[0]) \
== len(list(pair2info.keys()))
assert int(Rdb[Rdb['scaffold'] == 'all_scaffolds']['filtered_pairs'].tolist()[0]) \
== len(subset_reads)
# Try another cutoff
positions, total_length = inStrain.deprecated.deprecated_filter_reads.get_fasta(BTO.fasta)
min_read_ani = 0.90
# Run initial filter_reads
subset_reads, Rdb = inStrain.deprecated.deprecated_filter_reads.filter_reads(BTO.bam1, positions,
total_length,
filter_cutoff=min_read_ani,
max_insert_relative=3,
min_insert=50, min_mapq=2)
# Run Matts filter_reads
scaff2sequence = SeqIO.to_dict(SeqIO.parse(BTO.fasta, "fasta")) # set up .fasta file
s2l = {s: len(scaff2sequence[s]) for s in list(scaff2sequence.keys())} # Get scaffold2length
scaffolds = list(s2l.keys())
scaff2pair2infoM, Rdb = inStrain.filter_reads.load_paired_reads(
BTO.bam1, scaffolds, min_read_ani=min_read_ani,
max_insert_relative=3, min_insert=50, min_mapq=2)
pair2infoMF_keys = set()
for scaff, pair2infoC in scaff2pair2infoM.items():
pair2infoMF_keys = pair2infoMF_keys.union(pair2infoC.keys())
# Scaff2pair2infoM = inStrain.filter_reads.get_paired_reads_multi(BTO.bam1, scaffolds)
# pair2infoMF = inStrain.filter_reads.paired_read_filter(scaff2pair2infoM)
# pair2infoMF = inStrain.filter_reads.filter_paired_reads_dict2(pair2infoMF,
# min_read_ani=min_read_ani, max_insert_relative=3,
# min_insert=50, min_mapq=2)
subset_reads2 = pair2infoMF_keys
assert (set(subset_reads2) == set(subset_reads))
def test_profile_3(BTO):
"""
Testing scaffold table (breadth and coverage) vs. calculate_breadth
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --skip_genome_wide".format(True, BTO.bam1,
BTO.fasta, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
Odb = Sprofile.get('cumulative_scaffold_table')
# Verify coverage table
test_utils._internal_verify_Sdb(Odb)
# Compare to calculate_coverage
Cdb = pd.read_csv(BTO.cc_solution)
s2c = Cdb.set_index('scaffold')['coverage'].to_dict()
s2b = Cdb.set_index('scaffold')['breadth'].to_dict()
for scaff, db in Odb.groupby('scaffold'):
db = db.sort_values('mm', ascending=False)
assert (db['coverage'].tolist()[0] - s2c[scaff]) < .1, \
[db['coverage'].tolist()[0], s2c[scaff]]
assert (db['breadth'].tolist()[0] - s2b[scaff]) < .01, \
[db['breadth'].tolist()[0], s2b[scaff]]
# Verify SNP calls
Sdb = Sprofile.get('cumulative_snv_table')
test_utils._internal_verify_OdbSdb(Odb, Sdb)
def test_profile_4(BTO):
"""
Test store_everything and database mode
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --store_everything --skip_plot_generation -s {4}".format(
True, BTO.bam1,
BTO.fasta, base, BTO.stb)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
# Make sure you stored a heavy object
assert Sprofile.get('testeroni') is None
assert Sprofile.get('covT') is not None
assert Sprofile.get('mm_to_position_graph') is not None
# Run database mode
base2 = BTO.test_dir + 'test2'
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --database_mode --skip_plot_generation -s {4}".format(
True, BTO.bam1,
BTO.fasta, base2, BTO.stb)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile2 = inStrain.SNVprofile.SNVprofile(base2)
# Make sure you didn't store a heavy object
assert Sprofile2.get('mm_to_position_graph') is None
# Make sure you have more reads mapping
mdb1 = Sprofile.get('mapping_info').sort_values('scaffold').reset_index(
drop=True)
mdb2 = Sprofile2.get('mapping_info').sort_values('scaffold').reset_index(
drop=True)
assert set(mdb1['scaffold']) == set(mdb2['scaffold'])
assert not test_utils.compare_dfs2(mdb1, mdb2, verbose=True)
# Make sure you have more skip mm level
mdb1 = Sprofile.get('genome_level_info')
mdb2 = Sprofile2.get('genome_level_info')
assert 'mm' in mdb1
assert 'mm' not in mdb2
# Make sure you skip junk genomes
# assert len(set(mdb1['genome']) - set(mdb2['genome'])) > 0
def test_profile_5(BTO):
"""
Test one thread
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 -p 1 --skip_genome_wide".format(True, BTO.bam1,
BTO.fasta, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
# Make sure you get a result
Odb = Sprofile.get('cumulative_scaffold_table')
assert len(Odb['scaffold'].unique()) == 178, Odb
def test_profile_6(BTO):
"""
Test the case where only one scaffold is present at all in the .bam file AND it has no SNPs
"""
# Run program
base = BTO.test_dir + 'test'
cmd = "inStrain profile {1} {2} -o {3} -l 0.99 -p 1 --skip_genome_wide".format(True, BTO.bam1,
BTO.single_scaff, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
# Load output
Odb = Sprofile.get('cumulative_scaffold_table')
print(Odb)
test_utils._internal_verify_Sdb(Odb)
def test_profile_7(BTO):
"""
Test the case where a scaffold is not present at all in the .bam file
Also test being able to adjust read filtering parameters
"""
# Run program
base = BTO.test_dir + 'test'
cmd = "inStrain profile {1} {2} -o {3} -l 0.80 -p 6 --store_everything --skip_genome_wide --skip_plot_generation".format(
True, BTO.bam1,
BTO.extra_single_scaff, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
# Make sure scaffold table is OK
Odb = Sprofile.get('cumulative_scaffold_table')
test_utils._internal_verify_Sdb(Odb)
# Check the read report
rloc = glob.glob(Sprofile.get_location('output') + '*mapping_info.tsv')[0]
with open(rloc) as f:
first_line = f.readline()
assert "min_read_ani:0.8" in first_line
rdb = pd.read_csv(rloc, sep='\t', header=1)
total_pairs = rdb[rdb['scaffold'] == 'all_scaffolds']['filtered_pairs'].tolist()[0]
reads = set()
for s, rdic in Sprofile.get('Rdic').items():
reads = reads.union(rdic.keys())
assert total_pairs == len(reads)
ORI_READS = len(reads)
for thing, val in zip(['min_mapq', 'max_insert_relative', 'min_insert'], [10, 1, 100]):
print("!!!!!")
print(thing, val)
cmd = "inStrain profile {1} {2} -o {3} --{4} {5} -p 6 --store_everything --skip_plot_generation".format(
True, BTO.bam1,
BTO.extra_single_scaff, base, thing, val)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
Sprofile = inStrain.SNVprofile.SNVprofile(base)
rloc = glob.glob(Sprofile.get_location('output') + 'test_mapping_info.tsv')[0]
with open(rloc) as f:
first_line = f.readline()
assert "{0}:{1}".format(thing, val) in first_line, [first_line, thing, val]
if thing == 'max_insert_relative':
thing = 'max_insert'
rdb = pd.read_csv(rloc, sep='\t', header=1)
passF = rdb[rdb['scaffold'] == 'all_scaffolds']["pass_{0}".format(thing)].tolist()[0]
print(passF)
# assert rdb[rdb['scaffold'] == 'all_scaffolds']["pass_{0}".format(thing)].tolist()[0] == 0
reads = len(Sprofile.get('Rdic').keys())
print(Sprofile.get('Rdic'))
assert reads < ORI_READS
def test_profile_8(BTO):
"""
Test the ability to make and sort .bam files from .sam
"""
importlib.reload(logging)
# Copy sam to test dir
new_sam = os.path.join(BTO.test_dir, os.path.basename(BTO.sam))
shutil.copyfile(BTO.sam, new_sam)
# Run program
base = BTO.test_dir + 'test'
cmd = "inStrain profile {1} {2} -o {3} -l 0.80 -p 6 --store_everything --skip_genome_wide -d".format(
True, new_sam,
BTO.extra_single_scaff, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
# Load output
assert len([f for f in glob.glob(base + '/output/*') if '.log' not in f]) == 4, glob.glob(base + '/output/*')
# Make sure the missing scaffold is reported
print(glob.glob(base + '/log/*'))
rr = [f for f in glob.glob(base + '/log/*') if 'runtime' in f][0]
got = False
with open(rr, 'r') as o:
for line in o.readlines():
line = line.strip()
if 'weird_NAMED_scaff' in line:
got = True
assert got
def test_profile_9(BTO):
"""
Test the debug option
v0.5.1 - Actually this should happen all the time now...
v1.2.0 - This test is obsolete now
"""
pass
# # Run Matts program
# base = BTO.test_dir + 'testMatt'
# # cmd = "{0} {1} {2} -o {3} -l 0.95 --store_everything --debug".format(True, BTO.bam1, \
# # BTO.fasta, base)
# cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --store_everything --skip_genome_wide".format(True, BTO.bam1, \
# BTO.fasta, base)
# print(cmd)
# inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
# Sprofile = inStrain.SNVprofile.SNVprofile(base)
#
# # Open the log
# logfile = Sprofile.get_location('log') + 'log.log'
#
# table = defaultdict(list)
# with open(logfile) as o:
# for line in o.readlines():
# line = line.strip()
# if 'RAM. System has' in line:
# linewords = [x.strip() for x in line.split()]
# table['scaffold'].append(linewords[0])
# table['PID'].append(linewords[2])
# table['status'].append(linewords[3])
# table['time'].append(linewords[5])
# table['process_RAM'].append(linewords[7])
# table['system_RAM'].append(linewords[11])
# table['total_RAM'].append(linewords[13])
# logged = True
# Ldb = pd.DataFrame(table)
# assert len(Ldb) > 5
def test_profile_10(BTO):
"""
Test min number of reads filtered and min number of genome coverage
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --min_scaffold_reads 10 --skip_genome_wide".format(True,
BTO.bam1,
BTO.fasta,
base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
# Make sure you actually filtered out the scaffolds
Sdb = pd.read_csv(glob.glob(base + '/output/*scaffold_info.tsv')[0], sep='\t')
Rdb = pd.read_csv(glob.glob(base + '/output/*mapping_info.tsv')[0], sep='\t', header=1)
print("{0} of {1} scaffolds have >10 reads".format(len(Rdb[Rdb['filtered_pairs'] >= 10]),
len(Rdb)))
assert len(Sdb['scaffold'].unique()) == len(Rdb[Rdb['filtered_pairs'] >= 10]['scaffold'].unique()) - 1
# Try min_genome_coverage
# Make sure it fails with no .stb
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --min_genome_coverage 5 --skip_genome_wide".format(True,
BTO.bam1,
BTO.fasta,
base)
exit_code = call(cmd, shell=True)
assert exit_code == 1
# Run it with an .stb
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 -s {4} --min_genome_coverage 5 --skip_genome_wide".format(True,
BTO.bam1,
BTO.fasta,
base,
BTO.stb2)
exit_code = call(cmd, shell=True)
assert exit_code == 0
# Make sure you actually filtered out the scaffolds
Sdb = pd.read_csv(glob.glob(base + '/output/*scaffold_info.tsv')[0], sep='\t')
Rdb = pd.read_csv(glob.glob(base + '/output/*mapping_info.tsv')[0], sep='\t', header=1)
assert len(Rdb) == 179
print(Sdb)
assert len(Sdb) == 42
# Make sure empty scaffolds don't mess it up
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --min_genome_coverage 5 -s {4} --skip_genome_wide".format(
True, BTO.bam1, BTO.fasta_extra, base, BTO.stb2)
exit_code = call(cmd, shell=True)
assert exit_code == 0, exit_code
# Make sure you actually filtered out the scaffolds
Sdb = pd.read_csv(glob.glob(base + '/output/*scaffold_info.tsv')[0], sep='\t')
Rdb = pd.read_csv(glob.glob(base + '/output/*mapping_info.tsv')[0], sep='\t', header=1)
assert len(Rdb) == 180, len(Rdb)
assert len(Sdb) == 42, len(Sdb)
# Run it with an .stb and coverage that cannot be hit
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --min_genome_coverage 100 -s {4} --skip_genome_wide".format(
True, BTO.bam1, BTO.fasta, base, BTO.stb2)
exit_code = call(cmd, shell=True)
assert exit_code == 1
# Run it with an .stb and coverage that is low
cmd = "inStrain profile {1} {2} -o {3} -l 0.98 --min_genome_coverage 1.1 -s {4} --skip_genome_wide".format(
True, BTO.bam1, BTO.fasta, base, BTO.stb2)
exit_code = call(cmd, shell=True)
Sprofile = inStrain.SNVprofile.SNVprofile(base)
# Make sure you actually filtered out the scaffolds
Sdb = pd.read_csv(glob.glob(base + '/output/*scaffold_info.tsv')[0], sep='\t')
Rdb = pd.read_csv(glob.glob(base + '/output/*mapping_info.tsv')[0], sep='\t', header=1)
assert len(Rdb) == 179
assert len(Sdb) > 42
def test_profile_11(BTO):
"""
Test skip mm profiling
# NOTE! THIS TEST FAILS WITH NEW PYSAM (ONLY WORKS WITH PYSAM=0.15.4). NEED TO FIX
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} --skip_mm_profiling --skip_genome_wide".format(True,
BTO.bam1,
BTO.fasta, base)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
IS = inStrain.SNVprofile.SNVprofile(base)
# Make sure you get the same results
scaffdb = IS.get_nonredundant_scaffold_table().reset_index(drop=True)
correct_scaffdb = inStrain.SNVprofile.SNVprofile(BTO.IS).get_nonredundant_scaffold_table().reset_index(
drop=True)
cols = ['scaffold', 'length', 'breadth', 'coverage']
# assert test_utils.compare_dfs2(scaffdb[cols].sort_values(['scaffold']).reset_index(drop=True),
# correct_scaffdb[cols].sort_values(['scaffold']).reset_index(drop=True))
assert test_utils.compare_dfs(scaffdb[cols].sort_values(['scaffold']).reset_index(drop=True),
correct_scaffdb[cols].sort_values(['scaffold']).reset_index(drop=True),
verbose=True, round=4)
# Make sure you dont have the raw mm
sdb = IS.get('cumulative_scaffold_table')
print(sdb.head())
assert set(sdb['mm'].tolist()) == {0}, set(sdb['mm'].tolist())
def test_profile_12(BTO):
"""
Test scaffolds_to_profile
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --min_mapq 2 --scaffolds_to_profile {4} --skip_genome_wide".format(
True, BTO.bam1,
BTO.fasta, base, BTO.scafflist)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
IS = inStrain.SNVprofile.SNVprofile(base)
# Make sure you get the same results
scaffdb = IS.get_nonredundant_scaffold_table().reset_index(drop=True)
assert set(scaffdb['scaffold'].unique()) == set(inStrain.profile.fasta.load_scaff_list(BTO.scafflist))
def test_profile_13(BTO):
"""
Make sure that covT, clonT, and the SNP table agree on coverage
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -l 0.95 --min_mapq 2 --scaffolds_to_profile {4} -p 1 --skip_genome_wide".format(
True, BTO.bam1,
BTO.fasta, base, BTO.scafflist)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
IS = inStrain.SNVprofile.SNVprofile(base)
SRdb = IS.get('cumulative_snv_table')
CovT = IS.get('covT')
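# Editor's reading of the structure used below: covT maps scaffold -> {mm_level: {position: coverage}},
# so summing every mm level <= the SNV's own mm should reproduce its position_coverage.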
for i, row in SRdb.iterrows():
cov = 0
for mm, covs in CovT[row['scaffold']].items():
if mm <= row['mm']:
if row['position'] in covs:
cov += covs[row['position']]
assert row['position_coverage'] == cov, [cov, row['position_coverage'], row]
def test_profile_14(BTO):
"""
Basic test- Make sure genes and genome_wide can be run within the profile option
Make sure logging produces the run statistics
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -g {4} -l 0.98".format(True, BTO.bam1,
BTO.fasta, base, BTO.genes)
print(cmd)
call(cmd, shell=True)
# Make sure it produced output
assert os.path.isdir(base)
assert len(glob.glob(base + '/output/*')) == 6, len(glob.glob(base + '/output/*'))
assert len(glob.glob(base + '/log/*')) == 3
# Make sure the output makes sense
S1 = inStrain.SNVprofile.SNVprofile(base)
db = S1.get('cumulative_scaffold_table')
test_utils._internal_verify_Sdb(db)
# Read the log
for l in glob.glob(base + '/log/*'):
if 'runtime_summary.txt' in l:
with open(l, 'r') as o:
for line in o.readlines():
print(line.strip())
def test_profile_15(BTO):
"""
Basic test- Make sure genes and genome_wide can be run within the profile option
"""
# Set up
base = BTO.test_dir + 'test'
# Run program
cmd = "inStrain profile {1} {2} -o {3} -g {4} -l 0.98 --rarefied_coverage 10".format(True,
BTO.bam1,
BTO.fasta, base,
BTO.genes)
print(cmd)
call(cmd, shell=True)
# Make sure it produced output
assert os.path.isdir(base)
assert len(glob.glob(base + '/output/*')) == 6
# Make sure the output makes sense
S1 = inStrain.SNVprofile.SNVprofile(base)
db = S1.get('cumulative_scaffold_table')
test_utils._internal_verify_Sdb(db)
clontR = S1.get('clonTR')
counts0 = sum([len(x[2]) if 2 in x else 0 for s, x in clontR.items()])
# Make sure its in the genome_wide table
gdb = pd.read_csv(glob.glob(base + '/output/*genome_info*.tsv')[0], sep='\t')
assert 'nucl_diversity' in gdb.columns, gdb.head()
# Run again with different rarefied coverage
base = BTO.test_dir + 'test2'
cmd = "inStrain profile {1} {2} -o {3} -g {4} -l 0.98 --rarefied_coverage 50".format(True,
BTO.bam1,
BTO.fasta, base,
BTO.genes)
print(cmd)
call(cmd, shell=True)
S1 = inStrain.SNVprofile.SNVprofile(base)
db = S1.get('cumulative_scaffold_table')
test_utils._internal_verify_Sdb(db)
clontR = S1.get('clonTR')
counts2 = sum([len(x[2]) if 2 in x else 0 for s, x in clontR.items()])
assert counts0 > counts2, [counts0, counts2]
def test_profile_16(BTO):
"""
Make sure the results exactly match a run done with inStrain verion 1.2.14
"""
importlib.reload(logging)
# Run program
base = BTO.test_dir + 'test'
cmd = "inStrain profile {1} {2} -o {3} -g {4} --skip_plot_generation -p 6 -d".format(True,
BTO.bam1,
BTO.fasta, base,
BTO.genes)
print(cmd)
inStrain.controller.Controller().main(inStrain.argumentParser.parse_args(cmd.split(' ')[1:]))
exp_IS = inStrain.SNVprofile.SNVprofile(base)
sol_IS = inStrain.SNVprofile.SNVprofile(BTO.v12_solution)
# Print what the output of the solutions directory looks like
if True:
s_out_files = glob.glob(exp_IS.get_location('output') + os.path.basename(
exp_IS.get('location')) + '_*')
print("The output has {0} tables".format(len(s_out_files)))
for f in s_out_files:
name = os.path.basename(f)
print("{1}\n{0}\n{1}".format(name, '-' * len(name)))
if 'mapping_info.tsv' in name:
s = pd.read_csv(f, sep='\t', header=1)
else:
s = pd.read_csv(f, sep='\t')
print(s.head())
print()
# MAKE SURE LOG IS WORKING
assert len(glob.glob(base + '/log/*')) == 3, base
Ldb = exp_IS.get_parsed_log()
rdb = inStrain.logUtils.load_multiprocessing_log(Ldb)
LOGGED_SCAFFOLDS = set(rdb[(rdb['command'] == 'MergeProfile') & (rdb['multi_log_type'] == 'WorkerLog')]['unit'].tolist())
TRUE_SCAFFOLDS = \
set(exp_IS.get_nonredundant_scaffold_table()['scaffold'].tolist())
assert (LOGGED_SCAFFOLDS == TRUE_SCAFFOLDS)
# CHECK OUTPUT FILES
e_out_files = glob.glob(exp_IS.get_location('output') + os.path.basename(
exp_IS.get('location')) + '_*')
s_out_files = glob.glob(sol_IS.get_location('output') + os.path.basename(
sol_IS.get('location')) + '_*')
for s_file in s_out_files:
name = os.path.basename(s_file).split('v1.2.14_')[1]
e_file = [e for e in e_out_files if name in os.path.basename(e)]
print("checking {0}".format(name))
if len(e_file) == 1:
# print("Both have {0}!".format(name))
e = pd.read_csv(e_file[0], sep='\t')
s = pd.read_csv(s_file, sep='\t').rename(columns=BTO.twelve2thirteen)
if name in ['linkage.tsv']:
e = e.sort_values(['scaffold', 'position_A', 'position_B']).reset_index(drop=True)
s = s.sort_values(['scaffold', 'position_A', 'position_B']).reset_index(drop=True)
# Delete random ones
rand = ['r2_normalized', 'd_prime_normalized']
for r in rand:
del e[r]
del s[r]
if name in ['SNVs.tsv']:
e = e.sort_values(['scaffold', 'position']).reset_index(drop=True)
s = s.sort_values(['scaffold', 'position']).reset_index(drop=True)
for col in ['mutation_type', 'mutation', 'gene', 'class']:
del e[col]
if name in ['scaffold_info.tsv']:
e = e.sort_values(['scaffold']).reset_index(drop=True)
s = s.sort_values(['scaffold']).reset_index(drop=True)
# TRANSLATE THE OLD VERSION
s = s.rename(columns=BTO.twelve2thirteen)
for r in BTO.del_thirteen:
if r in s.columns:
del s[r]
for r in BTO.new_thirteen:
if r in e.columns:
del e[r]
rand = ['nucl_diversity_rarefied', 'nucl_diversity_rarefied_median']
for r in rand:
if r in e.columns:
del e[r]
if r in s.columns:
del s[r]
if name in ['mapping_info.tsv']:
e = pd.read_csv(e_file[0], sep='\t', header=1)
s = pd.read_csv(s_file, sep='\t', header=1)
e = e.sort_values(['scaffold']).reset_index(drop=True)
s = s.sort_values(['scaffold']).reset_index(drop=True)
if name in ['gene_info.tsv']:
e = e.sort_values(['scaffold', 'gene']).reset_index(drop=True)
s = s.sort_values(['scaffold', 'gene']).reset_index(drop=True)
# TRANSLATE THE OLD VERSION
s = s.rename(columns=BTO.twelve2thirteen)
for r in BTO.del_thirteen:
if r in s.columns:
del s[r]
for r in BTO.new_thirteen:
if r in e.columns:
del e[r]
rand = ['SNS_count', 'divergent_site_count', 'partial']
for r in rand:
if r in e.columns:
del e[r]
if r in s.columns:
del s[r]
# Re-arange column order
assert set(e.columns) == set(s.columns), \
[name,
set(e.columns) - set(s.columns),
set(s.columns) - set(e.columns)]
s = s[list(e.columns)]
e = e[list(e.columns)]
assert test_utils.compare_dfs2(e, s, verbose=True), name
else:
# print("Both dont have {0}!".format(name))
if name in ['read_report.tsv']:
e_file = [e for e in e_out_files if 'mapping_info.tsv' in os.path.basename(e)]
e = pd.read_csv(e_file[0], sep='\t', header=1)
s = pd.read_csv(s_file, sep='\t', header=1).rename(
columns={'pass_filter_cutoff': 'pass_min_read_ani'})
e = e.sort_values(['scaffold']).reset_index(drop=True)
s = s.sort_values(['scaffold']).reset_index(drop=True)
e = e[e['scaffold'] == 'all_scaffolds']
s = s[s['scaffold'] == 'all_scaffolds']
for c in list(s.columns):
s[c] = s[c].astype(e[c].dtype)
for c in ['median_insert']: # calculated in a different way
del e[c]
del s[c]
elif name in ['genomeWide_scaffold_info.tsv']:
e_file = [e for e in e_out_files if 'genome_info.tsv' in os.path.basename(e)]
e = pd.read_csv(e_file[0], sep='\t')
s = pd.read_csv(s_file, sep='\t').rename(columns=BTO.twelve2thirteen)
for r in BTO.del_thirteen:
if r in s.columns:
del s[r]
s = s.rename(columns=BTO.twelve2thirteen)
NEW_HERE = {'coverage_median', 'SNV_count', 'SNS_count',
'nucl_diversity', 'filtered_read_pair_count'}
for c in BTO.new_thirteen.union(NEW_HERE):
if c in e.columns:
del e[c]
# Remove the ones that are gained by the new read filtering
for c in e.columns:
if c.startswith('reads_'):
del e[c]
e = e.sort_values(['genome']).reset_index(drop=True)
s = s.sort_values(['genome']).reset_index(drop=True)
# Re-order columns
assert set(e.columns) == set(s.columns), \
[set(e.columns) - set(s.columns),
set(s.columns) - set(e.columns)]
e = e[list(s.columns)]
s = s[list(s.columns)]
changed_cols = ['coverage_std',
'nucl_diversity_rarefied']
for c in changed_cols:
del e[c]
del s[c]
elif name in ['genomeWide_read_report.tsv']:
e_file = [e for e in e_out_files if 'genome_info.tsv' in os.path.basename(e)]
e =
|
pd.read_csv(e_file[0], sep='\t')
|
pandas.read_csv
|
# If dotenv is installed, import it and load the environment variables
try:
from dotenv import load_dotenv
load_dotenv()
except:
pass
import pandas as pd
import requests
import os
client_id = os.getenv("client_id")
client_secret = os.getenv("client_secret")
def getOAuth():
# Get Oauth Token
url = "https://id.twitch.tv/oauth2/token"
param = {
"client_id": client_id,
"client_secret": client_secret,
"grant_type": "client_credentials"
}
r = requests.post(url, data=param)
access_token = r.json()["access_token"]
header = {"Client-ID": client_id, "Authorization":"Bearer "+ access_token}
return access_token, header
def getStreamerId(streamer, header):
# Get streamer's id
url = "https://api.twitch.tv/helix/users"
param = {"login": streamer}
r = requests.get(url, params=param, headers=header).json()
return r["data"][0]
def createCSV():
""" Com o CSV do bot do twitter cria um novo com as infos para o site """
access_token, header = getOAuth()
df = pd.read_csv("streamers.csv")
df_new =
|
pd.DataFrame([], columns=["Id", "Nome", "Twitch", "Descr", "Avatar"])
|
pandas.DataFrame
|
import pandas as pd
import tempfile
import inspect
import pypipegraph as ppg
from pathlib import Path
import time
import re
import pickle
import zipfile
import os
import json
import base64
from . import WideNotSupported
settings = None
def apply_ovca_settings():
global settings
if settings is not None and settings["what"] != "OVCA":
raise ValueError("different apply_*_settings being called")
def check_patient_id(patient_id):
if patient_id.startswith("OVCA"):
if not re.match(r"^OVCA\d+(R[0-9]*)?$", patient_id):
raise ValueError("Patient id must follow OVCA\\d(R[0-9]*)? if it starts with OVCA")
return "cancer"
elif patient_id.startswith("OC"):
raise ValueError("OVCA patients must not start with OC")
else:
return "non-cancer"
settings = {
"what": "OVCA",
# for the primary data
"must_have_columns": ["variable", "unit", "value", "patient"],
# for 'secondary' datasets
"must_have_columns_secondary": ["variable", "unit", "value"],
# for gene lists
"must_have_columns_tertiary_genelists": ["stable_id", "gene"],
"allowed_cells": {
"T",
"macrophage",
"tumor",
"tumor_s",
"tumor_sc",
"tumor_m",
"tumor_L",
"tumor_G",
"MDSC",
"NK",
"n.a.",
"adipocyte",
"HPMC",
"CAF",
},
"allowed_compartments": {"blood", "ascites", "n.a.", "omentum"},
"allowed_disease_states": {"cancer", "healthy", "benign", "n.a."},
"check_patient_id": check_patient_id,
'database_filename_template': 'marburg_ovca_revision_%s.zip'
}
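# Illustrative sketch (not called anywhere): expected behaviour of the OVCA patient-id
# check defined above; the ids used here are made up.
def _example_ovca_patient_ids():
    apply_ovca_settings()
    assert settings["check_patient_id"]("OVCA12") == "cancer"
    assert settings["check_patient_id"]("OVCA12R2") == "cancer"  # revision sample
    assert settings["check_patient_id"]("HD7") == "non-cancer"
    try:
        settings["check_patient_id"]("OC3")  # OC ids are explicitly rejected
    except ValueError:
        pass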
def apply_paad_settings():
"for the pancreas biobank"
global settings
if settings is not None and settings["what"] != "PAAD":
raise ValueError("different apply_*_settings being called")
def check_patient_id(patient_id):
if patient_id.startswith("ACH"):
if not re.match(r"^ACH-\d+$", patient_id):
raise ValueError("Patient id must be ACH\\d if it starts with ACH")
return "PAAD"
else:
raise ValueError(
"PAAD patients must start with ACH (non-cancer samples yet to be suported in apply_paad_settings"
)
settings = {
"what": "PAAD",
# for the primary data
"must_have_columns": ["variable", "unit", "value", "patient"],
# for 'secondary' datasets
"must_have_columns_secondary": ["variable", "unit", "value"],
# for gene lists
"must_have_columns_tertiary_genelists": ["stable_id", "gene"],
"allowed_cells": {"solid_tumor_mix",},
"allowed_compartments": {"tumor"}, # -
"allowed_disease_states": {"PAAD",},
"check_patient_id": check_patient_id,
'database_filename_template': 'marburg_paad_biobank_revision_%s.zip'
}
def check_dataframe(name, df):
# why was this done?
# if "variable" in df.columns:
# df = df.assign(
# variable=[
# x.encode("utf-8") if isinstance(x, str) else x for x in df.variable
# ]
# )
if settings is None:
raise ValueError("Must call apply_*_settings (eg. apply_ovca_settings) first")
for c in "seperate_me":
if c in df.columns:
raise ValueError("%s must no longer be a df column - %s " % (c, name))
if "compartment" in df.columns and not "disease" in df.columns:
raise ValueError("Columns must now be cell_type/disease/compartment split")
if "patient" in df.columns:
for patient in df["patient"]:
settings["check_patient_id"](patient)
#
    # dataframes often now are _actual_name/[0-9]+,
# but possibly only after writing it out...
if re.search("/[0-9]+$", name):
name = name[: name.rfind("/")]
basename = os.path.basename(name)
# no fixed requirements on _meta dfs
if not basename.startswith("_") and not name.startswith("_"):
        if (
            # no special requirements for differential datasets for now;
            # mutation data is weird enough.
            "_differential/" in name
            or "/genomics/" in name
        ):
mh = set()
elif name.startswith("secondary") or name.startswith('tertiary/transcriptomics'):
mh = set(settings["must_have_columns_secondary"])
elif name.startswith("tertiary/genelists"):
mh = set(settings["must_have_columns_tertiary_genelists"])
elif name.startswith("tertiary/survival"):
mh = set()
else:
mh = set(settings["must_have_columns"])
for c in "cell", "disease_state", "tissue":
if c in df.columns:
raise ValueError(
"%s must no longer be a df column - %s " % (c, name)
)
missing = mh.difference(df.columns)
if missing:
raise ValueError(
"%s is missing columns: %s, had %s" % (name, missing, df.columns)
)
elif name.endswith("_exclusion"):
mhc = ["patient", "reason"]
missing = set(mhc).difference(df.columns)
if missing:
raise ValueError(
"%s is missing columns: %s, had %s" % (name, missing, df.columns)
)
for column, allowed_values in [
("cell_type", settings["allowed_cells"]),
("compartment", settings["allowed_compartments"]),
("disease", settings["allowed_disease_states"]),
]:
if column in df.columns and not name.startswith("secondary/") and not name.startswith('tertiary/'):
x = set(df[column].unique()).difference(allowed_values)
if x:
raise ValueError(
"invalid %s found in %s: %s - check marburg_biobank/create.py, allowed_* if you want to extend it"
% (column, name, x)
)
if "patient" in df.columns and not name.endswith("_exclusion"):
states = set([settings["check_patient_id"](x) for x in df["patient"]])
if len(states) > 1:
if "disease" not in df.columns:
raise ValueError(
"Datasets mixing cancer and non cancer data need a disease column:%s"
% (name,)
)
for x in "variable", "unit":
if x in df.columns:
try:
if
|
pd.isnull(df[x])
|
pandas.isnull
|
# _*_ coding: utf-8 _*_
"""
Retrieve resource dataset.
"""
import pkg_resources
import pandas as pd
import numpy as np
import shapefile
from netCDF4 import Dataset
def get_nation_station_info(limit=None):
"""
Get the national surface weather station information.
:param limit: region limit, (lon0, lon1, lat0, lat1)
:return: pandas data frame, weather station information, column names:
['province', 'ID', 'name', 'grade', 'lat', 'lon', 'alt', 'pressureAlt']
"""
file = pkg_resources.resource_filename(
"dk_met_graphics", "resources/stations/cma_national_station_info.dat")
sta_info = pd.read_csv(file, dtype={"ID": np.str})
    if limit is not None:
sta_info = sta_info.loc[
(sta_info['lon'] >= limit[0]) & (sta_info['lon'] <= limit[1]) &
(sta_info['lat'] >= limit[2]) & (sta_info['lat'] <= limit[3])]
return sta_info
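# Illustrative usage (not executed on import): restrict the national stations to a
# lon/lat bounding box; the coordinates below are arbitrary example values.
def _example_station_query():
    return get_nation_station_info(limit=(115.0, 118.0, 39.0, 41.5))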
def get_county_station_info(limit=None):
"""
Get the county surface weather station information.
return pandas data frame.
Keyword Arguments:
limit {tuple} -- region limit, (lon0, lon1, lat0, lat1)
"""
file = pkg_resources.resource_filename(
"dk_met_graphics", "resources/stations/cma_county_station_info.dat")
sta_info = pd.read_csv(
file, delim_whitespace=True, header=None,
names=['ID', 'lat', 'lon', 'county', 'city', 'province'],
dtype={"ID": np.str})
    if limit is not None:
sta_info = sta_info.loc[
(sta_info['lon'] >= limit[0]) & (sta_info['lon'] <= limit[1]) &
(sta_info['lat'] >= limit[2]) & (sta_info['lat'] <= limit[3])]
return sta_info
def get_china_city(limit=None, grade="province"):
"""
Get china city information.
Return pandas dataframe.
Keyword Arguments:
limit {[type]} -- [description] (default: {None})
grade {str} -- [description] (default: {"province"})
"""
if grade == "province":
file = pkg_resources.resource_filename(
"dk_met_graphics", "resources/stations/res1_4m.shp")
else:
file = pkg_resources.resource_filename(
"dk_met_graphics", "resources/stations/res2_4m.shp")
shape = shapefile.Reader(file)
srs = shape.shapeRecords()
city_info =
|
pd.DataFrame(columns=['name', 'lon', 'lat'])
|
pandas.DataFrame
|
# coding: utf-8
# # Rule size sensitivity benchmark
# In[17]:
PY_IDS_DURATION_ITERATIONS = 10
# # Guide to use lvhimabindu/interpretable_decision_sets
#
# * git pull https://github.com/lvhimabindu/interpretable_decision_sets interpretable_decision_sets_lakkaraju
# * locate your python *site_packages* directory
# * copy *interpretable_decision_sets_lakkaraju* into *site_packages*
# * correct errors in code to allow it to run (wrong indentation etc.)
# # Interpretable Decision Sets - setup
# In[18]:
import interpretable_decision_sets_lakkaraju.IDS_smooth_local as sls_lakk
from interpretable_decision_sets_lakkaraju.IDS_smooth_local import run_apriori, createrules, smooth_local_search, func_evaluation
# In[19]:
import pandas as pd
import numpy as np
import time
# ## Simple example
# In[20]:
df = pd.read_csv('../../data/titanic_train.tab',' ', header=None, names=['Passenger_Cat', 'Age_Cat', 'Gender'])
df1 = pd.read_csv('../../data/titanic_train.Y', ' ', header=None, names=['Died', 'Survived'])
Y = list(df1['Died'].values)
itemsets = run_apriori(df, 0.1)
list_of_rules = createrules(itemsets, list(set(Y)))
# In[21]:
support_levels = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
support_levels = list(reversed(support_levels))
rule_counts_quantiles = [ int(support_levels[idx] * len(list_of_rules)) for idx in range(len(support_levels)) ]
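# For intuition (the actual numbers depend on the mined rule set): with the ten support
# levels above and, say, len(list_of_rules) == 200, the quantiles become
# [20, 40, 60, ..., 200], i.e. 10%, 20%, ..., 100% of the candidate rules.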
# In[22]:
rule_counts_quantiles
# In[ ]:
#%%capture
benchmark_data = [
]
for rule_count in rule_counts_quantiles:
current_rules = list_of_rules[:rule_count]
time1 = time.time()
    lambda_array = [1.0]*7 # use a separate hyperparameter search routine
s1 = smooth_local_search(current_rules, df, Y, lambda_array, 0.33, 0.33)
s2 = smooth_local_search(current_rules, df, Y, lambda_array, 0.33, -1.0)
f1 = func_evaluation(s1, current_rules, df, Y, lambda_array)
f2 = func_evaluation(s2, current_rules, df, Y, lambda_array)
soln_set = None
if f1 > f2:
soln_set = s1
else:
soln_set = s2
time2 = time.time()
duration = time2 - time1
rule_count = rule_count
benchmark_data.append(dict(
duration=duration,
rule_count=rule_count
))
# In[27]:
benchmark_dataframe_lakkaraju = pd.DataFrame(benchmark_data)
# In[28]:
benchmark_dataframe_lakkaraju.to_csv("./results/titanic_rule_size_benchmark_lakkaraju.csv", index=False)
# # PyIDS
# ## PyIDS setup
# In[35]:
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn.metrics import accuracy_score, auc, roc_auc_score
from pyids.ids_rule import IDSRule
from pyids.ids_ruleset import IDSRuleSet
from pyids.ids_objective_function import ObjectiveFunctionParameters, IDSObjectiveFunction
from pyids.ids_optimizer import RSOptimizer, SLSOptimizer
from pyids.ids_cacher import IDSCacher
from pyids.ids_classifier import IDS, mine_CARs
from pyarc.qcba import *
from pyarc.algorithms import createCARs, top_rules
from pyarc import TransactionDB
df = pd.read_csv("../../data/titanic.csv")
df["Died"] = df["Died"].astype(str) + "_"
cars = mine_CARs(df, rule_cutoff=100)
quant_df = QuantitativeDataFrame(df)
# ## PyIDS benchmark
# In[ ]:
max_rule_length = 34
support_levels = [1, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]
support_levels = list(reversed(support_levels))
rule_counts_quantiles = [ int(support_levels[idx] * max_rule_length) for idx in range(len(support_levels)) ]
# In[37]:
benchmark_data = [
]
for rule_count in range(0, max_rule_length + 1):
current_cars = cars[:rule_count]
times = []
for _ in range(PY_IDS_DURATION_ITERATIONS):
time1 = time.time()
        lambda_array = [1.0]*7 # use a separate hyperparameter search routine
ids = IDS()
ids.fit(class_association_rules=current_cars, quant_dataframe=quant_df, debug=False)
time2 = time.time()
duration = time2 - time1
times.append(duration)
    benchmark_data.append(dict(
        duration=np.mean(times),
        rule_count=rule_count
    ))
# In[44]:
benchmark_dataframe_pyids =
|
pd.DataFrame(benchmark_data)
|
pandas.DataFrame
|
# ----------------------------------------------------------------------------
# Copyright (c) 2016-2017, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import os
import shutil
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from scipy.cluster.hierarchy import ward
from skbio import TreeNode, DistanceMatrix
from q2_gneiss.plot._plot import dendrogram_heatmap, balance_taxonomy
from qiime2 import MetadataCategory
class TestHeatmap(unittest.TestCase):
def setUp(self):
self.results = "results"
if not os.path.exists(self.results):
os.mkdir(self.results)
def tearDown(self):
shutil.rmtree(self.results)
def test_visualization(self):
np.random.seed(0)
num_otus = 500 # otus
table = pd.DataFrame(np.random.random((num_otus, 5)),
index=np.arange(num_otus).astype(np.str)).T
x = np.random.rand(num_otus)
dm = DistanceMatrix.from_iterable(x, lambda x, y: np.abs(x-y))
lm = ward(dm.condensed_form())
t = TreeNode.from_linkage_matrix(lm, np.arange(len(x)).astype(np.str))
for i, n in enumerate(t.postorder()):
if not n.is_tip():
n.name = "y%d" % i
n.length = np.random.rand()*3
md = MetadataCategory(
pd.Series(['a', 'a', 'a', 'b', 'b']))
dendrogram_heatmap(self.results, table, t, md)
index_fp = os.path.join(self.results, 'index.html')
self.assertTrue(os.path.exists(index_fp))
with open(index_fp, 'r') as fh:
html = fh.read()
self.assertIn('<h1>Dendrogram heatmap</h1>',
html)
def test_visualization_small(self):
# tests the scenario where ndim > number of tips
np.random.seed(0)
num_otus = 11 # otus
table = pd.DataFrame(np.random.random((num_otus, 5)),
index=np.arange(num_otus).astype(np.str)).T
x = np.random.rand(num_otus)
dm = DistanceMatrix.from_iterable(x, lambda x, y: np.abs(x-y))
lm = ward(dm.condensed_form())
t = TreeNode.from_linkage_matrix(lm, np.arange(len(x)).astype(np.str))
for i, n in enumerate(t.postorder()):
if not n.is_tip():
n.name = "y%d" % i
n.length = np.random.rand()*3
md = MetadataCategory(
pd.Series(['a', 'a', 'a', 'b', 'b']))
dendrogram_heatmap(self.results, table, t, md)
index_fp = os.path.join(self.results, 'index.html')
self.assertTrue(os.path.exists(index_fp))
with open(index_fp, 'r') as fh:
html = fh.read()
self.assertIn('<h1>Dendrogram heatmap</h1>',
html)
class TestBalanceTaxonomy(unittest.TestCase):
def setUp(self):
self.results = "results"
if not os.path.exists(self.results):
os.mkdir(self.results)
self.balances = pd.DataFrame(
{'a': [-2, -1, 0, 1, 2],
'b': [-2, 0, 0, 0, 0]},
index=['a1', 'a2', 'a3', 'a4', 'a5']
)
self.tree = TreeNode.read([r'((k, q)d, ((x, y)a, z)b)c;'])
self.taxonomy = pd.DataFrame(
[['foo;barf;a;b;c;d;e', 1],
['foo;bark;f;g;h;i;j', 1],
['foo;bark;f;g;h;w;j', 1],
['nom;tu;k;l;m;n;o', 0.9],
['nom;tu;k;l;m;t;o', 0.9]],
columns=['Taxon', 'Confidence'],
index=['x', 'y', 'z', 'k', 'q'])
self.balances = pd.DataFrame(
[[1, 2, 3, 4, 5, 6, 7],
[-3.1, -2.9, -3, 3, 2.9, 3.2, 3.1],
[1, 1, 1, 1, 1, 1, 1],
[3, 2, 1, 0, -1, -2, -3]],
index=['d', 'a', 'b', 'c'],
columns=['s1', 's2', 's3', 's4', 's5', 's6', 's7']
).T
self.categorical = MetadataCategory(
pd.Series(['a', 'a', 'a', 'b', 'b', 'b', 'b'],
index=['s1', 's2', 's3', 's4', 's5', 's6', 's7'],
name='categorical'))
self.continuous = MetadataCategory(
pd.Series(np.arange(7),
index=['s1', 's2', 's3', 's4', 's5', 's6', 's7'],
name='continuous'))
def tearDown(self):
shutil.rmtree(self.results)
pass
def test_balance_taxonomy(self):
index_fp = os.path.join(self.results, 'index.html')
balance_taxonomy(self.results, self.balances, self.tree,
self.taxonomy, balance_name='c')
self.assertTrue(os.path.exists(index_fp))
# test to make sure that the numerator file is there
num_fp = os.path.join(self.results, 'numerator.csv')
self.assertTrue(os.path.exists(num_fp))
# test to make sure that the denominator file is there
denom_fp = os.path.join(self.results, 'denominator.csv')
self.assertTrue(os.path.exists(denom_fp))
with open(index_fp, 'r') as fh:
html = fh.read()
self.assertIn('<h1>Balance Taxonomy</h1>', html)
self.assertIn('Numerator taxa', html)
self.assertIn('Denominator taxa', html)
# extract csv files and test for contents
exp = pd.DataFrame(
[['foo', 'barf', 'a', 'b', 'c', 'd', 'e'],
['foo', 'bark', 'f', 'g', 'h', 'i', 'j'],
['foo', 'bark', 'f', 'g', 'h', 'w', 'j']],
columns=['0', '1', '2', '3', '4', '5', '6'],
index=['x', 'y', 'z'])
res = pd.read_csv(num_fp, index_col=0)
pdt.assert_frame_equal(exp, res.sort_index())
exp = pd.DataFrame([['nom', 'tu', 'k', 'l', 'm', 't', 'o'],
['nom', 'tu', 'k', 'l', 'm', 'n', 'o']],
columns=['0', '1', '2', '3', '4', '5', '6'],
index=['q', 'k']).sort_index()
res = pd.read_csv(denom_fp, index_col=0)
pdt.assert_frame_equal(exp, res.sort_index())
def test_balance_taxonomy_tips(self):
index_fp = os.path.join(self.results, 'index.html')
balance_taxonomy(self.results, self.balances, self.tree,
self.taxonomy, balance_name='a')
self.assertTrue(os.path.exists(index_fp))
# test to make sure that the numerator file is there
num_fp = os.path.join(self.results, 'numerator.csv')
self.assertTrue(os.path.exists(num_fp))
# test to make sure that the denominator file is there
denom_fp = os.path.join(self.results, 'denominator.csv')
self.assertTrue(os.path.exists(denom_fp))
exp = pd.DataFrame(['foo', 'bark', 'f', 'g', 'h', 'i', 'j'],
index=['0', '1', '2', '3', '4',
'5', '6'],
columns=['y']).T
res =
|
pd.read_csv(num_fp, index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
@file:make_cross_rate_feature.py
@time:2019/6/9 8:39
@author:Tangj
@software:Pycharm
@Desc
"""
import pandas as pd
from utils import *
date = ['20190410', '20190411', '20190412', '20190413', '20190414',
'20190415', '20190416', '20190417', '20190418', '20190419',
'20190420', '20190421', '20190422']
# rate = pd.read_csv('../usingData/feature/rate_expose.csv')
rate = pd.read_csv('../usingData/feature/everyday_exposure_train.csv')
cross_train = pd.read_csv('../usingData/sub_totaltrain/cross_fea_train.csv')
cross_test = pd.read_csv('../usingData/feature/cross_fea_test.csv')
rate1 = rate[['ad_id', 'sucess_rate', 'day']]
print(cross_train.shape)
train_use = pd.merge(cross_train, rate1, on=['ad_id', 'day'], how='left')
train_use = train_use.fillna(0)
print(train_use.shape)
col_name1 = ['max_rate', 'min_rate', 'mean_rate', 'median_rate']
train_use2 = pd.DataFrame()
test_use2 =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
scrapes data from here:
http://regsho.finra.org/regsho-Index.html
"""
# core
import os
import glob
import time
# installed
import requests as req
from bs4 import BeautifulSoup as bs
import urllib
import pandas as pd
from tqdm import tqdm
from concurrent.futures import ProcessPoolExecutor
# custom
import scrape_stockdata as ss
from utils import get_home_dir
HOME_DIR = get_home_dir(repo_name='scrape_stocks')
FOLDERS = ['ADF', 'NASDAQ', 'NYSE', 'ORF', 'NASDAQ-Chicago']
FILEPATH = '/home/nate/Dropbox/data/finra/'
def dl_and_get_df(ul, org):
"""
:param ul: html of ul with links
:param org: the organization, one of ['ADF', 'NASDAQ', 'NYSE', 'ORF']
"""
if not os.path.exists(FILEPATH):
os.mkdir(FILEPATH)
if not os.path.exists(FILEPATH + org):
os.mkdir(FILEPATH + org)
dfs = []
for l in ul.find_all('li'):
link = l.find('a').attrs['href']
filename = link.split('/')[-1]
urllib.request.urlretrieve(link, FILEPATH + org + '/' + filename)
df = pd.read_csv(FILEPATH + org + '/' + filename, sep='|')
nona = df.dropna()
if df.shape != nona.shape and df.shape[0] == 1:
print('empty file!')
continue
dfs.append(nona)
if len(dfs) == 0:
return None
full_df =
|
pd.concat(dfs)
|
pandas.concat
|
"""FinvizSentimentalAnalysis.py: Performs sentimental analysis on news from Finviz.com"""
# Look into README.md for installing Vader
# Extrenal Imports
import pandas as pd
from urllib.request import urlopen, Request
from bs4 import BeautifulSoup
# NLTK imports and Intialization
import nltk
nltk.downloader.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Internal Imports
from . import constants
from . import SaveData
# Figure out how to integrate this onto the main model
def finvizSentimentalAnalysis(tickers):
newsTables = {}
for ticker in tickers:
url = constants.FINVIZ_URL + ticker
req = Request(url=url, headers={'user-agent': 'GSPP'})
response = urlopen(req)
html = BeautifulSoup(response, 'lxml')
newsTables[ticker] = html.find(id='news-table')
parsedData = []
for ticker, newsTable in newsTables.items():
for row in newsTable.findAll('tr'):
newsTitle = row.a.text
dateData = row.td.text.split(' ')
if len(dateData) == 1:
time = dateData[0]
else:
date = dateData[0]
time = dateData[1]
parsedData.append([ticker, date, time, newsTitle])
news = pd.DataFrame(parsedData,
columns=['Ticker', 'Date', 'Time', 'News Title'])
news['Date'] =
|
pd.to_datetime(news.Date)
|
pandas.to_datetime
|
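# Illustrative continuation (an assumption, not part of the original function above):
# once the `news` DataFrame is built, VADER's compound score could be attached per
# headline roughly like this:
# vader = SentimentIntensityAnalyzer()
# news['Compound'] = news['News Title'].apply(lambda t: vader.polarity_scores(t)['compound'])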
import os
# os.environ["OMP_NUM_THREADS"] = "16"
import logging
logging.basicConfig(filename=snakemake.log[0], level=logging.INFO)
import pandas as pd
import numpy as np
# seak imports
from seak.data_loaders import intersect_ids, EnsemblVEPLoader, VariantLoaderSnpReader, CovariatesLoaderCSV
from seak.scoretest import ScoretestNoK
from seak.lrt import LRTnoK, pv_chi2mixture, fit_chi2mixture
from pysnptools.snpreader import Bed
import pickle
import sys
from util.association import BurdenLoaderHDF5
from util import Timer
class GotNone(Exception):
pass
# set up the covariatesloader
covariatesloader = CovariatesLoaderCSV(snakemake.params.phenotype,
snakemake.input.covariates_tsv,
snakemake.params.covariate_column_names,
sep='\t',
path_to_phenotypes=snakemake.input.phenotypes_tsv)
# initialize the null models
Y, X = covariatesloader.get_one_hot_covariates_and_phenotype('noK')
null_model_score = ScoretestNoK(Y, X)
null_model_lrt = LRTnoK(X, Y)
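# Note: both null models regress the phenotype on the one-hot covariates only (no
# genetic relatedness term); the score test and the LRT below then ask, gene by gene,
# whether adding that gene's rare-variant genotypes improves on the covariate-only fit.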
# set up function to filter variants:
def maf_filter(mac_report):
# load the MAC report, keep only observed variants with MAF below threshold
    mac_report = pd.read_csv(mac_report, sep='\t', usecols=['SNP', 'MAF', 'Minor', 'alt_greater_ref', 'hiconf_reg'])  # 'hiconf_reg' is needed by the high-confidence branch below
if snakemake.params.filter_highconfidence:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool)) & (mac_report.hiconf_reg.astype(bool))]
else:
vids = mac_report.SNP[(mac_report.MAF < snakemake.params.max_maf) & (mac_report.Minor > 0) & ~(mac_report.alt_greater_ref.astype(bool))]
# this has already been done in filter_variants.py
    # load the variant annotation, keep only variants in high-confidence regions
# anno = pd.read_csv(anno_tsv, sep='\t', usecols=['Name', 'hiconf_reg'])
# vids_highconf = anno.Name[anno.hiconf_reg.astype(bool).values]
# vids = np.intersect1d(vids, vids_highconf)
return mac_report.set_index('SNP').loc[vids]
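# Toy illustration (made-up values, with max_maf assumed to be 0.001 here) of what
# maf_filter keeps: rare, observed variants whose ALT allele is not the major allele.
def _example_maf_filter_logic():
    report = pd.DataFrame({
        'SNP': ['v1', 'v2', 'v3'],
        'MAF': [0.0005, 0.02, 0.0001],
        'Minor': [3, 10, 0],
        'alt_greater_ref': [False, False, False],
    })
    keep = report.SNP[(report.MAF < 0.001) & (report.Minor > 0) & ~report.alt_greater_ref.astype(bool)]
    return report.set_index('SNP').loc[keep]  # only 'v1' survives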
def get_regions():
# load the results, keep those below a certain p-value
results = pd.read_csv(snakemake.input.results_tsv, sep='\t')
kern = snakemake.params.kernels
if isinstance(kern, str):
kern = [kern]
pvcols_score = ['pv_score_' + k for k in kern ]
pvcols_lrt = ['pv_lrt_' + k for k in kern]
statcols = ['lrtstat_' + k for k in kern]
results = results[['gene', 'n_snp', 'cumMAC', 'nCarrier'] + statcols + pvcols_score + pvcols_lrt]
# get genes below threshold
genes = [results.gene[results[k] < 1e-7].values for k in pvcols_score + pvcols_lrt ]
genes = np.unique(np.concatenate(genes))
if len(genes) == 0:
return None
# set up the regions to loop over for the chromosome
regions = pd.read_csv(snakemake.input.regions_bed, sep='\t', header=None, usecols=[0 ,1 ,2 ,3, 5], dtype={0 :str, 1: np.int32, 2 :np.int32, 3 :str, 5:str})
regions.columns = ['chrom', 'start', 'end', 'name', 'strand']
regions['strand'] = regions.strand.map({'+': 'plus', '-': 'minus'})
regions = regions.set_index('name').loc[genes]
regions = regions.join(results.set_index('gene'), how='left').reset_index()
return regions
# genotype path, vep-path:
assert len(snakemake.params.ids) == len(snakemake.input.bed), 'Error: length of chromosome IDs does not match length of genotype files'
geno_vep = zip(snakemake.params.ids, snakemake.input.bed, snakemake.input.vep_tsv, snakemake.input.ensembl_vep_tsv, snakemake.input.mac_report, snakemake.input.h5_lof, snakemake.input.iid_lof, snakemake.input.gid_lof)
# get the top hits
regions_all = get_regions()
if regions_all is None:
logging.info('No genes pass significance threshold, exiting.')
sys.exit(0)
# where we store the results
stats = []
i_gene = 0
# enter the chromosome loop:
timer = Timer()
for i, (chromosome, bed, vep_tsv, ensembl_vep_tsv, mac_report, h5_lof, iid_lof, gid_lof) in enumerate(geno_vep):
if chromosome.replace('chr','') not in regions_all.chrom.unique():
continue
# set up the ensembl vep loader for the chromosome
spliceaidf = pd.read_csv(vep_tsv,
sep='\t',
usecols=['name', 'chrom', 'end', 'gene', 'max_effect', 'DS_AG', 'DS_AL', 'DS_DG', 'DS_DL', 'DP_AG', 'DP_AL', 'DP_DG', 'DP_DL'],
index_col='name')
# get set of variants for the chromosome:
mac_report = maf_filter(mac_report)
filter_vids = mac_report.index.values
# filter by MAF
keep = intersect_ids(filter_vids, spliceaidf.index.values)
spliceaidf = spliceaidf.loc[keep]
spliceaidf.reset_index(inplace=True)
# filter by impact:
spliceaidf = spliceaidf[spliceaidf.max_effect >= snakemake.params.min_impact]
# set up the regions to loop over for the chromosome
regions = regions_all.copy()
# discard all genes for which we don't have annotations
gene_ids = regions.name.str.split('_', expand=True) # table with two columns, ensembl-id and gene-name
regions['gene'] = gene_ids[1] # this is the gene name
regions['ensembl_id'] = gene_ids[0]
regions.set_index('gene', inplace=True)
genes = intersect_ids(np.unique(regions.index.values), np.unique(spliceaidf.gene)) # intersection of gene names
regions = regions.loc[genes].reset_index() # subsetting
regions = regions.sort_values(['chrom', 'start', 'end'])
# check if the variants are protein LOF variants, load the protein LOF variants:
ensemblvepdf =
|
pd.read_csv(ensembl_vep_tsv, sep='\t', usecols=['Uploaded_variation', 'Gene'])
|
pandas.read_csv
|
import os
import tempfile
import unittest
import numpy as np
import pandas as pd
from sqlalchemy import create_engine
from tests.settings import POSTGRESQL_ENGINE, SQLITE_ENGINE
from tests.utils import get_repository_path, DBTest
from ukbrest.common.pheno2sql import Pheno2SQL
class Pheno2SQLTest(DBTest):
@unittest.skip('sqlite being removed')
def test_sqlite_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check table exists
tmp = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not tmp.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_default_values(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_exit(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
temp_dir = tempfile.mkdtemp()
# Run
with Pheno2SQL(csv_file, db_engine, tmpdir=temp_dir) as p2sql:
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
## Check that temporary files were deleted
assert len(os.listdir(temp_dir)) == 0
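    # The tests below exercise the column-splitting scheme: with n_columns_per_table=3,
    # the example file's eight data fields are spread across ukb_pheno_0_00 (c21_*),
    # ukb_pheno_0_01 (c31/c34/c46) and ukb_pheno_0_02 (c47/c48), and a 'fields' lookup
    # table records which table holds each column.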
@unittest.skip('sqlite being removed')
def test_sqlite_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'] == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'] == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'] == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'] == '2010-03-29'
def test_postgresql_less_columns_per_table(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c21_0_0'] == 'Option number 1'
assert tmp.loc[1, 'c21_1_0'] == 'No response'
assert tmp.loc[1, 'c21_2_0'] == 'Yes'
assert tmp.loc[2, 'c21_0_0'] == 'Option number 2'
assert pd.isnull(tmp.loc[2, 'c21_1_0'])
assert tmp.loc[2, 'c21_2_0'] == 'No'
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c31_0_0'].strftime('%Y-%m-%d') == '2012-01-05'
assert int(tmp.loc[1, 'c34_0_0']) == 21
assert int(tmp.loc[1, 'c46_0_0']) == -9
assert tmp.loc[2, 'c31_0_0'].strftime('%Y-%m-%d') == '2015-12-30'
assert int(tmp.loc[2, 'c34_0_0']) == 12
assert int(tmp.loc[2, 'c46_0_0']) == -2
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
assert tmp.loc[1, 'c47_0_0'].round(5) == 45.55412
assert tmp.loc[1, 'c48_0_0'].strftime('%Y-%m-%d') == '2011-08-14'
assert tmp.loc[2, 'c47_0_0'].round(5) == -0.55461
assert tmp.loc[2, 'c48_0_0'].strftime('%Y-%m-%d') == '2010-03-29'
def test_custom_tmpdir(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
with Pheno2SQL(csv_file, db_engine, tmpdir='/tmp/custom/directory/here', delete_temp_csv=False) as p2sql:
# Run
p2sql.load_data()
# Validate
## Check table exists
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0","c31_0_0","c34_0_0","c46_0_0","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine), index_col='eid')
assert not tmp.empty
assert tmp.shape[0] == 2
            ## Check that temporary files are still there
assert len(os.listdir('/tmp/custom/directory/here')) > 0
        ## Check that the temporary directory is now clean
assert len(os.listdir('/tmp/custom/directory/here')) == 0
@unittest.skip('sqlite being removed')
def test_sqlite_auxiliary_table_is_created(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = SQLITE_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'sqlite'
## Check tables exist
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_00'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_01'), create_engine(db_engine))
assert not table.empty
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('ukb_pheno_0_02'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existance
table = pd.read_sql("SELECT name FROM sqlite_master WHERE type='table' AND name='{}';".format('fields'), create_engine(db_engine))
assert not table.empty
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_is_created_and_has_minimum_data_required(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from ukb_pheno_0_00', create_engine(db_engine))
expected_columns = ["eid","c21_0_0","c21_1_0","c21_2_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_01', create_engine(db_engine))
expected_columns = ["eid","c31_0_0","c34_0_0","c46_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
tmp = pd.read_sql('select * from ukb_pheno_0_02', create_engine(db_engine))
expected_columns = ["eid","c47_0_0","c48_0_0"]
assert len(tmp.columns) == len(expected_columns)
assert all(x in expected_columns for x in tmp.columns)
## Check auxiliary table existance
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "table_name"]
assert len(tmp.columns) >= len(expected_columns)
assert all(x in tmp.columns for x in expected_columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c31_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c34_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c46_0_0', 'table_name'] == 'ukb_pheno_0_01'
assert tmp.loc['c47_0_0', 'table_name'] == 'ukb_pheno_0_02'
assert tmp.loc['c48_0_0', 'table_name'] == 'ukb_pheno_0_02'
def test_postgresql_auxiliary_table_with_more_information(self):
# Prepare
csv_file = get_repository_path('pheno2sql/example01.csv')
db_engine = POSTGRESQL_ENGINE
p2sql = Pheno2SQL(csv_file, db_engine, n_columns_per_table=3, loading_n_jobs=1)
# Run
p2sql.load_data()
# Validate
assert p2sql.db_type == 'postgresql'
## Check tables exist
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_00'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_01'), create_engine(db_engine))
assert table.iloc[0, 0]
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('ukb_pheno_0_02'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check auxiliary table existance
table = pd.read_sql("SELECT EXISTS (SELECT 1 FROM pg_tables WHERE schemaname = 'public' AND tablename = '{}');".format('fields'), create_engine(db_engine))
assert table.iloc[0, 0]
## Check columns are correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine))
expected_columns = ["column_name", "field_id", "inst", "arr", "coding", "table_name", "type", "description"]
assert len(tmp.columns) == len(expected_columns), len(tmp.columns)
assert all(x in expected_columns for x in tmp.columns)
## Check data is correct
tmp = pd.read_sql('select * from fields', create_engine(db_engine), index_col='column_name')
assert not tmp.empty
assert tmp.shape[0] == 8
assert tmp.loc['c21_0_0', 'field_id'] == '21'
assert tmp.loc['c21_0_0', 'inst'] == 0
assert tmp.loc['c21_0_0', 'arr'] == 0
assert tmp.loc['c21_0_0', 'coding'] == 100261
assert tmp.loc['c21_0_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_0_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_0_0', 'description'] == 'An string value'
assert tmp.loc['c21_1_0', 'field_id'] == '21'
assert tmp.loc['c21_1_0', 'inst'] == 1
assert tmp.loc['c21_1_0', 'arr'] == 0
assert tmp.loc['c21_1_0', 'coding'] == 100261
assert tmp.loc['c21_1_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_1_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_1_0', 'description'] == 'An string value'
assert tmp.loc['c21_2_0', 'field_id'] == '21'
assert tmp.loc['c21_2_0', 'inst'] == 2
assert tmp.loc['c21_2_0', 'arr'] == 0
assert tmp.loc['c21_2_0', 'coding'] == 100261
assert tmp.loc['c21_2_0', 'table_name'] == 'ukb_pheno_0_00'
assert tmp.loc['c21_2_0', 'type'] == 'Categorical (single)'
assert tmp.loc['c21_2_0', 'description'] == 'An string value'
assert tmp.loc['c31_0_0', 'field_id'] == '31'
assert tmp.loc['c31_0_0', 'inst'] == 0
assert tmp.loc['c31_0_0', 'arr'] == 0
assert
|
pd.isnull(tmp.loc['c31_0_0', 'coding'])
|
pandas.isnull
|
# ===============================================================================
# Copyright 2018 dgketchum
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import json
import os
from datetime import datetime
from geopandas import GeoDataFrame, read_file
from numpy import where, sum, nan, std, array, min, max, mean
from pandas import read_csv, concat, errors, Series, DataFrame
from pandas import to_datetime
from pandas.io.json import json_normalize
from shapely.geometry import Polygon
INT_COLS = ['POINT_TYPE', 'YEAR']
KML_DROP = ['system:index', 'altitudeMo', 'descriptio',
'extrude', 'gnis_id', 'icon', 'loaddate',
'metasource', 'name_1', 'shape_area', 'shape_leng', 'sourcedata',
'sourcefeat', 'sourceorig', 'system_ind', 'tessellate',
'tnmid', 'visibility', ]
DUPLICATE_HUC8_NAMES = ['Beaver', 'Big Sandy', 'Blackfoot', 'Carrizo', 'Cedar', 'Clear', 'Colorado Headwaters', 'Crow',
'Frenchman', 'Horse', 'Jordan', 'Lower Beaver', 'Lower White', 'Medicine', 'Muddy', 'Palo Duro',
'Pawnee', 'Redwater', 'Rock', 'Salt', 'San Francisco', 'Santa Maria', 'Silver', 'Smith',
'Stillwater', 'Teton', 'Upper Bear', 'Upper White', 'White', 'Willow']
COLS = ['SCENE_ID',
'PRODUCT_ID',
'SPACECRAFT_ID',
'SENSOR_ID',
'DATE_ACQUIRED',
'COLLECTION_NUMBER',
'COLLECTION_CATEGORY',
'SENSING_TIME',
'DATA_TYPE',
'WRS_PATH',
'WRS_ROW',
'CLOUD_COVER',
'NORTH_LAT',
'SOUTH_LAT',
'WEST_LON',
'EAST_LON',
'TOTAL_SIZE',
'BASE_URL']
DROP_COUNTY = ['system:index', 'AFFGEOID', 'COUNTYFP', 'COUNTYNS', 'GEOID', 'LSAD', 'STATEFP', '.geo']
def concatenate_county_data(folder, out_file, glob='counties', acres=False):
df = None
base_names = [x for x in os.listdir(folder)]
_files = [os.path.join(folder, x) for x in base_names if x.startswith(glob) and not 'total_area' in x]
totals_file = [os.path.join(folder, x) for x in base_names if 'total_area' in x][0]
first = True
for csv in _files:
print(csv)
if first:
df = read_csv(totals_file).sort_values('COUNTYNS')
cty_str = df['COUNTYFP'].map(lambda x: str(int(x)).zfill(3))
idx_str = df['STATEFP'].map(lambda x: str(int(x))) + cty_str
idx = idx_str.map(int)
df['FIPS'] = idx
df.index = idx
if acres:
df['total_area'] = df['sum'] / 4046.86
else:
df['total_area'] = df['sum']
df.drop(columns=['sum'], inplace=True)
first = False
prefix, year = os.path.basename(csv).split('_')[1], os.path.basename(csv).split('_')[4]
c = read_csv(csv).sort_values('COUNTYNS')
name = '{}_{}'.format(prefix, year)
cty_str = c['COUNTYFP'].map(lambda x: str(int(x)).zfill(3))
idx_str = c['STATEFP'].map(lambda x: str(int(x))) + cty_str
idx = idx_str.map(int)
c.index = idx
if acres:
c[name] = c['sum'] / 4046.86
else:
c[name] = c['sum']
df = concat([df, c[name]], axis=1)
print(c.shape, csv)
# print('size: {}'.format(df.shape))
# df = df.reindex(sorted(df.columns), axis=1)
df.drop(columns=DROP_COUNTY, inplace=True)
df.sort_index(axis=1, inplace=True)
df.to_csv(out_file, index=False)
print('saved {}'.format(out_file))
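# Worked example (synthetic values) of the FIPS construction above: a state FIPS of 30
# and a county FIPS of 63 are combined into the five-digit code 30063, stored as an int.
def _example_fips():
    cty = Series([63]).map(lambda x: str(int(x)).zfill(3))           # '063'
    return (Series([30]).map(lambda x: str(int(x))) + cty).map(int)  # 30063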
def concatenate_band_extract(root, out_dir, glob='None', sample=None, n=None, spec=None):
l = [os.path.join(root, x) for x in os.listdir(root) if glob in x]
l.sort()
first = True
for csv in l:
try:
if first:
df = read_csv(csv)
print(df.shape, csv)
first = False
else:
c = read_csv(csv)
df = concat([df, c], sort=False)
print(c.shape, csv)
except errors.EmptyDataError:
print('{} is empty'.format(csv))
pass
df.drop(columns=['system:index', '.geo'], inplace=True)
try:
df.drop(columns=['nd_max_p1', 'nd_max_p2'], inplace=True)
except KeyError:
pass
if sample:
_len = int(df.shape[0]/1e3 * sample)
out_file = os.path.join(out_dir, '{}_{}.csv'.format(glob, _len))
elif n:
_len = int(n / 1e3)
out_file = os.path.join(out_dir, '{}_{}.csv'.format(glob, _len))
else:
out_file = os.path.join(out_dir, '{}.csv'.format(glob))
for c in df.columns:
if c in INT_COLS:
df[c] = df[c].astype(int, copy=True)
else:
df[c] = df[c].astype(float, copy=True)
if n or spec:
counts = df['POINT_TYPE'].value_counts()
_min = min(counts.values)
for i in sorted(list(counts.index)):
if spec:
if i == 0:
ndf = df[df['POINT_TYPE'] == i].sample(n=spec[i])
else:
ndf = concat([ndf, df[df['POINT_TYPE'] == i].sample(n=spec[i])], sort=False)
out_file = os.path.join(out_dir, '{}_kw.csv'.format(glob))
elif i == 0:
ndf = df[df['POINT_TYPE'] == i].sample(n=n)
else:
ndf = concat([ndf, df[df['POINT_TYPE'] == i].sample(n=n)], sort=False)
df = ndf
if sample:
df = df.sample(frac=sample).reset_index(drop=True)
print(df['POINT_TYPE'].value_counts())
print('size: {}'.format(df.shape))
print('file: {}'.format(out_file))
df.to_csv(out_file, index=False)
def concatenate_irrigation_attrs(_dir, out_filename, glob):
_files = [os.path.join(_dir, x) for x in os.listdir(_dir) if glob in x]
_files.sort()
first_year = True
for year in range(1986, 2019):
yr_files = [f for f in _files if str(year) in f]
first_state = True
for f in yr_files:
if first_state:
df = read_csv(f, index_col=0)
df.dropna(subset=['mean'], inplace=True)
df.rename(columns={'mean': 'IPct_{}'.format(year)}, inplace=True)
df.drop_duplicates(subset=['.geo'], keep='first', inplace=True)
df['Irr_{}'.format(year)] = where(df['IPct_{}'.format(year)].values > 0.5, 1, 0)
first_state = False
else:
c = read_csv(f, index_col=0)
c.dropna(subset=['mean'], inplace=True)
c.rename(columns={'mean': 'IPct_{}'.format(year)}, inplace=True)
c['Irr_{}'.format(year)] = where(c['IPct_{}'.format(year)].values > 0.5, 1, 0)
df = concat([df, c], sort=False)
df.drop_duplicates(subset=['.geo'], keep='first', inplace=True)
print(year, df.shape)
if first_year:
master = df
first_year = False
else:
master['IPct_{}'.format(year)] = df['IPct_{}'.format(year)]
master['Irr_{}'.format(year)] = df['Irr_{}'.format(year)]
bool_cols = array([master[x].values for x in master.columns if 'Irr_' in x])
bool_sum = sum(bool_cols, axis=0)
master['IYears'] = bool_sum
master.dropna(subset=['.geo'], inplace=True)
coords = Series(json_normalize(master['.geo'].apply(json.loads))['coordinates'].values,
index=master.index)
master['geometry'] = coords.apply(to_polygon)
master.dropna(subset=['geometry'], inplace=True)
gpd = GeoDataFrame(master.drop(['.geo'], axis=1),
crs={'init': 'epsg:4326'})
gpd.to_file(out_filename)
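# Small illustration (synthetic numbers) of the per-year thresholding used above: an
# irrigated fraction above 0.5 becomes a binary Irr_<year> flag, and IYears is simply
# the number of flagged years for each feature.
def _example_irrigation_flags():
    pct = DataFrame({'IPct_1986': [0.7, 0.2], 'IPct_1987': [0.6, 0.55]})
    flags = DataFrame({c.replace('IPct', 'Irr'): where(pct[c].values > 0.5, 1, 0) for c in pct.columns})
    flags['IYears'] = flags.sum(axis=1)
    return flags  # IYears -> [2, 1]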
def concatenate_attrs_huc(_dir, out_csv_filename, out_shp_filename, template_geometry):
_files = [os.path.join(_dir, x) for x in os.listdir(_dir) if x.endswith('.csv')]
_files.sort()
first = True
df_geo = []
template_names = []
count_arr = None
names = None
for year in range(1986, 2017):
print(year)
yr_files = [f for f in _files if str(year) in f]
_mean = [f for f in yr_files if 'mean' in f][0]
if first:
df = read_csv(_mean, index_col=['huc8']).sort_values('huc8', axis=0)
names = df['Name']
units = df.index
template_gpd = read_file(template_geometry).sort_values('huc8', axis=0)
for i, r in template_gpd.iterrows():
if r['Name'] in DUPLICATE_HUC8_NAMES:
df_geo.append(r['geometry'])
template_names.append('{}_{}'.format(r['Name'], str(r['states']).replace(',', '_')))
elif r['Name'] in names.values and r['Name'] not in template_names:
df_geo.append(r['geometry'])
template_names.append(r['Name'])
else:
print('{} is in the list'.format(r['Name']))
mean_arr = df['mean']
df.drop(columns=KML_DROP, inplace=True)
df.drop(columns=['.geo', 'mean'], inplace=True)
_count_csv = [f for f in yr_files if 'count' in f][0]
count_arr = read_csv(_count_csv, index_col=0)['count'].values
            df['TotalPix'] = count_arr
df['Ct_{}'.format(year)] = mean_arr * count_arr
first = False
else:
mean_arr =
|
read_csv(_mean, index_col=['huc8'])
|
pandas.read_csv
|
import pandas as pd
from pandas.api.types import is_numeric_dtype, is_object_dtype
import numpy as np
import seaborn as sns
import squarify
import matplotlib.pyplot as plt
import matplotlib.ticker as tkr
import matplotlib.cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from statsmodels.stats.weightstats import ztest
from statsmodels.stats.proportion import proportions_ztest
from scipy import stats
from IPython.display import display_html
import os
import sys
from prettierplot.plotter import PrettierPlot
from prettierplot import style
def eda_cat_target_cat_feat(self, feature, level_count_cap=50, color_map="viridis", legend_labels=None,
chart_scale=15):
"""
Documentation:
---
Description:
Creates exploratory data visualizations and statistical summaries for a category feature
in the context of a categorical target.
---
Parameters:
feature : str
Feature to visualize.
level_count_cap : int, default=50
Maximum number of unique levels in feature. If the number of levels exceeds the
cap, then no visualization panel is produced.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
legend_labels : list, default=None
Class labels displayed in plot legend.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
# if number of unique levels in feature is less than specified level_count_cap
if (len(np.unique(self.data[self.data[feature].notnull()][feature].values)) < level_count_cap):
### data summaries
## feature summary
# create empty DataFrame
uni_summ_df = pd.DataFrame(columns=[feature, "Count", "Proportion"])
# capture unique values and count of those unique values
unique_vals, unique_counts = np.unique(
self.data[self.data[feature].notnull()][feature], return_counts=True
)
# append each unique value, count and proportion to DataFrame
for i, j in zip(unique_vals, unique_counts):
uni_summ_df = uni_summ_df.append(
{
feature: i,
"Count": j,
"Proportion": j / np.sum(unique_counts) * 100,
},
ignore_index=True,
)
# sort DataFrame by "Proportion", descending
uni_summ_df = uni_summ_df.sort_values(by=["Proportion"], ascending=False)
# set values to int dtype where applicable to optimize
uni_summ_df["Count"] = uni_summ_df["Count"].astype("int64")
if is_numeric_dtype(uni_summ_df[feature]):
uni_summ_df[feature] = uni_summ_df[feature].astype("int64")
## feature vs. target summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# groupby category feature and count the occurrences of target classes
# for each level in category
bi_summ_df = (
bi_df.groupby([feature] + [self.target.name])
.size()
.reset_index()
.pivot(columns=self.target.name, index=feature, values=0)
)
# overwrite DataFrame index with actual class labels if provided
bi_summ_df.columns = pd.Index(legend_labels) if legend_labels is not None else pd.Index([i for i in bi_summ_df.columns.tolist()])
bi_summ_df.reset_index(inplace=True)
# fill nan's with zero
fill_columns = bi_summ_df.iloc[:,2:].columns
bi_summ_df[fill_columns] = bi_summ_df[fill_columns].fillna(0)
# set values to int dtype where applicable to optimize displayed DataFrame
for column in bi_summ_df.columns:
try:
bi_summ_df[column] = bi_summ_df[column].astype(np.int)
except ValueError:
bi_summ_df[column] = bi_summ_df[column]
## proportion by category summary
# combine feature column and target
prop_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
prop_df = prop_df[prop_df[feature].notnull()]
# calculate percent of 100 by class label
prop_df = prop_df.groupby([feature, self.target.name]).agg({self.target.name : {"count"}})
prop_df = prop_df.groupby(level=0).apply(lambda x: 100 * x / float(x.sum()))
prop_df = prop_df.reset_index()
multiIndex = prop_df.columns
singleIndex = [i[0] for i in multiIndex.tolist()]
singleIndex[-1] = "Count"
prop_df.columns = singleIndex
prop_df = prop_df.reset_index(drop=True)
prop_df = pd.pivot_table(prop_df, values=["Count"], columns=[feature], index=[self.target.name], aggfunc={"Count": np.mean})
prop_df = prop_df.reset_index(drop=True)
multiIndex = prop_df.columns
singleIndex = []
for column in multiIndex.tolist():
try:
singleIndex.append(int(column[1]))
except ValueError:
singleIndex.append(column[1])
prop_df.columns = singleIndex
prop_df = prop_df.reset_index(drop=True)
# insert column to DataFrame with actual class labels if provided, otherwise use raw class labels in target
prop_df.insert(loc=0, column="Class", value=legend_labels if legend_labels is not None else np.unique(self.target))
# fill nan's with zero
fill_columns = prop_df.iloc[:,:].columns
prop_df[fill_columns] = prop_df[fill_columns].fillna(0)
# if there are only two class labels, perform z-test/t-test
if len(np.unique(bi_df[bi_df[feature].notnull()][feature])) == 2:
# total observations
total_obs1 = bi_df[(bi_df[feature] == np.unique(bi_df[feature])[0])][
feature
].shape[0]
total_obs2 = bi_df[(bi_df[feature] == np.unique(bi_df[feature])[1])][
feature
].shape[0]
# total positive observations
pos_obs1 = bi_df[
(bi_df[feature] == np.unique(bi_df[feature])[0])
& (bi_df[self.target.name] == 1)
][feature].shape[0]
pos_obs2 = bi_df[
(bi_df[feature] == np.unique(bi_df[feature])[1])
& (bi_df[self.target.name] == 1)
][feature].shape[0]
# perform z-test, return z-statistic and p-value
z, p_val = proportions_ztest(
count=(pos_obs1, pos_obs2), nobs=(total_obs1, total_obs2)
)
# add z-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"z-test statistic": z, "p-value": p_val}],
columns=["z-test statistic", "p-value"],
index=[feature],
).round(4)
# display summary tables
self.df_side_by_side(
dfs=(uni_summ_df, bi_summ_df, prop_df, stat_test_df),
names=["Feature summary", "Feature vs. target summary", "Target proportion", "Statistical test",],
)
if "percent_positive" in bi_summ_df:
bi_summ_df = bi_summ_df.drop(["percent_positive"], axis=1)
else:
# display summary tables
self.df_side_by_side(
dfs=(uni_summ_df, bi_summ_df, prop_df),
names=["Feature summary", "Feature vs. target summary", "Target proportion"],
)
if "percent_positive" in bi_summ_df:
bi_summ_df = bi_summ_df.drop(["percent_positive"], axis=1)
### visualizations
# set label rotation angle
len_unique_val = len(unique_vals)
avg_len_unique_val = sum(map(len, str(unique_vals))) / len(unique_vals)
if len_unique_val <= 4 and avg_len_unique_val <= 12:
rotation = 0
elif len_unique_val >= 5 and len_unique_val <= 8 and avg_len_unique_val <= 8:
rotation = 0
elif len_unique_val >= 9 and len_unique_val <= 14 and avg_len_unique_val <= 4:
rotation = 0
else:
rotation = 90
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_narrow")
# add canvas to prettierplot object
ax = p.make_canvas(title="Category counts\n* {}".format(feature), position=131, title_scale=0.82)
# add treemap to canvas
p.tree_map(
counts=uni_summ_df["Count"].values,
labels=uni_summ_df[feature].values,
colors=style.color_gen(name=color_map, num=len(uni_summ_df[feature].values)),
alpha=0.8,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Category counts by target\n* {}".format(feature), position=132)
# add faceted categorical plot to canvas
p.facet_cat(
df=bi_summ_df,
feature=feature,
label_rotate=rotation,
color_map=color_map,
bbox=(1.0, 1.15),
alpha=0.8,
legend_labels=legend_labels,
x_units=None,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Target proportion by category\n* {}".format(feature), position=133)
# add stacked bar chart to canvas
p.stacked_bar_h(
df=prop_df.drop("Class", axis=1),
bbox=(1.0, 1.15),
legend_labels=legend_labels,
color_map=color_map,
alpha=0.8,
ax=ax,
)
plt.show()
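# Hedged usage sketch for the categorical feature vs. categorical target routine above.
# The method name and the object/feature names below are assumptions inferred from the
# sibling methods in this file, not confirmed by this excerpt:
#
#   machine.eda_cat_target_cat_feat(
#       feature="embarked",
#       legend_labels=["died", "survived"],
#   )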
def eda_cat_target_num_feat(self, feature, color_map="viridis", outliers_out_of_scope=None, legend_labels=None,
chart_scale=15):
"""
Documentation:
---
Description:
Creates exploratory data visualizations and statistical summaries for a number
feature in the context of a categorical target.
---
Parameters:
feature : str
Feature to visualize.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
outliers_out_of_scope : boolean, float or int, default=None
Truncates the x-axis upper limit so that outliers are out of scope of the visualization.
The x-axis upper limit is reset to the maximum non-outlier value.
To identify outliers, the IQR is calculated, and values below the first quartile minus the IQR,
or above the third quartile plus the IQR, are designated as outliers. If True is passed, the
IQR that is subtracted/added is multiplied by 5. If a float or int is passed, the IQR is
multiplied by that value instead. Higher values require observations to be more extreme
before they are flagged as outliers.
legend_labels : list, default=None
Class labels displayed in plot legend.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates larger plots
and increases visual elements proportionally.
"""
### data summaries
## bivariate roll_up table
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# bivariate summary statistics
bi_summ_stats_df = pd.DataFrame(
columns=["Class", "Count", "Proportion", "Mean", "StdDev"]
)
# for each unique class label
for labl in np.unique(self.target):
# get feature values associated with single class label
feature_slice = bi_df[bi_df[self.target.name] == labl][feature]
# append summary statistics for feature values associated with class label
bi_summ_stats_df = bi_summ_stats_df.append(
{
"Class": labl,
"Count": len(feature_slice),
"Proportion": len(feature_slice) / len(bi_df[feature]) * 100,
"Mean": np.mean(feature_slice),
"StdDev": np.std(feature_slice),
},
ignore_index=True,
)
# apply custom legend labels, or set dtype to int if column values are numeric
if legend_labels is not None:
bi_summ_stats_df["Class"] = legend_labels
elif is_numeric_dtype(bi_summ_stats_df["Class"]):
bi_summ_stats_df["Class"] = bi_summ_stats_df["Class"].astype(np.int)
## Feature summary
describe_df = pd.DataFrame(bi_df[feature].describe()).reset_index()
# add count of missing values
describe_df = describe_df.append(
{
"index": "missing",
feature: np.round(self.data.shape[0] - bi_df[feature].shape[0], 5),
},
ignore_index=True,
)
# add skew
describe_df = describe_df.append(
{
"index": "skew",
feature: np.round(stats.skew(bi_df[feature].values, nan_policy="omit"), 5),
},
ignore_index=True,
)
# add kurtosis
describe_df = describe_df.append(
{
"index": "kurtosis",
feature: stats.kurtosis(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.rename(columns={"index": ""})
# execute z-test or t-test
if len(np.unique(self.target)) == 2:
s1 = bi_df[
(bi_df[self.target.name] == bi_df[self.target.name].unique()[0])
][feature]
s2 = bi_df[
(bi_df[self.target.name] == bi_df[self.target.name].unique()[1])
][feature]
if len(s1) > 30 and len(s2) > 30:
# perform z-test, return z-statistic and p-value
z, p_val = ztest(s1, s2)
# add z-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"z-test statistic": z, "p-value": p_val}],
columns=["z-test statistic", "p-value"],
index=[feature],
).round(4)
else:
# perform t-test, return t-score and p-value
t, p_val = stats.ttest_ind(s1, s2)
# add t-statistic and p-value to DataFrame
stat_test_df = pd.DataFrame(
data=[{"t-test statistic": t, "p-value": p_val}],
columns=["t-test statistic", "p-value"],
index=[feature],
).round(4)
# display summary tables
self.df_side_by_side(
dfs=(describe_df, bi_summ_stats_df, stat_test_df),
names=["Feature summary", "Feature vs. target summary", "Statistical test"],
)
else:
# display summary tables
self.df_side_by_side(
dfs=(describe_df, bi_summ_stats_df),
names=["Feature summary", "Feature vs. target summary"],
)
### visualizations
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_standard")
# if boolean is passed to outliers_out_of_scope
if isinstance(outliers_out_of_scope, bool):
# if outliers_out_of_scope = True
if outliers_out_of_scope:
# identify outliers using IQR method and an IQR step of 5
outliers = self.outlier_IQR(self.data[feature], iqr_step=5)
# reset x-axis minimum and maximum
x_axis_min = self.data[feature].drop(index=outliers).min()
x_axis_max = self.data[feature].drop(index=outliers).max()
# if outliers_out_of_scope is a float or int
elif isinstance(outliers_out_of_scope, float) or isinstance(outliers_out_of_scope, int):
# identify outliers using IQR method and an IQR step equal to the float/int passed
outliers = self.outlier_IQR(self.data[feature], iqr_step=outliers_out_of_scope)
# reset x-axis minimum and maximum
x_axis_min = self.data[feature].drop(index=outliers).min()
x_axis_max = self.data[feature].drop(index=outliers).max()
# add canvas to prettierplot object
ax = p.make_canvas(
title="Feature distribution\n* {}".format(feature),
title_scale=0.85,
position=221,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# add distribution plot to canvas
p.dist_plot(
bi_df[feature].values,
color=style.style_grey,
y_units="f",
x_units=x_units,
ax=ax,
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min, x_axis_max)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Probability plot\n* {}".format(feature),
title_scale=0.85,
position=222,
)
# add QQ / probability plot to canvas
p.prob_plot(
x=bi_df[feature].values,
plot=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Distribution by class\n* {}".format(feature),
title_scale=0.85,
position=223,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# generate color list
color_list = style.color_gen(name=color_map, num=len(np.unique(self.target)))
# add one distribution plot to canvas for each category class
for ix, labl in enumerate(np.unique(bi_df[self.target.name].values)):
p.dist_plot(
bi_df[bi_df[self.target.name] == labl][feature].values,
color=color_list[ix],
y_units="f",
x_units=x_units,
legend_labels=legend_labels if legend_labels is not None else np.arange(len(np.unique(self.target))),
alpha=0.4,
bbox=(1.0, 1.0),
ax=ax,
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min, x_axis_max)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Boxplot by class\n* {}".format(feature),
title_scale=0.85,
position=224,
)
## dynamically determine precision of x-units
# capture min and max feature values
dist_min = bi_df[feature].values.min()
dist_max = bi_df[feature].values.max()
# determine x-units precision based on min and max values in feature
if -3 < dist_min < 3 and -3 < dist_max < 3 and dist_max/dist_min < 10:
x_units = "fff"
elif -30 < dist_min < 30 and -30 < dist_max < 30 and dist_max/dist_min < 3:
x_units = "fff"
elif -5 < dist_min < 5 and -5 < dist_max < 5 and dist_max/dist_min < 10:
x_units = "ff"
elif -90 < dist_min < 90 and -90 < dist_max < 90 and dist_max/dist_min < 5:
x_units = "ff"
else:
x_units = "f"
# add horizontal box plot to canvas
p.box_plot_h(
x=feature,
y=self.target.name,
data=bi_df,
alpha=0.7,
x_units=x_units,
legend_labels=legend_labels,
bbox=(1.2, 1.0),
suppress_outliers=True,
ax=ax
)
# optionally reset x-axis limits
if outliers_out_of_scope is not None:
plt.xlim(x_axis_min-(x_axis_min * 0.1), x_axis_max)
# apply position adjustment to subplots
plt.subplots_adjust(bottom=-0.1)
plt.show()
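# Hedged usage sketch for eda_cat_target_num_feat; the object and feature names are
# illustrative assumptions, not defined in this excerpt:
#
#   machine.eda_cat_target_num_feat(
#       feature="age",
#       outliers_out_of_scope=3,   # flag values beyond quartile -/+ 3 * IQR as outliers
#       legend_labels=["died", "survived"],
#   )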
def eda_num_target_num_feat(self, feature, color_map="viridis", chart_scale=15):
"""
Documentation:
---
Description:
Produces exploratory data visualizations and statistical summaries for a numeric
feature in the context of a numeric target.
---
Parameters:
feature : str
Feature to visualize.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
### data summaries
## feature summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
# remove any rows with nulls
bi_df = bi_df[bi_df[feature].notnull()]
# cast target as float
bi_df[self.target.name] = bi_df[self.target.name].astype(float)
# create summary statistic table
describe_df = pd.DataFrame(bi_df[feature].describe()).reset_index()
# add skew and kurtosis to describe_df
describe_df = describe_df.append(
{
"index": "skew",
feature: stats.skew(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.append(
{
"index": "kurtosis",
feature: stats.kurtosis(bi_df[feature].values, nan_policy="omit"),
},
ignore_index=True,
)
describe_df = describe_df.rename(columns={"index": ""})
# display summary tables
display(describe_df)
### visualizations
# create prettierplot object
p = PrettierPlot(chart_scale=chart_scale, plot_orientation="wide_narrow")
# add canvas to prettierplot object
ax = p.make_canvas(
title="Feature distribution\n* {}".format(feature), position=131, title_scale=1.2
)
# determine x-units precision based on magnitude of max value
if -1 <= np.nanmax(bi_df[feature].values) <= 1:
x_units = "fff"
elif -10 <= np.nanmax(bi_df[feature].values) <= 10:
x_units = "ff"
else:
x_units = "f"
# determine y-units precision based on magnitude of max value
if -1 <= np.nanmax(bi_df[feature].values) <= 1:
y_units = "fff"
elif -10 <= np.nanmax(bi_df[feature].values) <= 10:
y_units = "ff"
else:
y_units = "f"
# x rotation
if -10000 < np.nanmax(bi_df[feature].values) < 10000:
x_rotate = 0
else:
x_rotate = 45
# add distribution plot to canvas
p.dist_plot(
bi_df[feature].values,
color=style.style_grey,
y_units=y_units,
x_rotate=x_rotate,
ax=ax,
)
# add canvas to prettierplot object
ax = p.make_canvas(title="Probability plot\n* {}".format(feature), position=132)
# add QQ / probability plot to canvas
p.prob_plot(x=bi_df[feature].values, plot=ax)
# add canvas to prettierplot object
ax = p.make_canvas(
title="Regression plot - feature vs. target\n* {}".format(feature),
position=133,
title_scale=1.5
)
# add regression plot to canvas
p.reg_plot(
x=feature,
y=self.target.name,
data=bi_df,
x_jitter=0.1,
x_rotate=x_rotate,
x_units=x_units,
y_units=y_units,
ax=ax,
)
plt.show()
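# Hedged usage sketch for eda_num_target_num_feat; object and feature names are
# illustrative assumptions:
#
#   machine.eda_num_target_num_feat(feature="square_footage", chart_scale=15)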
def eda_num_target_cat_feat(self, feature, level_count_cap=50, color_map="viridis", chart_scale=15):
"""
Documentation:
---
Description:
Produces exploratory data visualizations and statistical summaries for a category
feature in the context of a numeric target.
---
Parameters:
feature : str
Feature to visualize.
level_count_cap : int, default=50
Maximum number of unique levels in feature. If the number of levels exceeds the
cap then the feature is skipped.
color_map : str specifying built-in matplotlib colormap, default="viridis"
Color map applied to plots.
chart_scale : int or float, default=15
Controls size and proportions of chart and chart elements. Higher value creates
larger plots and increases visual elements proportionally.
"""
# if number of unique levels in feature is less than specified level_count_cap
if (len(np.unique(self.data[self.data[feature].notnull()][feature].values)) < level_count_cap):
### data summaries
## feature summary
# create empty DataFrame
uni_summ_df = pd.DataFrame(columns=[feature, "Count", "Proportion"])
# capture unique values and count of those unique values
unique_vals, unique_counts = np.unique(
self.data[self.data[feature].notnull()][feature], return_counts=True
)
# append each unique value, count and proportion to DataFrame
for i, j in zip(unique_vals, unique_counts):
uni_summ_df = uni_summ_df.append(
{feature: i, "Count": j, "Proportion": j / np.sum(unique_counts) * 100},
ignore_index=True,
)
# sort DataFrame by "Proportion", descending
uni_summ_df = uni_summ_df.sort_values(by=["Proportion"], ascending=False)
# set values to int dtype where applicable to optimize
if is_numeric_dtype(uni_summ_df[feature]):
uni_summ_df[feature] = uni_summ_df[feature].astype("int64")
uni_summ_df["Count"] = uni_summ_df["Count"].astype("int64")
## feature vs. target summary
# combine feature column and target
bi_df = pd.concat([self.data[feature], self.target], axis=1)
import types
from functools import wraps
import numpy as np
import datetime
import collections
from pandas.compat import(
zip, builtins, range, long, lzip,
OrderedDict, callable
)
from pandas import compat
from pandas.core.base import PandasObject
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.index import Index, MultiIndex, _ensure_index, _union_indexes
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.util.decorators import cache_readonly, Appender
import pandas.core.algorithms as algos
import pandas.core.common as com
from pandas.core.common import(_possibly_downcast_to_dtype, isnull,
notnull, _DATELIKE_DTYPES, is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype, _values_from_object)
from pandas.core.config import option_context
from pandas import _np_version_under1p7
import pandas.lib as lib
from pandas.lib import Timestamp
import pandas.tslib as tslib
import pandas.algos as _algos
import pandas.hashtable as _hash
_agg_doc = """Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a DataFrame or when passed to DataFrame.apply. If
passed a dict, the keys must be DataFrame column names.
Notes
-----
Numpy functions mean/median/prod/sum/std/var are special cased so the
default behavior is applying the function along axis=0
(e.g., np.mean(arr_2d, axis=0)) as opposed to
mimicking the default Numpy behavior (e.g., np.mean(arr_2d)).
Returns
-------
aggregated : DataFrame
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumsum', 'cumprod', 'cummin', 'cummax', 'cumcount',
'resample',
'describe',
'rank', 'quantile', 'count',
'fillna',
'mad',
'any', 'all',
'irow', 'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = \
(_common_apply_whitelist - set(['boxplot'])) | \
frozenset(['dtype', 'value_counts', 'unique', 'nunique',
'nlargest', 'nsmallest'])
_dataframe_apply_whitelist = \
_common_apply_whitelist | frozenset(['dtypes', 'corrwith'])
class GroupByError(Exception):
pass
class DataError(GroupByError):
pass
class SpecificationError(GroupByError):
pass
def _groupby_function(name, alias, npfunc, numeric_only=True,
_convert=False):
def f(self):
self._set_selection_from_grouper()
try:
return self._cython_agg_general(alias, numeric_only=numeric_only)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result.convert_objects()
return result
f.__doc__ = "Compute %s of group values" % name
f.__name__ = name
return f
def _first_compat(x, axis=0):
def _first(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(_first, axis=axis)
else:
return _first(x)
def _last_compat(x, axis=0):
def _last(x):
x = np.asarray(x)
x = x[notnull(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(_last, axis=axis)
else:
return _last(x)
def _count_compat(x, axis=0):
try:
return x.size
except:
return x.count()
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target object
This specification will select a column via the key parameter, or if the level and/or
axis parameters are given, a level of the index of the target object.
These are local specifications and will override 'global' settings, that is the parameters
axis and level which are passed to the groupby itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection (via key or level) is
a datetime-like object
axis : number/name of the axis, defaults to None
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when freq is passed)
closed : closed end of interval; left or right
label : interval boundary to use for labeling; left or right
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
Returns
-------
A specification for a groupby instruction
Examples
--------
>>> df.groupby(Grouper(key='A')) : syntactic sugar for df.groupby('A')
>>> df.groupby(Grouper(key='date',freq='60s')) : specify a resample on the column 'date'
>>> df.groupby(Grouper(level='date',freq='60s',axis=1)) :
specify a resample on the level 'date' on the columns axis with a frequency of 60s
"""
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.tseries.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=None, sort=False):
self.key=key
self.level=level
self.freq=freq
self.axis=axis
self.sort=sort
self.grouper=None
self.obj=None
self.indexer=None
self.binner=None
self.grouper=None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj):
"""
Parameters
----------
obj : the subject object
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, set up the internal grouper for this particular specification
Parameters
----------
obj : the subject object
"""
if self.key is not None and self.level is not None:
raise ValueError("The Grouper cannot specify both a key and a level!")
# the key must be a valid info item
if self.key is not None:
key = self.key
if key not in obj._info_axis:
raise KeyError("The grouper name {0} is not found".format(key))
ax = Index(obj[key],name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
ax = Index(ax.get_level_values(level), name=level)
else:
if not (level == 0 or level == ax.name):
raise ValueError("The grouper level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
indexer = self.indexer = ax.argsort(kind='quicksort')
ax = ax.take(indexer)
obj = obj.take(indexer, axis=self.axis, convert=False, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
def _get_binner_for_grouping(self, obj):
raise NotImplementedError
@property
def groups(self):
return self.grouper.groups
class GroupBy(PandasObject):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
_internal_names = ['_cache']
_internal_names_set = set(_internal_names)
_group_selection = None
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys, axis=axis,
level=level, sort=sort)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
def __len__(self):
return len(self.indices)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
@property
def groups(self):
""" dict {group name -> group labels} """
return self.grouper.groups
@property
def ngroups(self):
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
return self.grouper.indices
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
def convert(key, s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp,datetime.datetime)):
return Timestamp(key)
elif isinstance(s, np.datetime64):
return Timestamp(key).asm8
return key
sample = next(iter(self.indices))
if isinstance(sample, tuple):
if not isinstance(name, tuple):
raise ValueError("must supply a tuple to get_group with multiple grouping keys")
if not len(name) == len(sample):
raise ValueError("must supply a a same-length tuple to get_group with multiple grouping keys")
name = tuple([ convert(n, k) for n, k in zip(name,sample) ])
else:
name = convert(name, sample)
return self.indices[name]
@property
def name(self):
if self._selection is None:
return None # 'result'
else:
return self._selection
@property
def _selection_list(self):
if not isinstance(self._selection, (list, tuple, Series, Index, np.ndarray)):
return [self._selection]
return self._selection
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _set_selection_from_grouper(self):
""" we may need create a selection if we have non-level groupers """
grp = self.grouper
if self.as_index and getattr(grp,'groupings',None) is not None and self.obj.ndim > 1:
ax = self.obj._info_axis
groupers = [ g.name for g in grp.groupings if g.level is None and g.name is not None and g.name in ax ]
if len(groupers):
self._group_selection = (ax-Index(groupers)).tolist()
def _local_dir(self):
return sorted(set(self.obj._local_dir() + list(self._apply_whitelist)))
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
def __getitem__(self, key):
raise NotImplementedError('Not implemented: %s' % key)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
# need to set up the selection,
# as the groupings are not passed directly but live in the grouper
self._set_selection_from_grouper()
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
# this can be called recursively, so we need to raise ValueError
# if we don't have this method, to indicate to aggregate that
# this column should be marked as an error
try:
return self._aggregate_item_by_item(name, *args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
return obj.take(inds, axis=self.axis, convert=False)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
split-apply-combine combination rules attempt to be as common sense
based as possible. For example:
case 1:
group DataFrame
apply aggregation function (f(chunk) -> Series)
yield DataFrame, with group axis having group labels
case 2:
group DataFrame
apply transform function (f(chunk) -> DataFrame with same indexes)
yield DataFrame with resulting chunks glued together
case 3:
group Series
apply function with f(chunk) -> DataFrame
yield DataFrame with result of chunks glued together
Parameters
----------
func : function
Notes
-----
See online documentation for full exposition on how to use apply.
In the current implementation apply calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
See also
--------
aggregate, transform
Returns
-------
applied : type depending on grouped object and function
"""
func = _intercept_function(func)
@wraps(func)
def f(g):
return func(g, *args, **kwargs)
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment',None):
return self._python_apply_general(f)
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(keys, values,
not_indexed_same=mutated)
def aggregate(self, func, *args, **kwargs):
raise NotImplementedError
@Appender(_agg_doc)
def agg(self, func, *args, **kwargs):
return self.aggregate(func, *args, **kwargs)
def _iterate_slices(self):
yield self.name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise NotImplementedError
def mean(self):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('mean')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
f = lambda x: x.mean(axis=self.axis)
return self._python_agg_general(f)
def median(self):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median')
except GroupByError:
raise
except Exception: # pragma: no cover
self._set_selection_from_grouper()
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis)
return self._python_agg_general(f)
def std(self, ddof=1):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
return np.sqrt(self.var(ddof=ddof))
def var(self, ddof=1):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
if ddof == 1:
return self._cython_agg_general('var')
else:
self._set_selection_from_grouper()
f = lambda x: x.var(ddof=ddof)
return self._python_agg_general(f)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self.std(ddof=ddof)/np.sqrt(self.count())
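# Worked illustration of the formula above: a group with standard deviation 2.0 and
# 16 rows has sem = 2.0 / sqrt(16) = 0.5.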
def size(self):
"""
Compute group sizes
"""
return self.grouper.size()
sum = _groupby_function('sum', 'add', np.sum)
prod = _groupby_function('prod', 'prod', np.prod)
min = _groupby_function('min', 'min', np.min, numeric_only=False)
max = _groupby_function('max', 'max', np.max, numeric_only=False)
first = _groupby_function('first', 'first', _first_compat,
numeric_only=False, _convert=True)
last = _groupby_function('last', 'last', _last_compat, numeric_only=False,
_convert=True)
_count = _groupby_function('_count', 'count', _count_compat,
numeric_only=False)
def count(self, axis=0):
return self._count().astype('int64')
def ohlc(self):
"""
Compute sum of values, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
def nth(self, n, dropna=None):
"""
Take the nth row from each group.
If dropna, will take the nth non-null row; dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame); this is equivalent
to calling dropna(how=dropna) before the groupby.
Examples
--------
>>> df = DataFrame([[1, np.nan], [1, 4], [5, 6]], columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
A B
0 1 NaN
2 5 6
>>> g.nth(1)
A B
1 1 4
>>> g.nth(-1)
A B
1 1 4
2 5 6
>>> g.nth(0, dropna='any')
B
A
1 4
5 6
>>> g.nth(1, dropna='any') # NaNs denote group exhausted when using dropna
B
A
1 NaN
5 NaN
"""
self._set_selection_from_grouper()
if not dropna: # good choice
m = self.grouper._max_groupsize
if n >= m or n < -m:
return self._selected_obj.loc[[]]
rng = np.zeros(m, dtype=bool)
if n >= 0:
rng[n] = True
is_nth = self._cumcount_array(rng)
else:
rng[- n - 1] = True
is_nth = self._cumcount_array(rng, ascending=False)
result = self._selected_obj[is_nth]
# the result index
if self.as_index:
ax = self.obj._info_axis
names = self.grouper.names
if self.obj.ndim == 1:
# this is a pass-thru
pass
elif all([ n in ax for n in names ]):
result.index = Index(self.obj[names][is_nth].values.ravel()).set_names(names)
elif self._group_selection is not None:
result.index = self.obj._get_axis(self.axis)[is_nth]
result = result.sort_index()
return result
if (isinstance(self._selected_obj, DataFrame)
and dropna not in ['any', 'all']):
# Note: when agg-ing picker doesn't raise this, just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
keys = self.grouper.names
else:
# create a grouper with the original parameters, but on the dropped object
grouper, _, _ = _get_grouper(dropped, key=self.keys, axis=self.axis,
level=self.level, sort=self.sort)
sizes = dropped.groupby(grouper).size()
result = dropped.groupby(grouper).nth(n)
mask = (sizes<max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
def cumcount(self, **kwargs):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Example
-------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
"""
self._set_selection_from_grouper()
ascending = kwargs.pop('ascending', True)
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
obj = self._selected_obj
in_head = self._cumcount_array() < n
head = obj[in_head]
return head
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
except ignores as_index flag.
Example
-------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).tail(1)
A B
1 1 4
2 5 6
>>> df.groupby('A').tail(1)
A B
1 1 4
2 5 6
"""
obj = self._selected_obj
rng = np.arange(0, -self.grouper._max_groupsize, -1, dtype='int64')
in_tail = self._cumcount_array(rng, ascending=False) > -n
tail = obj[in_tail]
return tail
def _cumcount_array(self, arr=None, **kwargs):
"""
arr is where cumcount gets its values from
note: this is currently implementing sort=False (though the default is sort=True)
for groupby in general
"""
ascending = kwargs.pop('ascending', True)
if arr is None:
arr = np.arange(self.grouper._max_groupsize, dtype='int64')
len_index = len(self._selected_obj.index)
cumcounts = np.zeros(len_index, dtype=arr.dtype)
if not len_index:
return cumcounts
indices, values = [], []
for v in self.indices.values():
indices.append(v)
if ascending:
values.append(arr[:len(v)])
else:
values.append(arr[len(v)-1::-1])
indices = np.concatenate(indices)
values = np.concatenate(values)
cumcounts[indices] = values
return cumcounts
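# Worked example of the helper above (hypothetical values): with group indices
# {g1: [0, 2], g2: [1]} and arr = [0, 1], the pieces [0, 1] and [0] are scattered
# back to positions [0, 2] and [1], giving cumcounts = [0, 0, 1].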
def _index_with_as_index(self, b):
"""
Take boolean mask of index to be returned from apply, if as_index=True
"""
# TODO perf, it feels like this should already be somewhere...
from itertools import chain
original = self._selected_obj.index
gp = self.grouper
levels = chain((gp.levels[i][gp.labels[i][b]]
for i in range(len(gp.groupings))),
(original.get_level_values(i)[b]
for i in range(original.nlevels)))
new = MultiIndex.from_arrays(list(levels))
new.names = gp.names + original.names
return new
def _try_cast(self, result, obj):
"""
try to cast the result back to our obj's original type;
we may have round-tripped through object dtype in the meantime
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not np.isscalar(result):
result = _possibly_downcast_to_dtype(result, dtype)
return result
def _cython_agg_general(self, how, numeric_only=True):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = _intercept_function(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise NotImplementedError
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.tools.merge import concat
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
result = result.reindex_axis(ax, axis=self.axis)
elif self.group_keys:
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
result = concat(values, axis=self.axis)
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = []
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds the generated groups
"""
def __init__(self, axis, groupings, sort=True, group_keys=True):
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.compressed = True
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
mapper = _KeyMapper(comp_ids, ngroups, self.labels, self.levels)
return [mapper.get_key(i) for i in range(ngroups)]
def apply(self, f, data, axis=0):
mutated = False
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except (lib.InvalidApply):
# we detect a mutation of some kind
# so take slow path
pass
except (Exception) as e:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [_values_from_object(ping.group_index) for ping in self.groupings]
return _get_indices_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
# TODO: better impl
labels, _, ngroups = self.group_info
bin_counts = algos.value_counts(labels, sort=False)
bin_counts = bin_counts.reindex(np.arange(ngroups))
bin_counts.index = self.result_index
return bin_counts
@cache_readonly
def _max_groupsize(self):
'''
Compute size of largest group
'''
# For many items in each group this is much faster than
# self.size().max(), in worst case marginally slower
if self.indices:
return max(len(v) for v in self.indices.values())
else:
return 0
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby.values)
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = com._ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if self._overflow_possible:
tups = lib.fast_zip(all_labels)
labs, uniques = algos.factorize(tups)
if self.sort:
uniques, labs = _reorder_by_uniques(uniques, labs)
return labs, uniques
else:
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape)
comp_ids, obs_group_ids = _compress_group_index(group_index)
else:
ping = self.groupings[0]
comp_ids = ping.labels
obs_group_ids = np.arange(len(ping.group_index))
self.compressed = False
self._filter_empty_groups = False
return comp_ids, obs_group_ids
@cache_readonly
def _overflow_possible(self):
return _int64_overflow_possible(self.shape)
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
recons = self.get_group_levels()
return MultiIndex.from_arrays(recons, names=self.names)
def get_group_levels(self):
obs_ids = self.group_info[1]
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].group_index]
if self._overflow_possible:
recons_labels = [np.array(x) for x in zip(*obs_ids)]
else:
recons_labels = decons_group_index(obs_ids, self.shape)
name_list = []
for ping, labels in zip(self.groupings, recons_labels):
labels = com._ensure_platform_int(labels)
levels = ping.group_index.take(labels)
name_list.append(levels)
return name_list
#------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last',
'count': 'group_count',
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {}
_filter_empty_groups = True
def _get_aggregate_function(self, how, values):
dtype_str = values.dtype.name
def get_func(fname):
# find the function, or use the object function, or return a
# generic
for dt in [dtype_str, 'object']:
f = getattr(_algos, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
return getattr(_algos, fname, None)
ftype = self._cython_functions[how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func, dtype_str
def aggregate(self, values, how, axis=0):
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError
out_shape = (self.ngroups,) + values.shape[1:]
if is_numeric_dtype(values.dtype):
values = com.ensure_float(values)
is_numeric = True
out_dtype = 'f%d' % values.dtype.itemsize
else:
is_numeric = issubclass(values.dtype.type, (np.datetime64,
np.timedelta64))
if is_numeric:
out_dtype = 'float64'
values = values.view('int64')
else:
out_dtype = 'object'
values = values.astype(object)
# will be filled in Cython function
result = np.empty(out_shape, dtype=out_dtype)
result.fill(np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(result, counts, values, how, is_numeric)
if self._filter_empty_groups:
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
result, (counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def _aggregate(self, result, counts, values, how, is_numeric):
agg_func, dtype = self._get_aggregate_function(how, values)
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids)
else:
agg_func(result, counts, values, comp_ids)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = _intercept_function(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = _algos.groupsort_indexer(group_index, ngroups)[0]
obj = obj.take(indexer, convert=False)
group_index = com.take_nd(group_index, indexer, allow_fill=False)
grouper = lib.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray)) or
isinstance(res, list)):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
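# Worked example of generate_bins_generic (hypothetical arrays):
#   values = np.array([1, 2, 5, 7, 9]); binner = np.array([0, 4, 8, 10])
#   generate_bins_generic(values, binner, closed='left') -> array([2, 4, 5]),
#   i.e. values[0:2] fall in the first bin, values[2:4] in the second and
#   values[4:5] in the last.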
class BinGrouper(BaseGrouper):
def __init__(self, bins, binlabels, filter_empty=False):
self.bins = com._ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not tslib.NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start,edge: data._slice(slice(start,edge),axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start,edge: data[slice(start,edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not tslib.NaT:
yield label, slicer(start,edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start,None)
def apply(self, f, data, axis=0):
result_keys = []
result_values = []
mutated = False
for key, group in self.get_iterator(data, axis=axis):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_keys.append(key)
result_values.append(res)
return result_keys, result_values, mutated
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not tslib.NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def ngroups(self):
return len(self.binlabels)
@cache_readonly
def result_index(self):
mask = self.binlabels.asi8 == tslib.iNaT
return self.binlabels[~mask]
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
# for compat
return None
def size(self):
"""
Compute group sizes
"""
base = Series(np.zeros(len(self.result_index), dtype=np.int64),
index=self.result_index)
indices = self.indices
for k, v in compat.iteritems(indices):
indices[k] = len(v)
bin_counts = Series(indices, dtype=np.int64)
result = base.add(bin_counts, fill_value=0)
# addition with fill_value changes dtype to float64
result = result.astype(np.int64)
return result
#----------------------------------------------------------------------
# cython aggregation
_cython_functions = {
'add': 'group_add_bin',
'prod': 'group_prod_bin',
'mean': 'group_mean_bin',
'min': 'group_min_bin',
'max': 'group_max_bin',
'var': 'group_var_bin',
'ohlc': 'group_ohlc',
'first': {
'name': 'group_nth_bin',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
},
'last': 'group_last_bin',
'count': 'group_count_bin',
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
_filter_empty_groups = True
def _aggregate(self, result, counts, values, how, is_numeric=True):
agg_func, dtype = self._get_aggregate_function(how, values)
if values.ndim > 3:
# punting for now
raise NotImplementedError
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
agg_func(result[:, :, i], counts, chunk, self.bins)
else:
agg_func(result, counts, values, self.bins)
return result
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = lib.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.index = index
self.sort = sort
self.obj = obj
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
# pre-computed
self._was_factor = False
self._should_compress = True
# we have a single grouper which may be a myriad of things, some of which are
# dependent on the passed-in level
#
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
inds = index.labels[level]
level_index = index.levels[level]
if self.name is None:
self.name = index.names[level]
# XXX complete hack
if grouper is not None:
level_values = index.levels[level].take(inds)
self.grouper = level_values.map(self.grouper)
else:
self._was_factor = True
# all levels may not be observed
labels, uniques = algos.factorize(inds, sort=True)
if len(uniques) > 0 and uniques[0] == -1:
# handle NAs
mask = inds != -1
ok_labels, uniques = algos.factorize(inds[mask], sort=True)
labels = np.empty(len(inds), dtype=inds.dtype)
labels[mask] = ok_labels
labels[~mask] = -1
if len(uniques) < len(level_index):
level_index = level_index.take(uniques)
self._labels = labels
self._group_index = level_index
self.grouper = level_index.take(labels)
else:
if isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif isinstance(self.grouper, Categorical):
factor = self.grouper
self._was_factor = True
# Is there any way to avoid this?
self.grouper = np.asarray(factor)
self._labels = factor.codes
self._group_index = factor.levels
if self.name is None:
self.name = factor.name
# a passed Grouper like
elif isinstance(self.grouper, Grouper):
# get the new grouper
grouper = self.grouper._get_binner_for_grouping(self.obj)
self.obj = self.grouper.obj
self.grouper = grouper
if self.name is None:
self.name = grouper.name
# no level passed
if not isinstance(self.grouper, (Series, Index, np.ndarray)):
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
com.pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have Timestamps like
if getattr(self.grouper,'dtype',None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping(%s)' % self.name
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
return _groupby_indices(self.grouper)
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._was_factor: # pragma: no cover
raise Exception('Should not call this method when grouping by level')
else:
labels, uniques = algos.factorize(self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
_groups = None
@property
def groups(self):
if self._groups is None:
self._groups = self.index.groupby(self.grouper)
return self._groups
def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
Groupers enable local references to axis, level, sort, while
the passed in axis, level, and sort are 'global'.
This routine tries to figure out what the passed-in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
"""
group_axis = obj._get_axis(axis)
# validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0:
raise ValueError('level > 0 only valid with MultiIndex')
level = None
key = group_axis
# a passed in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
if not isinstance(key, (tuple, list)):
keys = [key]
else:
keys = key
# what are we after, exactly?
match_axis_length = len(keys) == len(group_axis)
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns = all(g in obj.columns for g in keys)
else:
all_in_columns = False
except Exception:
all_in_columns = False
if (not any_callable and not all_in_columns
and not any_arraylike and match_axis_length
and level is None):
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
for i, (gpr, level) in enumerate(zip(keys, levels)):
name = None
try:
obj._data.items.get_loc(gpr)
in_axis = True
except Exception:
in_axis = False
if _is_label_like(gpr) or in_axis:
exclusions.append(gpr)
name = gpr
gpr = obj[gpr]
if isinstance(gpr, Categorical) and len(gpr) != len(obj):
errmsg = "Categorical grouper must have len(grouper) == len(data)"
raise AssertionError(errmsg)
ping = Grouping(group_axis, gpr, obj=obj, name=name, level=level, sort=sort)
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort)
return grouper, exclusions, obj
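# Illustrative (hedged) examples of the key forms that funnel through
# _get_grouper; each call below ends up as a single BaseGrouper plus the set
# of column keys to exclude from the aggregation output:
#
#   df.groupby('A')             # a column label -> one Grouping, 'A' excluded
#   df.groupby(['A', 'B'])      # several labels -> two Groupings
#   df.groupby(level=0)         # an index level
#   df.groupby(len)             # a callable mapped over the axis labels
#   df.groupby(df['A'].values)  # an equal-length array
#   df.groupby(Grouper(key='date', freq='M'))  # a Grouper resolved via its key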
def _is_label_like(val):
return isinstance(val, compat.string_types) or np.isscalar(val)
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper.values
else:
return grouper.reindex(axis).values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise AssertionError('Grouper and axis must be same length')
return grouper
else:
return grouper
class SeriesGroupBy(GroupBy):
_apply_whitelist = _series_apply_whitelist
def aggregate(self, func_or_funcs, *args, **kwargs):
"""
Apply aggregation function or functions to groups, yielding most likely
Series but in some cases DataFrame depending on the output of the
aggregation function
Parameters
----------
func_or_funcs : function or list / dict of functions
List/dict of functions will produce DataFrame with column names
determined by the function names themselves (list) or the keys in
the dict
Notes
-----
agg is an alias for aggregate. Use it.
Examples
--------
>>> series
bar 1.0
baz 2.0
qot 3.0
qux 4.0
>>> mapper = lambda x: x[0] # first letter
>>> grouped = series.groupby(mapper)
>>> grouped.aggregate(np.sum)
b 3.0
q 7.0
>>> grouped.aggregate([np.sum, np.mean, np.std])
mean std sum
b 1.5 0.5 3
q 3.5 0.5 7
>>> grouped.agg({'result' : lambda x: x.mean() / x.std(),
... 'total' : np.sum})
result total
b 2.121 3
q 4.95 7
See also
--------
apply, transform
Returns
-------
Series or DataFrame
"""
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if hasattr(func_or_funcs, '__iter__'):
ret = self._aggregate_multiple_funcs(func_or_funcs)
else:
cyfunc = _intercept_cython(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
return ret
def _aggregate_multiple_funcs(self, arg):
if isinstance(arg, dict):
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
results[name] = self.aggregate(func)
return DataFrame(results, columns=columns)
def _wrap_aggregated_output(self, output, names=None):
# sort of a kludge
output = output[self.name]
index = self.grouper.result_index
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self.name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self.name)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
return DataFrame(values, index=index).stack()
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(), name=self.name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
def transform(self, func, *args, **kwargs):
"""
Call function producing a like-indexed Series on each group and return
a Series with the transformed values
Parameters
----------
func : function
To apply to each group. Should return a Series with the same index
Examples
--------
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
Returns
-------
transformed : Series
"""
# if string function
if isinstance(func, compat.string_types):
return self._transform_fast(lambda : getattr(self, func)(*args, **kwargs))
# do we have a cython function
cyfunc = _intercept_cython(func)
if cyfunc and not args and not kwargs:
return self._transform_fast(cyfunc)
# reg transform
dtype = self._selected_obj.dtype
result = self._selected_obj.values.copy()
wrapper = lambda x: func(x, *args, **kwargs)
for i, (name, group) in enumerate(self):
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
# may need to astype
try:
common_type = np.common_type(np.array(res), result)
if common_type != result.dtype:
result = result.astype(common_type)
except:
pass
indexer = self._get_index(name)
result[indexer] = res
result = _possibly_downcast_to_dtype(result, dtype)
return self._selected_obj.__class__(result,
index=self._selected_obj.index,
name=self._selected_obj.name)
def _transform_fast(self, func):
"""
fast version of transform, only applicable to builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self,func)
values = func().values
counts = self.count().values
values = np.repeat(values, com._ensure_platform_int(counts))
# the values/counts are repeated according to the group index
indices = self.indices
# shortcut of we have an already ordered grouper
if Index(self.grouper.group_info[0]).is_monotonic:
result = Series(values, index=self.obj.index)
else:
index = Index(np.concatenate([ indices[v] for v in self.grouper.result_index ]))
result = Series(values, index=index).sort_index()
result.index = self.obj.index
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Example
-------
>>> grouped.filter(lambda x: x.mean() > 0)
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notnull(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notnull(b)
try:
indices = [self._get_index(name) if true_and_notnull(group) else []
for name, group in self]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, numeric_only=True):
new_items, new_blocks = self._cython_agg_blocks(how, numeric_only=numeric_only)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, numeric_only=True):
data, agg_axis = self._get_data_to_aggregate()
new_blocks = []
if numeric_only:
data = data.get_numeric_data(copy=False)
for block in data.blocks:
values = block._try_operate(block.values)
if block.is_numeric:
values = com.ensure_float(values)
result, _ = self.grouper.aggregate(values, how, axis=agg_axis)
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = make_block(result, placement=block.mgr_locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
return data.items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
@cache_readonly
def _obj_with_exclusions(self):
if self._selection is not None:
return self.obj.reindex(columns=self._selection_list)
if len(self.exclusions) > 0:
return self.obj.drop(self.exclusions, axis=1)
else:
return self.obj
@Appender(_agg_doc)
def aggregate(self, arg, *args, **kwargs):
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
result = OrderedDict()
if isinstance(arg, dict):
if self.axis != 0: # pragma: no cover
raise ValueError('Can only pass dict with axis=0')
obj = self._selected_obj
if any(isinstance(x, (list, tuple, dict)) for x in arg.values()):
new_arg = OrderedDict()
for k, v in compat.iteritems(arg):
if not isinstance(v, (tuple, list, dict)):
new_arg[k] = [v]
else:
new_arg[k] = v
arg = new_arg
keys = []
if self._selection is not None:
subset = obj
if isinstance(subset, DataFrame):
raise NotImplementedError
for fname, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(subset, selection=self._selection,
grouper=self.grouper)
result[fname] = colg.aggregate(agg_how)
keys.append(fname)
else:
for col, agg_how in compat.iteritems(arg):
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
result[col] = colg.aggregate(agg_how)
keys.append(col)
if isinstance(list(result.values())[0], DataFrame):
from pandas.tools.merge import concat
result = concat([result[k] for k in keys], keys=keys, axis=1)
else:
result = DataFrame(result)
elif isinstance(arg, list):
return self._aggregate_multiple_funcs(arg)
else:
cyfunc = _intercept_cython(arg)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs([arg])
result.columns = Index(result.columns.levels[0],
name=self._selected_obj.columns.name)
except:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
if isinstance(result.index, MultiIndex):
zipped = zip(result.index.levels, result.index.labels,
result.index.names)
for i, (lev, lab, name) in enumerate(zipped):
result.insert(i, name,
com.take_nd(lev.values, lab,
allow_fill=False))
result = result.consolidate()
else:
values = result.index.values
name = self.grouper.groupings[0].name
result.insert(0, name, values)
result.index = np.arange(len(result))
return result.convert_objects()
def _aggregate_multiple_funcs(self, arg):
from pandas.tools.merge import concat
if self.axis != 0:
raise NotImplementedError
obj = self._obj_with_exclusions
results = []
keys = []
for col in obj:
try:
colg = SeriesGroupBy(obj[col], selection=col,
grouper=self.grouper)
results.append(colg.aggregate(arg))
keys.append(col)
except (TypeError, DataError):
pass
except SpecificationError:
raise
result = concat(results, keys=keys, axis=1)
return result
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
# for name in self.indices:
# data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise NotImplementedError
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors=None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors=e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
if len(keys) == 0:
# XXX
return DataFrame({})
key_names = self.grouper.names
if isinstance(values[0], DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = MultiIndex.from_tuples(keys, names=key_names)
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup =
|
Index(keys)
|
pandas.core.index.Index
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import pytest
from pandas.compat import range, lrange
import numpy as np
from pandas import DataFrame, Series, Index, MultiIndex
from pandas.util.testing import (assert_series_equal,
assert_frame_equal,
assertRaisesRegexp)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Column add, remove, delete.
class TestDataFrameMutateColumns(tm.TestCase, TestData):
def test_assign(self):
df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]})
original = df.copy()
result = df.assign(C=df.B / df.A)
expected = df.copy()
expected['C'] = [4, 2.5, 2]
assert_frame_equal(result, expected)
# lambda syntax
result = df.assign(C=lambda x: x.B / x.A)
assert_frame_equal(result, expected)
# original is unmodified
|
assert_frame_equal(df, original)
|
pandas.util.testing.assert_frame_equal
|
"""Graphs differences in signal measurements of channels over time (scan instances)
"""
import json
from itertools import cycle
import pandas as pd
import plotly
import plotly.graph_objects as go
from db import load
class TrackChannels():
def __init__(self):
super().__init__()
self.default_signal_measurement = "snq"
self.signal_measurements = ["snq", "ss", "seq"]
self.colors = ["#1f77b4", "#ff7f0e", "#2ca02c", "#d62728", "#9467bd", "#8c564b",
"#e377c2", "#7f7f7f", "#bcbd22", "#17becf", "goldenrod", "darkseagreen",
"palevioletred", "slateblue", "teal", "chocolate", "deepskyblue", "lightcoral",
"greenyellow", "dodgerblue", "darksalmon", "khaki", "plum", "lightgreen",
"mediumslateblue", "olive", "darkgray", "fuschia", "ivory"]
self.color_cycle = cycle(self.colors)
self.default_antenna = load("SELECT configured_antenna_instance FROM monitor")["configured_antenna_instance"].to_list()[0]
self.current_antenna = self.default_antenna
antenna_df = load("SELECT * FROM antenna")
self.antenna_map = {instance: {"name": str(instance) + "; " + "Name: " + name + ", Location: " + location + ", Direction: " + str(direction) + " degrees, Comments: "}
for instance, name, location, direction, comment in zip(antenna_df["antenna_instance"], antenna_df["name"], antenna_df["location"],
antenna_df["direction"], antenna_df["comment"])}
# Remove quotations
for i, description in enumerate(self.antenna_map.values()):
if "'" in self.antenna_map[i + 1]["name"]:
self.antenna_map[i + 1]["name"] = self.antenna_map[i + 1]["name"].replace("'", "")
if "\"" in self.antenna_map[i + 1]["name"]:
self.antenna_map[i + 1]["name"] = self.antenna_map[i + 1]["name"].replace("\"", "")
self.fig = None
self.real_channels = None
self.labels = None
self.mdf = None
def _build_df(self):
signals = pd.DataFrame()
for channel in self.real_channels:
df = load("""SELECT signal.scan_instance, snq, ss, seq
FROM signal LEFT JOIN scan ON signal.scan_instance = scan.scan_instance
WHERE channel=? AND antenna_instance=?""", channel, self.current_antenna)
df.columns = ["scan_instance"] + [f"{measurement}{channel}" for measurement in self.signal_measurements]
signals =
|
pd.merge(signals, df, how="inner", on="scan_instance")
|
pandas.merge
|
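# A minimal, self-contained sketch of the merge pattern used in _build_df
# above: per-channel frames share a scan_instance key and are joined pairwise,
# widening the table one channel at a time (the column names here are invented
# for illustration).
import pandas as pd

left = pd.DataFrame({"scan_instance": [1, 2, 3], "snq7": [80, 82, 81]})
right = pd.DataFrame({"scan_instance": [2, 3, 4], "snq9": [60, 61, 59]})
wide = pd.merge(left, right, how="inner", on="scan_instance")
# wide keeps only scan_instances 2 and 3, with one snq column per channel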
from abc import ABC, abstractmethod
from FutuAlgo import Logger
import zmq
import zmq.asyncio
import pickle
import asyncio
from sanic import Sanic
from sanic import response
import pandas as pd
import time
import os
from FutuAlgo.FutuHook import d_types
import datetime
import requests
import itertools
import shutil
import collections
import random
class BaseAlgo(ABC):
def __init__(self, name: str, benchmark: str = 'SPX'):
# settings
self.name = name
self.benchmark = benchmark
self.logger = Logger.RootLogger(root_name=self.name)
self._trading_environment = ''
self._trading_universe = None
self._failed_tickers = None
self._datatypes = None
self._txn_cost = None
self._total_txn_cost = 0
self._initial_capital = 0.0
# current status
self._running = False
self._current_cash = 0.0
# Info
self.pending_orders = None
self.completed_orders = None
self.positions = None
self.slippage = None
self.ticker_lot_size = None
self.records = None
# IPs
self._ip = ''
self._mq_ip = ''
self._hook_ip = ''
self._zmq_context = None
self._mq_socket = None
self._topics = None
self._hook_name = None
# Cache
self.cache_path = None
self.cache = None
self._max_cache = 0
self._per_ticker_max_cache = 0
self._drop_cache_ratio = 0
self.cache_pickle_no_map = None
# Web
self._sanic = None
self._sanic_host = None
self._sanic_port = None
self.initialized_date = None
self._initialized = False
def initialize(self, initial_capital: float, mq_ip: str,
hook_ip: str, hook_name: str, trading_environment: str,
trading_universe: list, datatypes: list,
txn_cost: float = 30, max_cache: int = 50000, per_ticker_max_cache: int = 10000,
drop_cache_ratio: float = 0.3, test_mq_con=True, **kwargs):
try:
assert trading_environment in (
'REAL', 'SIMULATE', 'BACKTEST'), f'Invalid trading environment {trading_environment}'
for dtype in datatypes:
assert dtype in d_types, f'Invalid data type {dtype}'
self._trading_environment = trading_environment
self._trading_universe = trading_universe
self._datatypes = datatypes
self._txn_cost = txn_cost
self._total_txn_cost = 0
self.pending_orders = dict()
self.records = pd.DataFrame(columns=['PV', 'EV', 'Cash'])
self.completed_orders = pd.DataFrame(columns=['order_id'])
self.positions = pd.DataFrame(columns=['price', 'quantity', 'market_value'])
self.slippage = pd.DataFrame(columns=['exp_price', 'dealt_price', 'dealt_qty', 'total_slippage'])
self._initial_capital = initial_capital
self._mq_ip = mq_ip
self._hook_ip = hook_ip
self._hook_name = hook_name
self._current_cash = initial_capital
if test_mq_con:
# Test Connection with ZMQ
test_context = zmq.Context()
try:
test_socket = test_context.socket(zmq.PAIR)
test_socket.setsockopt(zmq.LINGER, 0)
test_socket.setsockopt(zmq.SNDTIMEO, 2000)
test_socket.setsockopt(zmq.RCVTIMEO, 2000)
hello_mq_ip = self._mq_ip.split(':')
hello_mq_ip = ':'.join([hello_mq_ip[0], hello_mq_ip[1], str(int(hello_mq_ip[2]) + 1)])
test_socket.connect(hello_mq_ip)
test_socket.send_string('Ping')
msg = test_socket.recv_string()
if msg != 'Pong':
raise Exception(f'Failed to connect to ZMQ, please check : {self._mq_ip}')
self.logger.info(f'Test Connection with ZMQ {self._mq_ip} is Successful!')
self.logger.info(f'Test Connection with ZMQ {hello_mq_ip} is Successful!')
except zmq.error.Again:
raise Exception(f'Failed to connect to ZMQ, please check : {self._mq_ip}')
# except Exception as e:
# raise Exception(f'Failed to connect to ZMQ, please check : {self._mq_ip}, reason: {str(e)}')
finally:
test_context.destroy()
# Test Connection with Hook
try:
requests.get(self._hook_ip + '/subscriptions').json()
self.logger.info(f'Test Connection with Hook IP {self._hook_ip} is Successful!')
except requests.ConnectionError:
raise Exception(f'Failed to connect to Hook, please check: {self._hook_ip}')
# Subscription data
self.ticker_lot_size = dict()
self._failed_tickers = list()
self._topics = list()
# Cache
self.cache_path = './{}_cache'.format(self.name)
if os.path.exists(self.cache_path):
shutil.rmtree(self.cache_path)
os.mkdir(self.cache_path)
self.cache = dict()
for datatype in d_types:
self.cache[datatype] = pd.DataFrame()
self._max_cache = max_cache
self._per_ticker_max_cache = per_ticker_max_cache
self._drop_cache_ratio = drop_cache_ratio
self.cache_pickle_no_map = collections.defaultdict(lambda: 1)
self.initialized_date = datetime.datetime.today()
self._running = False
self._initialized = True
self.logger.info('Initialized successfully.')
except Exception as e:
self._initialized = False
self.logger.error(f'Failed to initialize algo, reason: {str(e)}')
async def daily_record_performance(self):
while True:
self.log()
await asyncio.sleep(60 * 60 * 24 - time.time() % (60 * 60 * 24))
def log(self, overwrite_date=None):
EV = sum(self.positions['market_value'])
PV = EV + self._current_cash
d = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') if overwrite_date is None else overwrite_date
self.records.loc[d] = [PV, EV, self._current_cash]
async def main(self):
self._zmq_context = zmq.asyncio.Context()
self._mq_socket = self._zmq_context.socket(zmq.SUB)
self._mq_socket.connect(self._mq_ip)
self.subscribe_tickers(tickers=self._trading_universe)
# for ticker in self._trading_universe:
# self.positions.loc[ticker] = [0.0, 0.0, 0.0]
self._running = True
self.logger.info(f'Algo {self.name} running successfully!')
while True:
try:
topic, bin_df = await self._mq_socket.recv_multipart()
if not self._running:
await asyncio.sleep(0.5)
continue
topic_split = topic.decode('ascii').split('.')
datatype = topic_split[1]
key = '.'.join(topic_split[2:])
df = pickle.loads(bin_df)
if datatype == 'ORDER_UPDATE':
self.update_positions(df)
await self.on_order_update(order_id=key, df=df)
else:
self.update_prices(datatype=datatype, df=df)
# TODO: improve add_cache places, determine_trigger, trigger_strat
self.add_cache(datatype=datatype, df=df)
trigger_strat, (tgr_dtype, tgr_ticker, tgr_df) = self.determine_trigger(datatype=datatype,
ticker=key, df=df)
if trigger_strat:
await self.trigger_strat(datatype=tgr_dtype, ticker=tgr_ticker, df=tgr_df)
except Exception as e:
self._running = False
self.logger.error(f'Exception occur, Algo stopped, reason: {str(e)}')
raise
def determine_trigger(self, datatype, ticker, df):
# TODO: improve add_cache places, determine_trigger, trigger_strat
return True, (datatype, ticker, df)
async def trigger_strat(self, datatype, ticker, df):
# TODO: improve add_cache places, determine_trigger, trigger_strat
if datatype == 'TICKER':
await self.on_tick(ticker=ticker, df=df)
elif datatype == 'QUOTE':
await self.on_quote(ticker=ticker, df=df)
elif 'K_' in datatype:
await self.on_bar(datatype=datatype, ticker=ticker, df=df)
elif datatype == 'ORDER_BOOK':
await self.on_orderbook(ticker=ticker, df=df)
else:
await self.on_other_data(datatype=datatype, ticker=ticker, df=df)
@abstractmethod
async def on_other_data(self, datatype, ticker, df):
pass
@abstractmethod
async def on_tick(self, ticker, df):
pass
@abstractmethod
async def on_quote(self, ticker, df):
pass
@abstractmethod
async def on_orderbook(self, ticker, df):
pass
@abstractmethod
async def on_bar(self, datatype, ticker, df):
pass
@abstractmethod
async def on_order_update(self, order_id, df):
pass
def run(self, sanic_port, sanic_host='127.0.0.1'):
if not self._initialized:
self.logger.info('Algo not initialized')
else:
loop = asyncio.get_event_loop()
self._sanic = Sanic(self.name)
self._sanic_host = sanic_host
self._sanic_port = sanic_port
self._ip = 'http://' + self._sanic_host + ':' + str(self._sanic_port)
async def _run():
tasks = list()
self.app_add_route(app=self._sanic)
web_server = self._sanic.create_server(return_asyncio_server=True, host=sanic_host, port=sanic_port)
tasks.append(web_server)
tasks.append(self.main())
tasks.append(self.daily_record_performance())
await asyncio.gather(*tasks)
loop.create_task(_run())
loop.run_forever()
# ------------------------------------------------ [ Position ] ------------------------------------------
def update_positions(self, df):
# TODO: changed 2020/07/26
# record order df
trd_side = 1 if df['trd_side'].iloc[0].upper() in ('BUY', 'BUY_BACK') else -1
dealt_qty = df['dealt_qty'].iloc[0] * trd_side
avg_price = df['dealt_avg_price'].iloc[0]
order_id = df['order_id'].iloc[0]
ticker = df['ticker'].iloc[0]
order_status = df['order_status'].iloc[0]
in_pending = False
in_completed = False
if order_id in self.pending_orders.keys():
in_pending = True
last_order_update_df = self.pending_orders[order_id]
last_qty = last_order_update_df['dealt_qty'].iloc[0] * trd_side
last_avg_price = last_order_update_df['dealt_avg_price'].iloc[0]
cash_change = -(dealt_qty * avg_price - last_qty * last_avg_price)
qty_change = dealt_qty - last_qty
else:
if order_id not in self.completed_orders['order_id'].values:
cash_change = - dealt_qty * avg_price
qty_change = dealt_qty
else:
in_completed = True
cash_change = 0
qty_change = 0
if order_status in ('SUBMIT_FAILED', 'FILLED_ALL', 'CANCELLED_PART', 'CANCELLED_ALL', 'FAILED', 'DELETED'):
if not in_completed:
self.completed_orders = self.completed_orders.append(df)
if order_status in ('FILLED_ALL', 'CANCELLED_PART'):
# update slippage
self.slippage.loc[order_id] = [0, 0, 0, 0]
exp_price = self.positions.loc[ticker]['price'] if df['price'].iloc[0] == 0.0 else df['price'].iloc[
0]
self.slippage.loc[order_id] = [exp_price, avg_price, dealt_qty, (avg_price - exp_price) * dealt_qty]
# Txn cost
self._total_txn_cost += self._txn_cost
cash_change -= self._txn_cost
if in_pending:
del self.pending_orders[order_id]
else:
self.pending_orders[order_id] = df
# update positions and snapshot
latest_price = self.positions.loc[ticker]['price']
existing_qty = self.positions.loc[ticker]['quantity']
new_qty = existing_qty + qty_change
self.positions.loc[ticker] = [latest_price, new_qty, new_qty * latest_price]
self._current_cash += cash_change
def update_prices(self, datatype, df):
if 'K_' in datatype:
ticker = df['ticker'].iloc[0]
qty = self.positions.loc[ticker]['quantity']
latest_price = df['close'].iloc[0]
self.positions.loc[ticker] = [latest_price, qty, qty * latest_price]
elif datatype == 'QUOTE':
ticker = df['ticker'].iloc[0]
qty = self.positions.loc[ticker]['quantity']
latest_price = df['quote'].iloc[0]
self.positions.loc[ticker] = [latest_price, qty, qty * latest_price]
# ------------------------------------------------ [ Data ] ------------------------------------------
def add_cache(self, datatype, df):
# TODO: changed 2020/07/26
self.cache[datatype] = self.cache[datatype].append(df).drop_duplicates(
subset=['datetime', 'ticker'], keep='last')
if self.cache[datatype].shape[0] >= self._max_cache:
drop_rows_n = int(self._max_cache * self._drop_cache_ratio)
# Drop cache to pickle
self.cache[datatype] = self.cache[datatype].sort_values('datetime')
df_to_drop = self.cache[datatype].iloc[:drop_rows_n]
self.cache[datatype] = self.cache[datatype][drop_rows_n:]
for ticker in df_to_drop['ticker'].unique():
ticker_df = df_to_drop.loc[df_to_drop['ticker'] == ticker]
key = f'{datatype}_{ticker}'
while ticker_df.shape[0] != 0:
try:
with open(f'{self.cache_path}/{key}_{self.cache_pickle_no_map[key]}.pickle',
'rb+') as file:
db_df = pickle.load(file)
# unnecessary min()
# to_store = ticker_df.iloc[
# :min(drop_rows_n, self._per_ticker_max_cache - db_df.shape[0])]
to_store = ticker_df.iloc[:self._per_ticker_max_cache - db_df.shape[0]]
ticker_df = ticker_df.iloc[to_store.shape[0]:]
to_store = db_df.append(to_store)
# to_store = pd.concat([db_df, to_store], axis=0)
file.truncate(0)
file.seek(0)
pickle.dump(to_store, file)
except (FileExistsError, FileNotFoundError):
with open(f'{self.cache_path}/{key}_{self.cache_pickle_no_map[key]}.pickle',
'wb') as file:
# to_store = ticker_df.iloc[:min(self._per_ticker_max_cache, ticker_df.shape[0])]
to_store = ticker_df.iloc[:self._per_ticker_max_cache]
ticker_df = ticker_df.iloc[to_store.shape[0]:]
pickle.dump(to_store, file)
if to_store.shape[0] == self._per_ticker_max_cache:
self.cache_pickle_no_map[key] += 1
with open(f'{self.cache_path}/{key}_{self.cache_pickle_no_map[key]}.pickle',
'wb') as file:
pickle.dump(
|
pd.DataFrame()
|
pandas.DataFrame
|
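# A hedged, simplified sketch of the cache-spill idea in add_cache above: keep
# the newest rows in memory and pickle the oldest slice out once a row cap is
# hit (the cap, ratio and file name below are illustrative, not the real ones).
import pickle
import pandas as pd

def spill_oldest(cache: pd.DataFrame, path: str,
                 max_rows: int = 1000, drop_ratio: float = 0.3) -> pd.DataFrame:
    """Move the oldest rows of `cache` into a pickle file and return the rest."""
    if cache.shape[0] < max_rows:
        return cache
    cache = cache.sort_values("datetime")
    n_drop = int(max_rows * drop_ratio)
    with open(path, "wb") as fh:
        pickle.dump(cache.iloc[:n_drop], fh)
    return cache.iloc[n_drop:]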
"""PyQT GUI offering remote monitoring and control of experiment execution."""
import sys
import json
import socket
import queue
import numpy as np
import pandas as pd
import matplotlib as mpl
from matplotlib.artist import Artist
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
from PyQt5.QtCore import Qt, QTimer
from PyQt5.QtWidgets import (QMainWindow, QWidget,
QSlider, QPushButton, QLabel,
QVBoxLayout, QHBoxLayout)
from . import clientserver
from .ext.bunch import Bunch
class RemoteApp(QMainWindow):
def __init__(self, host, trial_app=True):
# Avoid Abort Trap when exception is raised in Python code
# Otherwise even simple problems will be impossible to debug
# The downside is that we have to manually exit out of the GUI
# (and maybe sometimes will be stuck?).
# But at least we can see the exception trace!
def no_abort(a, b, c):
sys.__excepthook__(a, b, c)
sys.excepthook = no_abort
QMainWindow.__init__(self, None)
self.setWindowTitle("Visigoth Remote")
self.screen_q = queue.Queue()
self.param_q = queue.Queue()
self.trial_q = queue.Queue()
self.cmd_q = queue.Queue()
self.poll_dur = 20
self.host = host
self.client = None
# Initialize the parameters and eyeopts
# This is just one example of how this division is unclear
# but it gets more obvious later
self.p = Bunch()
self.eyeopt = Bunch(x_offset=0, y_offset=0, fix_window=3)
self.local_eyeopt = Bunch(x_offset=0, y_offset=0, fix_window=3)
self.main_frame = QWidget()
self.gaze_app = GazeApp(self)
if trial_app:
self.trial_app = TrialApp(self)
else:
self.trial_app = None
self.initialize_layout()
self.initialize_timers()
def poll(self):
# Ensure connection to the server
if self.client is None:
self.initialize_client()
# Ensure that we can animate the gaze
if self.gaze_app.axes_background is None:
self.gaze_app.initialize_animation()
# Get the most recent gaze position
# Previously we showed a "trail" of gaze positions rather
# than just one, which looked pretty and is more informative.
# It is a bit tricker so I am skipping for the moment to get things
# running, but worth revisiting.
screen_data = None
while True:
try:
screen_data = json.loads(self.screen_q.get(block=False))
except queue.Empty:
break
if screen_data is not None:
self.gaze_app.update_screen(screen_data)
if self.trial_app is not None:
try:
trial_data = self.trial_q.get(block=False)
self.trial_app.update_figure(trial_data)
except queue.Empty:
pass
# Update the GazeApp GUI elements
self.gaze_app.update_gui()
def initialize_client(self):
try:
# Boot up the client thread
self.client = clientserver.SocketClientThread(self)
self.client.start()
# Ask the server for the params it is currently using
self.cmd_q.put(self.client.PARAM_REQUEST)
params = json.loads(self.param_q.get())
self.p.update(params)
# Update our understanding of the fix window size
self.eyeopt["fix_window"] = self.p.fix_window
self.local_eyeopt["fix_window"] = self.p.fix_window
self.gaze_app.sliders["fix_window"].value = self.p.fix_window
# Initialize the stimulus artists in the gaze window
# This had to be deferred until we knew the active params
self.gaze_app.initialize_stim_artists()
except socket.error:
pass
def initialize_layout(self):
main_hbox = QHBoxLayout()
main_hbox.addLayout(self.gaze_app.layout)
if self.trial_app is not None:
main_hbox.addLayout(self.trial_app.layout)
self.main_frame.setLayout(main_hbox)
self.setCentralWidget(self.main_frame)
def initialize_timers(self):
self.timer = QTimer(self)
self.timer.timeout.connect(self.poll)
self.timer.start(self.poll_dur)
class GazeApp(object):
"""Component of the Remote GUI that monitors/controls eyetracking."""
def __init__(self, remote_app):
self.remote_app = remote_app
self.p = remote_app.p
self.eyeopt = remote_app.eyeopt
self.local_eyeopt = remote_app.local_eyeopt
fig, ax = self.initialize_figure()
self.fig = fig
self.ax = ax
self.screen_canvas = FigureCanvasQTAgg(fig)
self.screen_canvas.setParent(remote_app.main_frame)
update_button = QPushButton("Update")
update_button.clicked.connect(self.update_eyeopt)
reset_button = QPushButton("Reset")
reset_button.clicked.connect(self.reset_eyeopt)
self.buttons = Bunch(
update=update_button,
reset=reset_button
)
self.sliders = Bunch(
x_offset=ParamSlider(self, "x offset", (-4, 4)),
y_offset=ParamSlider(self, "y offset", (-4, 4)),
fix_window=ParamSlider(self, "fix window", (0, 6))
)
self.initialize_layout()
# ---- Initialization methods
def initialize_figure(self):
"""Set up the basic aspects of the matplotlib screen figure."""
fig = Figure((5, 5), dpi=100, facecolor="white")
ax = fig.add_subplot(111)
ax.set(xlim=(-10, 10),
ylim=(-10, 10),
aspect="equal")
major_ticks = [-10, -5, 0, 5, 10]
ax.set_xticks(major_ticks)
ax.set_yticks(major_ticks)
minor_ticks = np.linspace(-10, 10, 21)
ax.set_xticks(minor_ticks, minor=True)
ax.set_yticks(minor_ticks, minor=True)
grid_kws = dict(which="minor", lw=.5, ls="-", c=".8")
ax.xaxis.grid(True, **grid_kws)
ax.yaxis.grid(True, **grid_kws)
self.axes_background = None
return fig, ax
def initialize_stim_artists(self):
"""Set up the artists that represent stimuli and gaze location."""
gaze = mpl.patches.Circle((0, 0),
radius=.3,
facecolor="#4c72b0",
linewidth=0,
animated=True)
fix = Bunch(
point=mpl.patches.Circle((0, 0),
radius=.15,
facecolor=".1",
linewidth=0,
animated=True),
window=mpl.patches.Circle((0, 0),
radius=self.eyeopt.fix_window,
facecolor="none",
linestyle="dashed",
edgecolor=".3",
animated=True)
)
targets = []
if "target_pos" in self.p:
for pos in self.p.target_pos:
point = mpl.patches.Circle(pos,
.3,
facecolor=".1",
linewidth=0,
animated=True)
window = mpl.patches.Circle(pos,
self.p.target_window,
facecolor="none",
linestyle="dashed",
edgecolor=".3",
animated=True)
targets.extend([point, window])
self.plot_objects = Bunch(fix=fix, gaze=gaze, targets=targets)
self.plot_objects.update(self.create_stim_artists())
for _, stim in self.plot_objects.items():
self.add_artist(self.ax, stim)
def initialize_layout(self):
"""Set up the basic layout of the PyQT GUI."""
controls = QHBoxLayout()
for key in ["x_offset", "y_offset", "fix_window"]:
s = self.sliders[key]
vbox = QVBoxLayout()
vbox.addWidget(s.label)
vbox.addWidget(s.slider)
vbox.setAlignment(s.slider, Qt.AlignVCenter)
controls.addLayout(vbox)
vbox = QVBoxLayout()
vbox.addWidget(self.buttons["update"])
vbox.addWidget(self.buttons["reset"])
controls.addLayout(vbox)
vbox = QVBoxLayout()
vbox.addWidget(self.screen_canvas)
vbox.addLayout(controls)
self.layout = vbox
def initialize_animation(self):
# TODO this happens once; we may want to check for a resize
# on every draw and recapture the background then, otherwise
# things will look screwy if the app is resized after the run starts
self.fig.canvas.draw()
ax_bg = self.fig.canvas.copy_from_bbox(self.ax.bbox)
self.axes_background = ax_bg
# ----- Study-specific functions
def create_stim_artists(self):
"""Define additional matplotlib artists to represent stimuli.
Returns
-------
stims : dict
The keys in this dictionary should correspond to the server-side
stimulus names (i.e. what you define in `create_stimuli`.
The values should be either a single matplotlib artist, a list
of artists, or a dict mapping arbitrary artist subcomponent
names to artists.
"""
return dict()
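# Illustrative (hedged) overload for a study with one extra "cue" stimulus;
# the dict key must match the server-side stimulus name:
#
#   def create_stim_artists(self):
#       cue = mpl.patches.Circle((0, 0), radius=.5, facecolor="#c44e52",
#                                linewidth=0, animated=True)
#       return dict(cue=cue)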
# ----- Live GUI methods
def add_artist(self, ax, obj):
"""Add either each artist in an iterable or a single artist."""
if isinstance(obj, list):
for artist in obj:
if isinstance(artist, Artist):
ax.add_artist(artist)
elif isinstance(obj, dict):
for _, artist in obj.items():
if isinstance(artist, Artist):
ax.add_artist(artist)
else:
if isinstance(obj, Artist):
ax.add_artist(obj)
def draw_artist(self, ax, obj):
"""Draw either each artist in an iterable or a single artist."""
if isinstance(obj, list):
for artist in obj:
if isinstance(artist, Artist):
ax.draw_artist(artist)
elif isinstance(obj, dict):
for _, artist in obj.items():
if isinstance(artist, Artist):
ax.draw_artist(artist)
else:
if isinstance(obj, Artist):
ax.draw_artist(obj)
def update_screen(self, screen_data):
"""Re-draw the figure to show current gaze and what's on the screen."""
# Update gaze position
gaze = np.array(screen_data["gaze"])
offsets = np.array([self.local_eyeopt["x_offset"],
self.local_eyeopt["y_offset"]])
gaze += offsets
self.plot_objects.gaze.center = gaze
# Update fix window size
self.plot_objects.fix.window.radius = self.local_eyeopt["fix_window"]
# Draw stimuli on the screen
self.fig.canvas.restore_region(self.axes_background)
self.ax.draw_artist(self.plot_objects["gaze"])
for stim, pos in screen_data["stims"].items():
if stim in self.plot_objects:
# TODO This lets us move stimulus objects around in the gaze
# app, but it's limited to Psychopy objects with a `pos`
# attribute and matplotlib objects with a `center` attribute.
# It would be nice if this were more flexible, but it's not
# trivial to link arbitrary psychopy attributes to arbitrary
# matplotlib attributes. Maybe this mapping could be defined
# somehow on our versions of the Psychopy objects?
# Punting on this for now -- it seems to work ok and the
# GazeApp display is intended to be pretty minimal anyway.
if pos is not None:
self.plot_objects[stim].center = pos
self.draw_artist(self.ax, self.plot_objects[stim])
self.screen_canvas.blit(self.ax.bbox)
def update_gui(self):
"""Sync the GUI elements with the current values."""
for name, slider in self.sliders.items():
if self.local_eyeopt[name] != self.eyeopt[name]:
slider.label.setStyleSheet("color: red")
else:
slider.label.setStyleSheet("color: black")
def update_eyeopt(self):
"""Method to trigger a parameter upload; triggered by a button."""
self.remote_app.param_q.put(json.dumps(self.local_eyeopt))
self.eyeopt.update(self.local_eyeopt)
def reset_eyeopt(self):
"""Method to reset sliders to original value without uploading."""
for name, obj in self.sliders.items():
obj.value = self.eyeopt[name]
self.local_eyeopt.update(self.eyeopt)
class TrialApp(object):
"""Component of the Remote GUI that shows data from each trial."""
def __init__(self, remote_app):
self.remote_app = remote_app
fig, axes = self.initialize_figure()
fig_canvas = FigureCanvasQTAgg(fig)
fig_canvas.setParent(remote_app.main_frame)
self.fig = fig
self.axes = axes
self.fig_canvas = fig_canvas
self.trial_data = []
vbox = QVBoxLayout()
vbox.addWidget(fig_canvas)
self.layout = vbox
# ---- Study-specific methods
# Both of these methods can be overloaded by defining a remote.py
# module in your study directory. The default is to show a simple
# summary of when the subject responded, their accuracy, and their RT.
# However, note that the remote.py file should define
# `initialize_trial_figure` and `update_trial_figure`, not the names here.
def initialize_figure(self):
"""Set up the figure and axes for trial data.
This method can be overloaded in a study-specific remote.py file
if you want a more complicated figure than this basic example.
"""
# Note that we do not use the matplotlib.pyplot function, but
# rather create the Figure object directly.
fig = Figure((5, 5), dpi=100, facecolor="white")
axes = [fig.add_subplot(3, 1, i) for i in range(1, 4)]
axes[0].set(ylim=(-.1, 1.1),
yticks=[0, 1],
yticklabels=["No", "Yes"],
ylabel="Responded")
axes[1].set(ylim=(-.1, 1.1),
yticks=[0, 1],
yticklabels=["No", "Yes"],
ylabel="Correct")
axes[2].set(ylim=(0, None),
xlabel="RT (s)")
fig.subplots_adjust(.15, .125, .95, .95)
return fig, axes
def update_figure(self, trial_data):
"""Change the trial data figure with data from a new trial.
This method can be overloaded in a study-specific remote.py file
if you want a more complicated figure than this basic example.
Parameters
----------
trial_data : serialized object
The data has whatever format is defined in the server-side
`Experiment.serialize_trial_info` method. By default this is
a pandas.Series in json, but it can be made study specific
if you need a more complex representation of each trial's data.
"""
# Note that we need to handle deserialization here
# This allows support for study-specific formats of trial_data.
# The easiest thing to do is to have it be a Pandas Series.
trial_data =
|
pd.read_json(trial_data, typ="series")
|
pandas.read_json
|
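# A small round-trip sketch of the default trial_data format described in
# update_figure above: the server serializes a pandas.Series to JSON and the
# remote reads it back with typ="series" (the field names are illustrative).
import pandas as pd

sent = pd.Series({"responded": True, "correct": 1, "rt": 0.42}).to_json()
trial = pd.read_json(sent, typ="series")
print(trial["rt"])  # 0.42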
import os
import pandas
import numpy as np
import nibabel as ni
import itertools
from glob import glob
import statsmodels.distributions.empirical_distribution as ed
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from scipy import stats
from scipy.io import savemat,loadmat
from nilearn import input_data, image
from matplotlib import mlab
from sklearn.utils import resample
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import MinMaxScaler
from statsmodels.sandbox.stats.multicomp import multipletests
#import matlab.engine
import sys
#eng = matlab.engine.start_matlab()
#eng.addpath('../',nargout=0)
def Extract_Values_from_Atlas(files_in, atlas,
mask = None, mask_threshold = 0,
blocking = 'one_at_a_time',
labels = [], sids = [],
output = None,):
'''
This function will extract mean values from a set of images for
each ROI from a given atlas. Returns a Subject x ROI pandas
DataFrame (and csv file if output argument is set to a path).
Use blocking argument according to memory capacity of your
computer vis-a-vis memory requirements of loading all images.
files_in: determines which images to extract values from. Input
can be any of the following:
-- a list of paths
-- a path to a directory containing ONLY files to extract from
-- a search string (with wild card) that would return all
desired images. For example, doing ls [files_in] in a terminal
would list all desired subjects
-- a 4D Nifti image
**NOTE** be aware of the order of file input, which relates to
other arguments
atlas: Path to an atlas, or a Nifti image or np.ndarry of desired
atlas. Or, if doing native space analysis, instead, supply a list
of paths to atlases that match each subject.
NOTE: In this case, The order of this list should be the same
order as subjects in files_in
mask: Path to a binary inclusive mask image. Script will set all
values to 0 for every image where mask voxels = 0. This process
is done before extraction. If doing a native space analysis,
instead, supply a list of paths to masks that match each subject
and each atlas.
mask_threshold: An integer that denotes the minimum acceptable
size (in voxels) of an ROI after masking. This is to prevent
tiny ROIs resulting from conservative masks that might have
spuriously high or low mean values due to the low amount of
information within.
blocking: loading all images to memory at once may not be possible
depending on your computer. Acceptable arguments are:
-- 'one_at_a_time': will extract values from each image
independently. Recommended for machines with poor memory
capacity. Required for native space extraction.
-- 'all_at_once': loads all images into memory at once.
Provides a slight speed-up for faster machines over
one_at_a_time, but is probably not faster than batching (see
below). Only recommended for smaller datasets.
** WARNING ** Not recommended on very large datasets. Will
crash computers with poor memory capacity.
-- any integer: determines the number of images to be read to
memory at once. Recommended for large datasets.
labels: a list of string labels that represent the names of the
ROIs from atlas.
NOTE: ROIs are read consecutively from lowest to highest, and
labels *must* match that order
Default argument [] will use "ROI_x" for each ROI, where X
corresponds to the actual ROI integer label
sids: a list of subject IDs in the same order as files_in. Default
argument [] will list subjects with consecutive integers.
output: if you wish the resulting ROI values to be written to file,
provide a FULL path. Otherwise, leave as None (matrix will be
returned)
'''
if type(blocking) == str and blocking not in ['all_at_once','one_at_a_time']:
raise IOError('blocking only accepts integers or arguments of "all_at_once" or "one_at_a_time"')
if type(atlas) == list:
if blocking != 'one_at_a_time':
print('WARNING: you have passed a list of atlases but blocking is not set to one_at_a_time')
print('Lists of atlases are for native space situations where each subject has their own atlas')
print('If you want to test multiple atlases, run the script multiple times with different atlases')
raise IOError('you have passed a list of atlases but blocking is not set to one_at_a_time')
if type(mask) != type(None):
if type(atlas) != type(mask):
raise IOError('for masking, list of masks must be passed that equals length of atlas list')
elif type(mask) == list:
if len(atlas) != len(mask):
raise IOError('list of atlases (n=%s) and masks (n=%s) are unequal'%(len(atlas),
len(mask)))
if type(atlas) != list:
if type(atlas) == str:
try:
atl = ni.load(atlas).get_data()
except:
raise IOError('could not find an atlas at the specified location: %s'%atlas)
elif type(atlas) == ni.nifti1.Nifti1Image:
atl = atlas.get_data()
elif type(atlas) == np.ndarray:
atl = atlas
else:
print('could not recognize atlas filetype. Please provide a path, a NiftiImage object, or a numpy ndarray')
raise IOError('atlas type not recognized')
if blocking == 'all_at_once':
i4d = load_data(files_in, return_images=True).get_data()
if i4d.shape[:-1] != atl.shape:
raise IOError('image dimensions do not match atlas dimensions')
if type(mask) != type(None):
print('masking...')
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
i4d = mask_image_data(i4d, mask_data)
if len(sids) == 0:
sids = range(i4d.shape[-1])
print('extracting values from atlas')
roi_vals = generate_matrix_from_atlas(i4d, atl, labels, sids)
else:
image_paths = load_data(files_in, return_images = False)
if blocking == 'one_at_a_time':
catch = []
for i,image_path in enumerate(image_paths):
if len(sids) > 0:
sid = [sids[i]]
else:
sid = [i]
print('working on subject %s'%sid[0])
img = ni.load(image_path).get_data()
try:
assert img.shape == atl.shape, 'fail'
except:
print('dimensions for subject %s (%s) image did not match atlas dimensions (%s)'%(sid,
img.shape,
atl.shape))
print('skipping subject %s'%sid[0])
continue
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
img = mask_image_data(img, mask_data)
f_mat = generate_matrix_from_atlas(img, atl, labels, sid)
catch.append(f_mat)
roi_vals = pandas.concat(catch)
elif type(blocking) == int:
block_size = blocking
if len(image_paths)%block_size == 0:
blocks = int(len(image_paths)/block_size)
remainder = False
else:
blocks = int((len(image_paths)/blocking) + 1)
remainder = True
catch = []
count = 0
if type(mask) != type(None):
mask_data = ni.load(mask).get_data()
mask_data = mask_atlas(mask_data, atl, mask_threshold)
for block in range(blocks):
if block == (blocks - 1) and remainder:
print('working on final batch of subjects')
sub_block = image_paths[count:]
else:
                    print('working on batch %s of %s (%s subjects per batch)'%((block+1), blocks, block_size))
sub_block = image_paths[count:(count+block_size)]
i4d = load_data(sub_block, return_images = True).get_data()
if i4d.shape[:-1] != atl.shape:
                    raise IOError('image dimensions (%s) do not match atlas dimensions (%s)'%(i4d.shape[:-1],
                                                                                              atl.shape))
if type(mask) != type(None):
if len(mask_data.shape) == 4:
tmp_mask = mask_data[:,:,:,:block_size]
else:
tmp_mask = mask_data
i4d = mask_image_data(i4d, tmp_mask)
if block == (blocks - 1) and remainder:
if len(sids) == 0:
sids_in = range(count,i4d.shape[-1])
else:
sids_in = sids[count:]
else:
if len(sids) == 0:
sids_in = range(count,(count+block_size))
else:
sids_in = sids[count:(count+block_size)]
f_mat = generate_matrix_from_atlas(i4d, atl, labels, sids_in)
catch.append(f_mat)
count += block_size
roi_vals =
|
pandas.concat(catch)
|
pandas.concat
|
"""Collect and expose datasets for experiments."""
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as pl
import torch
import pandas as pd
from operator import itemgetter
import logging
import os
logging.basicConfig(
format="%(levelname)s:%(asctime)s:%(module)s:%(message)s", level=logging.INFO
)
logger = logging.getLogger(__name__)
MADLIBS_DATASETS = ["madlibs77k", "madlibs89k"]
TOX_DATASETS = ["tox_nonfuzz", "tox_fuzz"]
MISO_DATASETS = ["miso", "miso-ita-raw", "miso-ita-synt"]
MISOSYNT_DATASETS = ["miso_synt_test"]
MLMA_DATASETS = ["mlma"]
MLMA_RAW_DATASETS = ["mlma_en", "mlma_fr", "mlma_ar"]
AVAIL_DATASETS = (
MADLIBS_DATASETS
+ TOX_DATASETS
+ MISO_DATASETS
+ MISOSYNT_DATASETS
+ MLMA_DATASETS
)
def get_dataset_by_name(name: str, base_dir=None):
path = os.path.join(base_dir, name) if base_dir else name
train, dev, test = None, None, None
if name in MADLIBS_DATASETS:
test = Madlibs.build_dataset(path)
elif name in TOX_DATASETS:
test = Toxicity.build_dataset(path)
elif name in MISO_DATASETS:
if name == "miso-ita-synt":
test = MisoDataset.build_dataset(name, "test")
else:
train = MisoDataset.build_dataset(name, "train")
dev = MisoDataset.build_dataset(name, "dev")
test = MisoDataset.build_dataset(name, "test")
elif name in MISOSYNT_DATASETS:
test = MisoSyntDataset.build_dataset(name)
elif name in MLMA_RAW_DATASETS:
test = MLMARawDataset.build_dataset(name)
elif name in MLMA_DATASETS:
train = MLMADataset.build_dataset(split="train")
dev = MLMADataset.build_dataset(split="dev")
test = MLMADataset.build_dataset(split="test")
else:
raise ValueError(f"Can't recognize dataset name {name}")
return train, dev, test
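# Hedged usage sketch (assumes the corresponding .tsv files exist under ./data):
#   train, dev, test = get_dataset_by_name("miso")
#   test_loader = DataLoader(test, batch_size=32) if test is not None else None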
def get_tokenized_path(path: str):
base_dir, filename = os.path.dirname(path), os.path.basename(path)
return os.path.join(base_dir, f"{os.path.splitext(filename)[0]}.pt")
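# e.g. get_tokenized_path("./data/miso_test.tsv") returns "./data/miso_test.pt",
# presumably where a cached tokenization of that split would live.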
class MLMARawDataset(Dataset):
# DEPRECATED
"""Multilingual and Multi-Aspect Hate Speech Analysis"""
def __init__(self, path: str):
self.path = path
data = pd.read_csv(path)
# define the hate binary label
data["hate"] = 1
data.loc[data.sentiment == "normal", "hate"] = 0
data = data.loc[
data.sentiment.apply(lambda x: "normal" not in x or x == "normal")
]
self.data = data
self.texts = data["tweet"].tolist()
self.labels = data["hate"].astype(int).tolist()
self.tokenized_path = get_tokenized_path(path)
def __getitem__(self, idx):
return {"text": self.texts[idx], "label": self.labels[idx]}
def __len__(self):
return len(self.labels)
def get_texts(self):
return self.texts
def get_labels(self):
return self.labels
@classmethod
def build_dataset(cls, name: str):
if name == "mlma_en":
return cls(os.path.join("data", "hate_speech_mlma", f"en_dataset.csv"))
elif name == "mlma_fr":
return cls(os.path.join("data", "hate_speech_mlma", f"fr_dataset.csv"))
elif name == "mlma_ar":
return cls(os.path.join("data", "hate_speech_mlma", f"ar_dataset.csv"))
else:
raise ValueError("Name not recognized.")
class MLMADataset(Dataset):
def __init__(self, path: str):
self.path = path
data = pd.read_csv(path, sep="\t")
self.texts = data["tweet"].tolist()
self.labels = data["hate"].astype(int).tolist()
self.tokenized_path = get_tokenized_path(path)
def __getitem__(self, idx):
return {"text": self.texts[idx], "label": self.labels[idx]}
def __len__(self):
return len(self.labels)
def get_texts(self):
return self.texts
def get_labels(self):
return self.labels
@classmethod
def build_dataset(cls, split: str):
return cls(f"./data/mlma_{split}.tsv")
class MisoDataset(Dataset):
def __init__(self, path: str):
self.path = path
data = pd.read_csv(path, sep="\t")
self.texts = data["text"].tolist()
self.labels = data["misogynous"].astype(int).tolist()
self.tokenized_path = get_tokenized_path(path)
def __getitem__(self, idx):
return {"text": self.texts[idx], "label": self.labels[idx]}
def __len__(self):
return len(self.labels)
def get_texts(self):
return self.texts
def get_labels(self):
return self.labels
@classmethod
def build_dataset(cls, name: str, split: str):
if name == "miso":
return cls(f"./data/miso_{split}.tsv")
elif name == "miso-ita-raw":
return cls(f"./data/AMI2020_{split}_raw.tsv")
elif name == "miso-ita-synt":
return cls(f"./data/AMI2020_{split}_synt.tsv")
else:
raise ValueError("Type not recognized.")
class MisoSyntDataset(Dataset):
def __init__(self, path: str):
self.path = path
data = pd.read_csv(path, sep="\t", header=None, names=["Text", "Label"])
self.texts = data["Text"].tolist()
self.labels = data["Label"].astype(int).tolist()
self.tokenized_path = get_tokenized_path(path)
def __getitem__(self, idx):
return {"text": self.texts[idx], "label": self.labels[idx]}
def __len__(self):
return len(self.labels)
def get_texts(self):
return self.texts
def get_labels(self):
return self.labels
@classmethod
def build_dataset(cls, type: str):
if type not in MISOSYNT_DATASETS:
raise ValueError("Type not recognized.")
else:
return cls(f"./data/miso_synt_test.tsv")
class Madlibs(Dataset):
def __init__(self, path: str):
self.path = path
data =
|
pd.read_csv(path)
|
pandas.read_csv
|
##############################
## COVID_county.py ##
## <NAME> ##
## Version 2021.07.30 ##
##############################
import os
import sys
import warnings
import datetime as dtt
import numpy as np
import scipy as sp
import scipy.signal as signal
import pandas as pd
import COVID_common as ccm
################################################################################
## Classes - County breakdown
class CountySheet(ccm.Template):
def __init__(self, verbose=True):
self.coltag_disease = '確定病名'
self.coltag_report_date = '個案研判日'
self.coltag_county = '縣市'
self.coltag_village = '鄉鎮'
self.coltag_gender = '性別'
self.coltag_imported = '是否為境外移入'
self.coltag_age = '年齡層'
self.coltag_nb_cases = '確定病例數'
name = '%sraw_data/COVID-19_in_Taiwan_raw_data_county_age.csv' % ccm.DATA_PATH
data = ccm.loadCsv(name, verbose=verbose)
self.data = data
self.n_total = data[self.coltag_nb_cases].astype(int).sum()
self.county_key_list = [
'Keelung', 'Taipei', 'New_Taipei', 'Taoyuan', 'Hsinchu', 'Hsinchu_C', 'Miaoli',
'Taichung', 'Changhua', 'Nantou', 'Yunlin',
'Chiayi' ,'Chiayi_C', 'Tainan', 'Kaohsiung', 'Pingtung',
'Yilan', 'Hualien', 'Taitung',
'Penghu', 'Kinmen', 'Matsu',
]
self.age_key_list = [
'0-4', '5-9', '10-14', '15-19', '20-24', '25-29', '30-34', '35-39',
'40-44', '45-49', '50-54', '55-59', '60-64', '65-69', '70+',
]
if verbose:
print('N_total = %d' % self.n_total)
return
def getReportDate(self):
report_date_list = []
for report_date in self.getCol(self.coltag_report_date):
yyyy = report_date[:4]
mm = report_date[5:7]
dd = report_date[8:]
report_date = '%s-%s-%s' % (yyyy, mm, dd)
report_date_list.append(report_date)
return report_date_list
def getCounty(self):
county_list = []
for county in self.getCol(self.coltag_county):
try:
county_list.append(ccm.COUNTY_DICT_2[county])
except KeyError:
print('County, %s' % county)
county_list.append('unknown')
return county_list
def getVillage(self):
return self.getCol(self.coltag_village)
def getGender(self):
gender_list = [1 if gender == '男' else 2 for gender in self.getCol(self.coltag_gender)]
return gender_list
def getImported(self):
imported_list = [1 if imported == '是' else 0 for imported in self.getCol(self.coltag_imported)]
return imported_list
def getAge(self):
age_list = []
for age in self.getCol(self.coltag_age):
if age in ['0', '1', '2', '3', '4']:
age_list.append('0-4')
else:
age_list.append(age)
return age_list
def getNbCases(self):
return self.getCol(self.coltag_nb_cases).astype(int)
def increment_localCasePerCounty(self):
report_date_list = self.getReportDate()
county_list = self.getCounty()
nb_cases_list = self.getNbCases()
## Initialize stock
col_tag_list = ['total'] + self.county_key_list
stock = ccm.initializeStock_dailyCounts(col_tag_list)
ind_max = 0
## Loop over series
for report_date, county, nb_cases in zip(report_date_list, county_list, nb_cases_list):
if 'unknown' == county:
continue
ind = ccm.indexForOverall(report_date)
ind_max = max(ind_max, ind+1)
try:
stock['total'][ind] += nb_cases
stock[county][ind] += nb_cases
except IndexError: ## If NaN
pass
ind_today = ccm.getTodayOrdinal() - ccm.ISODateToOrd(ccm.ISO_DATE_REF)
ind = max(ind_max, ind_today-1) ## Take the max of data date & today
stock = {k: v[:ind] for k, v in stock.items()}
## Moving average
for col_tag in col_tag_list:
key = col_tag + '_avg'
stock[key] = ccm.makeMovingAverage(stock[col_tag])
return stock
def makeReadme_localCasePerCounty(self, page):
key = 'local_case_per_county'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: report date')
stock.append('- Column')
stock.append(' - `date`')
    stock.append(' - `total`: nationwide')
stock.append(' - `Keelung` to `Matsu`: individual city or county')
stock.append(' - `Hsinchu`: Hsinchu county')
stock.append(' - `Hsinchu_C`: Hsinchu city')
stock.append(' - `Chiayi`: Chiayi county')
stock.append(' - `Chiayi_C`: Chiayi city')
stock.append(' - `*_avg`: 7-day moving average of `*`')
ccm.README_DICT[page][key] = stock
return
def saveCsv_localCasePerCounty(self):
stock = self.increment_localCasePerCounty()
stock = pd.DataFrame(stock)
stock = ccm.adjustDateRange(stock)
for page in ccm.PAGE_LIST:
data = ccm.truncateStock(stock, page)
## Save
name = '%sprocessed_data/%s/local_case_per_county.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data)
self.makeReadme_localCasePerCounty(page)
return
def increment_caseByAge(self):
report_date_list = self.getReportDate()
age_list = self.getAge()
nb_cases_list = self.getNbCases()
## Initialize stock dict
case_hist = {age: 0 for age in self.age_key_list}
stock = [case_hist.copy() for i in range(13)]
stock_dict = ccm.initializeStockDict_general(stock)
## Add 12 empty hist for overall
for i in range(12):
stock_dict[ccm.PAGE_OVERALL].append(case_hist.copy())
## Loop over series
for report_date, age, nb_cases in zip(report_date_list, age_list, nb_cases_list):
index_list = ccm.makeIndexList(report_date)
for ind, page, stock in zip(index_list, stock_dict.keys(), stock_dict.values()):
if ind != ind: ## If NaN
continue
stock[0][age] += nb_cases
if ccm.PAGE_LATEST == page:
lookback_week = (ind - ccm.NB_LOOKBACK_DAYS) // 7 ## ind - ccm.NB_LOOKBACK_DAYS in [-90, -1]; this will be in [-13, -1]
if lookback_week >= -12:
stock[-lookback_week][age] += nb_cases
elif ccm.PAGE_OVERALL == page:
yyyy = int(report_date[:4])
mm = int(report_date[5:7])
yyyymm = mm + 12 * (yyyy - 2020)
stock[yyyymm][age] += nb_cases
else:
mm = int(report_date[5:7])
stock[mm][age] += nb_cases
return stock_dict
def makeReadme_caseByAge(self, page):
key = 'case_by_age'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: age range')
stock.append('- Column')
stock.append(' - `age`')
if page == ccm.PAGE_LATEST:
stock.append(' - `total`: last 90 days')
stock.append(' - `week_-N`: between 7*`N`-7 & 7*`N`-1 days ago')
elif page == ccm.PAGE_OVERALL:
stock.append(' - `total`: overall stats')
stock.append(' - `MMM_YYYY`: during month `MMM` of year `YYYY`')
elif page == ccm.PAGE_2020 or page == ccm.PAGE_2021:
stock.append(' - `total`: all year %s' % page)
stock.append(' - `MMM`: during month `MMM`')
ccm.README_DICT[page][key] = stock
return
def saveCsv_caseByAge(self):
stock_dict = self.increment_caseByAge()
## Loop over page
for page, stock in stock_dict.items():
if ccm.PAGE_LATEST == page:
label_list = ['total'] + ['week_-%d' % (i+1) for i in range(12)]
elif ccm.PAGE_OVERALL == page:
label_list = ['total'] + ['%s_2020' % (ccm.numMonthToAbbr(i+1)) for i in range(12)] + ['%s_2021' % (ccm.numMonthToAbbr(i+1)) for i in range(12)]
else:
label_list = ['total'] + [ccm.numMonthToAbbr(i+1) for i in range(12)]
data = {'age': self.age_key_list}
data.update({label: case_hist.values() for label, case_hist in zip(label_list, stock)})
data = pd.DataFrame(data)
## Save
name = '%sprocessed_data/%s/case_by_age.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data)
self.makeReadme_caseByAge(page)
return
def increment_incidenceMap(self):
report_date_list = self.getReportDate()
county_list = self.getCounty()
nb_cases_list = self.getNbCases()
## Initialize stock dict
county_key_list = ['total'] + self.county_key_list
case_hist = {county: 0 for county in county_key_list}
stock = [case_hist.copy() for i in range(13)]
stock_dict = ccm.initializeStockDict_general(stock)
## Add 12 empty hist for overall
for i in range(12):
stock_dict[ccm.PAGE_OVERALL].append(case_hist.copy())
## Loop over series
for report_date, county, nb_cases in zip(report_date_list, county_list, nb_cases_list):
if 'unknown' == county:
continue
index_list = ccm.makeIndexList(report_date)
for ind, page, stock in zip(index_list, stock_dict.keys(), stock_dict.values()):
if ind != ind:
continue
stock[0]['total'] += nb_cases
stock[0][county] += nb_cases
if ccm.PAGE_LATEST == page:
lookback_week = (ind - ccm.NB_LOOKBACK_DAYS) // 7 ## ind - ccm.NB_LOOKBACK_DAYS in [-90, -1]; this will be in [-13, -1]
if lookback_week >= -12:
stock[-lookback_week]['total'] += nb_cases
stock[-lookback_week][county] += nb_cases
elif ccm.PAGE_OVERALL == page:
yyyy = int(report_date[:4])
mm = int(report_date[5:7])
yyyymm = mm + 12 * (yyyy - 2020)
stock[yyyymm]['total'] += nb_cases
stock[yyyymm][county] += nb_cases
else:
mm = int(report_date[5:7])
stock[mm]['total'] += nb_cases
stock[mm][county] += nb_cases
return stock_dict
def makeReadme_incidenceMap(self, page):
key = 'incidence_map'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: city or county')
stock.append('- Column')
stock.append(' - `county`')
if page == ccm.PAGE_LATEST:
stock.append(' - `total`: last 90 days')
stock.append(' - `week_-N`: between 7*`N`-7 & 7*`N`-1 days ago')
elif page == ccm.PAGE_OVERALL:
stock.append(' - `total`: overall stats')
stock.append(' - `MMM_YYYY`: during month `MMM` of year `YYYY`')
elif page == ccm.PAGE_2020 or page == ccm.PAGE_2021:
stock.append(' - `total`: all year %s' % page)
stock.append(' - `MMM`: during month `MMM`')
ccm.README_DICT[page][key] = stock
key = 'incidence_map_label'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: city or county')
stock.append('- Column')
stock.append(' - `key`')
stock.append(' - `code`: unique code attributed to city or county by Ministry of Interior')
stock.append(' - `population`')
stock.append(' - `label`: label in English')
stock.append(' - `label_fr`: label in French (contains non-ASCII characters)')
stock.append(' - `label_zh`: label in Mandarin (contains non-ASCII characters)')
ccm.README_DICT[page][key] = stock
return
def saveCsv_incidenceMap(self):
stock_dict = self.increment_incidenceMap()
county_key_list = ['total'] + self.county_key_list
## Loop over page
for page, stock in stock_dict.items():
if ccm.PAGE_LATEST == page:
label_list = ['total'] + ['week_-%d' % (i+1) for i in range(12)]
elif ccm.PAGE_OVERALL == page:
label_list = ['total'] + ['%s_2020' % (ccm.numMonthToAbbr(i+1)) for i in range(12)] + ['%s_2021' % (ccm.numMonthToAbbr(i+1)) for i in range(12)]
else:
label_list = ['total'] + [ccm.numMonthToAbbr(i+1) for i in range(12)]
## Data for population & label
inv_dict = {dict_['tag']: code for code, dict_ in ccm.COUNTY_DICT.items()}
code_list = [inv_dict[county] for county in county_key_list]
population = [ccm.COUNTY_DICT[code]['population'] for code in code_list]
label_list_en = [ccm.COUNTY_DICT[code]['label'][0] for code in code_list]
label_list_fr = [ccm.COUNTY_DICT[code]['label'][1] for code in code_list]
label_list_zh = [ccm.COUNTY_DICT[code]['label'][2] for code in code_list]
data_c = {'county': county_key_list}
data_c.update({label: case_hist.values() for label, case_hist in zip(label_list, stock)})
data_c = pd.DataFrame(data_c)
data_p = {'key': county_key_list, 'code': code_list, 'population': population, 'label': label_list_en, 'label_fr': label_list_fr, 'label_zh': label_list_zh}
data_p = pd.DataFrame(data_p)
## Save
name = '%sprocessed_data/%s/incidence_map.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data_c)
name = '%sprocessed_data/%s/incidence_map_label.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data_p)
self.makeReadme_incidenceMap(page)
return
def increment_incidenceEvolutionByCounty(self):
report_date_list = self.getReportDate()
county_list = self.getCounty()
nb_cases_list = self.getNbCases()
## Initialize stock
county_key_list = ['total'] + self.county_key_list
stock = ccm.initializeStock_dailyCounts(county_key_list)
## Loop over series
for report_date, county, nb_cases in zip(report_date_list, county_list, nb_cases_list):
if 'unknown' == county:
continue
ind = ccm.indexForOverall(report_date)
try:
stock[county][ind] += nb_cases
stock['total'][ind] += nb_cases
except IndexError:
pass
return stock
def smooth_incidenceEvolutionByCounty(self):
stock = self.increment_incidenceEvolutionByCounty()
population_dict = {county['tag']: county['population'] * 0.00001 for code, county in ccm.COUNTY_DICT.items()}
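    ## Note: population * 1e-5 expresses population in units of 100k, so
    ## (7-day moving average * 7) / population_dict[...] below gives weekly cases per 100k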
nb_lookback_days = 45
## Smooth
for county, nb_cases_arr in stock.items():
if 'date' == county:
stock[county] = nb_cases_arr[-nb_lookback_days:]
continue
nb_cases_arr = ccm.sevenDayMovingAverage(nb_cases_arr)
nb_cases_arr = nb_cases_arr[-nb_lookback_days:]
nb_cases_arr *= 7 / population_dict[county]
stock[county] = np.around(nb_cases_arr, decimals=2)
return stock
def makeReadme_incidenceEvolutionByCounty(self, page):
key = 'incidence_evolution_by_county'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: report date')
stock.append('- Column')
stock.append(' - `date`')
    stock.append(' - `total`: nationwide')
stock.append(' - `Keelung` to `Matsu`: individual city or county')
stock.append(' - `Hsinchu`: Hsinchu county')
stock.append(' - `Hsinchu_C`: Hsinchu city')
stock.append(' - `Chiayi`: Chiayi county')
stock.append(' - `Chiayi_C`: Chiayi city')
ccm.README_DICT[page][key] = stock
key = 'incidence_evolution_by_county_label'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: city or county')
stock.append('- Column')
stock.append(' - `key`')
stock.append(' - `label`: label in English')
stock.append(' - `label_fr`: label in French (contains non-ASCII characters)')
stock.append(' - `label_zh`: label in Mandarin (contains non-ASCII characters)')
ccm.README_DICT[page][key] = stock
return
def saveCsv_incidenceEvolutionByCounty(self):
stock = self.smooth_incidenceEvolutionByCounty()
data_r = pd.DataFrame(stock)
## Data for population & label
county_key_list = ['total'] + self.county_key_list
county_dict = {dict_['tag']: dict_ for dict_ in ccm.COUNTY_DICT.values()}
label_list_en = [county_dict[county]['label'][0] for county in county_key_list]
label_list_fr = [county_dict[county]['label'][1] for county in county_key_list]
label_list_zh = [county_dict[county]['label'][2] for county in county_key_list]
data_l = {'key': county_key_list, 'label': label_list_en, 'label_fr': label_list_fr, 'label_zh': label_list_zh}
data_l = pd.DataFrame(data_l)
page = ccm.PAGE_LATEST
name = '%sprocessed_data/%s/incidence_evolution_by_county.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data_r)
name = '%sprocessed_data/%s/incidence_evolution_by_county_label.csv' % (ccm.DATA_PATH, page)
ccm.saveCsv(name, data_l)
self.makeReadme_incidenceEvolutionByCounty(page)
return
def increment_incidenceEvolutionByAge(self):
report_date_list = self.getReportDate()
age_list = self.getAge()
nb_cases_list = self.getNbCases()
## Reverse
age_key_list = ['total'] + self.age_key_list[::-1]
## Initialize stock
stock = ccm.initializeStock_dailyCounts(age_key_list)
## Loop over series
for report_date, age, nb_cases in zip(report_date_list, age_list, nb_cases_list):
ind = ccm.indexForOverall(report_date)
try:
stock[age][ind] += nb_cases
stock['total'][ind] += nb_cases
except IndexError:
pass
return stock
def smooth_incidenceEvolutionByAge(self):
stock = self.increment_incidenceEvolutionByAge()
nb_lookback_days = 45
## Get year & adjust
year = dtt.datetime.today().isoformat()[:4]
year = str(int(year) - 1)
population_dict = {age: population * 0.00001 for age, population in ccm.AGE_DICT_2[year].items()}
population_dict['total'] = ccm.COUNTY_DICT['00000']['population'] * 0.00001
## Minor modif
value = 0
for age in ['70-74', '75-79', '80-84', '85-89', '90-94', '95-99', '100+']:
value += population_dict.pop(age)
population_dict['70+'] = value
## Smooth
for age, nb_cases_arr in stock.items():
if 'date' == age:
stock[age] = nb_cases_arr[-nb_lookback_days:]
continue
nb_cases_arr = ccm.sevenDayMovingAverage(nb_cases_arr)
nb_cases_arr = nb_cases_arr[-nb_lookback_days:]
nb_cases_arr *= 7 / population_dict[age]
stock[age] = np.around(nb_cases_arr, decimals=2)
return stock
def makeReadme_incidenceEvolutionByAge(self, page):
key = 'incidence_evolution_by_age'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: report date')
stock.append('- Column')
stock.append(' - `date`')
stock.append(' - `total`: all ages')
stock.append(' - `70+` to `0-4`: age range')
ccm.README_DICT[page][key] = stock
key = 'incidence_evolution_by_age_label'
stock = []
stock.append('`%s.csv`' % key)
stock.append('- Row: age range')
stock.append('- Column')
stock.append(' - `key`')
stock.append(' - `label`: label in English')
stock.append(' - `label_fr`: label in French (contains non-ASCII characters)')
stock.append(' - `label_zh`: label in Mandarin (contains non-ASCII characters)')
ccm.README_DICT[page][key] = stock
return
def saveCsv_incidenceEvolutionByAge(self):
stock = self.smooth_incidenceEvolutionByAge()
data_r =
|
pd.DataFrame(stock)
|
pandas.DataFrame
|
# Copyright 2021 The WAX-ML Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import haiku as hk
import jax
import jax.numpy as jnp
import numpy as onp
import pytest
from wax.compile import jit_init_apply
from wax.modules.ewma import EWMA
from wax.modules.ewmvar import EWMVar
from wax.unroll import unroll
# Another implementation for checking
class EWMVar_v2(hk.Module):
"""Exponentially weighted variance.
    To calculate the variance we use the fact that Var(X) = Mean(x^2) - Mean(x)^2; internally
    we use the exponentially weighted means of x and x^2 to compute this.
Arguments:
alpha : The closer `alpha` is to 1 the more the statistic will adapt to recent values.
Attributes:
variance : The running exponentially weighted variance.
References
----------
[^1]: [<NAME>., 2009. Incremental calculation of weighted mean and variance. University of Cambridge, 4(11-5), pp.41-42.](https://fanf2.user.srcf.net/hermes/doc/antiforgery/stats.pdf) # noqa
"""
def __init__(self, alpha=0.5, adjust=True, name=None):
super().__init__(name=name)
self.alpha = alpha
self.adjust = adjust
def __call__(self, x):
mean = EWMA(self.alpha, self.adjust, initial_value=jnp.nan, name="mean")(x)
mean_square = EWMA(
self.alpha, self.adjust, initial_value=jnp.nan, name="mean_square"
)(x * x)
var = mean_square - mean ** 2
var = jnp.where(var < 0, 0.0, var)
return var
@pytest.mark.parametrize("dtype", ["float32", "float64"])
def test_init_and_first_step_var_float64(dtype):
from jax.config import config
if dtype == "float64":
config.update("jax_enable_x64", True)
else:
config.update("jax_enable_x64", False)
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(3,), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMVar(0.1, adjust=True)(x)
params, state = model.init(next(seq), x)
var, state = model.apply(params, state, next(seq), x)
assert var.dtype == jnp.dtype(dtype)
def test_run_var_vs_pandas_not_adjust():
from jax.config import config
config.update("jax_enable_x64", True)
import pandas as pd
seq = hk.PRNGSequence(42)
x = jax.random.normal(shape=(10, 3), key=next(seq), dtype=jnp.float64)
@jit_init_apply
@hk.transform_with_state
def model(x):
return EWMVar(0.1, adjust=False)(x)
var, state = unroll(model, return_final_state=True)(x)
var = pd.DataFrame(var)
@jit_init_apply
@hk.transform_with_state
def model2(x):
return EWMVar_v2(0.1, adjust=False)(x)
var2, state2 = unroll(model2, return_final_state=True)(x)
var2 =
|
pd.DataFrame(var2)
|
pandas.DataFrame
|
from pynwb import NWBFile, NWBHDF5IO, TimeSeries, ProcessingModule
from pynwb.core import MultiContainerInterface, NWBDataInterface
from scipy.stats import mode
from glob import glob
import numpy as np
import pandas as pd
import scipy.signal as signal
import scipy.interpolate as interpolate
import multiprocessing
import itertools
import os
import logging
logger = logging.getLogger(__name__)
class NWBDataset:
"""A class for loading/preprocessing data from NWB files for
the NLB competition
"""
def __init__(self, fpath, prefix='', split_heldout=True, skip_fields=[]):
"""Initializes an NWBDataset, loading data from
the indicated file(s)
Parameters
----------
fpath : str
Either the path to an NWB file or to a directory
containing NWB files
prefix : str, optional
A pattern used to filter the NWB files in directory
by name. By default, prefix='' loads all .nwb files in
the directory. Please refer to documentation for
the `glob` module for more details:
https://docs.python.org/3/library/glob.html
split_heldout : bool, optional
Whether to load heldin units and heldout units
to separate fields or not, by default True
skip_fields : list, optional
List of field names to skip during loading,
which may be useful if memory is an issue.
Field names must match the names automatically
assigned in the loading process. Spiking data
can not be skipped. Field names in the list
that are not found in the dataset are
ignored
"""
fpath = os.path.expanduser(fpath)
self.fpath = fpath
self.prefix = prefix
# Check if file/directory exists
if not os.path.exists(fpath):
            raise FileNotFoundError(f"Specified file or directory not found: {fpath}")
# If directory, look for files with matching prefix
if os.path.isdir(fpath):
filenames = sorted(glob(os.path.join(fpath, prefix + "*.nwb")))
else:
filenames = [fpath]
# If no files found
if len(filenames) == 0:
raise FileNotFoundError(f"No matching files with prefix {prefix} found in directory {fpath}")
# If multiple files found
elif len(filenames) > 1:
loaded = [self.load(fname, split_heldout=split_heldout, skip_fields=skip_fields) for fname in filenames]
datas, trial_infos, descriptions, bin_widths = [list(out) for out in zip(*loaded)]
assert np.all(np.array(bin_widths) == bin_widths[0]), "Bin widths of loaded datasets must be the same"
# Shift loaded files to stack them into continuous array
def trial_shift(x, shift_ms, trial_offset):
if x.name.endswith('_time'):
return x + pd.to_timedelta(shift_ms, unit='ms')
elif x.name == 'trial_id':
return x + trial_offset
else:
return x
# Loop through files, shifting continuous data
past_end = datas[0].index[-1].total_seconds() + round(50 * bin_widths[0] / 1000, 4)
descriptions_full = descriptions[0]
tcount = len(trial_infos[0])
for i in range(1, len(datas)):
block_start_ms = np.ceil(past_end * 10) * 100
datas[i] = datas[i].shift(block_start_ms, freq='ms')
trial_infos[i] = trial_infos[i].apply(trial_shift, shift_ms=block_start_ms, trial_offset=tcount)
descriptions_full.update(descriptions[i])
past_end = datas[i].index[-1].total_seconds() + round(50 * bin_widths[i] / 1000, 4)
tcount += len(trial_infos[i])
# Stack data and reindex to continuous
self.data = pd.concat(datas, axis=0, join='outer')
self.trial_info = pd.concat(trial_infos, axis=0, join='outer').reset_index(drop=True)
self.descriptions = descriptions_full
self.bin_width = bin_widths[0]
new_index = pd.to_timedelta((np.arange(round(self.data.index[-1].total_seconds() * 1000 / self.bin_width) + 1) * self.bin_width).round(4), unit='ms')
self.data = self.data.reindex(new_index)
self.data.index.name = 'clock_time'
# If single file found
else:
data, trial_info, descriptions, bin_width = self.load(filenames[0], split_heldout=split_heldout, skip_fields=skip_fields)
self.data = data
self.trial_info = trial_info
self.descriptions = descriptions
self.bin_width = bin_width
def load(self, fpath, split_heldout=True, skip_fields=[]):
"""Loads data from an NWB file into two dataframes,
one for trial info and one for time-varying data
Parameters
----------
fpath : str
Path to the NWB file
split_heldout : bool, optional
Whether to load heldin units and heldout units
to separate fields or not, by default True
skip_fields : list, optional
List of field names to skip during loading,
which may be useful if memory is an issue.
Field names must match the names automatically
assigned in the loading process. Spiking data
can not be skipped. Field names in the list
that are not found in the dataset are
ignored
Returns
-------
tuple
Tuple containing a pd.DataFrame of continuous loaded
data, a pd.DataFrame with trial metadata, a dict
with descriptions of fields in the DataFrames, and
the bin width of the loaded data in ms
"""
logger.info(f"Loading {fpath}")
# Open NWB file
io = NWBHDF5IO(fpath, 'r')
nwbfile = io.read()
# Load trial info and units
trial_info = (
nwbfile.trials.to_dataframe()
.reset_index()
.rename({'id': 'trial_id', 'stop_time': 'end_time'}, axis=1))
units = nwbfile.units.to_dataframe()
# Load descriptions of trial info fields
descriptions = {}
for name, info in zip(nwbfile.trials.colnames, nwbfile.trials.columns):
descriptions[name] = info.description
# Find all timeseries
def make_df(ts):
"""Converts TimeSeries into pandas DataFrame"""
if ts.timestamps is not None:
index = ts.timestamps[()]
else:
index = np.arange(ts.data.shape[0]) / ts.rate + ts.starting_time
columns = ts.comments.split('[')[-1].split(']')[0].split(',') if 'columns=' in ts.comments else None
df = pd.DataFrame(ts.data[()], index=pd.to_timedelta(index, unit='s'), columns=columns)
return df
def find_timeseries(nwbobj):
"""Recursively searches the NWB file for time series data"""
ts_dict = {}
for child in nwbobj.children:
if isinstance(child, TimeSeries):
if child.name in skip_fields:
continue
ts_dict[child.name] = make_df(child)
descriptions[child.name] = child.description
elif isinstance(child, ProcessingModule):
pm_dict = find_timeseries(child)
ts_dict.update(pm_dict)
elif isinstance(child, MultiContainerInterface):
for field in child.children:
if isinstance(field, TimeSeries):
name = child.name + "_" + field.name
if name in skip_fields:
continue
ts_dict[name] = make_df(field)
descriptions[name] = field.description
return ts_dict
# Create a dictionary containing DataFrames for all time series
data_dict = find_timeseries(nwbfile)
# Calculate data index
start_time = 0.0
bin_width = 1 # in ms, this will be the case for all provided datasets
rate = round(1000. / bin_width, 2) # in Hz
# Use obs_intervals, or last trial to determine data end
end_time = round(max(units.obs_intervals.apply(lambda x: x[-1][-1])) * rate) * bin_width
if (end_time < trial_info['end_time'].iloc[-1]):
print("obs_interval ends before trial end") # TO REMOVE
end_time = round(trial_info['end_time'].iloc[-1] * rate) * bin_width
timestamps = (np.arange(start_time, end_time, bin_width) / 1000).round(6)
timestamps_td = pd.to_timedelta(timestamps, unit='s')
# Check that all timeseries match with calculated timestamps
for key, val in list(data_dict.items()):
if not np.all(np.isin(np.round(val.index.total_seconds(), 6), timestamps)):
logger.warning(f"Dropping {key} due to timestamp mismatch.")
data_dict.pop(key)
def make_mask(obs_intervals):
"""Creates boolean mask to indicate when spiking data is not in obs_intervals"""
mask = np.full(timestamps.shape, True)
for start, end in obs_intervals:
start_idx = np.ceil(round((start - timestamps[0]) * rate, 6)).astype(int)
end_idx = np.floor(round((end - timestamps[0]) * rate, 6)).astype(int)
mask[start_idx:end_idx] = False
return mask
# Prepare variables for spike binning
masks = [(~units.heldout).to_numpy(), units.heldout.to_numpy()] if split_heldout else [np.full(len(units), True)]
for mask, name in zip(masks, ['spikes', 'heldout_spikes']):
# Check if there are any units
if not np.any(mask):
continue
# Allocate array to fill with spikes
spike_arr = np.full((len(timestamps), np.sum(mask)), 0.0, dtype='float16')
# Bin spikes using decimal truncation and np.unique - faster than np.histogram with same results
for idx, (_, unit) in enumerate(units[mask].iterrows()):
spike_idx, spike_cnt = np.unique(((unit.spike_times - timestamps[0]) * rate).round(6).astype(int), return_counts=True)
spike_arr[spike_idx, idx] = spike_cnt
# Replace invalid intervals in spike recordings with NaNs
if 'obs_intervals' in units.columns:
neur_mask = make_mask(units[mask].iloc[0].obs_intervals)
if np.any(spike_arr[neur_mask]):
logger.warning("Spikes found outside of observed interval.")
spike_arr[neur_mask] = np.nan
# Create DataFrames with spike arrays
data_dict[name] = pd.DataFrame(spike_arr, index=timestamps_td, columns=units[mask].index).astype('float16', copy=False)
# Create MultiIndex column names
data_list = []
for key, val in data_dict.items():
chan_names = None if type(val.columns) == pd.RangeIndex else val.columns
val.columns = self._make_midx(key, chan_names=chan_names, num_channels=val.shape[1])
data_list.append(val)
# Assign time-varying data to `self.data`
data = pd.concat(data_list, axis=1)
data.index.name = 'clock_time'
data.sort_index(axis=1, inplace=True)
# Convert time fields in trial info to timedelta
# and assign to `self.trial_info`
def to_td(x):
if x.name.endswith('_time'):
return pd.to_timedelta(x, unit='s')
else:
return x
trial_info = trial_info.apply(to_td, axis=0)
io.close()
return data, trial_info, descriptions, bin_width
def make_trial_data(self,
start_field='start_time',
end_field='end_time',
align_field=None,
align_range=(None, None),
margin=0,
ignored_trials=None,
allow_overlap=False,
allow_nans=False):
"""Makes a DataFrame of trialized data based on
an alignment field
Parameters
----------
start_field : str, optional
The field in `trial_info` to use as the beginning of
each trial, by default 'start_time'
end_field : str, optional
The field in `trial_info` to use as the end of each trial,
by default 'end_time'
align_field : str, optional
The field in `trial_info` to use for alignment,
by default None, which does not align trials and
instead takes them in their entirety
align_range : tuple of int, optional
The offsets to add to the alignment field to
calculate the alignment window, by default (None, None)
uses `trial_start` and `trial_end`
margin : int, optional
The number of ms of extra data to include on either end of
each trial, labeled with the `margin` column for easy
removal. Margins are useful for decoding and smoothing
ignored_trials : pd.Series or np.ndarray, optional
A boolean pd.Series or np.ndarray of the same length
as trial_info with True for the trials to ignore, by
default None ignores no trials. This is useful for
rejecting trials outside of the alignment process
allow_overlap : bool, optional
Whether to allow overlap between trials, by default False
truncates each trial at the start of the subsequent trial
allow_nans : bool, optional
Whether to allow NaNs within trials, by default False
drops all timestamps containing NaNs in any column
Returns
-------
pd.DataFrame
A DataFrame containing trialized data. It has the same
fields as the continuous `self.data` DataFrame, but
adds `trial_id`, `trial_time`, and `align_time`. It also
resets the index so `clock_time` is a column rather than
an index. This DataFrame can be pivoted to plot its
various fields across trials, aligned relative to
`align_time`, `trial_time`, or `clock_time`
"""
# Allow rejection of trials by passing a boolean series
trial_info = self.trial_info.copy()
trial_info['next_start'] = trial_info['start_time'].shift(-1)
if ignored_trials is not None:
trial_info = trial_info.loc[~ignored_trials]
if len(trial_info) == 0:
logger.warning("All trials ignored. No trial data made")
return
# Find alignment points
bin_width =
|
pd.to_timedelta(self.bin_width, unit='ms')
|
pandas.to_timedelta
|
import numpy as np
import pandas as pd
from config import conf
import eigen as eig
import region as reg
import hiperbolica as hyp
import matrices_acoplamiento as m_acop
import distorsionador as v_dist
import matriz_gauss as m_gauss
import v_transpuestos as v_trans
__doc__ = """
This module determines the flux and the continuity of flux.
It is equivalent to the function: * f11_V(region,modo,x,y)
"""
def cargar_recursos_flujo(regiones, ejes_relativos):
"""
    Function in charge of creating a dataframe that works as a schema or template
    so that the fluxes in each region can then be computed
"""
filename = 'csv/' + conf.data['env']['path'] + '/artificio_potencial.csv'
recursos_flujo = pd.read_csv(filename)
    # Drop some potential columns that are no longer needed
recursos_flujo.drop(['sum. 1er termino', 'term. (x-ca/cb-ca)', 'ca', 'cb'], inplace=True, axis=1)
recursos_flujo.rename(columns={'tipo_f1':'tipo_f1_num','tipo_f2':'tipo_f2_num'}, inplace=True)
    # Add new columns that are useful for recursos_flujo
recursos_flujo.insert(0, 'signo', '')
recursos_flujo.insert(4, 'tipo_f1_denom', '')
recursos_flujo.insert(5, 'tipo_f2_denom', '')
    # Keep the same index so that pandas conditional selection can be used
recursos_flujo.index = regiones.index
recursos_flujo.loc[regiones['direcc_de_flujo']=='subiendo','signo'] = '+'
recursos_flujo.loc[regiones['direcc_de_flujo']=='bajando','signo'] = '-'
recursos_flujo.loc[regiones['direcc_de_flujo']=='ambos sentidos','signo'] = '+'
    # Build the functions of normalizing factor 1
recursos_flujo.loc[recursos_flujo['tipo_f1_num']=='sinh','tipo_f1_num'] = 'cosh1'
recursos_flujo.loc[recursos_flujo['tipo_f1_num']=='cosh','tipo_f1_num'] = 'sinh'
recursos_flujo.loc[recursos_flujo['tipo_f1_num']=='cosh1','tipo_f1_num'] = 'cosh'
recursos_flujo.loc[recursos_flujo['tipo_f1_num']=='cosh','tipo_f1_denom'] = 'sinh'
recursos_flujo.loc[recursos_flujo['tipo_f1_num']=='sinh','tipo_f1_denom'] = 'cosh'
    # Build the functions of normalizing factor 2
    recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='0','tipo_f2_denom'] = '1' # Preventing a future indeterminate form
recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='sinh','tipo_f2_num'] = 'cosh1'
recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='cosh','tipo_f2_num'] = 'sinh'
recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='cosh1','tipo_f2_num'] = 'cosh'
recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='cosh','tipo_f2_denom'] = 'sinh'
recursos_flujo.loc[recursos_flujo['tipo_f2_num']=='sinh','tipo_f2_denom'] = 'cosh'
    # Add the index corresponding to the fluxes
recursos_flujo.index = [u'\u2202V' + str(i) for i in range(1, len(recursos_flujo) + 1)]
    # Create new calcular_str columns to display in graficacion.py
factor_signo = pd.DataFrame(index=recursos_flujo.index)
factor_signo.loc[recursos_flujo['signo']=='+', 'calcular_str'] = ""
factor_signo.loc[recursos_flujo['signo']=='-', 'calcular_str'] = "-"
factor_normalizador_1 =
|
pd.DataFrame(index=recursos_flujo.index)
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 23 13:05:34 2019
GSEA class
@author: tadahaya
"""
import pandas as pd
import numpy as np
from itertools import chain
import random
import string
from .process.processor import Processor
from .analyzer import Analyzer
from .data.data_control import GSEADataControl
from .calculator._gsea import Calculator
from .plot._plot import PlotGSEA
# concrete class
class GSEA(Analyzer):
def __init__(self):
self.data = GSEADataControl()
self.__process = Processor()
self.__calc = Calculator()
self.__plot = PlotGSEA()
self.__whole = set()
self.__obj = pd.DataFrame()
self.__ref = dict()
self.__method = ""
self.alpha = 0.0
self.res = pd.DataFrame()
### data processing ###
def check_ref(self,keyword:str):
""" check contents of reference data """
if len(self.__ref)==0:
raise ValueError("!! fit() before this process !!")
try:
temp = self.__ref[keyword]
print("{0}: {1}".format(keyword,temp))
except KeyError:
print("!! Wrong keyword !!")
hit = {v for v in self.__ref.keys() if keyword in v}
print("perhaps: {}".format(hit))
def vector2set(self,data,fold:float=3.0,
nmin:int=None,nmax:int=None,**kwargs):
"""
convert dataframe to the outlier set for reference
Parameters
----------
data: dataframe
feature x sample dataframe
fold: float
indicates fold change determining the outliers
nmin,nmax: int
indicate the minimum/maximum number of each set
"""
return self.__process.vec2set(mtx=data,fold=fold,nmin=nmin,nmax=nmax,
two_sided=False,**kwargs)
### data control ###
def fit(self,data:dict,keep_whole:bool=False,nmin=None):
"""
set a reference data instance
Parameters
----------
data: dict
a dictionary of sets like {"XXXX":{"aa","bb"},"YYYY":{"cc","dd","ee"},...}
keep_whole: boolean
whether whole features is conserved when already registered
nmin: int
indicates the number of features necessary for each set
"""
self.data.set_ref(data=data)
self.__ref = self.data.get_ref()
if keep_whole:
if len(self.__whole)==0:
raise ValueError("!! set_whole() or turn off keep_whole !!")
else:
self.data.adjust_ref()
else:
self.__whole = set(chain.from_iterable(self.__ref.values()))
self.data.set_whole(self.__whole)
if nmin is not None:
temp = self.__ref.copy()
for k,v in self.__ref.items():
if len(v) < nmin:
del temp[k]
self.__ref = temp
self.data.set_ref(data=self.__ref)
def set_whole(self,whole:set):
"""
set whole features
Parameters
----------
whole: set
indicates whole features
"""
self.data.set_whole(whole)
self.__whole = self.data.get_whole()
if len(self.data.get_ref())!=0:
self.data.adjust_ref()
def get_ref(self):
""" get reference data instance """
return self.data.get_ref()
def get_obj(self):
""" get an object data instance """
return self.data.get_obj()
def get_whole(self):
return self.__whole
### calculation ###
def calc(self,data,method:str="standard",alpha:float=0.0): # realization
"""
conduct GSEA
Parameters
-------
data: dataframe
feature x sample dataframe
method: str
indicate a method for calculating the enrichment score
"starndard": employed in the original paper Barbie, et al, 2009
"kuiper": Kuiper test statistics, good when up/down genes are mixed, tail sensitive
"gsva": GSVA like statistics, good when unidirection (ex. up only)
alpha: float, (0,1]
indicate weight of center
0 means no weight and is employed well
Returns res
-------
res: df
gene set enrichment score
"""
self.__method = method
self.__alpha = alpha
if method=="standard":
self.__calc.to_standard()
print("Standard method")
elif method=="kuiper":
self.__calc.to_kuiper()
print("Kuiper method")
elif method=="gsva":
self.__calc.to_gsva()
print("GSVA method")
else:
raise ValueError("!! Wrong method: choose 'standard', 'kuiper', or 'gsva' !!")
self.data.set_obj(data)
self.__obj = self.data.get_obj()
temp = self.__obj.copy()
col = list(temp.columns)
res = []
ap = res.append
for v in col:
ap(self.__calc.calc(obj=temp[v],ref=self.__ref,alpha=alpha))
res = pd.concat(res,axis=1,join="inner")
res.columns = col
self.res = res
return res
def normalize_score(self):
"""
normalize enrichment score with maximum value
Note that this method can be applied
only when the standard method was employed for calculation
"""
if self.res.empty:
raise ValueError("!! No result stored: calc() before this method !!")
n = len(self.__whole)
key = list(self.__ref.keys())
val = list(self.__ref.values())
dic = dict(zip(key,[len(v) for v in val]))
idx = list(self.res.index)
t = np.array([dic[v] for v in idx])
if type(self.res)==type(pd.DataFrame()):
kmax = np.c_[1 - t/n]
val = self.res.values/kmax
else:
kmax = 1 - t/n
val = self.res.values.flatten()/kmax
return
|
pd.DataFrame(val,columns=self.res.columns,index=idx)
|
pandas.DataFrame
|
import pandas as pd
import streamlit as st
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer, ENGLISH_STOP_WORDS
from sklearn.metrics.pairwise import linear_kernel
from PIL import Image
import matplotlib.pyplot as plt
def cocktail_recommender(cocktail_name, similarity_df, cocktails_df, num_recommendations=10):
'''
This function gets cocktail_name and provides recommendations using similarity values.
inputs:
cocktail_name (str): Name of a cocktail provided by the user.
similarity_df (pandas dataframe): Dataframe that contains similarity values between cocktails
cocktails_df (pandas dataframe): Dataframe that contains cocktails with recipes and ingredients
num_recommendations (int): Number of recommendations
outputs:
recommendations_df (pandas dataframe): Dataframe that contains recommended cocktails with recipes and ingredients
'''
recommendations = similarity_df[cocktail_name].sort_values(ascending=False)[1:num_recommendations]
recommendations.name = 'Similarity'
cocktails_details = cocktails_df[cocktails_df['Cocktail Name'].isin(recommendations.index)].set_index('Cocktail Name')
recommendations_df = pd.concat([cocktails_details,recommendations], axis=1).sort_values(by='Similarity', ascending=False)
return recommendations_df
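# Hedged usage sketch ('NEGRONI' is a hypothetical cocktail name; see etl_function below):
#   similarity_df, cocktails_df, vectorizer = etl_function()
#   top_picks = cocktail_recommender('NEGRONI', similarity_df, cocktails_df, num_recommendations=5)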
def etl_function():
'''
This function employs Extract Transform Load (ETL) pipeline.
The function reads data from csv files, merges the datasets, preprocess data,
applies tf-idf vectorizer, calculates cosine similarities between vectors.
Inputs: None
Outputs:
similarity_df (pandas dataframe): Dataframe that contains similarity values between cocktails
cocktails_df (pandas dataframe): Dataframe that contains cocktails with recipes and ingredients
vectorizer (sklearn class): Tf-idf vectorizer class fit to the data
'''
cocktails_df1 = pd.read_csv('./cocktails.csv')
cocktails_df2 = pd.read_csv('./cocktails_db.csv')
cocktails_df = pd.concat([cocktails_df1, cocktails_df2], axis=0)
cocktails_df['Cocktail Name'] = cocktails_df['Cocktail Name'].str.upper()
cocktails_df.drop(columns=['Bartender', 'Location', 'Bar/Company', 'Glassware', 'Notes'], inplace=True)
cocktails_df.drop_duplicates(subset='Cocktail Name', inplace=True)
cocktails_df.fillna('', inplace=True)
cocktails_df['All Ingredients'] = cocktails_df['Ingredients'] + ',' + cocktails_df['Garnish']
additional_stop_words = frozenset(['oz', 'simple', 'dash', 'bsp', 'drops'])
cocktail_stop_words = ENGLISH_STOP_WORDS.union(additional_stop_words)
vectorizer = TfidfVectorizer(stop_words=cocktail_stop_words, token_pattern=r'\b[^\d\W][^\d\W]+\b')
tfidf_matrix = vectorizer.fit_transform(cocktails_df['All Ingredients'])
cocktail_feature_df = pd.DataFrame(tfidf_matrix.toarray() ,columns=vectorizer.get_feature_names(), index=cocktails_df['Cocktail Name'])
similarity_matrix = linear_kernel(tfidf_matrix, tfidf_matrix)
similarity_df = pd.DataFrame(similarity_matrix, columns=cocktail_feature_df.index, index=cocktail_feature_df.index)
return similarity_df, cocktails_df, vectorizer
def print_given_cocktail(user_input, cocktails_df):
'''
This function prints the details of the given cocktail by the user.
inputs:
user_input (str): Name of a cocktail provided by the user.
cocktails_df (pandas dataframe): Dataframe that contains cocktails with recipes and ingredients
Output: None
'''
provided_cocktail = cocktails_df[cocktails_df['Cocktail Name'] == user_input]
name = provided_cocktail['Cocktail Name'].iloc[0]
ingredients = provided_cocktail['Ingredients'].iloc[0]
garnish = provided_cocktail['Garnish'].iloc[0]
preparation = provided_cocktail['Preparation'].iloc[0]
st.markdown("**Given Cocktail is** {}".format(name))
st.markdown("Ingredients: {}".format(ingredients))
st.markdown("Garnish: {}".format(garnish))
st.markdown("Preparation: {}".format(preparation))
def recommend_cocktail_key_in_database(user_input, similarity_df, cocktails_df, number_of_recommendations):
'''
This function is called when user given cocktail name is present in the database.
It uses cocktail_recommender function to give single recommendation. It plots bar chart and prints
recommendations for the web app.
inputs:
user_input (str): Name of a cocktail provided by the user.
similarity_df (pandas dataframe): Dataframe that contains similarity values between cocktails
cocktails_df (pandas dataframe): Dataframe that contains cocktails with recipes and ingredients
number_of_recommendations (int): Number of recommendations
Output: None
'''
recommended = cocktail_recommender(cocktail_name=user_input, similarity_df=similarity_df, cocktails_df=cocktails_df)
chart_data = similarity_df[user_input].sort_values(ascending=False)[1:6]
fig, ax = plt.subplots()
ax.barh(chart_data.index, chart_data.values)
ax.invert_yaxis()
ax.set_title('Similarities to given cocktail')
st.pyplot(fig)
image = Image.open('./great_gatsby.jpg')
st.image(image, use_column_width=True)
st.success('Recommended based on the name of cocktail provided!')
for i in range(0,2):
name = recommended.iloc[i].name
ingredients = recommended.iloc[i].Ingredients
garnish = recommended.iloc[i].Garnish
preparation = recommended.iloc[i].Preparation
st.markdown("**Recommended Cocktail is** {}".format(name))
st.markdown("Ingredients: {}".format(ingredients))
st.markdown("Garnish: {}".format(garnish))
st.markdown("Preparation: {}".format(preparation))
st.text("\n")
st.text("\n")
my_expander = st.beta_expander('Show more recommendations')
with my_expander:
for i in range(2, number_of_recommendations):
name = recommended.iloc[i].name
ingredients = recommended.iloc[i].Ingredients
garnish = recommended.iloc[i].Garnish
preparation = recommended.iloc[i].Preparation
st.markdown("**Recommended Cocktail is** {}".format(name))
st.markdown("Ingredients: {}".format(ingredients))
st.markdown("Garnish: {}".format(garnish))
st.markdown("Preparation: {}".format(preparation))
st.text("\n")
st.text("\n")
def recommend_cocktail_similarity_to_ingredients(user_input, cocktails_df, vectorizer, number_of_recommendations):
'''
This function is called when user input is the ingredients present in the database.
It applies tf-idf vectorization to user input, calculates cosine similarities and
prints recommendations for the web app.
inputs:
user_input (str): Name of a cocktail provided by the user.
cocktails_df (pandas dataframe): Dataframe that contains cocktails with recipes and ingredients
vectorizer (sklearn class): Tf-idf vectorizer class fit to the data
number_of_recommendations (int): Number of recommendations
Output: None
'''
tfidf_matrix = vectorizer.transform(cocktails_df['All Ingredients'])
user_input_vector = vectorizer.transform([user_input])
similarity_vector = linear_kernel(tfidf_matrix, user_input_vector)
similarity_pd =
|
pd.DataFrame(similarity_vector, columns=['Similarity'], index=cocktails_df['Cocktail Name'])
|
pandas.DataFrame
|
import numpy as np
import scipy
import pandas as pd
import plotly.express as px
def smoothen_fft_(lc, thresh=200):
# a low pass filter
lc_fft = np.fft.fft(lc)
lc_fft[thresh : len(lc) - thresh] = 0
lc_smooth = np.abs(np.fft.ifft(lc_fft))
return lc_smooth
def smoothening_ma(__x, __y, window_sz, shift):
"""
Using moving average to smoothen data and linearly interpolating back to original size
"""
new_norm = []
new_norm_data_points = []
# for first frame
new_norm.append(np.mean(__y[0:window_sz]))
new_norm_data_points.append(__x[0])
for i in range(window_sz, len(__y), shift):
tmp = np.mean(__y[i : i + shift])
new_norm.append(tmp)
new_norm_data_points.append(__x[i])
new_norm = np.array(new_norm)
new_norm_data_points = np.array(new_norm_data_points)
xnew = np.linspace(__x[0], __x[0] + len(__x), __x[0] + len(__x))
# interpolating back to original size
f = scipy.interpolate.interp1d(
new_norm_data_points, new_norm, fill_value="extrapolate", kind="linear"
)
ynew = f(xnew)
return xnew, ynew
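# Hedged usage sketch (x/y are illustrative arrays):
#   x = np.arange(1000); y = np.sin(x / 50.0) + np.random.normal(0, 0.1, 1000)
#   x_s, y_s = smoothening_ma(x, y, window_sz=20, shift=10)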
def smoothening_fft(lc, thresh=200, should_plot=False):
lc_fft = np.fft.fft(lc)
lc_fft[thresh : len(lc) - thresh] = 0
lc_smooth = np.abs(np.fft.ifft(lc_fft)) + 1e-5
if should_plot:
px.line(
|
pd.DataFrame(lc_smooth)
|
pandas.DataFrame
|
# -*- coding: UTF-8 -*-
# create_paper_figures.py
import numpy as np
import pandas as pd
import statsmodels.formula.api as sm
import os
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from matplotlib.lines import Line2D
try:
import cantera as ct
except:
raise Exception("I am not seeing cantera installed. Find more information about installing it on https://www.cantera.org/.")
try:
import cantera_tools as ctt
import analysis_methods as am
except:
raise Exception("I am having trouble loading special modules. Make sure you run this script from within the 'code' folder.")
image_path = '../results'
if not os.path.exists(image_path):
os.makedirs(image_path)
# set plot style
sns.set_palette('colorblind',n_colors=4)
sns.set_style('white')
sns.set_context('paper',font_scale=1.5)
sns.set_style('ticks',{'ytick.direction': 'in','xtick.direction': 'in'})
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
# get data for models
# cluster number
model_molecule_to_cluster_number = {'full': {'propane':26, 'ethyl':25,'methyl':24,
'ethene':22,'H-atom':23,'n-propyl':20,
'methane':19,'ethane':18,'ethenyl':17,
'ethyne':16,'propene':14,},
'drg' : {'propane':8, 'ethyl':6,'methyl':5,
'ethene':2,'H-atom':4,'n-propyl':3,
'methane':1,'ethane':7,},
'3rxn' : {'propane':5, 'ethyl':3,'methyl':2,
'ethene':1,
'methane':0,'ethane':4,},
'6rxn' : {'propane':7, 'ethyl':5,'methyl':4,
'ethene':2,'H-atom':3,
'methane':1,'ethane':6,},
}
# initial propane isotopologue concentrations
delta_total= -28
psia = 5.4
edge_labeled_delta = delta_total + psia / 3.
center_labeled_delta = delta_total - 2. * psia / 3.
edge_labeled_fraction = am.getEnrichementFractionFromDelta(edge_labeled_delta)
center_labeled_fraction = am.getEnrichementFractionFromDelta(center_labeled_delta)
fraction_propane = 0.0049 # see supplemental
initialMoleFractions={
"CCC": fraction_propane * (1-center_labeled_fraction) * (1-edge_labeled_fraction)**2,
"CCC-2": fraction_propane * center_labeled_fraction * edge_labeled_fraction**2,
"CCC-3": fraction_propane * edge_labeled_fraction**2*(1-center_labeled_fraction),
"CCC-4": fraction_propane * 2*edge_labeled_fraction * (1-edge_labeled_fraction) * center_labeled_fraction,
"CCC-5": fraction_propane * 2*edge_labeled_fraction *(1-center_labeled_fraction) * (1-edge_labeled_fraction),
"CCC-6": fraction_propane * center_labeled_fraction*(1-edge_labeled_fraction)**2,
"[He]": 1-fraction_propane,
}
main_paths = [('full', '../mechanisms/full_model'),
('drg','../mechanisms/drg_model'),
('3rxn','../mechanisms/three_reaction_model'),
('6rxn','../mechanisms/six_reaction_model')]
################################
# Figures 2 and 3
print('creating figures 2 and 3')
enrichment_results = []
concentrations = {}
ethyl_psie_all = pd.DataFrame()
# run all four simulations
for name, mainPath in main_paths:
cluster_info = pd.read_csv(os.path.join(mainPath, 'isotopomer_cluster_info.csv'),index_col='name')
molecule_to_cluster_number = model_molecule_to_cluster_number[name]
temp = 850+273
times = np.linspace(1e-4,95. / temp,100)
solution = ct.Solution(os.path.join(mainPath,'chem.cti'))
conditions = temp, 2e5, initialMoleFractions
output = ctt.run_simulation(solution, times, conditions=conditions,
condition_type = 'constant-temperature-and-pressure',
output_species = True,
output_reactions = False)
species = output['species']
# find enrichments and total concentration
delta_enrichments = pd.DataFrame(columns=list(molecule_to_cluster_number.keys()), index = times)
concentration_data = pd.DataFrame(columns=list(molecule_to_cluster_number.keys()), index = times)
ethyl_psie = pd.Series()
for time in times:
enrichments_temp = {}
for molecule, cluster_num in molecule_to_cluster_number.items():
labels = cluster_info[cluster_info.cluster_number == cluster_num].index
if not np.isclose(species.loc[time,labels].sum(),0,atol=1e-40) and molecule != 'H-atom':
enrichments_temp[molecule] = am.getDelta(species.loc[time,labels],
cluster_info.loc[labels,'enriched_atoms'],
cluster_info.loc[labels,'unenriched_atoms'],)
concentration_data.loc[time,molecule] = species.loc[time,labels].sum()
# get psie information
if molecule == 'ethyl':
ethyl_psie[time] = am.getPSIE(species.loc[time,labels],
cluster_info.loc[labels,:],
type_1 = 'r',
type_2 = 'not_r')
delta_enrichments.loc[time,:] = enrichments_temp
concentrations[name] = concentration_data
ethyl_psie_all[name] = ethyl_psie
enrichment_results.append((name,delta_enrichments))
# output figure 2
molecule = 'ethene'
f,ax = plt.subplots()
for identifier, enrichments in enrichment_results:
enrichments.plot(y=molecule,ax=ax)
ax.set_ylabel(u'{}\n$^{{13}}\\delta$C\n(‰)'.format(molecule),rotation='horizontal',labelpad=30)
ax.set_xlabel('time (s)')
ax.annotate('6 reaction model',(.22,.4),xycoords='axes fraction', rotation = 0)
ax.annotate('full model',(.7,.7),xycoords='axes fraction', rotation = 3.5)
ax.annotate('3 reaction model',(.2,.88),xycoords='axes fraction', rotation = 3.5)
ax.annotate('DRG model',(0.7,.82),xycoords='axes fraction', rotation = 3)
ax.legend([])
plt.savefig(os.path.join(image_path,'{}_enrich.pdf'.format(molecule)),bbox_inches='tight')
# output figure 3
molecule = 'ethyl'
f,ax = plt.subplots()
for column_name in ethyl_psie_all.columns:
psie_data = ethyl_psie_all[column_name]
psie_data.plot(ax=ax)
ax.set_ylabel(u'{}\nPSIA\n(‰)'.format(molecule),rotation='horizontal',labelpad=20)
ax.set_xlabel('time (s)')
ax.annotate('6 reaction model',(.15,.95), (.2,.7),xycoords='axes fraction',textcoords='axes fraction', rotation = 0, arrowprops={'arrowstyle':'-'}) #kie
ax.annotate('full model',(.4,.91), (.4,.8),xycoords='axes fraction',textcoords='axes fraction', rotation = 0, arrowprops={'arrowstyle':'-'})
ax.annotate('DRG model',(.65,.89), (.6,.7),xycoords='axes fraction',textcoords='axes fraction', rotation = 0, arrowprops={'arrowstyle':'-'})
ax.annotate('3 reaction model',(.2,.5),xycoords='axes fraction', rotation = 0)
ax.set_ylim(-21,1)
ax.legend([])
plt.savefig(os.path.join(image_path,'{}_psie.pdf'.format(molecule)),bbox_inches='tight')
##############################################
# figure 4 - mole fractions
print('creating figure 4')
# get data
model_index = 0
model_name = main_paths[model_index][0]
model_path = main_paths[model_index][1]
molecule_to_cluster_number = model_molecule_to_cluster_number[model_name]
model_file = os.path.join(model_path, 'chem.cti')
isotopomer_info = pd.read_csv(os.path.join(model_path,'isotopomer_cluster_info.csv'),index_col = 'name')
# simulate mechanism
mole_fractions = pd.DataFrame(index=molecule_to_cluster_number.keys())
for temp in (750,800,850,900,950):
solution = ct.Solution(model_file)
# set initial conditions of solution in kelvin pascals and mole fractions
conditions = temp+273, 2e5, initialMoleFractions
t_final = 95. / temp # see supplemental info
output = ctt.run_simulation(solution, [0,t_final], conditions=conditions,
condition_type = 'constant-temperature-and-pressure',
output_species = True,
output_reactions = True)
species = output['species'].loc[t_final,:]
isotopomer_info['conc'] = species
# Gilbert et al. 2016 weighted their results by the area of the detector peak,
# so each total concentration here is weighted
# by the number of carbon atoms in the molecule.
species_concentrations = {}
for molecule, cluster_num in molecule_to_cluster_number.items():
labels = isotopomer_info[isotopomer_info.cluster_number == cluster_num].index
species_concentrations[molecule] = isotopomer_info.loc[labels,'conc'].sum() * \
(isotopomer_info.loc[labels[0],'enriched_atoms'] + isotopomer_info.loc[labels[0],'unenriched_atoms'])
# find mole fractions (weighted by number of carbons)
total_concentration = 0
for index in isotopomer_info.index:
moles_element = isotopomer_info.loc[index,'conc'] * (isotopomer_info.loc[index,'enriched_atoms'] + isotopomer_info.loc[index,'unenriched_atoms'])
total_concentration += moles_element
mole_fractions_temp = {}
for molecule in species_concentrations.keys():
mole_fractions_temp[molecule] = species_concentrations[molecule] / total_concentration
mole_fractions[temp] = pd.Series(mole_fractions_temp)
# get experimental data
# taken three times to improve accuracy
experimental_data_folder = '../exp_data/'
fig1A_data_a = pd.read_csv(os.path.join(experimental_data_folder,'Gilbert_fig1A_from_engauge_try_2a.csv'),
index_col='Temperature (C)')
fig1A_data_b = pd.read_csv(os.path.join(experimental_data_folder,'Gilbert_fig1A_from_engauge_try_2b.csv'),
index_col='Temperature (C)')
fig1A_data_c = pd.read_csv(os.path.join(experimental_data_folder,'Gilbert_fig1A_from_engauge_try_2c.csv'),
index_col='Temperature (C)')
fig1A_data_original = (fig1A_data_a + fig1A_data_b + fig1A_data_c)/3
# process data
mole_fractions_1a = mole_fractions.T * 100
# normalize using propane conversion for model. use 100% for experiments
mole_fractions_1a = mole_fractions_1a.divide(100-mole_fractions_1a['propane'],'index') * 100
column_order = ['methane', 'ethane','ethene','propene']
fig1A_data = fig1A_data_original[column_order]
column_order = ['methane', 'ethane','ethene','propene','ethyne']
mole_fractions_1a = mole_fractions_1a[column_order]
fig1A_data = fig1A_data.divide(fig1A_data.sum('columns'),'index')*100
# create figure 4
f,ax = plt.subplots()
fig1A_data.plot.area(ax=ax, linewidth=0,
stacked=True, alpha= 0.3)
mole_fractions_1a.plot(ax=ax, linestyle = '-',
linewidth= 2, markersize =0,stacked=True)
ax.set_ylabel("fraction carbon from propane (%)")
ax.set_xlabel(u'T (°C)')
ax.set_xticks(ticks=[750,800,850,900,950])
ax.set_xlim(750,950)
ax.set_ylim(1,110)
ax.legend([])
ax.annotate('methane',(800,18),(760,5),arrowprops={'arrowstyle':'-'})
ax.annotate('ethane',(800,31),(760,22),arrowprops={'arrowstyle':'-'})
ax.annotate('ethene',(800,91),(760,55),arrowprops={'arrowstyle':'-'})
ax.annotate('propene',(820,97),(760,92),arrowprops={'arrowstyle':'-'})
ax.annotate('ethyne',(900,100),(860,103),arrowprops={'arrowstyle':'-'})
plt.savefig(os.path.join(image_path, '1a_area_normalized_using_experimental_conversion.pdf'), bbox_inches = 'tight')
################################
# figure 5 - enrichments
print('creating figure 5')
# plot experimental data
# simulate and plot model data
styles = ['o',
'^',
's',
'v',
'D']
line_styles= [(0, (1, 2)),
(0, (5, 10)),
(0, (1, 5)),
(0, (3, 5, 1, 5)),
]
f,ax = plt.subplots()
fig1B_data = pd.read_csv('../exp_data/Gilbert_fig1B_engauge.csv', index_col='Temperature (C)')
del fig1B_data['propene']
fig1B_data.plot(ax=ax, linestyle = '',
linewidth= .5, style = styles, markersize = 6, markeredgewidth = 1)
# use the same propane conversions as reported by Gilbert et al.
conversions_gilbert = 1- fig1A_data_original.propane / 100
for model_index in range(2):
# get model data
model_name = main_paths[model_index][0]
model_path = main_paths[model_index][1]
molecule_to_cluster_number = model_molecule_to_cluster_number[model_name]
model_file = os.path.join(model_path, 'chem.cti')
isotopomer_info = pd.read_csv(os.path.join(model_path,'isotopomer_cluster_info.csv'),index_col = 'name')
# simulate model
enrichments_by_conversion = pd.DataFrame(dtype=float)
for temp in [750,800,850,900,950]:
conversion = conversions_gilbert[temp]
# creates the cantera Solution object
solution = ct.Solution(model_file)
conditions = temp+273, 2e5, initialMoleFractions
output = ctt.run_simulation_till_conversion(solution, species='CCC', conversion=conversion,conditions = conditions,
condition_type = 'constant-temperature-and-pressure',
output_species = True,
output_reactions = False)
species = output['species'].iloc[-1,:]
isotopomer_info['conc'] = species
for molecule, cluster_num in molecule_to_cluster_number.items():
labels = isotopomer_info[isotopomer_info.cluster_number == cluster_num].index
if molecule != 'H-atom':
enrichments_by_conversion.loc[temp, molecule] = am.getDelta(species[labels],
isotopomer_info.loc[labels,'enriched_atoms'],
isotopomer_info.loc[labels,'unenriched_atoms'],
)
# plot this data set
enrichments_by_conversion.plot(y=fig1B_data.columns,
ax=ax, linestyle = line_styles[model_index], linewidth= 1,
style =styles,markersize = 2, markeredgewidth = 1,
markerfacecolor = "None")
# plot details
ax.set_ylabel("$\\delta^{13}C$\n$(\\perthousand)$",rotation='horizontal',va='center',ha='right')
ax.set_xlabel(u'T (°C)')
# move legend outside of plot
ax.legend(list(fig1B_data.columns), **{'bbox_to_anchor':(1.05, 1), 'loc':2, 'borderaxespad':0.})
items, entries = ax.get_legend_handles_labels()
items = items[:len(fig1B_data.columns)]
legend_items = []
for item in items:
# make a fresh Line2D carrying only the marker style, so the legend shows markers without lines and the plot itself is unaffected
legend_item = Line2D([],[],linestyle='none', marker= item.get_marker(), markersize=item.get_markersize(), markeredgewidth=item.get_markeredgewidth(), markerfacecolor=item.get_markerfacecolor(), markeredgecolor=item.get_markeredgecolor())
legend_item.set_linestyle('none')
legend_items.append(legend_item)
legend_items.append(Line2D([],[],linestyle='none'))
legend_items.append(Line2D([],[],linestyle = '', color='black',
linewidth= .5,marker = 'd',
markerfacecolor='black',markeredgecolor = 'black',
markersize = 6, markeredgewidth = 1))
for linestyle in line_styles[:2]:
legend_items.append(Line2D([],[],linestyle = linestyle, color='black', linewidth= 1,marker = 'd',
markerfacecolor='none', markeredgecolor = 'black',markersize = 2, markeredgewidth = 1))
entries = entries[:len(fig1B_data.columns)]
entries.append('')
entries.append('experiment')
for name, _ in main_paths:
entries.append(name+' model')
# move legend outside of plot
ax.legend(legend_items,entries, **{'bbox_to_anchor':(1.05, 1), 'loc':2, 'borderaxespad':0.})
ax.set_xticks(ticks=[750,800,850,900,950])
ax.set_xlim(740,960)
ax.set_yticks(ticks=[-40,-30,-20,-10,0])
plt.savefig(os.path.join(image_path, '1b_by_conversion_sans_propene_all_models.pdf'), bbox_inches = 'tight')
################################
# figure 6 & table 2 - slopes of enrichment
print('creating figure 6 and table 2')
# simulate
model_to_slope_dict = {}
model_to_temperature_varying_enrichments = {}
for model_index in range(4):
# get model data
model_name = main_paths[model_index][0]
model_path = main_paths[model_index][1]
molecule_to_cluster_number = model_molecule_to_cluster_number[model_name]
model_file = os.path.join(model_path, 'chem.cti')
isotopomer_info = pd.read_csv(os.path.join(model_path,'isotopomer_cluster_info.csv'),index_col = 'name')
# simulate model
temperature_varying_enrichments = {}
slopes_found =
|
pd.DataFrame(index = ["dC2H4 = f(dCH4)",'dC2H6 = f(dCH4)','dC2H6 = f(dC2H4)','dBulk = f(dCH4)'])
|
pandas.DataFrame
|
import ibeis
import six
import vtool
import utool
import numpy as np
import numpy.linalg as npl # NOQA
import pandas as pd
from vtool import clustering2 as clustertool
from vtool import nearest_neighbors as nntool
from plottool import draw_func2 as df2
np.set_printoptions(precision=2)
pd.set_option('display.max_rows', 10)
pd.set_option('display.max_columns', 10)
pd.set_option('display.notebook_repr_html', True)
ibeis.ensure_pz_mtest()
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#tvec_list = np.vstack(tvecs_list)
#print(idx2_vec)
#labels, words = vtool.clustering.cached_akmeans(tvec_list, 1000, 30, cache_dir='.')
#tvecdf_list = [pd.DataFrame(vecs) for vecs in tvecs_list]
#tvecs_df = pd.DataFrame(tvecdf_list, index=taids)
#kpts_col = pd.DataFrame(tkpts_list, index=taids, columns=['kpts'])
#vecs_col = pd.DataFrame(tvecs_list, index=taids, columns=['vecs'])
#tvecs_dflist = [pd.DataFrame(vecs, index=np.arange(len(vecs))) for vecs in tvecs_list]
#pd.concat(tvecs_dflist)
## Bui
#taids = ibs.get_valid_aids()
#tvecs_list = ibs.get_annot_vecs(taids)
#tkpts_list = ibs.get_annot_kpts(taids)
#orig_idx2_vec, orig_idx2_ax, orig_idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#annots_df = pd.concat([vecs_col, kpts_col], axis=1)
#annots_df
#idx2_vec = np.vstack(annots_df['vecs'].values)
##idx2_ax =
#idx2_vec, idx2_ax, idx2_fx = vtool.nearest_neighbors.invertible_stack(tvecs_list, taids)
#labels, words = vtool.clustering2.cached_akmeans(tvec_list, 1000, 30)
#words = centroids
def display_info(ibs, invindex, annots_df):
#################
#from ibeis.other import dbinfo
#print(ibs.get_infostr())
#dbinfo.get_dbinfo(ibs, verbose=True)
#################
#print('Inverted Index Stats: vectors per word')
#print(utool.get_stats_str(map(len, invindex.wx2_idxs.values())))
#################
qfx2_vec = annots_df['vecs'][1]
centroids = invindex.words
num_pca_dims = 3 # 3
whiten = False
kwd = dict(num_pca_dims=num_pca_dims,
whiten=whiten,)
#clustertool.rrr()
def makeplot_(fnum, prefix, data, labels='centroids', centroids=centroids):
return clustertool.plot_centroids(data, centroids, labels=labels,
fnum=fnum, prefix=prefix + '\n', **kwd)
#makeplot_(1, 'centroid vecs', centroids)
#makeplot_(2, 'database vecs', invindex.idx2_vec)
#makeplot_(3, 'query vecs', qfx2_vec)
#makeplot_(4, 'database vecs', invindex.idx2_vec)
#makeplot_(5, 'query vecs', qfx2_vec)
#################
def make_annot_df(ibs):
aid_list = ibs.get_valid_aids()
_kpts_col = pd.DataFrame(ibs.get_annot_kpts(aid_list),
index=aid_list, columns=['kpts'])
_vecs_col = pd.DataFrame(ibs.get_annot_vecs(aid_list),
index=aid_list, columns=['vecs'])
annots_df = pd.concat([_vecs_col, _kpts_col], axis=1)
return annots_df
def learn_visual_words(annots_df, train_aids, nCentroids):
vecs_list = annots_df['vecs'][train_aids].values
train_vecs = np.vstack(vecs_list)
print('Training %d word vocabulary with %d annots and %d descriptors' %
(nCentroids, len(train_aids), len(train_vecs)))
words = clustertool.cached_akmeans(train_vecs, nCentroids, max_iters=100)
return words
def index_data_annots(annots_df, daids, words):
vecs_list = annots_df['vecs'][daids]
flann_params = {}
wordflann = vtool.nearest_neighbors.flann_cache(words, flann_params=flann_params)
ax2_aid = np.array(daids)
idx2_vec, idx2_ax, idx2_fx = nntool.invertible_stack(vecs_list, np.arange(len(ax2_aid)))
invindex = InvertedIndex(words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid)
invindex.compute_internals()
return invindex
@six.add_metaclass(utool.ReloadingMetaclass)
class InvertedIndex(object):
def __init__(invindex, words, wordflann, idx2_vec, idx2_ax, idx2_fx, ax2_aid):
invindex.wordflann = wordflann
invindex.words = words # visual word centroids
invindex.ax2_aid = ax2_aid # annot index -> annot id
invindex.idx2_vec = idx2_vec # stacked index -> descriptor vector
invindex.idx2_ax = idx2_ax # stacked index -> annot index
invindex.idx2_fx = idx2_fx # stacked index -> feature index
invindex.idx2_wx = None # stacked index -> word index
invindex.wx2_idxs = None # word index -> stacked indexes
invindex.wx2_drvecs = None # word index -> residual vectors
#invindex.compute_internals()
def compute_internals(invindex):
idx2_vec = invindex.idx2_vec
wx2_idxs, idx2_wx = invindex.assign_to_words(idx2_vec)
wx2_drvecs = invindex.compute_residuals(idx2_vec, wx2_idxs)
invindex.idx2_wx = idx2_wx
invindex.wx2_idxs = wx2_idxs
invindex.wx2_drvecs = wx2_drvecs
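# assign_to_words below does a nearest-neighbour assignment of every stacked
# descriptor to a visual word; it returns (wx2_idxs: word index -> stacked
# descriptor indexes, idx2_wx: per-descriptor word index).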
def assign_to_words(invindex, idx2_vec):
idx2_wx, _idx2_wdist = invindex.wordflann.nn_index(idx2_vec, 1)
if True:
assign_df = pd.DataFrame(idx2_wx, columns=['wordindex'])
grouping = assign_df.groupby('wordindex')
wx2_idxs = grouping.wordindex.indices
else:
# TODO: replace with pandas groupby
idx_list = list(range(len(idx2_wx)))
wx2_idxs = utool.group_items(idx_list, idx2_wx.tolist())
return wx2_idxs, idx2_wx
def compute_residuals(invindex, idx2_vec, wx2_idxs):
""" returns mapping from word index to a set of residual vectors """
words = invindex.words
wx2_rvecs = {}
for word_index in wx2_idxs.keys():
# for each word
idxs = wx2_idxs[word_index]
vecs = np.array(idx2_vec[idxs], dtype=np.float64)
word = np.array(words[word_index], dtype=np.float64)
# compute residuals of all vecs assigned to this word
residuals = np.array([word - vec for vec in vecs])
# normalize residuals
residuals_n = vtool.linalg.normalize_rows(residuals)
wx2_rvecs[word_index] = residuals_n
return wx2_rvecs
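# Typical end-to-end use of the pieces above (sketch only; the database name is
# an assumption based on ibeis.ensure_pz_mtest() above):
#   ibs = ibeis.opendb('PZ_MTEST')
#   annots_df = make_annot_df(ibs)
#   aids = annots_df.index
#   words = learn_visual_words(annots_df, aids, nCentroids=1000)
#   invindex = index_data_annots(annots_df, aids, words)
#   query_inverted_index(annots_df, aids[0], invindex)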
#def smk_similarity(wx2_qrvecs, wx2_drvecs):
# similarity_matrix = (rvecs1.dot(rvecs2.T))
def query_inverted_index(annots_df, qaid, invindex):
qfx2_vec = annots_df['vecs'][qaid]
wx2_qfxs, qfx2_wx = invindex.assign_to_words(qfx2_vec)
wx2_qrvecs = invindex.compute_residuals(qfx2_vec, wx2_qfxs)
daid = invindex.ax2_aid[0]
def single_daid_similarity(invindex, daid):
""" daid = 4
FIXME: Inefficient code
"""
ax = np.where(invindex.ax2_aid == daid)[0]
wx2_dfxs = {}
wx2_drvecs = {}
for wx, idxs in invindex.wx2_idxs.items():
valid = (invindex.idx2_ax[idxs] == ax)
dfxs = invindex.idx2_fx[idxs][valid]
drvecs = invindex.wx2_drvecs[wx][valid]
wx2_dfxs[wx] = dfxs
wx2_drvecs[wx] = drvecs
# Similarity to a single database annotation
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(wx2_drvecs.keys())
total_score = 0
for wx in data_wxs.intersection(query_wxs):
qrvecs = wx2_qrvecs[wx]
drvecs = wx2_drvecs[wx]
residual_similarity = qrvecs.dot(drvecs.T)
scores = selectivity_function(residual_similarity)
total_score += scores.sum()
return total_score
def selectivity_function(residual_similarity, alpha=3, thresh=0):
""" sigma from SMK paper """
u = residual_similarity
scores = (np.sign(u) * np.abs(u)) ** alpha
scores[scores <= thresh] = 0
return scores
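# Illustrative values: with alpha=3 and thresh=0 the mapping reduces to u**3
# with non-positive scores clipped to zero, e.g.
# selectivity_function(np.array([0.5, -0.5])) -> array([0.125, 0.])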
# Entire database
daid2_score = utool.ddict(lambda: 0)
query_wxs = set(wx2_qrvecs.keys())
data_wxs = set(invindex.wx2_drvecs.keys())
qfx2_axs = []
qfx2_fm = []
qfx2_fs = []
aid_fm = []
aid_fs = []
idx2_daid = pd.Series(invindex.ax2_aid[invindex.idx2_ax], name='daid')
idx2_dfx = pd.Series(invindex.idx2_fx, name='dfx')
idx2_wfx = pd.Series(invindex.idx2_wx, name='dwx')
idx_df =
|
pd.concat((idx2_daid, idx2_dfx, idx2_wfx), axis=1, names=['idx'])
|
pandas.concat
|
"""
Tests for DatetimeIndex timezone-related methods
"""
from datetime import date, datetime, time, timedelta, tzinfo
import dateutil
from dateutil.tz import gettz, tzlocal
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import conversion, timezones
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
DatetimeIndex,
Index,
Timestamp,
bdate_range,
date_range,
isna,
to_datetime,
)
import pandas._testing as tm
class FixedOffset(tzinfo):
"""Fixed offset in minutes east from UTC."""
def __init__(self, offset, name):
self.__offset = timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
fixed_off = FixedOffset(-420, "-07:00")
fixed_off_no_name = FixedOffset(-330, None)
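# fixed_off is UTC-07:00 (420 minutes west of UTC); fixed_off_no_name is a
# UTC-05:30 offset with no tz name attached.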
class TestDatetimeIndexTimezones:
# -------------------------------------------------------------
# DatetimeIndex.tz_convert
def test_tz_convert_nat(self):
# GH#5546
dates = [pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Eastern"))
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="UTC"))
dates = ["2010-12-01 00:00", "2010-12-02 00:00", pd.NaT]
idx = DatetimeIndex(dates)
idx = idx.tz_localize("US/Pacific")
tm.assert_index_equal(idx, DatetimeIndex(dates, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 03:00", "2010-12-02 03:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx + pd.offsets.Hour(5)
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
idx = idx.tz_convert("US/Pacific")
expected = ["2010-12-01 05:00", "2010-12-02 05:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx + np.timedelta64(3, "h")
expected = ["2010-12-01 08:00", "2010-12-02 08:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Pacific"))
idx = idx.tz_convert("US/Eastern")
expected = ["2010-12-01 11:00", "2010-12-02 11:00", pd.NaT]
tm.assert_index_equal(idx, DatetimeIndex(expected, tz="US/Eastern"))
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_convert_compat_timestamp(self, prefix):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates, tz=prefix + "US/Eastern")
conv = idx[0].tz_convert(prefix + "US/Pacific")
expected = idx.tz_convert(prefix + "US/Pacific")[0]
assert conv == expected
def test_dti_tz_convert_hour_overflow_dst(self):
# Regression test for:
# https://github.com/pandas-dev/pandas/issues/13306
# sorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2009-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2009-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = ["2008-05-12 09:50:00", "2008-12-12 09:50:35", "2008-05-12 09:50:32"]
tt = DatetimeIndex(ts).tz_localize("US/Eastern")
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = ["2008-05-12 13:50:00", "2008-12-12 14:50:35", "2008-05-12 13:50:32"]
tt = DatetimeIndex(ts).tz_localize("UTC")
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_convert_hour_overflow_dst_timestamps(self, tz):
# Regression test for GH#13306
# sorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2009-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# sorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2009-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
# unsorted case US/Eastern -> UTC
ts = [
Timestamp("2008-05-12 09:50:00", tz=tz),
Timestamp("2008-12-12 09:50:35", tz=tz),
Timestamp("2008-05-12 09:50:32", tz=tz),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("UTC")
expected = Index([13, 14, 13])
tm.assert_index_equal(ut.hour, expected)
# unsorted case UTC -> US/Eastern
ts = [
Timestamp("2008-05-12 13:50:00", tz="UTC"),
Timestamp("2008-12-12 14:50:35", tz="UTC"),
Timestamp("2008-05-12 13:50:32", tz="UTC"),
]
tt = DatetimeIndex(ts)
ut = tt.tz_convert("US/Eastern")
expected = Index([9, 9, 9])
tm.assert_index_equal(ut.hour, expected)
@pytest.mark.parametrize("freq, n", [("H", 1), ("T", 60), ("S", 3600)])
def test_dti_tz_convert_trans_pos_plus_1__bug(self, freq, n):
# Regression test for tslib.tz_convert(vals, tz1, tz2).
# See https://github.com/pandas-dev/pandas/issues/4496 for details.
idx = date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq=freq)
idx = idx.tz_localize("UTC")
idx = idx.tz_convert("Europe/Moscow")
expected = np.repeat(np.array([3, 4, 5]), np.array([n, n, 1]))
tm.assert_index_equal(idx.hour, Index(expected))
def test_dti_tz_convert_dst(self):
for freq, n in [("H", 1), ("T", 60), ("S", 3600)]:
# Start DST
idx = date_range(
"2014-03-08 23:00", "2014-03-09 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([18, 19, 20, 21, 22, 23, 0, 1, 3, 4, 5]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-03-08 18:00", "2014-03-09 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# End DST
idx = date_range(
"2014-11-01 23:00", "2014-11-02 09:00", freq=freq, tz="UTC"
)
idx = idx.tz_convert("US/Eastern")
expected = np.repeat(
np.array([19, 20, 21, 22, 23, 0, 1, 1, 2, 3, 4]),
np.array([n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
idx = date_range(
"2014-11-01 18:00", "2014-11-02 05:00", freq=freq, tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
expected = np.repeat(
np.array([22, 23, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]),
np.array([n, n, n, n, n, n, n, n, n, n, n, n, 1]),
)
tm.assert_index_equal(idx.hour, Index(expected))
# daily
# Start DST
idx = date_range("2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([19, 19]))
idx = date_range(
"2014-03-08 00:00", "2014-03-09 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([5, 5]))
# End DST
idx = date_range("2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="UTC")
idx = idx.tz_convert("US/Eastern")
tm.assert_index_equal(idx.hour, Index([20, 20]))
idx = date_range(
"2014-11-01 00:00", "2014-11-02 00:00", freq="D", tz="US/Eastern"
)
idx = idx.tz_convert("UTC")
tm.assert_index_equal(idx.hour, Index([4, 4]))
def test_tz_convert_roundtrip(self, tz_aware_fixture):
tz = tz_aware_fixture
idx1 = date_range(start="2014-01-01", end="2014-12-31", freq="M", tz="UTC")
exp1 = date_range(start="2014-01-01", end="2014-12-31", freq="M")
idx2 = date_range(start="2014-01-01", end="2014-12-31", freq="D", tz="UTC")
exp2 = date_range(start="2014-01-01", end="2014-12-31", freq="D")
idx3 = date_range(start="2014-01-01", end="2014-03-01", freq="H", tz="UTC")
exp3 = date_range(start="2014-01-01", end="2014-03-01", freq="H")
idx4 = date_range(start="2014-08-01", end="2014-10-31", freq="T", tz="UTC")
exp4 = date_range(start="2014-08-01", end="2014-10-31", freq="T")
for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3), (idx4, exp4)]:
converted = idx.tz_convert(tz)
reset = converted.tz_convert(None)
tm.assert_index_equal(reset, expected)
assert reset.tzinfo is None
expected = converted.tz_convert("UTC").tz_localize(None)
expected = expected._with_freq("infer")
tm.assert_index_equal(reset, expected)
def test_dti_tz_convert_tzlocal(self):
# GH#13583
# tz_convert doesn't affect to internal
dti = date_range(start="2001-01-01", end="2001-03-01", tz="UTC")
dti2 = dti.tz_convert(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_convert(None)
tm.assert_numpy_array_equal(dti2.asi8, dti.asi8)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_convert_utc_to_local_no_modify(self, tz):
rng = date_range("3/11/2012", "3/12/2012", freq="H", tz="utc")
rng_eastern = rng.tz_convert(tz)
# Values are unmodified
tm.assert_numpy_array_equal(rng.asi8, rng_eastern.asi8)
assert timezones.tz_compare(rng_eastern.tz, timezones.maybe_get_tz(tz))
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_tz_convert_unsorted(self, tzstr):
dr = date_range("2012-03-09", freq="H", periods=100, tz="utc")
dr = dr.tz_convert(tzstr)
result = dr[::-1].hour
exp = dr.hour[::-1]
tm.assert_almost_equal(result, exp)
# -------------------------------------------------------------
# DatetimeIndex.tz_localize
def test_dti_tz_localize_nonexistent_raise_coerce(self):
# GH#13057
times = ["2015-03-08 01:00", "2015-03-08 02:00", "2015-03-08 03:00"]
index = DatetimeIndex(times)
tz = "US/Eastern"
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz)
with pytest.raises(pytz.NonExistentTimeError, match="|".join(times)):
index.tz_localize(tz=tz, nonexistent="raise")
result = index.tz_localize(tz=tz, nonexistent="NaT")
test_times = ["2015-03-08 01:00-05:00", "NaT", "2015-03-08 03:00-04:00"]
dti = to_datetime(test_times, utc=True)
expected = dti.tz_convert("US/Eastern")
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_infer(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# With no repeated hours, we cannot infer the transition
dr = date_range(datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# With repeated hours, we can infer the transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="infer")
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous="infer"))
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
localized = dr.tz_localize(tz)
localized_infer = dr.tz_localize(tz, ambiguous="infer")
tm.assert_index_equal(localized, localized_infer)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_times(self, tz):
# March 13, 2011, spring forward, skip from 2 AM to 3 AM
dr = date_range(datetime(2011, 3, 13, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:30:00"):
dr.tz_localize(tz)
# after dst transition, it works
dr = date_range(
datetime(2011, 3, 13, 3, 30), periods=3, freq=pd.offsets.Hour(), tz=tz
)
# November 6, 2011, fall back, repeat 2 AM hour
dr = date_range(datetime(2011, 11, 6, 1, 30), periods=3, freq=pd.offsets.Hour())
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dr.tz_localize(tz)
# UTC is OK
dr = date_range(
datetime(2011, 3, 13), periods=48, freq=pd.offsets.Minute(30), tz=pytz.utc
)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_localize_pass_dates_to_utc(self, tzstr):
strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]
idx = DatetimeIndex(strdates)
conv = idx.tz_localize(tzstr)
fromdates = DatetimeIndex(strdates, tz=tzstr)
assert conv.tz == fromdates.tz
tm.assert_numpy_array_equal(conv.values, fromdates.values)
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_tz_localize(self, prefix):
tzstr = prefix + "US/Eastern"
dti = pd.date_range(start="1/1/2005", end="1/1/2005 0:00:30.256", freq="L")
dti2 = dti.tz_localize(tzstr)
dti_utc = pd.date_range(
start="1/1/2005 05:00", end="1/1/2005 5:00:30.256", freq="L", tz="utc"
)
tm.assert_numpy_array_equal(dti2.values, dti_utc.values)
dti3 = dti2.tz_convert(prefix + "US/Pacific")
tm.assert_numpy_array_equal(dti3.values, dti_utc.values)
dti = pd.date_range(start="11/6/2011 1:59", end="11/6/2011 2:00", freq="L")
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
dti.tz_localize(tzstr)
dti = pd.date_range(start="3/13/2011 1:59", end="3/13/2011 2:00", freq="L")
with pytest.raises(pytz.NonExistentTimeError, match="2011-03-13 02:00:00"):
dti.tz_localize(tzstr)
@pytest.mark.parametrize(
"tz",
[
"US/Eastern",
"dateutil/US/Eastern",
pytz.timezone("US/Eastern"),
gettz("US/Eastern"),
],
)
def test_dti_tz_localize_utc_conversion(self, tz):
# Localizing to time zone should:
# 1) check for DST ambiguities
# 2) convert to UTC
rng = date_range("3/10/2012", "3/11/2012", freq="30T")
converted = rng.tz_localize(tz)
expected_naive = rng + pd.offsets.Hour(5)
tm.assert_numpy_array_equal(converted.asi8, expected_naive.asi8)
# DST ambiguity, this should fail
rng = date_range("3/11/2012", "3/12/2012", freq="30T")
# Is this really how it should fail??
with pytest.raises(pytz.NonExistentTimeError, match="2012-03-11 02:00:00"):
rng.tz_localize(tz)
def test_dti_tz_localize_roundtrip(self, tz_aware_fixture):
# note: this tz tests that a tz-naive index can be localized
# and de-localized successfully, when there are no DST transitions
# in the range.
idx = date_range(start="2014-06-01", end="2014-08-30", freq="15T")
tz = tz_aware_fixture
localized = idx.tz_localize(tz)
# can't localize a tz-aware object
with pytest.raises(
TypeError, match="Already tz-aware, use tz_convert to convert"
):
localized.tz_localize(tz)
reset = localized.tz_localize(None)
assert reset.tzinfo is None
expected = idx._with_freq(None)
tm.assert_index_equal(reset, expected)
def test_dti_tz_localize_naive(self):
rng = date_range("1/1/2011", periods=100, freq="H")
conv = rng.tz_localize("US/Pacific")
exp = date_range("1/1/2011", periods=100, freq="H", tz="US/Pacific")
tm.assert_index_equal(conv, exp._with_freq(None))
def test_dti_tz_localize_tzlocal(self):
# GH#13583
offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))
offset = int(offset.total_seconds() * 1000000000)
dti = date_range(start="2001-01-01", end="2001-03-01")
dti2 = dti.tz_localize(dateutil.tz.tzlocal())
tm.assert_numpy_array_equal(dti2.asi8 + offset, dti.asi8)
dti = date_range(start="2001-01-01", end="2001-03-01", tz=dateutil.tz.tzlocal())
dti2 = dti.tz_localize(None)
tm.assert_numpy_array_equal(dti2.asi8 - offset, dti.asi8)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_nat(self, tz):
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di = DatetimeIndex(times)
localized = di.tz_localize(tz, ambiguous="NaT")
times = [
"11/06/2011 00:00",
np.NaN,
np.NaN,
"11/06/2011 02:00",
"11/06/2011 03:00",
]
di_test = DatetimeIndex(times, tz="US/Eastern")
# left dtype is datetime64[ns, US/Eastern]
# right is datetime64[ns, tzfile('/usr/share/zoneinfo/US/Eastern')]
tm.assert_numpy_array_equal(di_test.values, localized.values)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_tz_localize_ambiguous_flags(self, tz):
# November 6, 2011, fall back, repeat 2 AM hour
# Pass in flags to determine right dst transition
dr = date_range(
datetime(2011, 11, 6, 0), periods=5, freq=pd.offsets.Hour(), tz=tz
)
times = [
"11/06/2011 00:00",
"11/06/2011 01:00",
"11/06/2011 01:00",
"11/06/2011 02:00",
"11/06/2011 03:00",
]
# Test tz_localize
di = DatetimeIndex(times)
is_dst = [1, 1, 0, 0, 0]
localized = di.tz_localize(tz, ambiguous=is_dst)
expected = dr._with_freq(None)
tm.assert_index_equal(expected, localized)
tm.assert_index_equal(expected, DatetimeIndex(times, tz=tz, ambiguous=is_dst))
localized = di.tz_localize(tz, ambiguous=np.array(is_dst))
tm.assert_index_equal(dr, localized)
localized = di.tz_localize(tz, ambiguous=np.array(is_dst).astype("bool"))
tm.assert_index_equal(dr, localized)
# Test constructor
localized = DatetimeIndex(times, tz=tz, ambiguous=is_dst)
tm.assert_index_equal(dr, localized)
# Test duplicate times where inferring the dst fails
times += times
di = DatetimeIndex(times)
# When the sizes are incompatible, make sure error is raised
msg = "Length of ambiguous bool-array must be the same size as vals"
with pytest.raises(Exception, match=msg):
di.tz_localize(tz, ambiguous=is_dst)
# When sizes are compatible and there are repeats ('infer' won't work)
is_dst = np.hstack((is_dst, is_dst))
localized = di.tz_localize(tz, ambiguous=is_dst)
dr = dr.append(dr)
tm.assert_index_equal(dr, localized)
# When there is no dst transition, nothing special happens
dr = date_range(datetime(2011, 6, 1, 0), periods=10, freq=pd.offsets.Hour())
is_dst = np.array([1] * 10)
localized = dr.tz_localize(tz)
localized_is_dst = dr.tz_localize(tz, ambiguous=is_dst)
tm.assert_index_equal(localized, localized_is_dst)
# TODO: belongs outside tz_localize tests?
@pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])
def test_dti_construction_ambiguous_endpoint(self, tz):
# construction with an ambiguous end-point
# GH#11626
with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):
date_range(
"2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="H"
)
times = date_range(
"2013-10-26 23:00", "2013-10-27 01:00", freq="H", tz=tz, ambiguous="infer"
)
assert times[0] == Timestamp("2013-10-26 23:00", tz=tz, freq="H")
assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz, freq="H")
@pytest.mark.parametrize(
"tz, option, expected",
[
["US/Pacific", "shift_forward", "2019-03-10 03:00"],
["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],
["US/Pacific", "shift_backward", "2019-03-10 01:00"],
["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],
["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],
],
)
def test_dti_construction_nonexistent_endpoint(self, tz, option, expected):
# construction with an nonexistent end-point
with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):
date_range(
"2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="H"
)
times = date_range(
"2019-03-10 00:00", "2019-03-10 02:00", freq="H", tz=tz, nonexistent=option
)
assert times[-1] == Timestamp(expected, tz=tz, freq="H")
def test_dti_tz_localize_bdate_range(self):
dr = pd.bdate_range("1/1/2009", "1/1/2010")
dr_utc = pd.bdate_range("1/1/2009", "1/1/2010", tz=pytz.utc)
localized = dr.tz_localize(pytz.utc)
tm.assert_index_equal(dr_utc, localized)
@pytest.mark.parametrize("tz", ["Europe/Warsaw", "dateutil/Europe/Warsaw"])
@pytest.mark.parametrize(
"method, exp", [["NaT", pd.NaT], ["raise", None], ["foo", "invalid"]]
)
def test_dti_tz_localize_nonexistent(self, tz, method, exp):
# GH 8917
n = 60
dti = date_range(start="2015-03-29 02:00:00", periods=n, freq="min")
if method == "raise":
with pytest.raises(pytz.NonExistentTimeError, match="2015-03-29 02:00:00"):
dti.tz_localize(tz, nonexistent=method)
elif exp == "invalid":
msg = (
"The nonexistent argument must be one of "
"'raise', 'NaT', 'shift_forward', 'shift_backward' "
"or a timedelta object"
)
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=method)
else:
result = dti.tz_localize(tz, nonexistent=method)
expected = DatetimeIndex([exp] * n, tz=tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"start_ts, tz, end_ts, shift",
[
["2015-03-29 02:20:00", "Europe/Warsaw", "2015-03-29 03:00:00", "forward"],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:59:59.999999999",
"backward",
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 03:20:00",
timedelta(hours=1),
],
[
"2015-03-29 02:20:00",
"Europe/Warsaw",
"2015-03-29 01:20:00",
timedelta(hours=-1),
],
["2018-03-11 02:33:00", "US/Pacific", "2018-03-11 03:00:00", "forward"],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:59:59.999999999",
"backward",
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 03:33:00",
timedelta(hours=1),
],
[
"2018-03-11 02:33:00",
"US/Pacific",
"2018-03-11 01:33:00",
timedelta(hours=-1),
],
],
)
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift(
self, start_ts, tz, end_ts, shift, tz_type
):
# GH 8917
tz = tz_type + tz
if isinstance(shift, str):
shift = "shift_" + shift
dti = DatetimeIndex([Timestamp(start_ts)])
result = dti.tz_localize(tz, nonexistent=shift)
expected = DatetimeIndex([Timestamp(end_ts)]).tz_localize(tz)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize("offset", [-1, 1])
@pytest.mark.parametrize("tz_type", ["", "dateutil/"])
def test_dti_tz_localize_nonexistent_shift_invalid(self, offset, tz_type):
# GH 8917
tz = tz_type + "Europe/Warsaw"
dti = DatetimeIndex([Timestamp("2015-03-29 02:20:00")])
msg = "The provided timedelta will relocalize on a nonexistent time"
with pytest.raises(ValueError, match=msg):
dti.tz_localize(tz, nonexistent=timedelta(seconds=offset))
# -------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize_tz(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize() # does not preserve freq
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
tm.assert_index_equal(result, expected._with_freq(None))
assert result.is_normalized
assert not rng.is_normalized
@td.skip_if_windows
@pytest.mark.parametrize(
"timezone",
[
"US/Pacific",
"US/Eastern",
"UTC",
"Asia/Kolkata",
"Asia/Shanghai",
"Australia/Canberra",
],
)
def test_normalize_tz_local(self, timezone):
# GH#13459
with tm.set_timezone(timezone):
rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
expected = expected._with_freq(None)
tm.assert_index_equal(result, expected)
assert result.is_normalized
assert not rng.is_normalized
# ------------------------------------------------------------
# DatetimeIndex.__new__
@pytest.mark.parametrize("prefix", ["", "dateutil/"])
def test_dti_constructor_static_tzinfo(self, prefix):
# it works!
index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")
index.hour
index[0]
def test_dti_constructor_with_fixed_tz(self):
off = FixedOffset(420, "+07:00")
start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)
end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)
rng = date_range(start=start, end=end)
assert off == rng.tz
rng2 = date_range(start, periods=len(rng), tz=off)
tm.assert_index_equal(rng, rng2)
rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")
assert (rng.values == rng3.values).all()
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_convert_datetime_list(self, tzstr):
dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")
dr2 = DatetimeIndex(list(dr), name="foo", freq="D")
tm.assert_index_equal(dr, dr2)
def test_dti_construction_univalent(self):
rng = date_range("03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern")
rng2 = DatetimeIndex(data=rng, tz="US/Eastern")
tm.assert_index_equal(rng, rng2)
@pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])
def test_dti_from_tzaware_datetime(self, tz):
d = [datetime(2012, 8, 19, tzinfo=tz)]
index = DatetimeIndex(d)
assert timezones.tz_compare(index.tz, tz)
@pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
def test_dti_tz_constructors(self, tzstr):
"""Test different DatetimeIndex constructions with timezone
Follow-up of GH#4229
"""
arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]
idx1 = to_datetime(arr).tz_localize(tzstr)
idx2 = pd.date_range(start="2005-11-10 08:00:00", freq="H", periods=2, tz=tzstr)
idx2 = idx2._with_freq(None) # the others all have freq=None
idx3 = DatetimeIndex(arr, tz=tzstr)
idx4 = DatetimeIndex(np.array(arr), tz=tzstr)
for other in [idx2, idx3, idx4]:
tm.assert_index_equal(idx1, other)
# -------------------------------------------------------------
# Unsorted
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_date_accessor(self, dtype):
# Regression test for GH#21230
expected = np.array([date(2018, 6, 4), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:00:00", pd.NaT], dtype=dtype)
result = index.date
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],
)
def test_time_accessor(self, dtype):
# Regression test for GH#21267
expected = np.array([time(10, 20, 30), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], dtype=dtype)
result = index.time
tm.assert_numpy_array_equal(result, expected)
def test_timetz_accessor(self, tz_naive_fixture):
# GH21358
tz = timezones.maybe_get_tz(tz_naive_fixture)
expected = np.array([time(10, 20, 30, tzinfo=tz), pd.NaT])
index = DatetimeIndex(["2018-06-04 10:20:30", pd.NaT], tz=tz)
result = index.timetz
tm.assert_numpy_array_equal(result, expected)
def test_dti_drop_dont_lose_tz(self):
# GH#2621
ind = date_range("2012-12-01", periods=10, tz="utc")
ind = ind.drop(ind[-1])
assert ind.tz is not None
def test_dti_tz_conversion_freq(self, tz_naive_fixture):
# GH25241
t3 = DatetimeIndex(["2019-01-01 10:00"], freq="H")
assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq
t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="T")
assert t4.tz_convert(tz="UTC").freq == t4.freq
def test_drop_dst_boundary(self):
# see gh-18031
tz = "Europe/Brussels"
freq = "15min"
start = Timestamp("201710290100", tz=tz)
end = Timestamp("201710290300", tz=tz)
index = pd.date_range(start=start, end=end, freq=freq)
expected = DatetimeIndex(
[
"201710290115",
"201710290130",
"201710290145",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290200",
"201710290215",
"201710290230",
"201710290245",
"201710290300",
],
tz=tz,
freq=freq,
ambiguous=[
True,
True,
True,
True,
True,
True,
True,
False,
False,
False,
False,
False,
],
)
result = index.drop(index[0])
tm.assert_index_equal(result, expected)
def test_date_range_localize(self):
rng = date_range("3/11/2012 03:00", periods=15, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(["3/11/2012 03:00", "3/11/2012 04:00"], tz="US/Eastern")
rng3 = date_range("3/11/2012 03:00", periods=15, freq="H")
rng3 = rng3.tz_localize("US/Eastern")
tm.assert_index_equal(rng._with_freq(None), rng3)
# DST transition time
val = rng[0]
exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")
assert val.hour == 3
assert exp.hour == 3
assert val == exp # same UTC value
tm.assert_index_equal(rng[:2], rng2)
# Right before the DST transition
rng = date_range("3/11/2012 00:00", periods=2, freq="H", tz="US/Eastern")
rng2 = DatetimeIndex(
["3/11/2012 00:00", "3/11/2012 01:00"], tz="US/Eastern", freq="H"
)
tm.assert_index_equal(rng, rng2)
exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")
assert exp.hour == 0
assert rng[0] == exp
exp = Timestamp("3/11/2012 01:00", tz="US/Eastern")
assert exp.hour == 1
assert rng[1] == exp
rng = date_range("3/11/2012 00:00", periods=10, freq="H", tz="US/Eastern")
assert rng[2].hour == 3
def test_timestamp_equality_different_timezones(self):
utc_range = date_range("1/1/2000", periods=20, tz="UTC")
eastern_range = utc_range.tz_convert("US/Eastern")
berlin_range = utc_range.tz_convert("Europe/Berlin")
for a, b, c in zip(utc_range, eastern_range, berlin_range):
assert a == b
assert b == c
assert a == c
assert (utc_range == eastern_range).all()
assert (utc_range == berlin_range).all()
assert (berlin_range == eastern_range).all()
def test_dti_intersection(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
left = rng[10:90][::-1]
right = rng[20:80][::-1]
assert left.tz == rng.tz
result = left.intersection(right)
assert result.tz == left.tz
def test_dti_equals_with_tz(self):
left = date_range("1/1/2011", periods=100, freq="H", tz="utc")
right =
|
date_range("1/1/2011", periods=100, freq="H", tz="US/Eastern")
|
pandas.date_range
|
"""Run the following classification models:
1) Random Trees and Logistic Regression
2) Random Forest and Logistic Regression
3) Gradient Boosting Trees
4) Gradient Boosting Trees and Logistic Regression
5) Random Forest
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import (
RandomTreesEmbedding,
RandomForestClassifier,
GradientBoostingClassifier
)
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline
from sklearn import metrics
from database import DataBase
from sql import FLDEM_VIEW_TABLE
# outputs
MODELS_ROC_TABLE = 'models_roc'
IMPORTANCE_TABLE = 'importance'
np.random.seed(10)
def rt_log_reg(X_train, X_test, y_train, y_test, n_estimators):
"""Random Trees and Logistic Regression classifier
Arguments:
X_train {array} -- training set for independent variables
X_test {array} -- testing set for independent variables
y_train {array} -- training set for dependent variable
y_test {array} -- testing set for dependent variable
n_estimators {integer} -- number of trees in the ensemble
Returns:
dict -- model's roc[false pos rate, true pos rate], auc and logloss
"""
# Unsupervised transformation based on totally random trees
clf_rt = RandomTreesEmbedding(max_depth=3, n_estimators=n_estimators,
random_state=0)
clf_lm = LogisticRegression(max_iter=1000, solver='lbfgs')
# fit model
pipeline = make_pipeline(clf_rt, clf_lm)
pipeline.fit(X_train, y_train)
# predict
y_pred = pipeline.predict_proba(X_test)[:, 1]
# model metrics
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
logloss = metrics.log_loss(y_test, y_pred)
return dict(fpr=fpr, tpr=tpr, auc=auc, logloss=logloss)
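# Illustrative use of the returned metrics (hypothetical variable names):
# result = rt_log_reg(X_train, X_test, y_train, y_test, 10)
# plt.plot(result['fpr'], result['tpr'],
#          label="RT + LR (AUC = {:.2f})".format(result['auc']))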
def rf_log_reg(X_train, X_test, y_train, y_test, n_estimators):
"""
Random Forest and Logistic Regression classifier
Arguments:
X_train {array} -- training set for independent variables
X_test {array} -- testing set for independent variables
y_train {array} -- training set for dependent variable
y_test {array} -- testing set for dependent variable
n_estimators {integer} -- number of trees in the ensemble
Returns:
dict -- model's roc[false pos rate, true pos rate], auc and logloss
"""
# Unsupervised transformation based on totally random trees
clf_rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimators)
rf_enc = OneHotEncoder(categories='auto')
clf_lm = LogisticRegression(max_iter=1000, solver='lbfgs')
clf_rf.fit(X_train, y_train)
rf_enc.fit(clf_rf.apply(X_train))
clf_lm.fit(rf_enc.transform(clf_rf.apply(X_train)), y_train)
# predict
y_pred = clf_lm.predict_proba(rf_enc.transform(clf_rf.apply(X_test)))[:, 1]
# model metrics
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
logloss = metrics.log_loss(y_test, y_pred)
return dict(fpr=fpr, tpr=tpr, auc=auc, logloss=logloss)
def gbt_log_reg(X_train, X_test, y_train, y_test, n_estimators):
"""
Gradient Boosting Trees and Logistic Regression classifier
Arguments:
X_train {array} -- training set for independent variables
X_test {array} -- testing set for independent variables
y_train {array} -- training set for dependent variable
y_test {array} -- testing set for dependent variable
n_estimators {integer} -- number of trees in the ensemble
Returns:
dict -- model's roc[false pos rate, true pos rate], auc and logloss
"""
# Supervised transformation based on gradient boosted trees
clf_grd = GradientBoostingClassifier(n_estimators=n_estimators)
grd_enc = OneHotEncoder(categories='auto')
clf_lm = LogisticRegression(max_iter=1000, solver='lbfgs')
clf_grd.fit(X_train, y_train)
grd_enc.fit(clf_grd.apply(X_train)[:, :, 0])
clf_lm.fit(grd_enc.transform(clf_grd.apply(X_train)[:, :, 0]), y_train)
# prediction
y_pred = clf_lm.predict_proba(grd_enc.transform(clf_grd.apply(X_test)[:, :, 0]))[:, 1]
# model metrics
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
logloss = metrics.log_loss(y_test, y_pred)
return dict(fpr=fpr, tpr=tpr, auc=auc, logloss=logloss)
def grd_boosting_trees(X_train, X_test, y_train, y_test, n_estimators):
"""
Gradient Boosting Trees classifier
Arguments:
X_train {array} -- training set for independent variables
X_test {array} -- testing set for independent variables
y_train {array} -- training set for dependent variable
y_test {array} -- testing set for dependent variable
n_estimators {integer} -- number of trees in the ensemble
Returns:
dict -- model's roc[false pos rate, true pos rate], auc and logloss
"""
# Supervised transformation based on gradient boosted trees
clf_grd = GradientBoostingClassifier(n_estimators=n_estimators)
grd_enc = OneHotEncoder(categories='auto')
# fit model
clf_grd.fit(X_train, y_train)
grd_enc.fit(clf_grd.apply(X_train)[:, :, 0])
# predict
y_pred = clf_grd.predict_proba(X_test)[:, 1]
# model metrics
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
logloss = metrics.log_loss(y_test, y_pred)
return dict(fpr=fpr, tpr=tpr, auc=auc, logloss=logloss)
def random_forest(X_train, X_test, y_train, y_test, n_estimators):
"""
Random Forest classifier
Arguments:
X_train {array} -- training set for independent variables
X_test {array} -- testing set for independent variables
y_train {array} -- training set for dependent variable
y_test {array} -- testing set for dependent variable
n_estimators {integer} -- number of trees in the ensemble
Returns:
dict -- model's roc[false pos rate, true pos rate], auc and logloss
"""
# Unsupervised transformation based on totally random trees
clf_rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimators)
rf_enc = OneHotEncoder(categories='auto')
# fit
clf_rf.fit(X_train, y_train)
rf_enc.fit(clf_rf.apply(X_train))
# predict
y_pred = clf_rf.predict_proba(X_test)[:, 1]
# model metrics
fpr, tpr, _ = roc_curve(y_test, y_pred)
auc = roc_auc_score(y_test, y_pred)
logloss = metrics.log_loss(y_test, y_pred)
return dict(fpr=fpr, tpr=tpr, auc=auc, logloss=logloss)
def feature_importance(X_train, y_train, col_names, n_estimators):
"""
Feature importance using Random Forest classifier
Arguments:
X_train {array} -- training set for independent variables
y_train {array} -- training set for dependent variable
col_names {list} -- names of the independent variables
n_estimators {integer} -- number of trees in the random forest
Returns:
DataFrame -- feature importances sorted in descending order
"""
# Unsupervised transformation based on totally random trees
clf_rf = RandomForestClassifier(max_depth=3, n_estimators=n_estimators)
# fit
clf_rf.fit(X_train, y_train)
# feature importance
data = pd.DataFrame({'feature': col_names,
'importance': clf_rf.feature_importances_})
data.sort_values('importance', ascending=False, inplace=True)
return data
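# Hypothetical call mirroring the feature columns used in classify() below:
# importance = feature_importance(X_train, y_train,
#                                 ['age', 'population', 'land_area',
#                                  'water_area', 'gender', 'density'],
#                                 n_estimators=10)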
def classify():
"""Run classification models:
1) Random Trees and Logistic Regression
2) Random Forest and Logistic Regression
3) Gradient Boosting Trees
4) Gradient Boosting Trees and Logistic Regression
5) Random Forest
Input from database:
FLDEM_VIEW_TABLE {database table} -- fldem cleaned data
Output to database:
MODELS_ROC_TABLE {database table} -- models' ROC, LogLoss and AUC
IMPORTANCE_TABLE {database table } -- random forest feature importances
"""
y_var = 'died'
n_estimators = 10
# get fldem from database
_db = DataBase()
cols = ['died', 'age', 'population', 'land_area', 'water_area', 'gender',
'density']
data = _db.get_table(FLDEM_VIEW_TABLE, columns=cols)
data.dropna(inplace=True)
_db.close()
# divide data set
X = data.loc[:, data.columns != y_var]
y = data.loc[:, data.columns == y_var]
X_train, X_test, y_train, y_test = train_test_split(X.values,
y.values.ravel(),
test_size=0.5)
# classification models
models = dict(
rt_lr=dict(model='RT + LR',
**rt_log_reg(X_train, X_test, y_train, y_test, n_estimators)),
rf_lr=dict(model='RF + LR',
**rf_log_reg(X_train, X_test, y_train, y_test, n_estimators)),
rforest=dict(model='RF',
**random_forest(X_train, X_test, y_train, y_test, n_estimators)),
gbt=dict(model='GBT',
**grd_boosting_trees(X_train, X_test, y_train, y_test, n_estimators)),
gbt_lr=dict(model='GBT + LR',
**gbt_log_reg(X_train, X_test, y_train, y_test, n_estimators)),
rand=dict(model='Random', logloss=-1 * np.log10(0.5), auc=0.5,
fpr=np.linspace(0, 1, 100), tpr=np.linspace(0, 1, 100)))
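# The 'Random' entry is a coin-flip baseline: a diagonal ROC curve with
# AUC = 0.5. Note its logloss is computed with log10 here, whereas the model
# loglosses above come from sklearn's natural-log metrics.log_loss.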
# combine results of all models
data = pd.concat([pd.DataFrame(models['rt_lr']), pd.DataFrame(models['rf_lr']),
|
pd.DataFrame(models['gbt'])
|
pandas.DataFrame
|
from itertools import cycle
import colorlover
import dash_table
import numpy as np
import re
import pandas as pd
from logzero import logger
from scipy.spatial.distance import squareform
from skbio.stats.distance import DissimilarityMatrix
from sklearn.cluster import OPTICS
from covigator import MISSENSE_VARIANT, DISRUPTIVE_INFRAME_DELETION, CONSERVATIVE_INFRAME_DELETION, \
CONSERVATIVE_INFRAME_INSERTION, DISRUPTIVE_INFRAME_INSERTION
from covigator.dashboard.figures.figures import Figures, PLOTLY_CONFIG, TEMPLATE, MARGIN, STYLES_STRIPPED, STYLE_HEADER
import plotly
import plotly.express as px
import plotly.graph_objects as go
import dash_html_components as html
import dash_core_components as dcc
from covigator.database.model import Gene, Domain, DataSource
VARIANT_TOOLTIP = '<b>%{text}</b><br>' + 'Allele frequency: %{y:.5f}<br>' + 'Genomic Position: %{x}'
GENE_COLORS = list(reversed(plotly.express.colors.sequential.Tealgrn))
DOMAIN_COLORS = list(reversed(plotly.express.colors.sequential.Magenta))
OTHER_VARIANT_SYMBOL = "x"
INSERTION_SYMBOL = "triangle-up"
DELETION_SYMBOL = "triangle-down"
MISSENSE_VARIANT_SYMBOL = "circle"
VERY_COMMON_VARIANTS_COLOR = plotly.express.colors.sequential.Reds[-1]
COMMON_VARIANTS_COLOR = plotly.express.colors.sequential.Reds[-3]
RARE_VARIANTS_COLOR = plotly.express.colors.sequential.Reds[-7]
COMMON_VARIANTS_THRESHOLD = 0.1
LOW_FREQUENCY_VARIANTS_THRESHOLD = 0.01
LOW_FREQUENCY_VARIANTS_COLOR = plotly.express.colors.sequential.Reds[-5]
RARE_VARIANTS_THRESHOLD = 0.001
MONTH_PATTERN = re.compile('[0-9]{4}-[0-9]{2}')
class RecurrentMutationsFigures(Figures):
def _get_color_by_af(self, af):
color = None
if af < RARE_VARIANTS_THRESHOLD:
color = RARE_VARIANTS_COLOR
elif RARE_VARIANTS_THRESHOLD <= af < LOW_FREQUENCY_VARIANTS_THRESHOLD:
color = LOW_FREQUENCY_VARIANTS_COLOR
elif LOW_FREQUENCY_VARIANTS_THRESHOLD <= af < COMMON_VARIANTS_THRESHOLD:
color = COMMON_VARIANTS_COLOR
elif af >= COMMON_VARIANTS_THRESHOLD:
color = VERY_COMMON_VARIANTS_COLOR
return color
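# Illustrative mapping with the thresholds above: af=0.0005 -> rare, af=0.005 -> low
# frequency, af=0.05 -> common, af=0.5 -> very common.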
def _get_table_style_by_af(self):
return [
{
'if': {
'filter_query': '{{frequency}} >= 0 && {{frequency}} < {}'.format(RARE_VARIANTS_THRESHOLD),
'column_id': "frequency"
},
'backgroundColor': RARE_VARIANTS_COLOR,
'color': 'inherit'
},
{
'if': {
'filter_query': '{{frequency}} >= {} && {{frequency}} < {}'.format(
RARE_VARIANTS_THRESHOLD, LOW_FREQUENCY_VARIANTS_THRESHOLD),
'column_id': "frequency"
},
'backgroundColor': LOW_FREQUENCY_VARIANTS_COLOR,
'color': 'inherit'
},
{
'if': {
'filter_query': '{{frequency}} >= {} && {{frequency}} < {}'.format(
LOW_FREQUENCY_VARIANTS_THRESHOLD, COMMON_VARIANTS_THRESHOLD),
'column_id': "frequency"
},
'backgroundColor': COMMON_VARIANTS_COLOR,
'color': 'white'
},
{
'if': {
'filter_query': '{{frequency}} >= {}'.format(COMMON_VARIANTS_THRESHOLD),
'column_id': "frequency"
},
'backgroundColor': VERY_COMMON_VARIANTS_COLOR,
'color': 'white'
}
]
def get_top_occurring_variants_plot(self, top, gene_name, domain, date_range_start, date_range_end, metric, source):
data = self.queries.get_top_occurring_variants_precomputed(top, gene_name, domain, metric, source)
fig = dcc.Markdown("""**No mutations for the current selection**""")
if data is not None and data.shape[0] > 0:
# removes the columns from the months out of the range
month_columns = [c for c in data.columns if MONTH_PATTERN.match(c)]
included_month_colums = [c for c in month_columns if c >= date_range_start and c <= date_range_end]
excluded_month_colums = [c for c in month_columns if c < date_range_start or c > date_range_end]
data.drop(excluded_month_colums, axis=1, inplace=True)
# set the styles of the cells
styles_counts = self.discrete_background_color_bins(data, columns=included_month_colums)
styles_total_count = self.discrete_background_color_bins(data, columns=["total"], colors="Reds")
styles_frequency = self._get_table_style_by_af()
month_columns = [{'name': ["", i], 'id': i} for i in data.columns if i.startswith("20")]
month_columns[0]['name'][0] = 'Monthly counts' if metric == "count" else 'Monthly frequencies'
fig = dash_table.DataTable(
id="top-occurring-variants-table",
data=data.to_dict('records'),
sort_action='native',
columns=[
{"name": ["Variant", "Gene"], "id": "gene_name"},
{"name": ["", "DNA mutation"], "id": "dna_mutation"},
{"name": ["", "Protein mutation"], "id": "hgvs_p"},
{"name": ["", "Effect"], "id": "annotation"},
{"name": ["", "Frequency"], "id": "frequency"},
{"name": ["", "Count"], "id": "total"},
] + month_columns,
style_data_conditional=STYLES_STRIPPED + styles_counts + styles_frequency + styles_total_count,
style_cell_conditional=[
{
'if': {'column_id': c},
'textAlign': 'left'
} for c in ['gene_name', 'dna_mutation', 'hgvs_p', 'annotation']
],
style_table={'overflowX': 'auto'},
style_as_list_view=True,
style_header=STYLE_HEADER,
sort_by=[{"column_id": "frequency", "direction": "desc"}],
row_selectable='multi'
)
return [
fig,
dcc.Markdown("""
**Top occurring mutations table**
*table shows the {} mutations{} with the highest frequency across all samples.*
*The counts and frequencies per month are only shown between {} and {}.*
*Selections in this table will be highlighted in the genome view and in the co-occurrence matrix.*
""".format(top, " in gene {}".format(gene_name) if gene_name else "",
date_range_start, date_range_end))
]
def discrete_background_color_bins(self, df, n_bins=5, columns='all', colors='Blues'):
bounds = [i * (1.0 / n_bins) for i in range(n_bins + 1)]
if columns == 'all':
if 'id' in df:
df_numeric_columns = df.select_dtypes('number').drop(['id'], axis=1)
else:
df_numeric_columns = df.select_dtypes('number')
else:
df_numeric_columns = df[columns]
df_max = df_numeric_columns.max().max()
df_min = df_numeric_columns.min().min()
ranges = [
((df_max - df_min) * i) + df_min
for i in bounds
]
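# Worked example (illustrative): with n_bins=5 and column values spanning 0..100,
# bounds = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0] and ranges = [0, 20, 40, 60, 80, 100];
# each consecutive pair of values in ranges becomes one background colour bin below.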
styles = []
for i in range(1, len(bounds)):
min_bound = ranges[i - 1]
max_bound = ranges[i]
backgroundColor = colorlover.scales[str(n_bins)]['seq'][colors][i - 1]
color = 'white' if i > len(bounds) / 2. else 'inherit'
for column in df_numeric_columns:
styles.append({
'if': {
'filter_query': (
'{{{column}}} >= {min_bound}' +
(' && {{{column}}} < {max_bound}' if (i < len(bounds) - 1) else '')
).format(column=column, min_bound=min_bound, max_bound=max_bound),
'column_id': column
},
'backgroundColor': backgroundColor,
'color': color
})
return styles
def get_cooccurrence_heatmap(self, sparse_matrix, selected_variants, metric="jaccard", min_cooccurrences=5):
data = self._get_variants_cooccurrence_matrix(data=sparse_matrix)
graph = dcc.Markdown("""**No co-occurrent mutations for the current selection**""")
if data is not None and data.shape[0] > 0:
all_variants = data.variant_id_one.unique()
values = np.array_split(data[metric], len(all_variants))
texts = np.array_split(data.hgvs_tooltip, len(all_variants))
if metric == "count":
hovertemplate = '<b>%{text}</b><br>' + 'Counts: %{z}<br>' + 'Variant one: %{x}<br>' + 'Variant two: %{y}'
elif metric == "frequency":
hovertemplate = '<b>%{text}</b><br>' + 'Frequency: %{z:.3f}<br>' + 'Variant one: %{x}<br>' + 'Variant two: %{y}'
elif metric == "jaccard":
hovertemplate = '<b>%{text}</b><br>' + 'Jaccard index: %{z:.3f}<br>' + 'Variant one: %{x}<br>' + 'Variant two: %{y}'
elif metric == "kappa":
hovertemplate = '<b>%{text}</b><br>' + 'Kappa coefficient: %{z:.3f}<br>' + 'Variant one: %{x}<br>' + 'Variant two: %{y}'
heatmap = go.Heatmap(
z=values,
x=all_variants,
y=all_variants,
colorscale="Oryel",
hoverongaps=False,
text=texts,
hovertemplate=hovertemplate,
)
if selected_variants:
selected_variant_ids = [v.get("dna_mutation") for v in selected_variants]
selected_data = data
selected_data[metric].where(
(selected_data.variant_id_one.isin(selected_variant_ids)) |
(selected_data.variant_id_two.isin(selected_variant_ids)), np.nan, inplace=True)
values_selected = np.array_split(selected_data[metric], len(all_variants))
texts_selected = np.array_split(selected_data.hgvs_tooltip, len(all_variants))
heatmap_selected = go.Heatmap(
z=values_selected,
x=all_variants,
y=all_variants,
colorscale="Teal",
hoverongaps=False,
showscale=False,
text=texts_selected,
hovertemplate=hovertemplate,
)
layout = go.Layout(
template=TEMPLATE,
margin=MARGIN,
height=700,
yaxis=dict(visible=True, tickfont={"size": 10}, showgrid=False, showspikes=True, spikemode='toaxis',
spikethickness=2),
xaxis=dict(tickangle=-45, tickfont={"size": 10}, showgrid=False, showspikes=True, spikemode='toaxis',
spikethickness=2),
)
traces = [heatmap]
if selected_variants:
traces.append(heatmap_selected)
fig = go.Figure(data=traces, layout=layout)
# the y index is reversed in plotly heatmap
fig.update_yaxes(autorange="reversed")
graph = dcc.Graph(figure=fig, config=PLOTLY_CONFIG)
return html.Div(children=[
graph,
dcc.Markdown("""
***Co-occurrence matrix*** *showing variant pairs co-occurring in at least {} samples (this value is configurable).*
*The metric in the co-occurrence matrix can be chosen among counts, frequencies, Jaccard index or
Cohen's kappa coefficient. The Cohen's kappa coefficient introduces a correction to the Jaccard index for
mutations with low occurrence.*
*The diagonal contains the total counts or just 1.0 in the other metrics.*
*The upper diagonal is not shown for clarity.*
*Synonymous mutations are excluded.*
*Different genomic mutations causing the same protein variant are not grouped.*
""".format(min_cooccurrences))
])
def _get_variants_cooccurrence_matrix(self, data) -> pd.DataFrame:
"""
Builds the full co-occurrence matrix of non synonymous variants from the sparse pairwise
counts, annotated with frequency, Jaccard index and Cohen's kappa.
"""
# query for total samples required to calculate frequencies
count_samples = self.queries.count_samples(source=DataSource.ENA.name)
full_matrix = None
if data.shape[0] > 0:
# these are views of the original data
annotations = data.loc[data.variant_id_one == data.variant_id_two,
["variant_id_one", "position", "reference", "alternate", "hgvs_p"]]
tooltip = data.loc[:, ["variant_id_one", "variant_id_two", "hgvs_tooltip"]]
diagonal = data.loc[
data.variant_id_one == data.variant_id_two, ["variant_id_one", "variant_id_two", "count"]]
sparse_matrix = data.loc[:, ["variant_id_one", "variant_id_two", "count"]]
sparse_matrix["frequency"] = sparse_matrix["count"] / count_samples
sparse_matrix = pd.merge(left=sparse_matrix, right=diagonal, on="variant_id_one", how="left",
suffixes=("", "_one"))
sparse_matrix = pd.merge(left=sparse_matrix, right=diagonal, on="variant_id_two", how="left",
suffixes=("", "_two"))
# calculate Jaccard index
sparse_matrix["count_union"] = sparse_matrix["count_one"] + sparse_matrix["count_two"] - sparse_matrix[
"count"]
sparse_matrix["jaccard"] = sparse_matrix["count"] / sparse_matrix["count_union"]
# calculate Cohen's kappa
sparse_matrix["chance_agreement"] = np.exp(-sparse_matrix["count"])
sparse_matrix["kappa"] = 1 - ((1 - sparse_matrix.jaccard) / (1 - sparse_matrix.chance_agreement))
sparse_matrix["kappa"] = sparse_matrix["kappa"].transform(lambda k: k if k > 0 else 0)
del sparse_matrix["count_union"]
del sparse_matrix["count_one"]
del sparse_matrix["count_two"]
del sparse_matrix["chance_agreement"]
# from the sparse matrix builds in memory the complete matrix
all_variants = data.variant_id_one.unique()
empty_full_matrix = pd.DataFrame(index=pd.MultiIndex.from_product(
[all_variants, all_variants], names=["variant_id_one", "variant_id_two"])).reset_index()
full_matrix = pd.merge(
left=empty_full_matrix, right=sparse_matrix, on=["variant_id_one", "variant_id_two"], how='left')
# add annotation on variant one
full_matrix = pd.merge(left=full_matrix, right=annotations, on="variant_id_one", how='left') \
.rename(columns={"position": "position_one",
"reference": "reference_one",
"alternate": "alternate_one",
"hgvs_p": "hgvs_p_one"})
# add annotations on variant two
full_matrix = pd.merge(left=full_matrix, right=annotations,
left_on="variant_id_two", right_on="variant_id_one", how='left') \
.rename(columns={"variant_id_one_x": "variant_id_one",
"position": "position_two",
"reference": "reference_two",
"alternate": "alternate_two",
"hgvs_p": "hgvs_p_two"})
# add tooltip
full_matrix = pd.merge(left=full_matrix, right=tooltip, on=["variant_id_one", "variant_id_two"], how='left')
# correct tooltip in diagonal
full_matrix.loc[
full_matrix.variant_id_one == full_matrix.variant_id_two, "hgvs_tooltip"] = full_matrix.hgvs_p_one
# NOTE: transpose matrix manually as plotly transpose does not work with labels
# the database return the upper diagonal, the lower is best for plots
full_matrix.sort_values(["position_two", "reference_two", "alternate_two",
"position_one", "reference_one", "alternate_one"], inplace=True)
full_matrix = full_matrix.loc[:, ["variant_id_one", "variant_id_two", "count", "frequency", "jaccard",
"kappa", "hgvs_tooltip"]]
return full_matrix
def get_variants_abundance_plot(self, bin_size=50, source=None):
# reads genes and domains across the whole genome
genes = self.queries.get_genes()
domains = self.queries.get_domains()
genes_and_domains = [(list(filter(lambda g: g.name == d.gene_name, genes))[0], d) for d in domains]
# reads variants abundance
variant_abundance = self.queries.get_variant_abundance_histogram(bin_size=bin_size, source=source)
# reads conservation and bins it
conservation = self.queries.get_conservation_table(bin_size=bin_size)
# joins variant abundance and conservation
data = variant_abundance.set_index("position_bin").join(conservation.set_index("position_bin"))
data.reset_index(inplace=True)
data.fillna(0, inplace=True)
layout = go.Layout(
template=TEMPLATE,
margin=MARGIN,
xaxis=dict(domain=[0, 1.0], tickformat=',d', hoverformat=',d', ticksuffix=" bp", ticks="outside",
visible=True, anchor="y7", showspikes=True, spikemode='across', spikethickness=2),
yaxis=dict(domain=[0.9, 1.0], anchor='x7'),
yaxis2=dict(domain=[0.6, 0.9], anchor='x7'),
yaxis3=dict(domain=[0.45, 0.6], anchor='x7', visible=False),
yaxis4=dict(domain=[0.3, 0.45], anchor='x7', visible=False),
yaxis5=dict(domain=[0.15, 0.3], anchor='x7', visible=False),
yaxis6=dict(domain=[0.05, 0.1], anchor='x7', visible=False),
yaxis7=dict(domain=[0.0, 0.05], anchor='x7', visible=False),
legend={'traceorder': 'normal'}
)
gene_colors = cycle(GENE_COLORS)
domain_colors = cycle(DOMAIN_COLORS)
gene_traces = [
self._get_gene_trace(g, start=g.start, end=g.end, color=c, yaxis='y6') for g, c in zip(genes, gene_colors)]
domain_traces = [self._get_domain_trace(color=c, domain=d, gene=g, yaxis='y7')
for (g, d), c in zip(genes_and_domains, domain_colors)]
conservation_traces = self._get_conservation_traces(
conservation, xaxis='x', yaxis1='y3', yaxis2='y4', yaxis3='y5')
mean_unique_variants_per_bin = data.count_unique_variants.mean()
variant_counts_traces = [
go.Scatter(x=data.position_bin, y=data.count_variant_observations,
name="All variants", text="All variants", showlegend=False,
line_color=plotly.express.colors.sequential.Blues[-2], line_width=1),
go.Scatter(x=data.position_bin,
y=[mean_unique_variants_per_bin for _ in range(data.shape[0])],
yaxis='y2', name="Mean unique variants", text="Mean unique variants",
line_width=1, showlegend=False, line_color=plotly.express.colors.sequential.Blues[-3]),
go.Scatter(x=data.position_bin, y=data.count_unique_variants, yaxis='y2',
name="Unique variants", text="Unique variants", showlegend=False, fill='tonexty',
line_color=plotly.express.colors.sequential.Blues[-4], line_width=1)
]
fig = go.Figure(data=variant_counts_traces + conservation_traces + gene_traces + domain_traces, layout=layout)
# add track names
fig.add_annotation(x=0.98, y=1.1, xref="x domain", yref="y domain", text="All variants",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=0.9, xref="x domain", yref="y2 domain", text="Unique variants",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y3 domain", text="Conservation SARS-CoV-2",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y4 domain", text="Conservation SARS-like betaCoV",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y5 domain", text="Conservation vertebrate CoV",
showarrow=False, yshift=10)
return [
dcc.Graph(
figure=fig,
config=PLOTLY_CONFIG
),
dcc.Markdown("""
***Genome view*** *representing the abundance of mutations and ConsHMM (Arneson, 2019) conservation
using a bin size of {} bp. Synonymous mutations are included.*
*The first track shows the count of mutations across the genome including repetitions. *
*The second track shows the count of unique mutations across the genome, the horizontal line represents
the average number of unique mutations per bin and thus distinguishes regions with over and under the
average number of unique mutations.*
*The third, fourth and fifth tracks represent the conservation as reported by ConsHMM within
SARS-CoV-2, among SARS-like betaCoV and among vertebrate CoV. Correlation between distribution of
unique mutations and conservation within Sars-CoV-2, among SARS-like betacoronavirus and among
vertebrates CoV respectively: {}, {}, {}.*
*Genes and Pfam domains are represented in tones of red and purple respectively.*
*Conservation data source: https://github.com/ernstlab/ConsHMM_CoV*
*<NAME>, <NAME>. Systematic discovery of conservation states for single-nucleotide annotation of the
human genome. Communications Biology, 248, 2019. doi: https://doi.org/10.1038/s42003-019-0488-1*
""".format(bin_size,
round(np.corrcoef(data.conservation, data.count_unique_variants)[0][1], 5),
round(np.corrcoef(data.conservation_sarbecovirus, data.count_unique_variants)[0][1], 5),
round(np.corrcoef(data.conservation_vertebrates, data.count_unique_variants)[0][1], 5)
))]
def get_variants_plot(self, gene_name, domain_name, selected_variants, bin_size, source=None):
# reads gene annotations
logger.debug("Getting genes and domains...")
assert gene_name is not None or domain_name is not None, "Either gene or domain need to be provided"
if domain_name is None:
gene = self.queries.get_gene(gene_name)
domains = self.queries.get_domains_by_gene(gene_name)
start = gene.start
end = gene.end
else:
domain = self.queries.get_domain(domain_name=domain_name)
gene = self.queries.get_gene(domain.gene_name)
domains = [domain]
start = gene.start + (domain.start * 3)
end = gene.start + (domain.end * 3)
# reads variants
logger.debug("Getting mutations...")
variants = self.queries.get_non_synonymous_variants_by_region(start=start, end=end, source=source)
# reads conservation and bins it
logger.debug("Getting conservation...")
conservation = self.queries.get_conservation_table(start=start, end=end, bin_size=bin_size)
if variants.shape[0] > 0:
# reads total number of samples and calculates frequencies
logger.debug("Getting sample count...")
count_samples = self.queries.count_samples(source=source)
variants["af"] = variants.count_occurrences / count_samples
variants["log_af"] = variants.af.transform(lambda x: np.log(x + 1))
variants["log_count"] = variants.count_occurrences.transform(lambda x: np.log(x))
variants.annotation_highest_impact = variants.annotation_highest_impact.transform(lambda a: a.split("&")[0])
main_xaxis = 'x'
variants_traces = []
missense_variants = variants[variants.annotation_highest_impact == MISSENSE_VARIANT]
if missense_variants.shape[0] > 0:
variants_traces.append(self._get_variants_scatter(
missense_variants, name="missense variants", symbol=MISSENSE_VARIANT_SYMBOL, xaxis=main_xaxis))
deletion_variants = variants[variants.annotation_highest_impact.isin(
[DISRUPTIVE_INFRAME_DELETION, CONSERVATIVE_INFRAME_DELETION])]
if deletion_variants.shape[0] > 0:
variants_traces.append(self._get_variants_scatter(
deletion_variants, name="inframe deletions", symbol=DELETION_SYMBOL, xaxis=main_xaxis))
insertion_variants = variants[variants.annotation_highest_impact.isin(
[DISRUPTIVE_INFRAME_INSERTION, CONSERVATIVE_INFRAME_INSERTION])]
if insertion_variants.shape[0] > 0:
variants_traces.append(self._get_variants_scatter(
insertion_variants, name="inframe insertions", symbol=INSERTION_SYMBOL, xaxis=main_xaxis))
other_variants = variants[~variants.annotation_highest_impact.isin([
MISSENSE_VARIANT, DISRUPTIVE_INFRAME_DELETION, CONSERVATIVE_INFRAME_DELETION,
DISRUPTIVE_INFRAME_INSERTION, CONSERVATIVE_INFRAME_INSERTION])]
if other_variants.shape[0] > 0:
variants_traces.append(self._get_variants_scatter(
other_variants, name="other variants", symbol=OTHER_VARIANT_SYMBOL, xaxis=main_xaxis))
selected_variants_trace = None
if selected_variants:
selected_variants_trace = go.Scatter(
x=[int(v.get("dna_mutation").split(":")[0]) for v in selected_variants],
y=[v.get("frequency") for v in selected_variants],
name="selected variants",
mode='markers',
marker=dict(
symbol="circle",
color="blue",
size=10,
showscale=False
),
xaxis=main_xaxis,
showlegend=True,
text=["{} ({})".format(v.get("hgvs_p"), v.get("annotation")) for v in selected_variants],
hovertemplate=VARIANT_TOOLTIP
)
domain_colors = cycle(DOMAIN_COLORS)
gene_trace = self._get_gene_trace(
gene, start=start, end=end, color=plotly.express.colors.sequential.Tealgrn[-1], yaxis='y5', xaxis=main_xaxis)
domain_traces = [self._get_domain_trace(
color=c, gene=gene, domain=d, yaxis='y6', xaxis=main_xaxis, showlegend=True)
for d, c in zip(domains, domain_colors)]
conservation_traces = self._get_conservation_traces(
conservation, main_xaxis, yaxis1='y2', yaxis2='y3', yaxis3='y4')
data = variants_traces + conservation_traces + [gene_trace] + domain_traces
if selected_variants_trace is not None:
data.append(selected_variants_trace)
layout = go.Layout(
template=TEMPLATE,
margin=go.layout.Margin(l=0, r=0, b=0, t=20),
xaxis=dict(tickformat=',d', hoverformat=',d', ticksuffix=" bp", ticks="outside",
showspikes=True, spikemode='across', spikethickness=1, anchor='y6'),
yaxis=dict(title='Allele frequency', type='log', domain=[0.4, 1.0], anchor=main_xaxis),
yaxis2=dict(domain=[0.3, 0.4], visible=False, anchor=main_xaxis),
yaxis3=dict(domain=[0.2, 0.3], visible=False, anchor=main_xaxis),
yaxis4=dict(domain=[0.1, 0.2], visible=False, anchor=main_xaxis),
yaxis5=dict(domain=[0.05, 0.1], visible=False, anchor=main_xaxis),
yaxis6=dict(domain=[0.0, 0.05], visible=False, anchor=main_xaxis)
)
fig = go.Figure(data=data, layout=layout)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y2 domain", text="Conservation SARS-CoV-2",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y3 domain", text="Conservation SARS-like betaCoV",
showarrow=False, yshift=10)
fig.add_annotation(x=0.98, y=1.0, xref="x domain", yref="y4 domain", text="Conservation vertebrate CoV",
showarrow=False, yshift=10)
# add horizontal lines on the frequency boundaries
fig.add_hline(y=0.1, line_width=1, line_dash="dash", line_color=VERY_COMMON_VARIANTS_COLOR)
fig.add_hline(y=0.01, line_width=1, line_dash="dash", line_color=COMMON_VARIANTS_COLOR)
fig.add_hline(y=0.001, line_width=1, line_dash="dash", line_color=RARE_VARIANTS_COLOR)
return [dcc.Graph(figure=fig, config=PLOTLY_CONFIG),
dcc.Markdown("""
***Gene view*** *representing each variant with its frequency in the population and
ConsHMM (Arneson, 2019) conservation using a bin size of {bin_size} bp. Synonymous mutations
and mutations occurring in a single sample are excluded.*
*The scatter plot shows non synonymous mutations occurring in at least two samples on {region}.
The x-axis shows the genomic coordinates and the y-axis shows the allele frequency.*
*The category of "other variants" includes frameshift indels, stop codon gain and lost and
start lost mutations.*
*The mutations are colored according to their frequency as rare mutations (< 0.1 %),
low frequency mutations (>= 0.1% and < 1%), common mutations (>= 1% and < 10%) and very common
mutations (>= 10%). The second, third and fourth tracks in grey represent the conservation as
reported by ConsHMM within SARS-CoV-2, among SARS-like betaCoV and among vertebrate CoV.*
*Genes and Pfam domains are represented in tones of red and purple respectively.*
*Conservation data source: https://github.com/ernstlab/ConsHMM_CoV*
*<NAME>, <NAME>. Systematic discovery of conservation states for single-nucleotide annotation of the
human genome. Communications Biology, 248, 2019. doi: https://doi.org/10.1038/s42003-019-0488-1*
""".format(
bin_size=bin_size,
region="gene {}".format(gene_name) if domain_name is not None else
"domain {}: {}".format(gene_name, domain_name)))]
else:
return dcc.Markdown("""**No mutations for the current selection**""")
def _get_conservation_traces(self, conservation, xaxis, yaxis1, yaxis2, yaxis3):
return [
go.Scatter(x=conservation.position_bin, y=conservation.conservation, yaxis=yaxis1, xaxis=xaxis,
text="Conservation SARS-CoV-2", textposition="top right", showlegend=False,
fill='tozeroy', line_color="grey", line_width=1),
go.Scatter(x=conservation.position_bin, y=conservation.conservation_sarbecovirus, yaxis=yaxis2,
xaxis=xaxis, text="Conservation SARS-like betacoronavirus", textposition="top right",
showlegend=False, fill='tozeroy', line_color="grey", line_width=1),
go.Scatter(x=conservation.position_bin, y=conservation.conservation_vertebrates, yaxis=yaxis3,
xaxis=xaxis, text="Conservation vertebrates", textposition="top right",
showlegend=False, fill='tozeroy', line_color="grey", line_width=1)
]
@staticmethod
def _get_domain_trace(color: str, domain: Domain, gene: Gene, yaxis='y', xaxis='x', showlegend=False):
domain_start = gene.start + (domain.start * 3) # domain coordinates are in the protein space
domain_end = gene.start + (domain.end * 3) # domain coordinates are in the protein space
domain_name = domain.name
return go.Scatter(
mode='lines',
x=[domain_start, domain_end, domain_end, domain_start],
y=[0, 0, 1, 1],
name=domain_name,
fill="toself",
fillcolor=color,
text="<b>{} {}</b>: {}-{}".format(gene.name, domain_name, domain_start, domain_end),
hoverinfo="text",
line=dict(width=0),
yaxis=yaxis,
xaxis=xaxis,
legendgroup='domains',
showlegend=showlegend
)
@staticmethod
def _get_gene_trace(gene: Gene, start, end, color, yaxis="y", xaxis='x'):
# start and end coordinates in some case will come from the domain
return go.Scatter(
mode='lines',
x=[start, end, end, start],
y=[0, 0, 1, 1],
name=gene.name,
fill="toself",
fillcolor=color,
text="<b>{}</b>: {}-{}".format(gene.name, gene.start, gene.end),
hoverinfo="text",
line=dict(width=0),
yaxis=yaxis,
xaxis=xaxis,
legendgroup='gene'
)
def _get_variants_scatter(self, variants, name, symbol, xaxis='x'):
return go.Scatter(
x=variants.position,
y=variants.af,
name=name,
mode='markers',
# opacity=0.5,
marker=dict(
symbol=symbol,
color=variants.af.transform(self._get_color_by_af),
showscale=False
),
xaxis=xaxis,
showlegend=True,
text=variants[["hgvs_p", "annotation_highest_impact"]].apply(lambda x: "{} ({})".format(x[0], x[1]), axis=1),
hovertemplate=VARIANT_TOOLTIP
)
def get_variants_clustering(self, sparse_matrix, min_cooccurrence, min_samples):
data = self._get_mds(sparse_matrix=sparse_matrix, min_samples=min_samples)
tables = []
for c in data.cluster.unique():
tables.append(dash_table.DataTable(
id="cluster{}-variants-table".format(c),
data=data[data.cluster == c].to_dict('records'),
columns=[
{"name": [
"Cluster {} (mean Jaccard={})".format(c, data[data.cluster == c].cluster_jaccard_mean.iloc[0]),
"DNA mutation"], "id": "variant_id"},
{"name": ["", "Protein mutation"], "id": "hgvs_p"},
],
fixed_columns={'headers': True, 'data': 1},
style_table={'overflowX': 'auto'},
style_cell={'minWidth': '50px', 'width': '50px', 'maxWidth': '50px'},
style_as_list_view=True,
style_header={
'backgroundColor': 'rgb(230, 230, 230)',
'fontWeight': 'bold'
},
sort_by=[{"column_id": "variant_id", "direction": "asc"}],
))
tables.append(html.Br())
return html.Div(children=[
html.Div(children=tables),
dcc.Markdown("""
***Co-occurrence clustering*** *shows the resulting clusters from the
co-occurrence matrix with the Jaccard index corrected with the Cohen's kappa coefficient.
The co-occurrence matrix is built taking into account only mutations with at least {} pairwise
co-occurrences and if a gene is provided only mutations within that gene.
Clustering is performed on the co-occurrence matrix using OPTICS.
The minimum number of neighbours to call
a cluster is {}.
Variants selected in the top occurring mutations table are highlighted with a greater size in the plot.
*
*Ankerst et al. “OPTICS: ordering points to identify the clustering structure.” ACM SIGMOD Record 28, no. 2 (1999): 49-60.*
""".format(min_cooccurrence, min_samples))])
def _get_mds(self, sparse_matrix, min_samples) -> pd.DataFrame:
diagonal = sparse_matrix.loc[sparse_matrix.variant_id_one == sparse_matrix.variant_id_two,
["variant_id_one", "variant_id_two", "count"]]
sparse_matrix_with_diagonal = pd.merge(
left=sparse_matrix, right=diagonal, on="variant_id_one", how="left", suffixes=("", "_one"))
sparse_matrix_with_diagonal = pd.merge(
left=sparse_matrix_with_diagonal, right=diagonal, on="variant_id_two", how="left", suffixes=("", "_two"))
# calculate Jaccard index
sparse_matrix_with_diagonal["count_union"] = sparse_matrix_with_diagonal["count_one"] + \
sparse_matrix_with_diagonal["count_two"] - \
sparse_matrix_with_diagonal["count"]
sparse_matrix_with_diagonal["jaccard_similarity"] = sparse_matrix_with_diagonal["count"] / \
sparse_matrix_with_diagonal.count_union
sparse_matrix_with_diagonal["jaccard_dissimilarity"] = 1 - sparse_matrix_with_diagonal.jaccard_similarity
# calculate Cohen's kappa
sparse_matrix_with_diagonal["chance_agreement"] = np.exp(-sparse_matrix_with_diagonal["count"])
sparse_matrix_with_diagonal["kappa"] = 1 - ((1 - sparse_matrix_with_diagonal.jaccard_similarity) / (
1 - sparse_matrix_with_diagonal.chance_agreement))
sparse_matrix_with_diagonal["kappa"] = sparse_matrix_with_diagonal["kappa"].transform(
lambda k: k if k > 0 else 0)
sparse_matrix_with_diagonal["kappa_dissimilarity"] = 1 - sparse_matrix_with_diagonal.kappa
dissimilarity_metric = "kappa_dissimilarity"
# build upper diagonal matrix
all_variants = sparse_matrix_with_diagonal.variant_id_one.unique()
empty_full_matrix = pd.DataFrame(index=pd.MultiIndex.from_product(
[all_variants, all_variants], names=["variant_id_one", "variant_id_two"])).reset_index()
upper_diagonal_matrix = pd.merge(
# keep only the upper triangle of the matrix, without the diagonal
left=empty_full_matrix.loc[empty_full_matrix.variant_id_one < empty_full_matrix.variant_id_two, :],
right=sparse_matrix_with_diagonal.loc[:, ["variant_id_one", "variant_id_two", dissimilarity_metric]],
on=["variant_id_one", "variant_id_two"], how='left')
upper_diagonal_matrix.fillna(1.0, inplace=True)
upper_diagonal_matrix.sort_values(by=["variant_id_one", "variant_id_two"], inplace=True)
logger.debug("Building square distance matrix...")
distance_matrix = squareform(upper_diagonal_matrix[dissimilarity_metric])
# this ensures the order of variants ids is coherent with the non redundant form of the distance matrix
ids = np.array([list(upper_diagonal_matrix.variant_id_one[0])[0]] + \
list(upper_diagonal_matrix.variant_id_two[0:len(upper_diagonal_matrix.variant_id_two.unique())]))
distance_matrix_with_ids = DissimilarityMatrix(data=distance_matrix, ids=ids)
logger.debug("Clustering...")
clusters = OPTICS(min_samples=min_samples, max_eps=1.4).fit_predict(distance_matrix_with_ids.data)
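# Note: scikit-learn's OPTICS labels points that cannot be assigned to any cluster as -1,
# so a cluster value of -1 in the resulting dataframe marks unclustered (noise) mutations.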
logger.debug("Building clustering dataframe...")
data = pd.DataFrame({'variant_id': distance_matrix_with_ids.ids, 'cluster': clusters})
logger.debug("Annotate with HGVS.p ...")
annotations = pd.concat([
sparse_matrix.loc[:, ["variant_id_one", "hgvs_p_one"]].rename(
columns={"variant_id_one": "variant_id", "hgvs_p_one": "hgvs_p"}),
sparse_matrix.loc[:, ["variant_id_two", "hgvs_p_two"]].rename(
columns={"variant_id_two": "variant_id", "hgvs_p_two": "hgvs_p"})])
data = pd.merge(left=data, right=annotations, on="variant_id", how="left")
"""
This module contains a set of functions that parse the training data set and
compute the centers for the data clusters.
Here you will also find dictionaries contatining Sentinel2 and Landsat7/8
bands, as well as distionaries containing the mean values for each class.
"""
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
HCRF_FILE = os.path.join(os.getcwd(), 'TrainingData', 'TrainingData.csv')
SAVEFIG_PATH = os.getcwd()
HA = {}
LA = {}
CI = {}
CC = {}
WAT = {}
SN = {}
HA_L8 = {}
LA_L8 = {}
CI_L8 = {}
CC_L8 = {}
WAT_L8 = {}
SN_L8 = {}
HA_L7 = {}
LA_L7 = {}
CI_L7 = {}
CC_L7 = {}
WAT_L7 = {}
SN_L7 = {}
BANDS = {
1: [433, 453],
2: [457, 522],
3: [542, 578],
4: [650, 680],
5: [697, 712],
6: [732, 747],
7: [776, 796],
8: [855, 875], # 8a
9: [935, 955],
10: [1365, 1385],
11: [1565, 1655],
12: [2100, 2280]
}
BANDS_LANDSAT_8 = {
1: [430, 450],
2: [450, 510],
3: [530, 590],
4: [640, 670],
5: [850, 880],
6: [1570, 1650],
7: [2110, 2290],
8: [500, 680],
9: [1360, 1380]
}
BANDS_LANDSAT_7 = {
1: [450, 520],
2: [520, 600],
3: [630, 690],
4: [770, 900],
5: [1550, 1750],
7: [2064, 2354],
8: [520, 900]
}
def plot_training_spectra(BANDS, HA, LA, CI, CC, WAT, SN, mission="Sentinel2"):
ax = plt.subplot(1, 1, 1)
xpoints = BANDS.keys()
plt.plot(xpoints, HA.values(), 'o:g', label="High Algae")
plt.plot(xpoints, LA.values(), 'o:y', label="Low Algae")
plt.plot(xpoints, CI.values(), 'o:b', label="Clean Ice")
plt.plot(xpoints, CC.values(), 'o:m', label="Cryoconite")
plt.plot(xpoints, WAT.values(), 'o:k', label="Water")
plt.plot(xpoints, SN.values(), 'o:c', label="Snow")
handles, labels = ax.get_legend_handles_labels()
ax.legend(labels)
plt.grid()
plt.xlabel("{} bands".format(mission))
plt.ylabel("Albedo")
plt.title("Spectra of training data")
plt.savefig(os.path.join(SAVEFIG_PATH, 'TrainingSpectra{}.png'.format(mission)))
plt.close()
def create_dataset(file=HCRF_FILE, savefig=True):
hcrf_master = pd.read_csv(file)
HA_hcrf = pd.DataFrame()
LA_hcrf = pd.DataFrame()
HA_hcrf_S2 = pd.DataFrame()
LA_hcrf_S2 = pd.DataFrame()
"""
This is the Syncer that is responsible for storing events in the ModelDB.
Contains functions for overriding basic scikit-learn functions.
"""
import sys
import numpy as np
import pandas as pd
from future.utils import with_metaclass
# sklearn imports
from sklearn.linear_model import *
from sklearn.preprocessing import *
from sklearn.decomposition import *
from sklearn.calibration import *
from sklearn.ensemble import *
from sklearn.tree import *
from sklearn.feature_selection import *
from sklearn.svm import *
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn import cross_validation
import sklearn.metrics
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
# modeldb imports
from modeldb.utils.Singleton import Singleton
from . import GridCrossValidation
from . CrossValidationScore import *
from ..basic import *
from ..events import *
from ..thrift.modeldb import ModelDBService
from ..thrift.modeldb import ttypes as modeldb_types
'''
Functions that extract relevant information from scikit-learn, pandas and
numpy calls
'''
def fit_fn(self, x, y=None, sample_weight=None):
"""
Overrides the fit function for all models except for
Pipeline and GridSearch, and Cross Validation,
which have their own functions.
"""
df = x
# Certain fit functions only accept one argument
if y is None:
model = self.fit(x)
else:
model = self.fit(x, y)
fit_event = FitEvent(model, self, x)
Syncer.instance.add_to_buffer(fit_event)
def convert_prediction_to_event(model, predict_array, x):
predict_df = pd.DataFrame(predict_array)
"""
This script visualises the prevention parameters of the first and second COVID-19 waves.
Arguments:
----------
-f:
Filename of samples dictionary to be loaded. Default location is ~/data/interim/model_parameters/COVID19_SEIRD/calibrations/national/
Returns:
--------
Example use:
------------
"""
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2020 by <NAME>, BIOMATH, Ghent University. All Rights Reserved."
# ----------------------
# Load required packages
# ----------------------
import json
import argparse
import datetime
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.transforms import offset_copy
from covid19model.models import models
from covid19model.data import mobility, sciensano, model_parameters
from covid19model.models.time_dependant_parameter_fncs import ramp_fun
from covid19model.visualization.output import _apply_tick_locator
from covid19model.visualization.utils import colorscale_okabe_ito, moving_avg
# Use the Okabe-Ito color cycle for all plots
plt.rcParams.update({
"axes.prop_cycle": plt.cycler('color',
list(colorscale_okabe_ito.values())),
})
# -----------------------
# Handle script arguments
# -----------------------
parser = argparse.ArgumentParser()
parser.add_argument("-n", "--n_samples", help="Number of samples used to visualise model fit", default=100, type=int)
parser.add_argument("-k", "--n_draws_per_sample", help="Number of binomial draws per sample drawn used to visualize model fit", default=1, type=int)
args = parser.parse_args()
#################################################
## PART 1: Comparison of total number of cases ##
#################################################
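# NOTE (assumption): df_sciensano is used throughout this script but is never loaded in
# the code as shown; a minimal sketch of the missing step, assuming the covid19model
# sciensano helper provides the national case/hospitalisation data:
df_sciensano = sciensano.get_sciensano_COVID19_data(update=False)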
youth = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())
cases_youth_nov21 = youth[youth.index == pd.to_datetime('2020-11-21')].values
cases_youth_rel = moving_avg((df_sciensano['C_0_9']+df_sciensano['C_10_19']).to_frame())/cases_youth_nov21*100
work = moving_avg((df_sciensano['C_20_29']+df_sciensano['C_30_39']+df_sciensano['C_40_49']+df_sciensano['C_50_59']).to_frame())
cases_work_nov21 = work[work.index == pd.to_datetime('2020-11-21')].values
cases_work_rel = work/cases_work_nov21*100
old = moving_avg((df_sciensano['C_60_69']+df_sciensano['C_70_79']+df_sciensano['C_80_89']+df_sciensano['C_90+']).to_frame())
cases_old_nov21 = old[old.index == pd.to_datetime('2020-11-21')].values
cases_old_rel = old/cases_old_nov21*100
fig,ax=plt.subplots(figsize=(12,4.3))
ax.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax.set_ylim([0,320])
ax.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax = _apply_tick_locator(ax)
ax.set_yticks([0,100,200,300])
ax.grid(False)
plt.tight_layout()
plt.show()
def crosscorr(datax, datay, lag=0):
""" Lag-N cross correlation.
Parameters
----------
lag : int, default 0
datax, datay : pandas.Series objects of equal length
Returns
----------
crosscorr : float
"""
return datax.corr(datay.shift(lag))
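# Example (illustrative): crosscorr(series_a, series_b, lag=3) correlates series_a[t]
# with series_b[t-3], i.e. a positive lag shifts series_b forward in time before correlating.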
lag_series = range(-15,8)
covariance_youth_work = []
covariance_youth_old = []
covariance_work_old = []
for lag in lag_series:
covariance_youth_work.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_youth_old.append(crosscorr(cases_youth_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariance_work_old.append(crosscorr(cases_work_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),cases_old_rel[pd.to_datetime('2020-11-02'):pd.to_datetime('2021-02-01')].squeeze(),lag=lag))
covariances = [covariance_youth_work, covariance_youth_old, covariance_work_old]
for i in range(3):
n = len(covariances[i])
k = max(covariances[i])
idx=np.argmax(covariances[i])
tau = lag_series[idx]
sig = 2/np.sqrt(n-abs(k))
if k >= sig:
print(tau, k, True)
else:
print(tau, k, False)
fig,(ax1,ax2)=plt.subplots(nrows=2,ncols=1,figsize=(15,10))
# First part
ax1.plot(df_sciensano.index, cases_youth_rel, linewidth=1.5, color='black')
ax1.plot(df_sciensano.index, cases_work_rel, linewidth=1.5, color='orange')
ax1.plot(df_sciensano.index, cases_old_rel, linewidth=1.5, color='blue')
ax1.axvspan(pd.to_datetime('2020-11-21'), pd.to_datetime('2020-12-18'), color='black', alpha=0.2)
ax1.axvspan(pd.to_datetime('2021-01-09'), pd.to_datetime('2021-02-15'), color='black', alpha=0.2)
ax1.set_xlim([pd.to_datetime('2020-11-05'), pd.to_datetime('2021-02-01')])
ax1.set_ylim([0,300])
ax1.set_ylabel('Relative number of cases as compared\n to November 16th, 2020 (%)')
#ax.set_xticks([pd.to_datetime('2020-11-16'), pd.to_datetime('2020-12-18'), pd.to_datetime('2021-01-04')])
ax1.legend(['$[0,20[$','$[20,60[$','$[60,\infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax1 = _apply_tick_locator(ax1)
# Second part
ax2.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax2.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax2.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax2.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax2.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax2.axvline(0,linewidth=1, color='black')
ax2.grid(False)
ax2.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax2.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
fig,ax = plt.subplots(figsize=(15,5))
ax.scatter(lag_series, covariance_youth_work, color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax.scatter(lag_series, covariance_youth_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='s')
ax.scatter(lag_series, covariance_work_old, color='black',alpha=0.6, linestyle='None',facecolors='none', s=30, linewidth=1, marker='D')
ax.legend(['$[0,20[$ vs. $[20,60[$', '$[0,20[$ vs. $[60,\infty[$', '$[20,60[$ vs. $[60, \infty[$'], bbox_to_anchor=(1.05, 1), loc='upper left')
ax.plot(lag_series, covariance_youth_work, color='black', linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_youth_old, color='black',linestyle='--', linewidth=1)
ax.plot(lag_series, covariance_work_old, color='black',linestyle='--', linewidth=1)
ax.axvline(0,linewidth=1, color='black')
ax.grid(False)
ax.set_ylabel('lag-$\\tau$ cross correlation (-)')
ax.set_xlabel('$\\tau$ (days)')
plt.tight_layout()
plt.show()
#####################################################
## PART 1: Calibration robustness figure of WAVE 1 ##
#####################################################
n_calibrations = 6
n_prevention = 3
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-15.json')), # 2020-04-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-13.json')), # 2020-04-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-23.json')), # 2020-05-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-18.json')), # 2020-05-15
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-21.json')), # 2020-06-01
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json')) # 2020-07-01
]
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-03-15'
# Last datapoint used in inference
end_calibrations = ['2020-04-04', '2020-04-15', '2020-05-01', '2020-05-15', '2020-06-01', '2020-07-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2020-07-14'
# ---------
# Load data
# ---------
# Contact matrices
initN, Nc_home, Nc_work, Nc_schools, Nc_transport, Nc_leisure, Nc_others, Nc_total = model_parameters.get_interaction_matrices(dataset='willem_2012')
Nc_all = {'total': Nc_total, 'home':Nc_home, 'work': Nc_work, 'schools': Nc_schools, 'transport': Nc_transport, 'leisure': Nc_leisure, 'others': Nc_others}
levels = initN.size
# Google Mobility data
df_google = mobility.get_google_mobility_data(update=False)
# ---------------------------------
# Time-dependant parameter function
# ---------------------------------
# Extract build contact matrix function
from covid19model.models.time_dependant_parameter_fncs import make_contact_matrix_function, ramp_fun
contact_matrix_4prev, all_contact, all_contact_no_schools = make_contact_matrix_function(df_google, Nc_all)
# Define policy function
def policies_wave1_4prev(t, states, param, l , tau, prev_schools, prev_work, prev_rest, prev_home):
# Convert tau and l to time offsets in days
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
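# Interpretation (inferred from the ramp_fun calls below): tau is the delay before a
# policy change starts to take effect and l is the length of the compliance ramp from
# the old to the new contact matrix.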
# Define key dates of first wave
t1 = pd.Timestamp('2020-03-15') # start of lockdown
t2 = pd.Timestamp('2020-05-15') # gradual re-opening of schools (assume 50% of nominal scenario)
t3 = pd.Timestamp('2020-07-01') # start of summer holidays
t4 = pd.Timestamp('2020-09-01') # end of summer holidays
# Define key dates of second wave
t5 = pd.Timestamp('2020-10-19') # lockdown (1)
t6 = pd.Timestamp('2020-11-02') # lockdown (2)
t7 = pd.Timestamp('2020-11-16') # schools re-open
t8 = pd.Timestamp('2020-12-18') # Christmas holiday starts
t9 = pd.Timestamp('2021-01-04') # Christmas holiday ends
t10 = pd.Timestamp('2021-02-15') # Spring break starts
t11 = pd.Timestamp('2021-02-21') # Spring break ends
t12 = pd.Timestamp('2021-04-05') # Easter holiday starts
t13 = pd.Timestamp('2021-04-18') # Easter holiday ends
# ------
# WAVE 1
# ------
if t <= t1:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 < t < t1 + tau_days:
t = pd.Timestamp(t.date())
return all_contact(t)
elif t1 + tau_days < t <= t1 + tau_days + l_days:
t = pd.Timestamp(t.date())
policy_old = all_contact(t)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t1)
elif t1 + tau_days + l_days < t <= t2:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t2 < t <= t3:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t3 < t <= t4:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
# ------
# WAVE 2
# ------
elif t4 < t <= t5 + tau_days:
return contact_matrix_4prev(t, school=1)
elif t5 + tau_days < t <= t5 + tau_days + l_days:
policy_old = contact_matrix_4prev(t, school=1)
policy_new = contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
return ramp_fun(policy_old, policy_new, t, tau_days, l, t5)
elif t5 + tau_days + l_days < t <= t6:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t6 < t <= t7:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t7 < t <= t8:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t8 < t <= t9:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t9 < t <= t10:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t10 < t <= t11:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
elif t11 < t <= t12:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
elif t12 < t <= t13:
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=0)
else:
t = pd.Timestamp(t.date())
return contact_matrix_4prev(t, prev_home, prev_schools, prev_work, prev_rest,
school=1)
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Define initial states
initial_states = {"S": initN, "E": np.ones(9)}
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
# -------------------------------------
# Define necessary function to plot fit
# -------------------------------------
LL = conf_int/2
UL = 1-conf_int/2
def add_poisson(state_name, output, n_samples, n_draws_per_sample, UL=1-0.05*0.5, LL=0.05*0.5):
data = output[state_name].sum(dim="Nc").values
# Initialize vectors
vector = np.zeros((data.shape[1],n_draws_per_sample*n_samples))
# Loop over dimension draws
for n in range(data.shape[0]):
binomial_draw = np.random.poisson( np.expand_dims(data[n,:],axis=1),size = (data.shape[1],n_draws_per_sample))
vector[:,n*n_draws_per_sample:(n+1)*n_draws_per_sample] = binomial_draw
# Compute mean and median
mean = np.mean(vector,axis=1)
median = np.median(vector,axis=1)
# Compute quantiles
LL = np.quantile(vector, q = LL, axis = 1)
UL = np.quantile(vector, q = UL, axis = 1)
return mean, median, LL, UL
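# Note: for each of the n_samples model trajectories, n_draws_per_sample Poisson draws
# are taken around the simulated incidence; the mean, median and the LL/UL quantiles are
# then computed across all n_samples * n_draws_per_sample columns to quantify uncertainty.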
def plot_fit(ax, state_name, state_label, data_df, time, vector_mean, vector_LL, vector_UL, start_calibration='2020-03-15', end_calibration='2020-07-01' , end_sim='2020-09-01'):
ax.fill_between(pd.to_datetime(time), vector_LL, vector_UL,alpha=0.30, color = 'blue')
ax.plot(time, vector_mean,'--', color='blue', linewidth=1.5)
ax.scatter(data_df[start_calibration:end_calibration].index,data_df[state_name][start_calibration:end_calibration], color='black', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax.scatter(data_df[pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim].index,data_df[state_name][pd.to_datetime(end_calibration)+datetime.timedelta(days=1):end_sim], color='red', alpha=0.5, linestyle='None', facecolors='none', s=30, linewidth=1)
ax = _apply_tick_locator(ax)
ax.set_xlim(start_calibration,end_sim)
ax.set_ylabel(state_label)
return ax
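# Note: in plot_fit the black markers are observations inside the calibration window,
# the red markers are observations after end_calibration (data the model was not fitted on),
# and the shaded band is the LL-UL uncertainty envelope of the simulations.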
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 3]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 0.5, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,300, 600])
axes[i,j].set_ylim([0,700])
plt.tight_layout()
plt.show()
model_results_WAVE1 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
#####################################
## PART 2: Hospitals vs. R0 figure ##
#####################################
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] * np.sum(Nc, axis=1)[i]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_overall = np.mean(np.sum(R0_norm,axis=0))
return R0, R0_overall
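# The loop above implements, per age class i and posterior sample j:
#   R0[i, j] = (a[i] * da[j] + omega[j]) * beta[j] * sum_k Nc[i, k]
# R0_overall averages, over the posterior samples, the population-weighted sum of R0
# across the age classes.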
R0, R0_overall = compute_R0(initN, Nc_all['total'], samples_dicts[-1], params)
cumsum = out['H_in'].cumsum(dim='time').values
cumsum_mean = np.mean(cumsum[:,:,-1], axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_LL = cumsum_mean - np.quantile(cumsum[:,:,-1], q = 0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0))
cumsum_UL = np.quantile(cumsum[:,:,-1], q = 1-0.05/2, axis=0)/sum(np.mean(cumsum[:,:,-1],axis=0)) - cumsum_mean
cumsum = (out['H_in'].mean(dim="draws")).cumsum(dim='time').values
fraction = cumsum[:,-1]/sum(cumsum[:,-1])
fig,ax = plt.subplots(figsize=(12,4))
bars = ('$[0, 10[$', '$[10, 20[$', '$[20, 30[$', '$[30, 40[$', '$[40, 50[$', '$[50, 60[$', '$[60, 70[$', '$[70, 80[$', '$[80, \infty[$')
x_pos = np.arange(len(bars))
#ax.bar(x_pos, np.mean(R0,axis=1), yerr = [np.mean(R0,axis=1) - np.quantile(R0,q=0.05/2,axis=1), np.quantile(R0,q=1-0.05/2,axis=1) - np.mean(R0,axis=1)], width=1, color='b', alpha=0.5, capsize=10)
ax.bar(x_pos, np.mean(R0,axis=1), width=1, color='b', alpha=0.8)
ax.set_ylabel('$R_0$ (-)')
ax.grid(False)
ax2 = ax.twinx()
#ax2.bar(x_pos, cumsum_mean, yerr = [cumsum_LL, cumsum_UL], width=1,color='orange',alpha=0.9,hatch="/", capsize=10)
ax2.bar(x_pos, cumsum_mean, width=1,color='orange',alpha=0.6,hatch="/")
ax2.set_ylabel('Fraction of hospitalizations (-)')
ax2.grid(False)
plt.xticks(x_pos, bars)
plt.tight_layout()
plt.show()
#########################################
## Part 3: Robustness figure of WAVE 2 ##
#########################################
n_prevention = 4
conf_int = 0.05
# -------------------------
# Load samples dictionaries
# -------------------------
samples_dicts = [
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-06.json')), # 2020-11-04
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-05.json')), # 2020-11-16
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-04.json')), # 2020-12-24
json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json')), # 2021-02-01
]
n_calibrations = len(samples_dicts)
warmup = int(samples_dicts[0]['warmup'])
# Start of data collection
start_data = '2020-03-15'
# First datapoint used in inference
start_calibration = '2020-09-01'
# Last datapoint used in inference
end_calibrations = ['2020-11-06','2020-11-16','2020-12-24','2021-02-01']
# Start- and enddate of plotfit
start_sim = start_calibration
end_sim = '2021-02-14'
# --------------------
# Initialize the model
# --------------------
# Load the model parameters dictionary
params = model_parameters.get_COVID19_SEIRD_parameters()
# Add the time-dependant parameter function arguments
params.update({'l': 21, 'tau': 21, 'prev_schools': 0, 'prev_work': 0.5, 'prev_rest': 0.5, 'prev_home': 0.5})
# Model initial condition on September 1st
warmup = 0
with open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/initial_states_2020-09-01.json', 'r') as fp:
initial_states = json.load(fp)
initial_states.update({
'VE': np.zeros(9),
'V': np.zeros(9),
'V_new': np.zeros(9),
'alpha': np.zeros(9)
})
#initial_states['ICU_tot'] = initial_states.pop('ICU')
# Initialize model
model = models.COVID19_SEIRD(initial_states, params,
time_dependent_parameters={'Nc': policies_wave1_4prev})
# ------------------------
# Define sampling function
# ------------------------
def draw_fcn(param_dict,samples_dict):
# Sample first calibration
idx, param_dict['beta'] = random.choice(list(enumerate(samples_dict['beta'])))
param_dict['da'] = samples_dict['da'][idx]
param_dict['omega'] = samples_dict['omega'][idx]
param_dict['sigma'] = 5.2 - samples_dict['omega'][idx]
# Sample second calibration
param_dict['l'] = samples_dict['l'][idx]
param_dict['tau'] = samples_dict['tau'][idx]
param_dict['prev_schools'] = samples_dict['prev_schools'][idx]
param_dict['prev_home'] = samples_dict['prev_home'][idx]
param_dict['prev_work'] = samples_dict['prev_work'][idx]
param_dict['prev_rest'] = samples_dict['prev_rest'][idx]
return param_dict
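# Hedged illustration of the joint-sampling pattern in draw_fcn above: one index
# is drawn and then reused, so every parameter comes from the same posterior
# sample. The lists below are hypothetical stand-ins for entries of samples_dict.
_beta_samples_demo = [0.02, 0.03, 0.04]
_da_samples_demo = [6.0, 7.0, 8.0]
_idx_demo, _beta_pick_demo = random.choice(list(enumerate(_beta_samples_demo)))
_da_pick_demo = _da_samples_demo[_idx_demo]   # same index keeps the draw internally consistent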
# -------------------------------
# Visualize prevention parameters
# -------------------------------
# Method 1: all in on page
fig,axes= plt.subplots(nrows=n_calibrations,ncols=n_prevention+1, figsize=(13,8.27), gridspec_kw={'width_ratios': [1, 1, 1, 1, 6]})
prevention_labels = ['$\Omega_{home}$ (-)', '$\Omega_{schools}$ (-)', '$\Omega_{work}$ (-)', '$\Omega_{rest}$ (-)']
prevention_names = ['prev_home', 'prev_schools', 'prev_work', 'prev_rest']
row_labels = ['(a)', '(b)', '(c)', '(d)', '(e)', '(f)']
pad = 5 # in points
for i in range(n_calibrations):
print('Simulation no. {} out of {}'.format(i+1,n_calibrations))
out = model.sim(end_sim,start_date=start_sim,warmup=warmup,N=args.n_samples,draw_fcn=draw_fcn,samples=samples_dicts[i])
vector_mean, vector_median, vector_LL, vector_UL = add_poisson('H_in', out, args.n_samples, args.n_draws_per_sample)
for j in range(n_prevention+1):
if j != n_prevention:
n, bins, patches = axes[i,j].hist(samples_dicts[i][prevention_names[j]], color='blue', bins=15, density=True, alpha=0.6)
axes[i,j].axvline(np.mean(samples_dicts[i][prevention_names[j]]), ymin=0, ymax=1, linestyle='--', color='black')
max_n = 1.05*max(n)
axes[i,j].annotate('$\hat{\mu} = $'+"{:.2f}".format(np.mean(samples_dicts[i][prevention_names[j]])), xy=(np.mean(samples_dicts[i][prevention_names[j]]),max_n),
rotation=0,va='bottom', ha='center',annotation_clip=False,fontsize=10)
if j == 0:
axes[i,j].annotate(row_labels[i], xy=(0, 0.5), xytext=(-axes[i,j].yaxis.labelpad - pad, 0),
xycoords=axes[i,j].yaxis.label, textcoords='offset points',
ha='right', va='center')
axes[i,j].set_xlim([0,1])
axes[i,j].set_xticks([0.0, 1.0])
axes[i,j].set_yticks([])
axes[i,j].grid(False)
if i == n_calibrations-1:
axes[i,j].set_xlabel(prevention_labels[j])
axes[i,j].spines['left'].set_visible(False)
else:
axes[i,j] = plot_fit(axes[i,j], 'H_in','$H_{in}$ (-)', df_sciensano, out['time'].values, vector_median, vector_LL, vector_UL, start_calibration = start_calibration, end_calibration=end_calibrations[i], end_sim=end_sim)
axes[i,j].xaxis.set_major_locator(plt.MaxNLocator(3))
axes[i,j].set_yticks([0,250, 500, 750])
axes[i,j].set_ylim([0,850])
plt.tight_layout()
plt.show()
model_results_WAVE2 = {'time': out['time'].values, 'vector_mean': vector_mean, 'vector_median': vector_median, 'vector_LL': vector_LL, 'vector_UL': vector_UL}
model_results = [model_results_WAVE1, model_results_WAVE2]
#################################################################
## Part 4: Comparing the maximal dataset prevention parameters ##
#################################################################
samples_dict_WAVE1 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE1_BETA_COMPLIANCE_2021-02-22.json'))
samples_dict_WAVE2 = json.load(open('../../data/interim/model_parameters/COVID19_SEIRD/calibrations/national/BE_WAVE2_BETA_COMPLIANCE_2021-03-02.json'))
labels = ['$\Omega_{schools}$','$\Omega_{work}$', '$\Omega_{rest}$', '$\Omega_{home}$']
keys = ['prev_schools','prev_work','prev_rest','prev_home']
fig,axes = plt.subplots(1,4,figsize=(12,4))
for idx,ax in enumerate(axes):
if idx != 0:
(n1, bins, patches) = ax.hist(samples_dict_WAVE1[keys[idx]],bins=15,color='blue',alpha=0.4, density=True)
(n2, bins, patches) = ax.hist(samples_dict_WAVE2[keys[idx]],bins=15,color='black',alpha=0.4, density=True)
max_n = max([max(n1),max(n2)])*1.10
ax.axvline(np.mean(samples_dict_WAVE1[keys[idx]]),ls=':',ymin=0,ymax=1,color='blue')
ax.axvline(np.mean(samples_dict_WAVE2[keys[idx]]),ls=':',ymin=0,ymax=1,color='black')
if idx ==1:
ax.annotate('$\mu_1 = \mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
else:
ax.annotate('$\mu_1 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE1[keys[idx]])), xy=(np.mean(samples_dict_WAVE1[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.annotate('$\mu_2 = $'+"{:.2f}".format(np.mean(samples_dict_WAVE2[keys[idx]])), xy=(np.mean(samples_dict_WAVE2[keys[idx]]),max_n),
rotation=90,va='bottom', ha='center',annotation_clip=False,fontsize=12)
ax.set_xlabel(labels[idx])
ax.set_yticks([])
ax.spines['left'].set_visible(False)
else:
ax.hist(samples_dict_WAVE2['prev_schools'],bins=15,color='black',alpha=0.6, density=True)
ax.set_xlabel('$\Omega_{schools}$')
ax.set_yticks([])
ax.spines['left'].set_visible(False)
ax.set_xlim([0,1])
ax.xaxis.grid(False)
ax.yaxis.grid(False)
plt.tight_layout()
plt.show()
################################################################
## Part 5: Relative contributions of each contact: both waves ##
################################################################
# --------------------------------
# Re-define function to compute R0
# --------------------------------
def compute_R0(initN, Nc, samples_dict, model_parameters):
N = initN.size
sample_size = len(samples_dict['beta'])
R0 = np.zeros([N,sample_size])
R0_norm = np.zeros([N,sample_size])
for i in range(N):
for j in range(sample_size):
R0[i,j] = (model_parameters['a'][i] * samples_dict['da'][j] + samples_dict['omega'][j]) * samples_dict['beta'][j] *Nc[i,j]
R0_norm[i,:] = R0[i,:]*(initN[i]/sum(initN))
R0_age = np.mean(R0,axis=1)
R0_mean = np.sum(R0_norm,axis=0)
return R0, R0_mean
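# Hedged sketch (hypothetical values) of the population weighting used above:
# age-specific R0 values are combined into one effective number per posterior
# sample as sum_i R0_i * N_i / N_total.
_R0_by_age_demo = np.array([1.2, 2.0, 0.8])   # hypothetical age-specific R0
_N_by_age_demo = np.array([2e6, 6e6, 3e6])    # hypothetical age-group populations
_Re_weighted_demo = np.sum(_R0_by_age_demo * _N_by_age_demo / _N_by_age_demo.sum())  # ~1.53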
# -----------------------
# Pre-allocate dataframes
# -----------------------
index=df_google.index
columns = [['1','1','1','1','1','1','1','1','1','1','1','1','1','1','1','2','2','2','2','2','2','2','2','2','2','2','2','2','2','2'],['work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL','rest_mean','rest_LL','rest_UL',
'home_mean','home_LL','home_UL','total_mean','total_LL','total_UL','work_mean','work_LL','work_UL','schools_mean','schools_LL','schools_UL',
'rest_mean','rest_LL','rest_UL','home_mean','home_LL','home_UL','total_mean','total_LL','total_UL']]
tuples = list(zip(*columns))
columns = pd.MultiIndex.from_tuples(tuples, names=["WAVE", "Type"])
data = np.zeros([len(df_google.index),30])
df_rel = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_abs = pd.DataFrame(data=data, index=df_google.index, columns=columns)
df_Re = pd.DataFrame(data=data, index=df_google.index, columns=columns)
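# Minimal sketch of the two-level column construction used above, with a smaller
# hypothetical index (the real frames use df_google.index and 30 columns).
_tuples_demo = [("1", "work_mean"), ("1", "rest_mean"), ("2", "work_mean"), ("2", "rest_mean")]
_cols_demo = pd.MultiIndex.from_tuples(_tuples_demo, names=["WAVE", "Type"])
_df_demo = pd.DataFrame(np.zeros((3, 4)), columns=_cols_demo)
# _df_demo["1", "work_mean"] selects a single column, as done when filling df_rel below.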
samples_dicts = [samples_dict_WAVE1, samples_dict_WAVE2]
start_dates =[pd.to_datetime('2020-03-15'), pd.to_datetime('2020-10-19')]
waves=["1", "2"]
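# Hedged sketch of the compliance ramp applied in the loop below: between
# date_start + tau and date_start + tau + l, contacts move linearly from the
# pre-measure to the post-measure value. Numbers are hypothetical.
_old_demo, _new_demo, _l_demo = 10.0, 4.0, 5.0   # contacts before/after, ramp length in days
_days_into_ramp_demo = 2.0                       # days elapsed since the ramp started
_ramped_demo = _old_demo + (_new_demo - _old_demo) / _l_demo * _days_into_ramp_demo   # -> 7.6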
for j,samples_dict in enumerate(samples_dicts):
print('\n WAVE: ' + str(j)+'\n')
# ---------------
# Rest prevention
# ---------------
print('Rest\n')
data_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
Re_rest = np.zeros([len(df_google.index.values), len(samples_dict['prev_rest'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_rest[idx,:] = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))*np.ones(len(samples_dict['prev_rest']))
new = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
data_rest[idx,:]= old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_rest'])])
new_contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_rest[idx,:] = (0.01*(100+df_google['retail_recreation'][date])* (np.sum(np.mean(Nc_leisure,axis=0)))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(np.mean(Nc_transport,axis=0)))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(np.mean(Nc_others,axis=0)))\
)*np.array(samples_dict['prev_rest'])
contacts = np.expand_dims(0.01*(100+df_google['retail_recreation'][date])* (np.sum(Nc_leisure,axis=1))\
+ 0.01*(100+df_google['transport'][date])* (np.sum(Nc_transport,axis=1))\
+ 0.01*(100+df_google['grocery'][date])* (np.sum(Nc_others,axis=1)),axis=1)*np.array(samples_dict['prev_rest'])
R0, Re_rest[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_rest_mean = np.mean(Re_rest,axis=1)
Re_rest_LL = np.quantile(Re_rest,q=0.05/2,axis=1)
Re_rest_UL = np.quantile(Re_rest,q=1-0.05/2,axis=1)
# ---------------
# Work prevention
# ---------------
print('Work\n')
data_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_work = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_work[idx,:] = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0)))*np.array(samples_dict['prev_work'])
data_work[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(0.01*(100+df_google['work'][date])*(np.sum(Nc_work,axis=1)),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_work[idx,:] = (0.01*(100+df_google['work'][date])* (np.sum(np.mean(Nc_work,axis=0))))*np.array(samples_dict['prev_work'])
contacts = np.expand_dims(0.01*(100+df_google['work'][date])* (np.sum(Nc_work,axis=1)),axis=1)*np.array(samples_dict['prev_work'])
R0, Re_work[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_work_mean = np.mean(Re_work,axis=1)
Re_work_LL = np.quantile(Re_work, q=0.05/2, axis=1)
Re_work_UL = np.quantile(Re_work, q=1-0.05/2, axis=1)
# ----------------
# Home prevention
# ----------------
print('Home\n')
data_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
Re_home = np.zeros([len(df_google['work'].values),len(samples_dict['prev_home'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.ones(len(samples_dict['prev_home']))
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = np.sum(np.mean(Nc_home,axis=0))*np.ones(len(samples_dict['prev_home']))
new = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
data_home[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = np.expand_dims(np.sum(Nc_home,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
new_contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_home[idx,:] = np.sum(np.mean(Nc_home,axis=0))*np.array(samples_dict['prev_home'])
contacts = np.expand_dims((np.sum(Nc_home,axis=1)),axis=1)*np.array(samples_dict['prev_home'])
R0, Re_home[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_home_mean = np.mean(Re_home,axis=1)
Re_home_LL = np.quantile(Re_home, q=0.05/2, axis=1)
Re_home_UL = np.quantile(Re_home, q=1-0.05/2, axis=1)
# ------------------
# School prevention
# ------------------
if j == 0:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_work']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_work'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-09-01'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1 * (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_work']) # This is wrong, but is never used
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_home'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif j == 1:
print('School\n')
data_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_schools'])])
Re_schools = np.zeros([len(df_google.index.values), len(samples_dict['prev_work'])])
for idx, date in enumerate(df_google.index):
tau = np.mean(samples_dict['tau'])
l = np.mean(samples_dict['l'])
tau_days = pd.Timedelta(tau, unit='D')
l_days = pd.Timedelta(l, unit='D')
date_start = start_dates[j]
if date <= date_start + tau_days:
data_schools[idx,:] = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days < date <= date_start + tau_days + l_days:
old = 1*(np.sum(np.mean(Nc_schools,axis=0)))*np.ones(len(samples_dict['prev_schools']))
new = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
data_schools[idx,:] = old + (new-old)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
old_contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
new_contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
contacts = old_contacts + (new_contacts-old_contacts)/l * (date-date_start-tau_days)/pd.Timedelta('1D')
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif date_start + tau_days + l_days < date <= pd.to_datetime('2020-11-16'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-11-16') < date <= pd.to_datetime('2020-12-18'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2020-12-18') < date <= pd.to_datetime('2021-01-04'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-01-04') < date <= pd.to_datetime('2021-02-15'):
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
elif pd.to_datetime('2021-02-15') < date <= pd.to_datetime('2021-02-21'):
data_schools[idx,:] = 0* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 0*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
else:
data_schools[idx,:] = 1* (np.sum(np.mean(Nc_schools,axis=0)))*np.array(samples_dict['prev_schools'])
contacts = 1*np.expand_dims(np.sum(Nc_schools,axis=1),axis=1)*np.ones([1,len(samples_dict['prev_schools'])])
R0, Re_schools[idx,:] = compute_R0(initN, contacts, samples_dict, params)
Re_schools_mean = np.mean(Re_schools,axis=1)
Re_schools_LL = np.quantile(Re_schools, q=0.05/2, axis=1)
Re_schools_UL = np.quantile(Re_schools, q=1-0.05/2, axis=1)
# -----
# Total
# -----
data_total = data_rest + data_work + data_home + data_schools
Re_total = Re_rest + Re_work + Re_home + Re_schools
Re_total_mean = np.mean(Re_total,axis=1)
Re_total_LL = np.quantile(Re_total, q=0.05/2, axis=1)
Re_total_UL = np.quantile(Re_total, q=1-0.05/2, axis=1)
# -----------------------
# Absolute contributions
# -----------------------
abs_rest = np.zeros(data_rest.shape)
abs_work = np.zeros(data_rest.shape)
abs_home = np.zeros(data_rest.shape)
abs_schools = np.zeros(data_schools.shape)
abs_total = data_total
for i in range(data_rest.shape[0]):
abs_rest[i,:] = data_rest[i,:]
abs_work[i,:] = data_work[i,:]
abs_home[i,:] = data_home[i,:]
abs_schools[i,:] = data_schools[i,:]
abs_schools_mean = np.mean(abs_schools,axis=1)
abs_schools_LL = np.quantile(abs_schools,LL,axis=1)
abs_schools_UL = np.quantile(abs_schools,UL,axis=1)
abs_rest_mean = np.mean(abs_rest,axis=1)
abs_rest_LL = np.quantile(abs_rest,LL,axis=1)
abs_rest_UL = np.quantile(abs_rest,UL,axis=1)
abs_work_mean = np.mean(abs_work,axis=1)
abs_work_LL = np.quantile(abs_work,LL,axis=1)
abs_work_UL = np.quantile(abs_work,UL,axis=1)
abs_home_mean = np.mean(abs_home,axis=1)
abs_home_LL = np.quantile(abs_home,LL,axis=1)
abs_home_UL = np.quantile(abs_home,UL,axis=1)
abs_total_mean = np.mean(abs_total,axis=1)
abs_total_LL = np.quantile(abs_total,LL,axis=1)
abs_total_UL = np.quantile(abs_total,UL,axis=1)
# -----------------------
# Relative contributions
# -----------------------
rel_rest = np.zeros(data_rest.shape)
rel_work = np.zeros(data_rest.shape)
rel_home = np.zeros(data_rest.shape)
rel_schools = np.zeros(data_schools.shape)
rel_total = np.zeros(data_schools.shape)
for i in range(data_rest.shape[0]):
total = data_schools[i,:] + data_rest[i,:] + data_work[i,:] + data_home[i,:]
rel_rest[i,:] = data_rest[i,:]/total
rel_work[i,:] = data_work[i,:]/total
rel_home[i,:] = data_home[i,:]/total
rel_schools[i,:] = data_schools[i,:]/total
rel_total[i,:] = total/total
rel_schools_mean = np.mean(rel_schools,axis=1)
rel_schools_LL = np.quantile(rel_schools,LL,axis=1)
rel_schools_UL = np.quantile(rel_schools,UL,axis=1)
rel_rest_mean = np.mean(rel_rest,axis=1)
rel_rest_LL = np.quantile(rel_rest,LL,axis=1)
rel_rest_UL = np.quantile(rel_rest,UL,axis=1)
rel_work_mean = np.mean(rel_work,axis=1)
rel_work_LL = np.quantile(rel_work,LL,axis=1)
rel_work_UL = np.quantile(rel_work,UL,axis=1)
rel_home_mean = np.mean(rel_home,axis=1)
rel_home_LL = np.quantile(rel_home,LL,axis=1)
rel_home_UL = np.quantile(rel_home,UL,axis=1)
rel_total_mean = np.mean(rel_total,axis=1)
rel_total_LL = np.quantile(rel_total,LL,axis=1)
rel_total_UL = np.quantile(rel_total,UL,axis=1)
# ---------------------
# Append to dataframe
# ---------------------
df_rel[waves[j],"work_mean"] = rel_work_mean
df_rel[waves[j],"work_LL"] = rel_work_LL
df_rel[waves[j],"work_UL"] = rel_work_UL
df_rel[waves[j], "rest_mean"] = rel_rest_mean
df_rel[waves[j], "rest_LL"] = rel_rest_LL
df_rel[waves[j], "rest_UL"] = rel_rest_UL
df_rel[waves[j], "home_mean"] = rel_home_mean
df_rel[waves[j], "home_LL"] = rel_home_LL
df_rel[waves[j], "home_UL"] = rel_home_UL
df_rel[waves[j],"schools_mean"] = rel_schools_mean
df_rel[waves[j],"schools_LL"] = rel_schools_LL
df_rel[waves[j],"schools_UL"] = rel_schools_UL
df_rel[waves[j],"total_mean"] = rel_total_mean
df_rel[waves[j],"total_LL"] = rel_total_LL
df_rel[waves[j],"total_UL"] = rel_total_UL
copy1 = df_rel.copy(deep=True)
df_Re[waves[j],"work_mean"] = Re_work_mean
df_Re[waves[j],"work_LL"] = Re_work_LL
df_Re[waves[j],"work_UL"] = Re_work_UL
df_Re[waves[j], "rest_mean"] = Re_rest_mean
df_Re[waves[j],"rest_LL"] = Re_rest_LL
df_Re[waves[j],"rest_UL"] = Re_rest_UL
df_Re[waves[j], "home_mean"] = Re_home_mean
df_Re[waves[j], "home_LL"] = Re_home_LL
df_Re[waves[j], "home_UL"] = Re_home_UL
df_Re[waves[j],"schools_mean"] = Re_schools_mean
df_Re[waves[j],"schools_LL"] = Re_schools_LL
df_Re[waves[j],"schools_UL"] = Re_schools_UL
df_Re[waves[j],"total_mean"] = Re_total_mean
df_Re[waves[j],"total_LL"] = Re_total_LL
df_Re[waves[j],"total_UL"] = Re_total_UL
copy2 = df_Re.copy(deep=True)
df_abs[waves[j],"work_mean"] = abs_work_mean
df_abs[waves[j],"work_LL"] = abs_work_LL
df_abs[waves[j],"work_UL"] = abs_work_UL
df_abs[waves[j], "rest_mean"] = abs_rest_mean
df_abs[waves[j], "rest_LL"] = abs_rest_LL
df_abs[waves[j], "rest_UL"] = abs_rest_UL
df_abs[waves[j], "home_mean"] = abs_home_mean
df_abs[waves[j], "home_LL"] = abs_home_LL
df_abs[waves[j], "home_UL"] = abs_home_UL
df_abs[waves[j],"schools_mean"] = abs_schools_mean
df_abs[waves[j],"schools_LL"] = abs_schools_LL
df_abs[waves[j],"schools_UL"] = abs_schools_UL
df_abs[waves[j],"total_mean"] = abs_total_mean
df_abs[waves[j],"total_LL"] = abs_total_LL
df_abs[waves[j],"total_UL"] = abs_total_UL
df_rel = copy1
df_Re = copy2
#df_abs.to_excel('test.xlsx', sheet_name='Absolute contacts')
#df_rel.to_excel('test.xlsx', sheet_name='Relative contacts')
#df_Re.to_excel('test.xlsx', sheet_name='Effective reproduction number')
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]),
np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-03-22'):pd.to_datetime('2020-05-04')]))
print(np.mean(df_abs["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
print(np.mean(df_Re["1","total_LL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
np.mean(df_Re["1","total_mean"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]),
np.mean(df_Re["1","total_UL"][pd.to_datetime('2020-06-01'):pd.to_datetime('2020-07-01')]))
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]),
np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-10-25'):pd.to_datetime('2020-11-16')]))
print(np.mean(df_abs["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]))
print(np.mean(df_Re["2","total_LL"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_mean"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]),
np.mean(df_Re["2","total_UL"][pd.to_datetime('2020-11-16'):pd.to_datetime('2020-12-18')]))
# ----------------------------
# Plot absolute contributions
# ----------------------------
xlims = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-07-14')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2021-02-01')]]
no_lockdown = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-03-15')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2020-10-19')]]
fig,axes=plt.subplots(nrows=2,ncols=1,figsize=(12,7))
for idx,ax in enumerate(axes):
ax.plot(df_abs.index, df_abs[waves[idx],"rest_mean"], color='blue', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"work_mean"], color='red', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"home_mean"], color='green', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"schools_mean"], color='orange', linewidth=2)
ax.plot(df_abs.index, df_abs[waves[idx],"total_mean"], color='black', linewidth=1.5)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.set_ylabel('Absolute contacts (-)')
if idx == 0:
ax.legend(['leisure','work','home','schools','total'], bbox_to_anchor=(1.20, 1), loc='upper left')
ax.set_xlim(xlims[idx])
ax.axvspan(no_lockdown[idx][0], no_lockdown[idx][1], alpha=0.2, color='black')
ax2 = ax.twinx()
time = model_results[idx]['time']
vector_mean = model_results[idx]['vector_mean']
vector_LL = model_results[idx]['vector_LL']
vector_UL = model_results[idx]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
ax2.set_xlim(xlims[idx])
ax2.set_ylabel('New hospitalisations (-)')
ax = _apply_tick_locator(ax)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
plt.close()
# ----------------------------
# Plot relative contributions
# ----------------------------
fig,axes=plt.subplots(nrows=2,ncols=1,figsize=(12,7))
for idx,ax in enumerate(axes):
ax.plot(df_rel.index, df_rel[waves[idx],"rest_mean"], color='blue', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"work_mean"], color='red', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"home_mean"], color='green', linewidth=1.5)
ax.plot(df_rel.index, df_rel[waves[idx],"schools_mean"], color='orange', linewidth=1.5)
ax.xaxis.grid(False)
ax.yaxis.grid(False)
ax.set_ylabel('Relative contacts (-)')
if idx == 0:
ax.legend(['leisure','work','home','schools'], bbox_to_anchor=(1.20, 1), loc='upper left')
ax.set_xlim(xlims[idx])
ax.axvspan(no_lockdown[idx][0], no_lockdown[idx][1], alpha=0.2, color='black')
ax.set_yticks([0,0.25,0.50,0.75])
ax.set_ylim([0,0.85])
ax2 = ax.twinx()
time = model_results[idx]['time']
vector_mean = model_results[idx]['vector_mean']
vector_LL = model_results[idx]['vector_LL']
vector_UL = model_results[idx]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.xaxis.grid(False)
ax2.yaxis.grid(False)
ax2.set_xlim(xlims[idx])
ax2.set_ylabel('New hospitalisations (-)')
ax = _apply_tick_locator(ax)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
plt.close()
# --------------------------------------------
# Plot relative contributions and cluster data
# --------------------------------------------
# Perform calculation
df_clusters = pd.read_csv('../../data/interim/sciensano/clusters.csv')
population_total = 11539326
population_schools = 2344395
population_work = 4893800 #https://stat.nbb.be/Index.aspx?DataSetCode=POPULA&lang=nl
home_rel = df_clusters['family']/population_total
work_rel = df_clusters['work']/population_work
schools_rel = df_clusters['schools']/population_schools
others_rel = df_clusters['others']/population_total
normalizer = home_rel + work_rel + schools_rel + others_rel
df_clusters['family_rel'] = home_rel/normalizer
df_clusters['work_rel'] = work_rel/normalizer
df_clusters['schools_rel'] = schools_rel/normalizer
df_clusters['others_rel'] = others_rel/normalizer
df_clusters['midpoint_week'] = pd.to_datetime(df_clusters['startdate_week'])+(pd.to_datetime(df_clusters['enddate_week'])-pd.to_datetime(df_clusters['startdate_week']))/2
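# Hedged sketch (hypothetical counts) of the normalisation above: each setting is
# first scaled by its population at risk, then the four scaled shares are divided
# by their sum so they add up to one.
_clusters_demo = {'family': 50., 'work': 20., 'schools': 10., 'others': 20.}
_pops_demo = {'family': 100., 'work': 40., 'schools': 25., 'others': 100.}
_scaled_demo = {k: _clusters_demo[k] / _pops_demo[k] for k in _clusters_demo}
_total_demo = sum(_scaled_demo.values())
_shares_demo = {k: v / _total_demo for k, v in _scaled_demo.items()}   # values sum to 1.0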
# Make plot
fig,ax = plt.subplots(figsize=(12,5))
# Cluster data
ax.plot(df_clusters['midpoint_week'], df_clusters['others_rel'], '--',color='blue',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['work_rel'],'--', color='red',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['family_rel'],'--',color='green',linewidth=1.5)
ax.plot(df_clusters['midpoint_week'], df_clusters['schools_rel'],'--', color='orange',linewidth=1.5)
# Model relative share
#ax.plot(df_rel.index, df_rel['2',"rest_mean"], color='blue', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"work_mean"], color='red', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"home_mean"], color='green', linewidth=1.5)
#ax.plot(df_rel.index, df_rel['2',"schools_mean"], color='orange', linewidth=1.5)
ax.legend(['others','work','home','schools'], bbox_to_anchor=(1.10, 1), loc='upper left')
ax.scatter(df_clusters['midpoint_week'], df_clusters['others_rel'], color='blue')
ax.scatter(df_clusters['midpoint_week'], df_clusters['work_rel'], color='red')
ax.scatter(df_clusters['midpoint_week'], df_clusters['family_rel'],color='green')
ax.scatter(df_clusters['midpoint_week'], df_clusters['schools_rel'], color='orange')
# Shading of no lockdown zone
ax.axvspan('2020-09-01', '2020-10-19', alpha=0.2, color='black')
# Other style options
ax.set_ylabel('Normalized share of clusters (-)')
ax.grid(False)
ax = _apply_tick_locator(ax)
ax.set_ylim([0,0.80])
ax.set_yticks([0,0.25,0.50,0.75])
ax2 = ax.twinx()
time = model_results[1]['time']
vector_mean = model_results[1]['vector_mean']
vector_LL = model_results[1]['vector_LL']
vector_UL = model_results[1]['vector_UL']
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.plot(time,vector_mean,'--', color='black', linewidth=1.5)
ax2.fill_between(time,vector_LL, vector_UL,alpha=0.20, color = 'black')
ax2.scatter(df_sciensano.index,df_sciensano['H_in'],color='black',alpha=0.6,linestyle='None',facecolors='none', s=30, linewidth=1)
ax2.set_xlim(['2020-09-01', '2021-02-20'])
ax2.set_ylabel('New hospitalisations (-)')
ax2.grid(False)
ax2 = _apply_tick_locator(ax2)
plt.tight_layout()
plt.show()
# ------------------------------
# Plot Reproduction numbers (1)
# ------------------------------
xlims = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-07-14')],[pd.to_datetime('2020-09-01'), pd.to_datetime('2021-02-01')]]
no_lockdown = [[pd.to_datetime('2020-03-01'), pd.to_datetime('2020-03-15')],[pd.to_datetime('2020-09-01'),
|
pd.to_datetime('2020-10-19')
|
pandas.to_datetime
|
#! /usr/bin/env python
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from os import listdir
from os.path import isfile, join
import os
import sys
import time
import pandas as pd
import numpy as np
import re
import hashlib
import logging
import joblib
import gzip
from scipy import stats
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
import pkg_resources
from mmbot.decoder import return_decoded_value
if sys.version_info >= (3, 0):
from oletools.olevba3 import VBA_Parser
else:
from oletools.olevba import VBA_Parser
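# Hedged, standalone sketch of the hashing used by get_file_hash below: read the
# file bytes and take the hex MD5 digest. The byte string here is hypothetical.
_md5_demo = hashlib.md5(b"example bytes").hexdigest()   # 32-character hex string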
class MaliciousMacroBot:
def __init__(self, benign_path=None, malicious_path=None,
model_path=pkg_resources.resource_filename('mmbot', 'model'), retain_sample_contents=False):
"""
Constructor to setup path variables for model and sample data and initialize object.
:param benign_path: directory path (relative or absolute) to benign documents for the machine learning model to learn from.
:param malicious_path: directory path (relative or absolute) to malicious documents for the machine learning model to learn from.
:param model_path: directory where modeldata.pickle and vocab.txt files are kept.
:param retain_sample_contents: this relates to level of detail saved in the model data. If True, potentially sensitive
information like extracted vba will be stored in the model's pickle file. The benefit is that incremental
models can be built, where adding a new file to the training set will result in only reprocessing that one new
file. Otherwise all files in the benign_path and malicious_path will be reprocessed each time the model is
rebuilt. If you are experimenting with building many models and comparing results, set this to True,
otherwise keep it to False.
"""
# os.path.join(os.path.dirname(__file__), 'model')
self.clear_state()
self.set_model_paths(benign_path, malicious_path, model_path)
self.retain_sample_contents = retain_sample_contents
def clear_state(self):
"""
Resets object's state to clear out all model internals created after loading state from disk
"""
self.cls = None
self.modeldata = None
self.features = {}
def set_model_paths(self, benign_path, malicious_path, model_path):
"""
Helper function to set up paths to files and pre-emptively identify missing files or
directories that would cause errors later.
:param benign_path: directory path (relative or absolute) to benign documents for the machine learning model to learn from.
:param malicious_path: directory path (relative or absolute) to malicious documents for the machine learning model to learn from.
:param model_path: directory where model files and helpful data will be saved for the algorithm to function.
"""
try:
# One of the two paths is None
if (benign_path is None and malicious_path is not None) or (
benign_path is not None and malicious_path is None):
raise IOError("""ERROR: When supplying benign_path and malicious_path, both paths must have samples to
build a classification model. Either values can be None and an existing saved model
can be supplied, or paths can exist with corresponding office files and a new model
can be built.""")
# All three paths are None
if benign_path is None and malicious_path is None and model_path is None:
raise IOError(
"ERROR: All paths supplied for benign_path, malicious_path, and model_path cannot be None")
# Make sure provided paths actually do exist
if benign_path and malicious_path:
self.malicious_path = os.path.join(malicious_path, '')
if not os.path.exists(malicious_path) or not os.path.isdir(malicious_path):
raise IOError("ERROR: The malicious_path provided {} does not exist".format(malicious_path))
self.benign_path = os.path.join(benign_path, '')
if not os.path.exists(benign_path) or not os.path.isdir(benign_path):
raise IOError("ERROR: The benign_path provided {} does not exist".format(benign_path))
if model_path is not None:
self.model_path = os.path.join(model_path, '')
self.vba_vocab = os.path.join(self.model_path, 'vocab.txt')
self.modeldata_pickle = os.path.join(self.model_path, 'modeldata.pickle')
self.modeldata_pickle_gz = os.path.join(self.model_path, 'modeldata.pickle.gz')
# If the user-supplied path does not exist, use the default vocab.txt that comes with the package
if not os.path.exists(self.vba_vocab):
self.vba_vocab = os.path.join(pkg_resources.resource_filename('mmbot', 'model'), 'vocab.txt')
except Exception as e:
self.malicious_path = './tests/samples/malicious/'
raise IOError("ERROR: Supplied benign_path, malicious_path, or model_path does not "
"exist or is not a directory. {}".format(str(e)))
def get_file_hash(self, pathtofile):
"""
Computes the MD5 hash of the file
:param pathtofile: absolute or relative path to a file
:return: md5 hash of file as a string
"""
if os.path.isfile(pathtofile):
with open(pathtofile, 'rb') as file_to_hash:
filedata = file_to_hash.read()
md5 = hashlib.md5(filedata).hexdigest()
# sha1 = hashlib.sha1(filedata).hexdigest()
# sha256 = hashlib.sha256(filedata).hexdigest()
return md5
return None
def fill_missing_hashes(self, row):
"""
Checks if there is a null or NaN value for the 'md5' column. If so, computes it, if not,
returns original value. Used to fill in missing md5's in a dataframe.
:param row: a row of a dataframe with columns named 'md5' and 'filepath'
:return: for any missing md5 values, computes the hash on the given filepath
"""
if pd.isnull(row['md5']):
return self.get_file_hash(row['filepath'])
else:
return row['md5']
def get_file_meta_data(self, filepath, filename=None, getHash=False):
"""
helper function to get meta information about a file, including its path, date modified, and size
:param filepath: path to a file
:param filename: filename
:param getHash: whether or not the hash should be computed
:return: a tuple of format (filename, filepath, filesize, filemodified, md5)
"""
if filename is None:
filename = os.path.split(filepath)[1]
filemodified = time.ctime(os.path.getmtime(filepath))
filesize = os.path.getsize(filepath)
md5 = np.nan
if getHash:
md5 = self.get_file_hash(filepath)
return (filename, filepath, filesize, filemodified, md5)
def get_samples_from_disk(self, path=None, getHash=False):
"""
Given a path to a file or folder of files, recursively lists all files and metadata for the files
:param path: directory path
:param getHash: boolean, indicating whether or not to compute hash
:return: a dataframe with the filename, filepath, filesize, modified date, and md5 hash for each file found
"""
if not os.path.exists(path):
raise IOError("ERROR: File or path does not exist: {}".format(path, ))
if os.path.isfile(path):
meta = self.get_file_meta_data(path, getHash=getHash)
return pd.DataFrame({'filename': (meta[0],),
'filepath': (meta[1],),
'filesize': (meta[2],),
'filemodified': (meta[3],),
'md5': (meta[4],)})
try:
matches = []
for root, dirnames, filenames in os.walk(path):
for filename in filenames:
filepath = os.path.join(root, filename)
meta = self.get_file_meta_data(filepath, filename, getHash=getHash)
matches.append(meta)
if len(matches) > 0:
filenames, paths, sizes, dates, md5s = zip(*matches)
return pd.DataFrame({'filename': filenames, 'filepath': paths, 'filesize': sizes, \
'filemodified': dates, 'md5': md5s})
return pd.DataFrame()
except Exception as e:
raise IOError("ERROR with file or path {}: {}".format(path, str(e)))
def get_family_name(self, mypath):
"""
Given a file path, return the deepest directory name to allow organizing samples by name and having that meta
data in predictions
:param mypath: path to a file in the model training set
:return: deepest directory name, or 'Unknown' if there is a problem with a part of the file path
"""
normalized_path = os.path.dirname(os.path.abspath(mypath))
m = re.match(r'.*[\\/](.*?$)', normalized_path)
try:
group = m.group(1)
if len(group) > 0:
return group
return 'Unknown'
except:
return 'Unknown'
def new_samples(self, existing, possiblenew):
"""
Returns dataframe containing rows from possiblenew with MD5 hashes that are not in existing, to identify
new file samples.
:param existing: dataframe containing an 'md5' field
:param possiblenew: dataframe containing an 'md5' field
:return: Returns dataframe containing rows from possiblenew with MD5 hashes that are not in existing.
"""
existing_items = existing['md5'].tolist()
possiblenew_items = possiblenew['md5'].tolist()
actualnew_items = [x for x in possiblenew_items if x not in existing_items]
if len(actualnew_items) > 0:
return possiblenew[possiblenew['md5'].isin(actualnew_items)].copy()
return None
def get_language_features(self):
"""
After vba has been extracted from all files, this function does feature extraction on that vba and prepares
everything for a model to be built. load_model_data has been called, populating self.modeldata
:return: feature matrix and labels in a dictionary structure with keys 'X' and 'y' respectively
"""
self.load_model_vocab()
# Get custom VBA features
self.modeldata = pd.concat([self.modeldata, self.modeldata.extracted_vba.apply(self.get_vba_features)], axis=1)
tempfeatures = self.modeldata.columns
self.features['vba_features'] = [x for x in tempfeatures if x.startswith('vba_')]
# Count Vectorizer
vocab_lower = [x.lower() for x in self.features['vocab']]
vocab_lower = list(set(vocab_lower))
self.model_cntvect = CountVectorizer(vocabulary=vocab_lower,
lowercase=True,
decode_error='ignore',
token_pattern=r"(?u)\b\w[\w\.]+\b")
self.modeldata_cnts = self.model_cntvect.fit_transform(self.modeldata['extracted_vba'])
self.features['cnt_features'] = ['cnt_' + x for x in self.model_cntvect.get_feature_names()]
self.features['features'] = self.model_cntvect.get_feature_names()
self.modeldata = self.modeldata.join(pd.DataFrame(self.modeldata_cnts.toarray(),
columns=self.features['cnt_features']))
# TF-IDF Transformer
self.model_tfidf_trans = TfidfTransformer()
self.model_tfidf_cntvect = self.model_tfidf_trans.fit_transform(self.modeldata_cnts.toarray())
self.features['tfidf_features'] = ['tfidf_' + x for x in self.features['features']]
self.modeldata = self.modeldata.join(pd.DataFrame(self.model_tfidf_cntvect.toarray(),
columns=self.features['tfidf_features']))
# Train and Test Model
predictive_features = self.features['tfidf_features'] + self.features['vba_features']
self.features['predictive_features'] = predictive_features
self.clf_X = self.modeldata[predictive_features].values
self.clf_y = np.array(self.modeldata['label'])
return {'X': self.clf_X, 'y': self.clf_y}
def clear_model_features(self):
"""
Removes all columns from modeldata with names starting with cnt_, tfidf_, or vba_
These are the computed columns for the model
"""
if self.modeldata is not None:
columns = self.modeldata.columns
cntcolumns = [x for x in columns if x.startswith('cnt_')]
vba_feature_columns = [x for x in columns if x.startswith('vba_')]
tfidfcolumns = [x for x in columns if x.startswith('tfidf_')]
self.modeldata.drop(self.modeldata[cntcolumns], axis=1, inplace=True)
self.modeldata.drop(self.modeldata[vba_feature_columns], axis=1, inplace=True)
self.modeldata.drop(self.modeldata[tfidfcolumns], axis=1, inplace=True)
def build_models(self):
"""
After get_language_features is called, this function builds the models based on
the classifier matrix and labels.
:return:
"""
self.cls = RandomForestClassifier(n_estimators=100, max_features=.2)
# build classifier
self.cls.fit(self.clf_X, self.clf_y)
return self.cls
def load_model_vocab(self):
"""
Loads vocabulary used in the bag of words model
:return: fixed vocabulary that was loaded into internal state
"""
with open(self.vba_vocab) as vocabfile:
lines = vocabfile.readlines()
lines = [x.strip() for x in lines]
self.features['vocab'] = set(lines)
return self.features['vocab']
def load_model_data(self, exclude=None):
"""
Merges previously saved model data (if exists) with new files found in malicious and benign doc paths.
:param exclude: string value - if samples (including path) from the training set contain this string,
they will be omitted from the model. This is primarily used to hold malware families from consideration
in the model to assess classification generalization to new unknown families.
:return: number of new documents loaded into the model
"""
newdoc_cnt = 0
knowndocs = None
# Clear all stored contents because we don't save enough detail to pick up where we left off last time
if self.modeldata is not None:
knowndocs = self.modeldata.copy(deep=True)
try:
if self.malicious_path:
maldocs = self.get_samples_from_disk(self.malicious_path)
except:
self.malicious_path = './tests/samples/malicious/'
self.benign_path = './tests/samples/benign/'
self.model_path = './tests/samples/model/'
maldocs = self.get_samples_from_disk(self.malicious_path)
if len(maldocs) > 0:
maldocs['label'] = 'malicious'
benigndocs = self.get_samples_from_disk(self.benign_path)
if len(benigndocs) > 0:
benigndocs['label'] = 'benign'
if len(benigndocs) == 0 and len(maldocs) == 0 and knowndocs is None:
raise IOError("ERROR: Unable to load saved model data {} or process samples rooted in model path {}. "
"Unable to make predictions.".format(self.modeldata_pickle, self.model_path))
possiblenew = pd.concat([maldocs, benigndocs], axis=0)
if knowndocs is None:
# No existing samples, so alldocs are newly found docs.
possiblenew['md5'] = possiblenew['filepath'].apply(self.get_file_hash)
possiblenew[['extracted_vba', 'stream_path', 'filename_vba']] = possiblenew['filepath'].apply(self.get_vba)
possiblenew['family'] = possiblenew['filepath'].apply(self.get_family_name)
alldocs = possiblenew
newdoc_cnt = len(alldocs)
else:
temp = knowndocs.append(possiblenew)
# Omit seemingly duplicate files with same filepath, filesize and modified date
grouped_rows = temp.groupby(['filesize', 'filepath', 'filemodified'])
omit = grouped_rows.filter(lambda x: len(x) > 1)['filepath'].unique()
temp = temp[~((temp['filepath'].isin(omit)) & temp['md5'].isnull())].reset_index(drop=True)
# Compute hashes for those that are new. Omit files with duplicate md5 hashes
temp['md5'] = temp.apply(self.fill_missing_hashes, axis=1)
temp = temp.drop_duplicates(subset='md5', keep='first')
temp = temp.reset_index(drop=True)
newdocs = temp[temp['extracted_vba'].isnull()].copy()
knowndocs = temp[~temp['extracted_vba'].isnull()].copy()
# get enrichment for truly new docs
if len(newdocs) > 0:
logging.info("%d NEW DOCS FOUND!" % (len(newdocs),))
logging.info(newdocs[['filename', 'filemodified', 'filesize', 'filepath']])
newdocs[['extracted_vba', 'stream_path', 'filename_vba']] = newdocs['filepath'].apply(self.get_vba)
newdoc_cnt = len(newdocs)
newdocs['family'] = newdocs['filepath'].apply(self.get_family_name)
alldocs =
|
pd.concat([knowndocs, newdocs], axis=0)
|
pandas.concat
|
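# Hedged, self-contained sketch of the featurisation idea used in
# MaliciousMacroBot.get_language_features above: a fixed-vocabulary CountVectorizer
# followed by a TfidfTransformer. Vocabulary and documents below are hypothetical.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer

_vocab_demo = ["shell", "createobject", "autoopen"]
_docs_demo = ["Sub AutoOpen() Shell cmd End Sub", "Sub Hello() End Sub"]
_cnt_demo = CountVectorizer(vocabulary=_vocab_demo, lowercase=True)
_counts_demo = _cnt_demo.fit_transform(_docs_demo)              # term counts per document
_tfidf_demo = TfidfTransformer().fit_transform(_counts_demo)    # TF-IDF weighted counts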
# pylint: disable-msg=E1101,W0612
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.core.sparse.api import SparseDtype
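# Hedged sketch of the legacy sparse API exercised by the tests below (this API was
# removed in later pandas releases); the values are hypothetical.
_dense_demo = pd.Series([1.0, np.nan, np.nan, 3.0, np.nan])
_sparse_demo = _dense_demo.to_sparse()       # only the non-NaN entries are stored
_roundtrip_demo = _sparse_demo.to_dense()    # back to an ordinary dense Series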
class TestSparseSeriesIndexing(object):
def setup_method(self, method):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
def test_getitem(self):
orig = self.orig
sparse = self.sparse
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
assert res.dtype == SparseDtype(np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse[0] == 1
assert np.isnan(sparse[1])
assert sparse[2] == 0
assert sparse[3] == 3
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
assert sparse.loc[0] == 1
assert np.isnan(sparse.loc[1])
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.reindex([1, 3, 4, 5])
exp = orig.reindex([1, 3, 4, 5]).to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
assert np.isnan(result[-1])
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.loc['A'] == 1
assert np.isnan(sparse.loc['B'])
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[2])
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with pytest.raises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
assert sparse.iloc[3] == 3
assert np.isnan(sparse.iloc[1])
assert sparse.iloc[4] == 0
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
assert sparse.at[0] == orig.at[0]
assert np.isnan(sparse.at[1])
assert np.isnan(sparse.at[2])
assert sparse.at[3] == orig.at[3]
assert np.isnan(sparse.at[4])
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert np.isnan(sparse.at['c'])
assert sparse.at['d'] == orig.at['d']
assert np.isnan(sparse.at['e'])
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
assert sparse.at['a'] == orig.at['a']
assert np.isnan(sparse.at['b'])
assert sparse.at['c'] == orig.at['c']
assert sparse.at['d'] == orig.at['d']
assert sparse.at['e'] == orig.at['e']
def test_iat(self):
orig = self.orig
sparse = self.sparse
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert np.isnan(sparse.iat[2])
assert sparse.iat[3] == orig.iat[3]
assert np.isnan(sparse.iat[4])
assert np.isnan(sparse.iat[-1])
assert sparse.iat[-5] == orig.iat[-5]
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
assert sparse.iat[0] == orig.iat[0]
assert np.isnan(sparse.iat[1])
assert sparse.iat[2] == orig.iat[2]
assert sparse.iat[3] == orig.iat[3]
assert sparse.iat[4] == orig.iat[4]
assert sparse.iat[-1] == orig.iat[-1]
assert sparse.iat[-5] == orig.iat[-5]
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
assert s.get(0) == 1
assert np.isnan(s.get(1))
assert s.get(5) is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
assert s.get('A') == 1
assert np.isnan(s.get('B'))
assert s.get('C') == 0
assert s.get('XX') is None
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_fill_value_reindex(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
def test_fill_value_reindex_coerces_float_int(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
floats = pd.Series([1., 2., 3.]).to_sparse()
result = floats.reindex([1, 2, 3], fill_value=0)
expected = pd.Series([2., 3., 0], index=[1, 2, 3]).to_sparse()
tm.assert_sp_series_equal(result, expected)
def test_reindex_nearest(self):
s = pd.Series(np.arange(10, dtype='float64')).to_sparse()
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = pd.Series(np.around(target), target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = pd.Series([0, 1, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = pd.Series([0, np.nan, np.nan, 2], target).to_sparse()
tm.assert_sp_series_equal(expected, actual)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer],)
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries(
[1, 3], index=['a', 'c'],
dtype=SparseDtype(np.float64, s.fill_value),
kind=kind
)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assert_raises_regex(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
def setup_method(self, method):
# MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig =
|
pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
|
pandas.Series
|
"""Extract messages from iMessage / the Apple Messages app."""
# %% imports
import sqlite3
import pandas as pd
# %% Connect to messages database
# conn = sqlite3.connect("/Users/Tobias/Library/Messages/chat.db")
# Above gives this error -> OperationalError: unable to open database file
# Copied chat.db onto my desktop and it worked!
conn = sqlite3.connect("/Users/Tobias/Desktop/chat.db")
cur = conn.cursor()
# query the database to get all the table names
cur.execute(" select name from sqlite_master where type = 'table' ")
for name in cur.fetchall():
print(name)
# %% Look at a few messages
a_few_messages = pd.read_sql_query("select * from message limit 10", conn)
a_few_messages
# %% Create dataframe with relevant tables
# High Sierra and above
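# Note on the query below: on High Sierra and later, the message `date` column holds
# nanoseconds elapsed since the Apple epoch (2001-01-01), so the SQL divides by 1e9 and
# adds strftime("%s", "2001-01-01") to obtain a Unix timestamp before converting it to
# local time with datetime(..., "unixepoch", "localtime").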
messages = pd.read_sql_query(
"""select *, datetime(date/1000000000 + strftime("%s", "2001-01-01") ,"unixepoch","localtime") as date_utc from message""",
conn,
)
messages = messages.rename(columns={"ROWID": "message_id"})
# Get handles to map apple_ids
handles = pd.read_sql_query("select * from handle", conn)
handles = handles.rename(columns={"id": "phone_number", "ROWID": "handle_id"})
merge_1 =
|
pd.merge(messages["text",])
|
pandas.merge
|
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset
from ..datasets.dunnhumby import Dunnhumby
from ..datasets.epinions import Epinions
from ..datasets.instacart import Instacart, Instacart_25
from ..datasets.last_fm import LastFM
from ..datasets.movielens import Movielens_1m, Movielens_25m, Movielens_100k
from ..datasets.tafeng import Tafeng
def load_dataset(config):
"""Load datasets.
Args:
config (dict): Dictionary of configuration.
Returns:
dataset (pandas.DataFrame): Full dataset.
"""
dataset_mapping = {
"ml_100k": Movielens_100k,
"ml_1m": Movielens_1m,
"ml_25m": Movielens_25m,
"last_fm": LastFM,
"tafeng": Tafeng,
"epinions": Epinions,
"dunnhumby": Dunnhumby,
"instacart": Instacart,
"instacart_25": Instacart_25,
}
dataset = dataset_mapping[config["dataset"]["dataset"]]()
return dataset
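# Usage sketch (illustrative only; apart from config["dataset"]["dataset"], the exact
# config layout is an assumption here):
#
#   config = {"dataset": {"dataset": "ml_100k"}}
#   dataset = load_dataset(config)  # -> an instance of the class mapped from the config key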
def reindex_items(train_data, valid_data=None, test_data=None):
"""Reindex the item ids.
Item ids are reindexed from 1. "0" is left for padding.
Args:
train_data (pandas.DataFrame): Training set.
valid_data (pandas.DataFrame): Validation set.
test_data (pandas.DataFrame): Test set.
Returns:
train_data (pandas.DataFrame): Reindexed training set.
valid_data (pandas.DataFrame): Reindexed validation set.
test_data (pandas.DataFrame): Reindexed test set.
"""
train_data = train_data.sort_values(by=["col_user", "col_timestamp"])
test_data = test_data.sort_values(by=["col_user", "col_timestamp"])
# train data
item_ids = train_data.col_item.unique()
item2idx = pd.Series(data=np.arange(len(item_ids)) + 1, index=item_ids)
# Build itemmap, a DataFrame that has 2 columns (col_item, item_idx)
itemmap = pd.DataFrame(
{"col_item": item_ids, "item_idx": item2idx[item_ids].values}
)
train_data = pd.merge(train_data, itemmap, on="col_item", how="inner")
train_data.col_item = train_data.item_idx
train_data = train_data.drop(columns=["item_idx"])
train_data = train_data.sort_values(by=["col_user", "col_timestamp"])
# test data
test_data =
|
pd.merge(test_data, itemmap, on="col_item", how="inner")
|
pandas.merge
|
# coding: utf-8
# Import libraries
import pandas as pd
from pandas import ExcelWriter
import numpy as np
import pickle
def create_m1():
"""
The CREATE_M1 operation builds the first data matrix for each gene of interest, collecting the current gene expression and methylation values, along with the expression values of all the genes in the same gene set. One data matrix per target gene is created and exported locally as an Excel file, and the whole set of M1 matrices is returned as a Python dictionary (dict_model_v1.p), where each target gene (key) is associated with a Pandas dataframe containing its M1 data (value).
:return: a Python dictionary
Example::
import genereg as gr
m1_dict = gr.DataMatrixes.create_m1()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Methylation values for genes of interest
methyl_df = pd.read_excel('./3_TCGA_Data/Methylation/Methylation_Values.xlsx',sheetname='Sheet1',header=0)
# Gene expression values for genes of interest
expr_interest_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-InterestGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in methyl_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains the expression and methylation values of the model gene in the first and second columns, and the expression of all the genes that belong to the
# model gene set in the other columns, while the different TCGA aliquots are the indexes of the rows.
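# Schematically, for a model gene G in gene set S, each M1 matrix has the layout
# (inferred from the construction code below):
#
#   index (TCGA aliquot) | EXPRESSION (G) | METHYLATION (G) | gene_1 | gene_2 | ...
#
# where gene_1, gene_2, ... are the other genes of interest in S and their columns hold
# gene expression values.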
dict_model_v1 = {}
# Define the variables we need for the computation
model_gene_pathways = [] # list of the gene sets the model gene belongs to
same_pathway_genes = [] # list of the symbols of the genes belonging to the same gene sets as the model gene
df_columns = [] # list of the model columns names
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the gene sets of the model gene
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym == model_gene_SYM:
p = r['GENE_SET']
model_gene_pathways.append(p)
# Get the genes of interest belonging to the model gene set
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if path in model_gene_pathways:
symbol = r['GENE_SYMBOL']
if symbol != model_gene_SYM:
same_pathway_genes.append(symbol)
# Define the columns of the model gene matrix of data
df_columns.append('EXPRESSION ('+model_gene_SYM+')') # the first column contains the expression of the model gene
df_columns.append('METHYLATION ('+model_gene_SYM+')') # the second column contains the methylation of the model gene
for g in same_pathway_genes:
df_columns.append(g) # we have a column for each gene in the same gene set of the model gene
# In correspondence of the model gene key in the dictionary,
# set its model as value, with the proper indexes and column names
dict_model_v1[model_gene_SYM] = pd.DataFrame(index = aliquots, columns = df_columns)
# Reset the variables for the next iteration on the next gene of interest
model_gene_pathways = []
same_pathway_genes = []
df_columns = []
# Fill the models for each gene of interest
for gene, matrix in dict_model_v1.items():
first_col = 'EXPRESSION ('+gene+')'
second_col = 'METHYLATION ('+gene+')'
# Add the expression and methylation values of each model gene and for each TCGA aliquot
for index, row in matrix.iterrows():
model_expr = expr_interest_df.get_value(index,gene) # get the expression
model_methyl = methyl_df.get_value(index,gene) # get the methylation value
# set the two values in the correct cell of the matrix
matrix.set_value(index,first_col,model_expr)
matrix.set_value(index,second_col,model_methyl)
# Add the expression values for all the other genes belonging to the same gene set of the model gene
for index, row in matrix.iterrows():
for column_name, values in matrix.iteritems(): # iterate along the columns of the dataframe
# skip the first two columns and add the proper values
if (column_name != first_col) and (column_name != second_col):
expr = expr_interest_df.get_value(index,column_name)
matrix.set_value(index,column_name,expr)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information.
pickle.dump(dict_model_v1, open('./4_Data_Matrix_Construction/Model1/dict_model_v1.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v1.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model1/'+file_name)
output_df = dict_model_v1[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
current_pathway_model = dict_model_v1[g].copy()
# Extract the genes of interest in the current gene set
current_pathway_genes = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
path = r['GENE_SET']
if path == p:
current_pathway_genes.append(sym)
# Extract list of columns in the full model
all_columns = []
for column_name, values in current_pathway_model.iteritems():
if (column_name != 'EXPRESSION ('+g+')') and (column_name != 'METHYLATION ('+g+')'):
all_columns.append(column_name)
# Extract the columns to remove from the model
other_pathway_genes = list(set(all_columns) - set(current_pathway_genes))
for i in other_pathway_genes:
if (i != g):
current_pathway_model.drop(i, axis=1, inplace=True)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model1/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v1.xlsx')
current_pathway_model.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v1
def create_m2():
"""
The CREATE_M2 operation builds the second data matrix for each gene of interest, adding to the first matrix the expression of the candidate regulatory genes of each gene of interest. One data matrix per target gene is created and exported locally as an Excel file, and the whole set of M2 matrices is returned as a Python dictionary (dict_model_v2.p), where each target gene (key) is associated with a Pandas dataframe containing its M2 data (value).
:return: a Python dictionary
Example::
import genereg as gr
m2_dict = gr.DataMatrixes.create_m2()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Models_v1 of genes of interest
dict_model_v1 = pickle.load(open('./4_Data_Matrix_Construction/Model1/dict_model_v1.p', 'rb'))
# Distinct regulatory genes for each gene of interest
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Gene expression values for regulatory genes
expr_regulatory_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-RegulatoryGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in expr_regulatory_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains all the information in the first model, plus additional columns with the expression of the regulatory genes for each model gene,
# while the different TCGA aliquots are the indexes of the rows
dict_model_v2 = {}
# Define the variables we need for the computation
model_gene_RegulGenes_SYM = [] # list of gene symbols for the regulatory genes of the model gene
new_columns = [] # list of the new columns names to be added to the model
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the list of regulatory genes for the model gene
model_gene_RegulGenes_SYM = dict_RegulGenes[model_gene_SYM]
# Get the first model for the current gene (model_v1)
model_1_df = dict_model_v1[model_gene_SYM]
# Identify the new columns to be added to the matrix:
# in this case they are the columns corresponding to regulatory genes of the model gene
# (be careful not to have duplicated columns, so add only the symbols of the genes
# that are not already contained in the previous model)
old_columns = list(model_1_df.columns.values)
for g in model_gene_RegulGenes_SYM:
if g not in old_columns:
new_columns.append(g)
# Create the new part of the model to add
new_df = pd.DataFrame(index = aliquots, columns = new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in new_df.iterrows():
for column_name, values in new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v2)
model_2_df = model_1_df.join(new_df)
# Set the new model in correspondence of the correct model gene key in the new dictionary
dict_model_v2[model_gene_SYM] = model_2_df
# Reset the variables for the next iteration on the next gene of interest
model_gene_RegulGenes_SYM = []
new_columns = []
# Check if some genes of interest have their own as candidate regulatory genes. If so, remove that column from the matrix
for gene in gene_interest_SYMs:
data_matrix = dict_model_v2[gene]
matrix_cols = list(data_matrix.columns.values)
if gene in matrix_cols:
data_matrix.drop(gene, axis=1, inplace=True)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information
pickle.dump(dict_model_v2, open('./4_Data_Matrix_Construction/Model2/dict_model_v2.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v2.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model2/'+file_name)
output_df = dict_model_v2[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
# Import the 'model_v1' matrix for the current gene
current_pathway_model = pd.read_excel('./4_Data_Matrix_Construction/Model1/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v1.xlsx',sheetname='Sheet1',header=0)
# Get the list of regulatory genes for the model gene
current_gene_RegulGenes_SYM = dict_RegulGenes[g]
# Create the M2 model for the current gene in the current gene set, identifying the new columns to be added to the matrix
current_pathway_new_columns = []
current_pathway_old_columns = list(current_pathway_model.columns.values)
for gene in current_gene_RegulGenes_SYM:
if gene not in current_pathway_old_columns:
current_pathway_new_columns.append(gene)
# Create the new part of the model to add
current_pathway_new_df = pd.DataFrame(index = aliquots, columns = current_pathway_new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in current_pathway_new_df.iterrows():
for column_name, values in current_pathway_new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
current_pathway_new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v2)
current_pathway_model_2_df = current_pathway_model.join(current_pathway_new_df)
# Check if some genes of interest have their own as candidate regulatory genes. If so, remove that column from the matrix
current_pathway_matrix_cols = list(current_pathway_model_2_df.columns.values)
if g in current_pathway_matrix_cols:
current_pathway_model_2_df.drop(g, axis=1, inplace=True)
writer = ExcelWriter('./4_Data_Matrix_Construction/Model2/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v2.xlsx')
current_pathway_model_2_df.to_excel(writer,'Sheet1')
writer.save()
return dict_model_v2
def create_m3():
"""
The CREATE_M3 operation builds the third data matrix for each gene of interest, adding to the second matrix the expression of the candidate regulatory genes of the genes of interest belonging to the same gene set as the model gene. One data matrix per target gene is created and exported locally as an Excel file, and the whole set of M3 matrices is returned as a Python dictionary (dict_model_v3.p), where each target gene (key) is associated with a Pandas dataframe containing its M3 data (value).
:return: a Python dictionary
Example::
import genereg as gr
m3_dict = gr.DataMatrixes.create_m3()
"""
# Load input data:
# Genes of interest
EntrezConversion_df = pd.read_excel('./Genes_of_Interest.xlsx',sheetname='Sheet1',header=0,converters={'GENE_SYMBOL':str,'ENTREZ_GENE_ID':str,'GENE_SET':str})
# Models_v2 of genes of interest
dict_model_v2 = pickle.load(open('./4_Data_Matrix_Construction/Model2/dict_model_v2.p', 'rb'))
# Distinct regulatory genes for each gene of interest
dict_RegulGenes = pickle.load(open('./2_Regulatory_Genes/dict_RegulGenes.p', 'rb'))
# Gene expression values for regulatory genes
expr_regulatory_df = pd.read_excel('./3_TCGA_Data/Gene_Expression/Gene_Expression-RegulatoryGenes.xlsx',sheetname='Sheet1',header=0)
# Create a list containing the Gene Symbols of the genes of interest
gene_interest_SYMs = []
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym not in gene_interest_SYMs:
gene_interest_SYMs.append(sym)
# Get the TCGA aliquots
aliquots = []
for i, r in expr_regulatory_df.iterrows():
if i != 'ENTREZ_GENE_ID':
aliquots.append(i)
# Create a dictionary where, for each gene of interest set as key (the model gene), we have a dataframe representing the model (matrix of data) of that gene.
# This model contains all the information in the second model, plus additional columns with the expression of the regulatory genes for each one of the genes belonging to the model gene set,
# while the different TCGA aliquots are the indexes of the rows
dict_model_v3 = {}
# Define the variables we need for the computation
model_gene_pathways = [] # list of the gene sets the model gene belongs to
same_pathway_genes = [] # list of the symbols of the genes belonging to the same gene sets as the model gene
same_pathway_genes_RegulGenes_SYM = [] # list of gene symbols for the regulatory genes of the genes in the same gene set
new_columns = [] # list of the new columns names to be added to the model
# Execute the following code for each gene of interest
for gene in gene_interest_SYMs:
model_gene_SYM = gene # get the Gene Symbol of the current gene
# Get the gene sets of the model gene
for i, r in EntrezConversion_df.iterrows():
sym = r['GENE_SYMBOL']
if sym == model_gene_SYM:
p = r['GENE_SET']
model_gene_pathways.append(p)
# Get the genes of interest belonging to the model gene sets
for i, r in EntrezConversion_df.iterrows():
path = r['GENE_SET']
if path in model_gene_pathways:
symbol = r['GENE_SYMBOL']
if symbol != model_gene_SYM:
same_pathway_genes.append(symbol)
# Get the list of regulatory genes for each one of the genes belonging to the same gene sets of the model gene
for elem in same_pathway_genes:
elem_regulatory_genes = dict_RegulGenes[elem]
same_pathway_genes_RegulGenes_SYM = same_pathway_genes_RegulGenes_SYM + elem_regulatory_genes
same_pathway_genes_RegulGenes_SYM = list(set(same_pathway_genes_RegulGenes_SYM)) # keep only distinct regulatory genes
# Get the second model for the current gene (model_v2)
model_2_df = dict_model_v2[model_gene_SYM]
# Identify the new columns to be added to the matrix:
# in this case they are the columns corresponding to regulatory genes of genes in the
# same gene sets of our model gene
# (be careful not to have duplicated columns, so add only the symbols of the genes
# that are not already contained in the previous model)
old_columns = list(model_2_df.columns.values)
for g in same_pathway_genes_RegulGenes_SYM:
if g not in old_columns:
new_columns.append(g)
# Create the new part of the model to add
new_df = pd.DataFrame(index = aliquots, columns = new_columns)
# Add the expression values for all the new regulatory genes and for each TCGA aliquot
for index, row in new_df.iterrows():
for column_name, values in new_df.iteritems(): # iterate along the columns of the dataframe
expr = expr_regulatory_df.get_value(index,column_name)
new_df.set_value(index,column_name,expr)
# Join the two dataframes and create the new model (model_v3)
model_3_df = model_2_df.join(new_df)
# Set the new model in correspondence of the correct model gene key in the new dictionary
dict_model_v3[model_gene_SYM] = model_3_df
# Reset the variables for the next iteration on the next gene of interest
model_gene_pathways = []
same_pathway_genes = []
same_pathway_genes_RegulGenes_SYM = []
new_columns = []
# Remove duplicate columns of the model gene
for gene in gene_interest_SYMs:
data_matrix = dict_model_v3[gene]
matrix_cols = list(data_matrix.columns.values)
if gene in matrix_cols:
data_matrix.drop(gene, axis=1, inplace=True)
# Export the dictionary into a pickle file in order to be able to import it back and use it to progressively build the next models for the genes of interest, adding further information
pickle.dump(dict_model_v3, open('./4_Data_Matrix_Construction/Model3/dict_model_v3.p', 'wb'))
# Export the models as .xlsx files
for gene in gene_interest_SYMs:
model_gene_SYM = gene
pathway = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'GENE_SET'].iloc[0]
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == model_gene_SYM, 'ENTREZ_GENE_ID'].iloc[0]
file_name = 'Gene_'+gene_ID+'_['+model_gene_SYM+']'+'_('+pathway+')-Model_v3.xlsx'
writer = ExcelWriter('./4_Data_Matrix_Construction/Model3/'+file_name)
output_df = dict_model_v3[model_gene_SYM]
output_df.to_excel(writer,'Sheet1')
writer.save()
# Handle genes belonging to multiple gene sets
multiple_pathway_genes = []
n = EntrezConversion_df['GENE_SYMBOL'].value_counts()
for i, v in n.items():
if v > 1 :
multiple_pathway_genes.append(i)
for g in multiple_pathway_genes:
filtered_df = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g]
pathways = (filtered_df.GENE_SET.unique()).tolist()
gene_ID = EntrezConversion_df.loc[EntrezConversion_df['GENE_SYMBOL'] == g, 'ENTREZ_GENE_ID'].iloc[0]
for p in pathways:
# Import the 'model_v2' matrix for the current gene
current_pathway_model =
|
pd.read_excel('./4_Data_Matrix_Construction/Model2/Gene_'+gene_ID+'_['+g+']_('+p+')-Model_v2.xlsx',sheetname='Sheet1',header=0)
|
pandas.read_excel
|
"""
A data loading script that takes in the output of Clover (in text format)
and builds an object that holds the clean data (i.e. only the information
required for querying for transcription factors).
"""
# packages used in this script
import pandas as pd
import sys
import re
# global variables
FIRST_ARGUMENT_INDEX = 1
class Loader:
"""
Loader class that takes in the Clover output and "cleans" it by
selecting the important features from the file.
"""
genes_dataframe = pd.DataFrame()
def check_file(self):
"""
Check if the user provided a file to be opened. If provided,
continue; exit otherwise.
"""
if len(sys.argv) == 1:
print("\n\nNo input file provided.\nRequired format: python3 " \
"<clover_file.txt>\n\n")
exit()
else:
with open (sys.argv[FIRST_ARGUMENT_INDEX]) as input_file:
first_line = input_file.readline()
if "Clover: Cis-eLement OVERrepresentation" not in first_line:
print("\nNot a Clover file.\nRequired format: python3" \
"<clover_file.txt>\n\n")
exit()
def add_genes(self, code, tf_list):
"""
Add genes to the main dataframe.
:param code: the ensembl code.
:param tf_list: the transcription factors associated with the given code.
"""
gene_dict = dict()
gene_dict[code] = tf_list
self.genes_dataframe = pd.concat([self.genes_dataframe,
|
pd.DataFrame(gene_dict)
|
pandas.DataFrame
|
from distutils.version import LooseVersion
from warnings import catch_warnings
import numpy as np
import pytest
from pandas._libs.tslibs import Timestamp
import pandas as pd
from pandas import (
DataFrame,
HDFStore,
Index,
MultiIndex,
Series,
_testing as tm,
bdate_range,
concat,
date_range,
isna,
read_hdf,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
ensure_clean_path,
ensure_clean_store,
tables,
)
from pandas.io.pytables import Term
pytestmark = pytest.mark.single
def test_select_columns_in_where(setup_path):
# GH 6169
# recreate multi-indexes when columns is passed
# in the `where` argument
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["foo_name", "bar_name"],
)
# With a DataFrame
df = DataFrame(np.random.randn(10, 3), index=index, columns=["A", "B", "C"])
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table")
expected = df[["A"]]
tm.assert_frame_equal(store.select("df", columns=["A"]), expected)
tm.assert_frame_equal(store.select("df", where="columns=['A']"), expected)
# With a Series
s = Series(np.random.randn(10), index=index, name="A")
with ensure_clean_store(setup_path) as store:
store.put("s", s, format="table")
tm.assert_series_equal(store.select("s", where="columns=['A']"), s)
def test_select_with_dups(setup_path):
# single dtypes
df = DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"])
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=["A"])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# dups across dtypes
df = concat(
[
DataFrame(np.random.randn(10, 4), columns=["A", "A", "B", "B"]),
DataFrame(
np.random.randint(0, 10, size=20).reshape(10, 2), columns=["A", "C"]
),
],
axis=1,
)
df.index = date_range("20130101 9:30", periods=10, freq="T")
with ensure_clean_store(setup_path) as store:
store.append("df", df)
result = store.select("df")
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
result = store.select("df", columns=df.columns)
expected = df
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["A"]]
result = store.select("df", columns=["A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
expected = df.loc[:, ["B", "A"]]
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
# duplicates on both index and columns
with ensure_clean_store(setup_path) as store:
store.append("df", df)
store.append("df", df)
expected = df.loc[:, ["B", "A"]]
expected = concat([expected, expected])
result = store.select("df", columns=["B", "A"])
tm.assert_frame_equal(result, expected, by_blocks=True)
def test_select(setup_path):
with ensure_clean_store(setup_path) as store:
with catch_warnings(record=True):
# select with columns=
df = tm.makeTimeDataFrame()
_maybe_remove(store, "df")
store.append("df", df)
result = store.select("df", columns=["A", "B"])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# equivalently
result = store.select("df", [("columns=['A', 'B']")])
expected = df.reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# all columns as data columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
result = store.select("df", ["A > 0"], columns=["A", "B"])
expected = df[df.A > 0].reindex(columns=["A", "B"])
tm.assert_frame_equal(expected, result)
# with a data column, but different columns
_maybe_remove(store, "df")
store.append("df", df, data_columns=["A"])
result = store.select("df", ["A > 0"], columns=["C", "D"])
expected = df[df.A > 0].reindex(columns=["C", "D"])
tm.assert_frame_equal(expected, result)
def test_select_dtypes(setup_path):
with ensure_clean_store(setup_path) as store:
# with a Timestamp data column (GH #2637)
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A"])
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# bool columns (GH #2849)
df = DataFrame(np.random.randn(5, 2), columns=["A", "B"])
df["object"] = "foo"
df.loc[4:5, "object"] = "bar"
df["boolv"] = df["A"] > 0
_maybe_remove(store, "df")
store.append("df", df, data_columns=True)
expected = df[df.boolv == True].reindex(columns=["A", "boolv"]) # noqa
for v in [True, "true", 1]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
expected = df[df.boolv == False].reindex(columns=["A", "boolv"]) # noqa
for v in [False, "false", 0]:
result = store.select("df", f"boolv == {v}", columns=["A", "boolv"])
tm.assert_frame_equal(expected, result)
# integer index
df = DataFrame({"A": np.random.rand(20), "B": np.random.rand(20)})
_maybe_remove(store, "df_int")
store.append("df_int", df)
result = store.select("df_int", "index<10 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
# float index
df = DataFrame(
{
"A": np.random.rand(20),
"B": np.random.rand(20),
"index": np.arange(20, dtype="f8"),
}
)
_maybe_remove(store, "df_float")
store.append("df_float", df)
result = store.select("df_float", "index<10.0 and columns=['A']")
expected = df.reindex(index=list(df.index)[0:10], columns=["A"])
tm.assert_frame_equal(expected, result)
with ensure_clean_store(setup_path) as store:
# floats w/o NaN
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
store.append("df1", df, data_columns=True)
result = store.select("df1", where="values>2.0")
expected = df[df["values"] > 2.0]
tm.assert_frame_equal(expected, result)
# floats with NaN
df.iloc[0] = np.nan
expected = df[df["values"] > 2.0]
store.append("df2", df, data_columns=True, index=False)
result = store.select("df2", where="values>2.0")
tm.assert_frame_equal(expected, result)
# https://github.com/PyTables/PyTables/issues/282
# bug in selection when 0th row has a np.nan and an index
# store.append('df3',df,data_columns=True)
# result = store.select(
# 'df3', where='values>2.0')
# tm.assert_frame_equal(expected, result)
# not in first position float with NaN ok too
df = DataFrame({"cols": range(11), "values": range(11)}, dtype="float64")
df["cols"] = (df["cols"] + 10).apply(str)
df.iloc[1] = np.nan
expected = df[df["values"] > 2.0]
store.append("df4", df, data_columns=True)
result = store.select("df4", where="values>2.0")
tm.assert_frame_equal(expected, result)
# test selection with comparison against numpy scalar
# GH 11283
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
expected = df[df["A"] > 0]
store.append("df", df, data_columns=True)
np_zero = np.float64(0) # noqa
result = store.select("df", where=["A>np_zero"])
tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(setup_path):
with ensure_clean_store(setup_path) as store:
df = DataFrame(
{
"ts": bdate_range("2012-01-01", periods=300),
"A": np.random.randn(300),
"B": range(300),
"users": ["a"] * 50
+ ["b"] * 50
+ ["c"] * 100
+ [f"a{i:03d}" for i in range(100)],
}
)
_maybe_remove(store, "df")
store.append("df", df, data_columns=["ts", "A", "B", "users"])
# regular select
result = store.select("df", "ts>=Timestamp('2012-02-01')")
expected = df[df.ts >= Timestamp("2012-02-01")]
tm.assert_frame_equal(expected, result)
# small selector
result = store.select("df", "ts>=Timestamp('2012-02-01') & users=['a','b','c']")
expected = df[
(df.ts >= Timestamp("2012-02-01")) & df.users.isin(["a", "b", "c"])
]
tm.assert_frame_equal(expected, result)
# big selector along the columns
selector = ["a", "b", "c"] + [f"a{i:03d}" for i in range(60)]
result = store.select("df", "ts>=Timestamp('2012-02-01') and users=selector")
expected = df[(df.ts >= Timestamp("2012-02-01")) & df.users.isin(selector)]
tm.assert_frame_equal(expected, result)
selector = range(100, 200)
result = store.select("df", "B=selector")
expected = df[df.B.isin(selector)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
# big selector along the index
selector = Index(df.ts[0:100].values)
result = store.select("df", "ts=selector")
expected = df[df.ts.isin(selector.values)]
tm.assert_frame_equal(expected, result)
assert len(result) == 100
def test_select_iterator(setup_path):
# single table
with ensure_clean_store(setup_path) as store:
df = tm.makeTimeDataFrame(500)
_maybe_remove(store, "df")
store.append("df", df)
expected = store.select("df")
results = list(store.select("df", iterator=True))
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=100))
assert len(results) == 5
result = concat(results)
tm.assert_frame_equal(expected, result)
results = list(store.select("df", chunksize=150))
result = concat(results)
tm.assert_frame_equal(result, expected)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df_non_table")
msg = "can only use an iterator or chunksize on a table"
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", chunksize=100)
with pytest.raises(TypeError, match=msg):
read_hdf(path, "df_non_table", iterator=True)
with ensure_clean_path(setup_path) as path:
df = tm.makeTimeDataFrame(500)
df.to_hdf(path, "df", format="table")
results = list(read_hdf(path, "df", chunksize=100))
result = concat(results)
assert len(results) == 5
tm.assert_frame_equal(result, df)
tm.assert_frame_equal(result, read_hdf(path, "df"))
# multiple
with ensure_clean_store(setup_path) as store:
df1 = tm.makeTimeDataFrame(500)
store.append("df1", df1, data_columns=True)
df2 = tm.makeTimeDataFrame(500).rename(columns="{}_2".format)
df2["foo"] = "bar"
store.append("df2", df2)
df = concat([df1, df2], axis=1)
# full selection
expected = store.select_as_multiple(["df1", "df2"], selector="df1")
results = list(
store.select_as_multiple(["df1", "df2"], selector="df1", chunksize=150)
)
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# no iterator
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/o iteration and no where clause works
result = store.select("df")
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, begin
# of range, works
where = f"index >= '{beg_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, single term, end
# of range, works
where = f"index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# select w/o iterator and where clause, inclusive range,
# works
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
result = store.select("df", where=where)
tm.assert_frame_equal(expected, result)
# with iterator, full range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[-1]
# select w/iterator and no where clause works
results = list(store.select("df", chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
tm.assert_frame_equal(expected, result)
def test_select_iterator_non_complete_8014(setup_path):
# GH 8014
# using iterator and where clause
chunksize = 1e4
# with iterator, non complete range
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[1]
end_dt = expected.index[-2]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# with iterator, empty where
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100064, "S")
_maybe_remove(store, "df")
store.append("df", expected)
end_dt = expected.index[-1]
# select w/iterator and where clause, single term, begin of range
where = f"index > '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert 0 == len(results)
def test_select_iterator_many_empty_frames(setup_path):
# GH 8014
# using iterator and where clause can return many empty
# frames.
chunksize = 10_000
# with iterator, range limited to the first chunk
with ensure_clean_store(setup_path) as store:
expected = tm.makeTimeDataFrame(100000, "S")
_maybe_remove(store, "df")
store.append("df", expected)
beg_dt = expected.index[0]
end_dt = expected.index[chunksize - 1]
# select w/iterator and where clause, single term, begin of range
where = f"index >= '{beg_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
result = concat(results)
rexpected = expected[expected.index >= beg_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, single term, end of range
where = f"index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
assert len(results) == 1
result = concat(results)
rexpected = expected[expected.index <= end_dt]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause, inclusive range
where = f"index >= '{beg_dt}' & index <= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be 1, is 10
assert len(results) == 1
result = concat(results)
rexpected = expected[(expected.index >= beg_dt) & (expected.index <= end_dt)]
tm.assert_frame_equal(rexpected, result)
# select w/iterator and where clause which selects
# *nothing*.
#
# To be consistent with Python idiom I suggest this should
# return [] e.g. `for e in []: print True` never prints
# True.
where = f"index <= '{beg_dt}' & index >= '{end_dt}'"
results = list(store.select("df", where=where, chunksize=chunksize))
# should be []
assert len(results) == 0
def test_frame_select(setup_path):
df = tm.makeTimeDataFrame()
with ensure_clean_store(setup_path) as store:
store.put("frame", df, format="table")
date = df.index[len(df) // 2]
crit1 = Term("index>=date")
assert crit1.env.scope["date"] == date
crit2 = "columns=['A', 'D']"
crit3 = "columns=A"
result = store.select("frame", [crit1, crit2])
expected = df.loc[date:, ["A", "D"]]
tm.assert_frame_equal(result, expected)
result = store.select("frame", [crit3])
expected = df.loc[:, ["A"]]
tm.assert_frame_equal(result, expected)
# invalid terms
df = tm.makeTimeDataFrame()
store.append("df_time", df)
msg = "could not convert string to Timestamp"
with pytest.raises(ValueError, match=msg):
store.select("df_time", "index>0")
# can't select if not written as table
# store['frame'] = df
# with pytest.raises(ValueError):
# store.select('frame', [crit1, crit2])
def test_frame_select_complex(setup_path):
# select via complex criteria
df = tm.makeTimeDataFrame()
df["string"] = "foo"
df.loc[df.index[0:4], "string"] = "bar"
with ensure_clean_store(setup_path) as store:
store.put("df", df, format="table", data_columns=["string"])
# empty
result = store.select("df", 'index>df.index[3] & string="bar"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "bar")]
tm.assert_frame_equal(result, expected)
result = store.select("df", 'index>df.index[3] & string="foo"')
expected = df.loc[(df.index > df.index[3]) & (df.string == "foo")]
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
from django.shortcuts import render
from django.http import HttpResponse
import pandas as pd
from os import listdir
from os.path import isfile, join
import re
import numpy as np
import plotly.express as px
import plotly.graph_objects as go
import json
import plotly
from plotly.subplots import make_subplots
from .models import notebook
from django.db import connection
import urllib, json
an_files = [f for f in listdir("data") if isfile(join("data", f))]
route = ["data/" + sub for sub in an_files]
read_list = [open(route[x], encoding="utf8") for x in range(len(route))]
read_list = [x.read() for x in read_list]
read_list = [x.replace("false", "False") for x in read_list]
read_list = [x.replace("true", "True") for x in read_list]
read_list = [x.replace("null", "None") for x in read_list]
ntb_list = [eval(x) for x in read_list]  # ntb_list is the list of notebooks in dict form
cell_count = [len(x["cells"]) for x in ntb_list]
# getting code cells
def find_code_cells(s):
return [x for x in s["cells"] if x["cell_type"] == "code"]
ntb_list_code = list(map(find_code_cells, ntb_list))
code_cell_count = [len(x) for x in ntb_list_code]
an_files = [re.sub('.ipynb',"", x) for x in an_files]
# id creation
for x in range(len(ntb_list)):
ntb_list[x]["id"] = x
for x in range(len(ntb_list_code)):
ntb_list_code[x].insert(0, x)
# Python notebook detection: a notebook is treated as Python if any of its code cells calls pd.read_
re_is_python = re.compile(r'pd.read_')
is_python = [x[0] for x in ntb_list_code for y in range(1,len(x)) for z in x[y]["source"] if re_is_python.search(z) != None]
# function to get unique values
def unique(list1):
x = np.array(list1)
return(np.unique(x))
is_python = unique(is_python)
is_r = range(len(ntb_list_code))
is_r = [item for item in is_r if item not in is_python]
for x in is_python:
ntb_list_code[x].insert(1, "python")
for x in is_r:
ntb_list_code[x].insert(1, "r")
df_code = pd.DataFrame(ntb_list_code)
# package list for python
pklf = open("dash_oyku/package_list.txt", "r")
package_list=[]
for line in pklf:
stripped_line = line.strip()
package_list.append(stripped_line)
pklf.close()
# libraries
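# Helper functions used below:
# - reg_apply(r, x): collect the "source" of every cell in row x whose source matches regex r
# - reg_sub_apply(r, x): strip every match of regex r from the strings in x
# - pull(r, x): extract the first match of regex r from each source line, skipping non-matches
# - is_exist(real, x): keep only the names in x that also appear in the reference list `real`
# - out(x, r): drop the entries of x that match regex r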
def reg_apply(r, x):
return [a["source"] for a in x if a != None and r.search(str(a["source"])) != None]
def reg_sub_apply(r, x):
return [re.sub(r,"", a) for a in x]
def pull(r, x):
return [r.search(i[a]).group() for i in x for a in range(len(i)) if r.search(i[a]) != None]
def is_exist(real, x):
imports_as_set = set(x)
intersection = imports_as_set.intersection(real)
intersection_as_list = list(intersection)
return intersection_as_list
def out(x, r):
return [a for a in x if r.search(a) == None]
# python libraries
regimport = re.compile(r'from.[^ \t\n\r\f\v.]*|import.[^ \t\n\r\f\v.]*')
libpy = df_code[df_code[1]=="python"].iloc[:,2:].apply(lambda x: reg_apply(regimport, x), axis=1)
libpy = libpy.apply(lambda x: pull(regimport, x))
libpy =
|
pd.DataFrame({"id":df_code[df_code[1]=="python"][0], "library": libpy})
|
pandas.DataFrame
|