prompt
stringlengths 19
1.03M
| completion
stringlengths 4
2.12k
| api
stringlengths 8
90
|
---|---|---|
import numpy as np
import pytest
from pandas import (
Categorical,
CategoricalDtype,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
get_dummies,
period_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
class TestGetitem:
def test_getitem_unused_level_raises(self):
# GH#20410
mi = MultiIndex(
levels=[["a_lot", "onlyone", "notevenone"], [1970, ""]],
codes=[[1, 0], [1, 0]],
)
df = DataFrame(-1, index=range(3), columns=mi)
with pytest.raises(KeyError, match="notevenone"):
df["notevenone"]
def test_getitem_periodindex(self):
rng = period_range("1/1/2000", periods=5)
df = DataFrame(np.random.randn(10, 5), columns=rng)
ts = df[rng[0]]
tm.assert_series_equal(ts, df.iloc[:, 0])
# GH#1211; smoketest unrelated to the rest of this test
repr(df)
ts = df["1/1/2000"]
tm.assert_series_equal(ts, df.iloc[:, 0])
def test_getitem_list_of_labels_categoricalindex_cols(self):
# GH#16115
cats = Categorical([Timestamp("12-31-1999"), Timestamp("12-31-2000")])
expected = DataFrame(
[[1, 0], [0, 1]], dtype="uint8", index=[0, 1], columns=cats
)
dummies = get_dummies(cats)
result = dummies[list(dummies.columns)]
tm.assert_frame_equal(result, expected)
def test_getitem_sparse_column_return_type_and_dtype(self):
# https://github.com/pandas-dev/pandas/issues/23559
data = SparseArray([0, 1])
df = DataFrame({"A": data})
expected = Series(data, name="A")
result = df["A"]
tm.assert_series_equal(result, expected)
# Also check iloc and loc while we're here
result = df.iloc[:, 0]
tm.assert_series_equal(result, expected)
result = df.loc[:, "A"]
tm.assert_series_equal(result, expected)
class TestGetitemListLike:
def test_getitem_list_missing_key(self):
# GH#13822, incorrect error string with non-unique columns when missing
# column is accessed
df = DataFrame({"x": [1.0], "y": [2.0], "z": [3.0]})
df.columns = ["x", "x", "z"]
# Check that we get the correct value in the KeyError
with pytest.raises(KeyError, match=r"\['y'\] not in index"):
df[["x", "y", "z"]]
class TestGetitemCallable:
def test_getitem_callable(self, float_frame):
# GH#12533
result = float_frame[lambda x: "A"]
expected = float_frame.loc[:, "A"]
tm.assert_series_equal(result, expected)
result = float_frame[lambda x: ["A", "B"]]
expected = float_frame.loc[:, ["A", "B"]]
tm.assert_frame_equal(result, float_frame.loc[:, ["A", "B"]])
df = float_frame[:3]
result = df[lambda x: [True, False, True]]
expected = float_frame.iloc[[0, 2], :]
tm.assert_frame_equal(result, expected)
def test_loc_multiindex_columns_one_level(self):
# GH#29749
df = DataFrame([[1, 2]], columns=[["a", "b"]])
expected = DataFrame([1], columns=[["a"]])
result = df["a"]
| tm.assert_frame_equal(result, expected) | pandas._testing.assert_frame_equal |
# -*- coding: utf-8 -*-
"""
Analyzes code age in a git repository
Writes reports in the following locations
e.g. For repository "cpython"
[root] Defaults to ~/git.stats
├── cpython Directory for https://github.com/python/cpython.git
│ └── reports
│ ├── 2011-03-06.d68ed6fc.2_0 Revision `d68ed6fc` which was created on 2011-03-06 on
│ │ │ branch `2.0`.
│ │ └── __c.__cpp.__h Report on *.c, *.cpp and *.h files in this revision
│ │ ├── Guido_van_Rossum Sub-report on author `<NAME>`
│ │ │ ├── code-age.png Graph of code age. LoC / day vs date
│ │ │ ├── code-age.txt List of commits in the peaks in the code-age.png graph
│ │ │ ├── details.csv LoC in each directory in for these files and authors
│ │ │ ├── newest-commits.txt List of newest commits for these files and authors
│ │ │ └── oldest-commits.txt List of oldest commits for these files and authors
"""
from __future__ import division, print_function
import subprocess
from subprocess import CalledProcessError
from collections import defaultdict, Counter
import sys
import time
import re
import os
import stat
import glob
import errno
import numpy as np
from scipy import signal
import pandas as pd
from pandas import Series, DataFrame, Timestamp
import matplotlib
import matplotlib.pylab as plt
from matplotlib.pylab import cycler
import bz2
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
import pygments
from pygments import lex
from pygments.token import Text, Comment, Punctuation, Literal
from pygments.lexers import guess_lexer_for_filename
# Python 2 / 3 stuff
PY2 = sys.version_info[0] < 3
try:
import cPickle as pickle
except ImportError:
import pickle
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#
# Configuration.
#
CACHE_FILE_VERSION = 3 # Update when making incompatible changes to cache file format
TIMEZONE = 'Australia/Melbourne' # The timezone used for all commit times. TODO Make configurable
SHA_LEN = 8 # The number of characters used when displaying git SHA-1 hashes
STRICT_CHECKING = False # For validating code.
N_BLAME_PROCESSES = max(1, cpu_count() - 1) # Number of processes to use for blaming
N_SHOW_THREADS = 8 # Number of threads for running the many git show commands
DO_MULTIPROCESSING = True # For test non-threaded performance
# Set graphing style
matplotlib.style.use('ggplot')
plt.rcParams['axes.prop_cycle'] = cycler('color', ['b', 'y', 'k', '#707040', '#404070'])
plt.rcParams['savefig.dpi'] = 300
PATH_MAX = 255
# Files that we don't analyze. These are files that don't have lines of code so that blaming
# doesn't make sense.
IGNORED_EXTS = {
'.air', '.bin', '.bmp', '.cer', '.cert', '.der', '.developerprofile', '.dll', '.doc', '.docx',
'.exe', '.gif', '.icns', '.ico', '.jar', '.jpeg', '.jpg', '.keychain', '.launch', '.pdf',
'.pem', '.pfx', '.png', '.prn', '.so', '.spc', '.svg', '.swf', '.tif', '.tiff', '.xls', '.xlsx',
'.tar', '.zip', '.gz', '.7z', '.rar',
'.patch',
'.dump',
'.h5'
}
def _is_windows():
"""Returns: True if running on a MS-Windows operating system."""
try:
sys.getwindowsversion()
except:
return False
else:
return True
IS_WINDOWS = _is_windows()
if IS_WINDOWS:
import win32api
import win32process
import win32con
def lowpriority():
""" Set the priority of the process to below-normal.
http://stackoverflow.com/questions/1023038/change-process-priority-in-python-cross-platform
"""
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
else:
def lowpriority():
os.nice(1)
class ProcessPool(object):
"""Package of Pool and ThreadPool for 'with' usage.
"""
SINGLE = 0
THREAD = 1
PROCESS = 2
def __init__(self, process_type, n_pool):
if not DO_MULTIPROCESSING:
process_type = ProcessPool.SINGLE
self.process_type = process_type
if process_type != ProcessPool.SINGLE:
clazz = ThreadPool if process_type == ProcessPool.THREAD else Pool
self.pool = clazz(n_pool)
def __enter__(self):
return self
def imap_unordered(self, func, args_iter):
if self.process_type != ProcessPool.SINGLE:
return self.pool.imap_unordered(func, args_iter)
else:
return map(func, args_iter)
def __exit__(self, exc_type, exc_value, traceback):
if self.process_type != ProcessPool.SINGLE:
self.pool.terminate()
def sha_str(sha):
"""The way we show git SHA-1 hashes in reports."""
return sha[:SHA_LEN]
def date_str(date):
"""The way we show dates in reports."""
return date.strftime('%Y-%m-%d')
DAY = pd.Timedelta('1 days') # 24 * 3600 * 1e9 in pandas nanosec time
# Max date accepted for commits. Clearly a sanity check
MAX_DATE = | Timestamp('today') | pandas.Timestamp |
from io import StringIO
import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Index, MultiIndex, Series, date_range
import pandas._testing as tm
from pandas.core.computation.check import _NUMEXPR_INSTALLED
PARSERS = "python", "pandas"
ENGINES = "python", pytest.param("numexpr", marks=td.skip_if_no_ne)
@pytest.fixture(params=PARSERS, ids=lambda x: x)
def parser(request):
return request.param
@pytest.fixture(params=ENGINES, ids=lambda x: x)
def engine(request):
return request.param
def skip_if_no_pandas_parser(parser):
if parser != "pandas":
pytest.skip(f"cannot evaluate with parser {repr(parser)}")
class TestCompat:
def setup_method(self, method):
self.df = DataFrame({"A": [1, 2, 3]})
self.expected1 = self.df[self.df.A > 0]
self.expected2 = self.df.A + 1
def test_query_default(self):
# GH 12749
# this should always work, whether _NUMEXPR_INSTALLED or not
df = self.df
result = df.query("A>0")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_None(self):
df = self.df
result = df.query("A>0", engine=None)
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine=None)
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_python(self):
df = self.df
result = df.query("A>0", engine="python")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="python")
tm.assert_series_equal(result, self.expected2, check_names=False)
def test_query_numexpr(self):
df = self.df
if _NUMEXPR_INSTALLED:
result = df.query("A>0", engine="numexpr")
tm.assert_frame_equal(result, self.expected1)
result = df.eval("A+1", engine="numexpr")
tm.assert_series_equal(result, self.expected2, check_names=False)
else:
with pytest.raises(ImportError):
df.query("A>0", engine="numexpr")
with pytest.raises(ImportError):
df.eval("A+1", engine="numexpr")
class TestDataFrameEval:
# smaller hits python, larger hits numexpr
@pytest.mark.parametrize("n", [4, 4000])
@pytest.mark.parametrize(
"op_str,op,rop",
[
("+", "__add__", "__radd__"),
("-", "__sub__", "__rsub__"),
("*", "__mul__", "__rmul__"),
("/", "__truediv__", "__rtruediv__"),
],
)
def test_ops(self, op_str, op, rop, n):
# tst ops and reversed ops in evaluation
# GH7198
df = DataFrame(1, index=range(n), columns=list("abcd"))
df.iloc[0] = 2
m = df.mean()
base = DataFrame( # noqa
np.tile(m.values, n).reshape(n, -1), columns=list("abcd")
)
expected = eval(f"base {op_str} df")
# ops as strings
result = eval(f"m {op_str} df")
tm.assert_frame_equal(result, expected)
# these are commutative
if op in ["+", "*"]:
result = getattr(df, op)(m)
tm.assert_frame_equal(result, expected)
# these are not
elif op in ["-", "/"]:
result = getattr(df, rop)(m)
tm.assert_frame_equal(result, expected)
def test_dataframe_sub_numexpr_path(self):
# GH7192: Note we need a large number of rows to ensure this
# goes through the numexpr path
df = DataFrame(dict(A=np.random.randn(25000)))
df.iloc[0:5] = np.nan
expected = 1 - np.isnan(df.iloc[0:25])
result = (1 - np.isnan(df)).iloc[0:25]
tm.assert_frame_equal(result, expected)
def test_query_non_str(self):
# GH 11485
df = pd.DataFrame({"A": [1, 2, 3], "B": ["a", "b", "b"]})
msg = "expr must be a string to be evaluated"
with pytest.raises(ValueError, match=msg):
df.query(lambda x: x.B == "b")
with pytest.raises(ValueError, match=msg):
df.query(111)
def test_query_empty_string(self):
# GH 13139
df = pd.DataFrame({"A": [1, 2, 3]})
msg = "expr cannot be an empty string"
with pytest.raises(ValueError, match=msg):
df.query("")
def test_eval_resolvers_as_list(self):
# GH 14095
df = DataFrame(np.random.randn(10, 2), columns=list("ab"))
dict1 = {"a": 1}
dict2 = {"b": 2}
assert df.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
assert pd.eval("a + b", resolvers=[dict1, dict2]) == dict1["a"] + dict2["b"]
class TestDataFrameQueryWithMultiIndex:
def test_query_with_named_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b], names=["color", "food"])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(
df.index.get_level_values("color").values, index=index, name="color"
)
# equality
res1 = df.query('color == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == color', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('color != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != color', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('color == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('color != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in color', parser=parser, engine=engine)
res2 = df.query('"red" in color', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in color', parser=parser, engine=engine)
res2 = df.query('"red" not in color', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
def test_query_with_unnamed_multiindex(self, parser, engine):
skip_if_no_pandas_parser(parser)
a = np.random.choice(["red", "green"], size=10)
b = np.random.choice(["eggs", "ham"], size=10)
index = MultiIndex.from_arrays([a, b])
df = DataFrame(np.random.randn(10, 2), index=index)
ind = Series(df.index.get_level_values(0).values, index=index)
res1 = df.query('ilevel_0 == "red"', parser=parser, engine=engine)
res2 = df.query('"red" == ilevel_0', parser=parser, engine=engine)
exp = df[ind == "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# inequality
res1 = df.query('ilevel_0 != "red"', parser=parser, engine=engine)
res2 = df.query('"red" != ilevel_0', parser=parser, engine=engine)
exp = df[ind != "red"]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# list equality (really just set membership)
res1 = df.query('ilevel_0 == ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] == ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('ilevel_0 != ["red"]', parser=parser, engine=engine)
res2 = df.query('["red"] != ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
# in/not in ops
res1 = df.query('["red"] in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" in ilevel_0', parser=parser, engine=engine)
exp = df[ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
tm.assert_frame_equal(res2, exp)
res1 = df.query('["red"] not in ilevel_0', parser=parser, engine=engine)
res2 = df.query('"red" not in ilevel_0', parser=parser, engine=engine)
exp = df[~ind.isin(["red"])]
tm.assert_frame_equal(res1, exp)
| tm.assert_frame_equal(res2, exp) | pandas._testing.assert_frame_equal |
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from textwrap import dedent
from parameterized import parameterized
import numpy as np
from numpy import nan
import pandas as pd
from zipline._protocol import handle_non_market_minutes, BarData
from zipline.assets import Asset, Equity
from zipline.errors import (
HistoryInInitialize,
HistoryWindowStartsBeforeData,
)
from zipline.finance.asset_restrictions import NoRestrictions
from zipline.testing import (
create_minute_df_for_asset,
str_to_seconds,
MockDailyBarReader,
)
import zipline.testing.fixtures as zf
OHLC = ['open', 'high', 'low', 'close']
OHLCP = OHLC + ['price']
ALL_FIELDS = OHLCP + ['volume']
class WithHistory(zf.WithCreateBarData, zf.WithDataPortal):
TRADING_START_DT = TRADING_ENV_MIN_DATE = START_DATE = pd.Timestamp(
'2014-01-03',
tz='UTC',
)
TRADING_END_DT = END_DATE = pd.Timestamp('2016-01-29', tz='UTC')
SPLIT_ASSET_SID = 4
DIVIDEND_ASSET_SID = 5
MERGER_ASSET_SID = 6
HALF_DAY_TEST_ASSET_SID = 7
SHORT_ASSET_SID = 8
# asset1:
# - 2014-03-01 (rounds up to TRADING_START_DT) to 2016-01-29.
# - every minute/day.
# asset2:
# - 2015-01-05 to 2015-12-31
# - every minute/day.
# asset3:
# - 2015-01-05 to 2015-12-31
# - trades every 10 minutes
# SPLIT_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - splits on 2015-01-05 and 2015-01-06
# DIVIDEND_ASSET:
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - dividends on 2015-01-05 and 2015-01-06
# MERGER_ASSET
# - 2015-01-04 to 2015-12-31
# - trades every minute
# - merger on 2015-01-05 and 2015-01-06
@classmethod
def init_class_fixtures(cls):
super().init_class_fixtures()
cls.trading_days = cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT
)
cls.ASSET1 = cls.asset_finder.retrieve_asset(1)
cls.ASSET2 = cls.asset_finder.retrieve_asset(2)
cls.ASSET3 = cls.asset_finder.retrieve_asset(3)
cls.SPLIT_ASSET = cls.asset_finder.retrieve_asset(
cls.SPLIT_ASSET_SID,
)
cls.DIVIDEND_ASSET = cls.asset_finder.retrieve_asset(
cls.DIVIDEND_ASSET_SID,
)
cls.MERGER_ASSET = cls.asset_finder.retrieve_asset(
cls.MERGER_ASSET_SID,
)
cls.HALF_DAY_TEST_ASSET = cls.asset_finder.retrieve_asset(
cls.HALF_DAY_TEST_ASSET_SID,
)
cls.SHORT_ASSET = cls.asset_finder.retrieve_asset(
cls.SHORT_ASSET_SID,
)
@classmethod
def make_equity_info(cls):
jan_5_2015 = pd.Timestamp('2015-01-05', tz='UTC')
day_after_12312015 = pd.Timestamp('2016-01-04', tz='UTC')
return pd.DataFrame.from_dict(
{
1: {
'start_date': pd.Timestamp('2014-01-03', tz='UTC'),
'end_date': cls.TRADING_END_DT,
'symbol': 'ASSET1',
'exchange': "TEST",
},
2: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET2',
'exchange': "TEST",
},
3: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'ASSET3',
'exchange': "TEST",
},
cls.SPLIT_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'SPLIT_ASSET',
'exchange': "TEST",
},
cls.DIVIDEND_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'DIVIDEND_ASSET',
'exchange': "TEST",
},
cls.MERGER_ASSET_SID: {
'start_date': jan_5_2015,
'end_date': day_after_12312015,
'symbol': 'MERGER_ASSET',
'exchange': "TEST",
},
cls.HALF_DAY_TEST_ASSET_SID: {
'start_date': pd.Timestamp('2014-07-02', tz='UTC'),
'end_date': day_after_12312015,
'symbol': 'HALF_DAY_TEST_ASSET',
'exchange': "TEST",
},
cls.SHORT_ASSET_SID: {
'start_date': pd.Timestamp('2015-01-05', tz='UTC'),
'end_date': pd.Timestamp('2015-01-06', tz='UTC'),
'symbol': 'SHORT_ASSET',
'exchange': "TEST",
}
},
orient='index',
)
@classmethod
def make_splits_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.SPLIT_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.SPLIT_ASSET_SID,
},
])
@classmethod
def make_mergers_data(cls):
return pd.DataFrame([
{
'effective_date': str_to_seconds('2015-01-06'),
'ratio': 0.25,
'sid': cls.MERGER_ASSET_SID,
},
{
'effective_date': str_to_seconds('2015-01-07'),
'ratio': 0.5,
'sid': cls.MERGER_ASSET_SID,
}
])
@classmethod
def make_dividends_data(cls):
return pd.DataFrame([
{
# only care about ex date, the other dates don't matter here
'ex_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-06', tz='UTC').to_datetime64(),
'amount': 2.0,
'sid': cls.DIVIDEND_ASSET_SID,
},
{
'ex_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'record_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'declared_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'pay_date':
pd.Timestamp('2015-01-07', tz='UTC').to_datetime64(),
'amount': 4.0,
'sid': cls.DIVIDEND_ASSET_SID,
}],
columns=[
'ex_date',
'record_date',
'declared_date',
'pay_date',
'amount',
'sid'],
)
@classmethod
def make_adjustment_writer_equity_daily_bar_reader(cls):
return MockDailyBarReader(
dates=cls.trading_calendar.sessions_in_range(
cls.TRADING_START_DT,
cls.TRADING_END_DT,
),
)
def verify_regular_dt(self, idx, dt, mode, fields=None, assets=None):
if mode == 'daily':
freq = '1d'
else:
freq = '1m'
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
def reindex_to_primary_calendar(a, field):
"""
Reindex an array of prices from a window on the NYSE
calendar by the window on the primary calendar with the same
dt and window size.
"""
if mode == 'daily':
dts = cal.sessions_window(dt, -9)
# `dt` may not be a session on the equity calendar, so
# find the next valid session.
equity_sess = equity_cal.minute_to_session_label(dt)
equity_dts = equity_cal.sessions_window(equity_sess, -9)
elif mode == 'minute':
dts = cal.minutes_window(dt, -10)
equity_dts = equity_cal.minutes_window(dt, -10)
output = pd.Series(
index=equity_dts,
data=a,
).reindex(dts)
# Fill after reindexing, to ensure we don't forward fill
# with values that are being dropped.
if field == 'volume':
return output.fillna(0)
elif field == 'price':
return output.fillna(method='ffill')
else:
return output
fields = fields if fields is not None else ALL_FIELDS
assets = assets if assets is not None else [self.ASSET2, self.ASSET3]
bar_data = self.create_bardata(
simulation_dt_func=lambda: dt,
)
check_internal_consistency(
bar_data, assets, fields, 10, freq
)
for field in fields:
for asset in assets:
asset_series = bar_data.history(asset, field, 10, freq)
base = MINUTE_FIELD_INFO[field] + 2
if idx < 9:
missing_count = 9 - idx
present_count = 9 - missing_count
if field in OHLCP:
if asset == self.ASSET2:
# asset2 should have some leading nans
np.testing.assert_array_equal(
np.full(missing_count, np.nan),
asset_series[0:missing_count]
)
# asset2 should also have some real values
np.testing.assert_array_equal(
np.array(range(base,
base + present_count + 1)),
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 should be NaN the entire time
np.testing.assert_array_equal(
np.full(10, np.nan),
asset_series
)
elif field == 'volume':
if asset == self.ASSET2:
# asset2 should have some zeros (instead of nans)
np.testing.assert_array_equal(
np.zeros(missing_count),
asset_series[0:missing_count]
)
# and some real values
np.testing.assert_array_equal(
np.array(
range(base, base + present_count + 1)
) * 100,
asset_series[(9 - present_count):]
)
if asset == self.ASSET3:
# asset3 is all zeros, no volume yet
np.testing.assert_array_equal(
np.zeros(10),
asset_series
)
else:
# asset3 should have data every 10 minutes
# construct an array full of nans, put something in the
# right slot, and test for comparison
position_from_end = ((idx + 1) % 10) + 1
# asset3's baseline data is 9 NaNs, then 11, then 9 NaNs,
# then 21, etc. for idx 9 to 19, value_for_asset3 should
# be a baseline of 11 (then adjusted for the individual
# field), thus the rounding down to the nearest 10.
value_for_asset3 = (((idx + 1) // 10) * 10) + \
MINUTE_FIELD_INFO[field] + 1
if field in OHLC:
asset3_answer_key = np.full(10, np.nan)
asset3_answer_key[-position_from_end] = \
value_for_asset3
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
),
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'volume':
asset3_answer_key = np.zeros(10)
asset3_answer_key[-position_from_end] = \
value_for_asset3 * 100
asset3_answer_key = reindex_to_primary_calendar(
asset3_answer_key,
field,
)
if asset == self.ASSET2:
np.testing.assert_array_equal(
reindex_to_primary_calendar(
np.array(
range(base + idx - 9, base + idx + 1)
) * 100,
field,
),
asset_series
)
if asset == self.ASSET3:
np.testing.assert_array_equal(
asset3_answer_key,
asset_series
)
elif field == 'price':
# price is always forward filled
# asset2 has prices every minute, so it's easy
if asset == self.ASSET2:
# at idx 9, the data is 2 to 11
np.testing.assert_array_equal(
reindex_to_primary_calendar(
range(idx - 7, idx + 3),
field=field,
),
asset_series
)
if asset == self.ASSET3:
# Second part begins on the session after
# `position_from_end` on the NYSE calendar.
second_begin = (
dt - equity_cal.day * (position_from_end - 1)
)
# First part goes up until the start of the
# second part, because we forward-fill.
first_end = second_begin - cal.day
first_part = asset_series[:first_end]
second_part = asset_series[second_begin:]
decile_count = ((idx + 1) // 10)
# in our test data, asset3 prices will be nine
# NaNs, then ten 11s, ten 21s, ten 31s...
if len(second_part) >= 10:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
elif decile_count == 1:
np.testing.assert_array_equal(
np.full(len(first_part), np.nan),
first_part
)
np.testing.assert_array_equal(
np.array([11] * len(second_part)),
second_part
)
else:
np.testing.assert_array_equal(
np.array([decile_count * 10 - 9] *
len(first_part)),
first_part
)
np.testing.assert_array_equal(
np.array([decile_count * 10 + 1] *
len(second_part)),
second_part
)
def check_internal_consistency(bar_data, assets, fields, bar_count, freq):
if isinstance(assets, Asset):
asset_list = [assets]
else:
asset_list = assets
if isinstance(fields, str):
field_list = [fields]
else:
field_list = fields
multi_field_dict = {
asset: bar_data.history(asset, field_list, bar_count, freq)
for asset in asset_list
}
multi_asset_dict = {
field: bar_data.history(asset_list, field, bar_count, freq)
for field in fields
}
panel = bar_data.history(asset_list, field_list, bar_count, freq)
for field in field_list:
# make sure all the different query forms are internally
# consistent
for asset in asset_list:
series = bar_data.history(asset, field, bar_count, freq)
np.testing.assert_array_equal(
series,
multi_asset_dict[field][asset]
)
np.testing.assert_array_equal(
series,
multi_field_dict[asset][field]
)
np.testing.assert_array_equal(
series,
panel[field][asset]
)
# each minute's OHLCV data has a consistent offset for each field.
# for example, the open is always 1 higher than the close, the high
# is always 2 higher than the close, etc.
MINUTE_FIELD_INFO = {
'open': 1,
'high': 2,
'low': -1,
'close': 0,
'price': 0,
'volume': 0, # unused, later we'll multiply by 100
}
class MinuteEquityHistoryTestCase(WithHistory,
zf.WithMakeAlgo,
zf.ZiplineTestCase):
EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = True
DATA_PORTAL_FIRST_TRADING_DAY = zf.alias('TRADING_START_DT')
@classmethod
def make_equity_minute_bar_data(cls):
equities_cal = cls.trading_calendars[Equity]
data = {}
sids = {2, 5, cls.SHORT_ASSET_SID, cls.HALF_DAY_TEST_ASSET_SID}
for sid in sids:
asset = cls.asset_finder.retrieve_asset(sid)
data[sid] = create_minute_df_for_asset(
equities_cal,
asset.start_date,
asset.end_date,
start_val=2,
)
data[1] = create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2014-01-03', tz='utc'),
pd.Timestamp('2016-01-29', tz='utc'),
start_val=2,
)
asset2 = cls.asset_finder.retrieve_asset(2)
data[asset2.sid] = create_minute_df_for_asset(
equities_cal,
asset2.start_date,
equities_cal.previous_session_label(asset2.end_date),
start_val=2,
minute_blacklist=[
pd.Timestamp('2015-01-08 14:31', tz='UTC'),
pd.Timestamp('2015-01-08 21:00', tz='UTC'),
],
)
# Start values are crafted so that the thousands place are equal when
# adjustments are applied correctly.
# The splits and mergers are defined as 4:1 then 2:1 ratios, so the
# prices approximate that adjustment by quartering and then halving
# the thousands place.
data[cls.MERGER_ASSET_SID] = data[cls.SPLIT_ASSET_SID] = pd.concat((
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-05', tz='UTC'),
start_val=8000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
start_val=2000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-07', tz='UTC'),
pd.Timestamp('2015-01-07', tz='UTC'),
start_val=1000),
create_minute_df_for_asset(
equities_cal,
pd.Timestamp('2015-01-08', tz='UTC'),
pd.Timestamp('2015-01-08', tz='UTC'),
start_val=1000)
))
asset3 = cls.asset_finder.retrieve_asset(3)
data[3] = create_minute_df_for_asset(
equities_cal,
asset3.start_date,
asset3.end_date,
start_val=2,
interval=10,
)
return data.items()
def test_history_in_initialize(self):
algo_text = dedent(
"""\
from zipline.api import history
def initialize(context):
history([1], 10, '1d', 'price')
def handle_data(context, data):
pass
"""
)
algo = self.make_algo(script=algo_text)
with self.assertRaises(HistoryInInitialize):
algo.run()
def test_negative_bar_count(self):
"""
Negative bar counts leak future information.
"""
with self.assertRaisesRegex(
ValueError,
"bar_count must be >= 1, but got -1"
):
self.data_portal.get_history_window(
[self.ASSET1],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
-1,
'1d',
'close',
'minute',
)
def test_daily_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, 1/4 and 1/5
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.open_and_close_for_session(jan5)[1],
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([np.nan, 8389]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# Value from 1/5 should be quartered
np.testing.assert_array_equal(
[2097.25,
# Split occurs. The value of the thousands place should
# match.
2004],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[1048.625, 1194.50, 1004.0],
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted
np.testing.assert_array_equal([1389, 1009], window4)
def test_daily_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
asset = self.DIVIDEND_ASSET
# before any of the dividends
window1 = self.data_portal.get_history_window(
[asset],
self.trading_calendar.session_close(jan5),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(np.array([nan, 391]), window1)
# straddling the first event
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[383.18, # 391 (last close) * 0.98 (first div)
# Dividend occurs prior.
396],
window2
)
# straddling both events!
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
3,
'1d',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
[367.853, # 391 (last close) * 0.98 * 0.96 (both)
749.76, # 781 (last_close) * 0.96 (second div)
786], # no adjustment
window3
)
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-08 14:40', tz='UTC'),
2,
'1d',
'close',
'minute',
)[asset]
# should not be adjusted, should be 787 to 791
np.testing.assert_array_equal([1171, 1181], window4)
def test_minute_before_assets_trading(self):
# since asset2 and asset3 both started trading on 1/5/2015, let's do
# some history windows that are completely before that
minutes = self.trading_calendar.minutes_for_session(
self.trading_calendar.previous_session_label(pd.Timestamp(
'2015-01-05', tz='UTC'
))
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute,
)
check_internal_consistency(
bar_data, [self.ASSET2, self.ASSET3], ALL_FIELDS, 10, '1m'
)
for field in ALL_FIELDS:
# OHLCP should be NaN
# Volume should be 0
asset2_series = bar_data.history(self.ASSET2, field, 10, '1m')
asset3_series = bar_data.history(self.ASSET3, field, 10, '1m')
if field == 'volume':
np.testing.assert_array_equal(np.zeros(10), asset2_series)
np.testing.assert_array_equal(np.zeros(10), asset3_series)
else:
np.testing.assert_array_equal(
np.full(10, np.nan),
asset2_series
)
np.testing.assert_array_equal(
np.full(10, np.nan),
asset3_series
)
@parameterized.expand([
('open_sid_2', 'open', 2),
('high_sid_2', 'high', 2),
('low_sid_2', 'low', 2),
('close_sid_2', 'close', 2),
('volume_sid_2', 'volume', 2),
('open_sid_3', 'open', 3),
('high_sid_3', 'high', 3),
('low_sid_3', 'low', 3),
('close_sid_3', 'close', 3),
('volume_sid_3', 'volume', 3),
])
def test_minute_regular(self, name, field, sid):
# asset2 and asset3 both started on 1/5/2015, but asset3 trades every
# 10 minutes
asset = self.asset_finder.retrieve_asset(sid)
# Check the first hour of equities trading.
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-05', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
self.verify_regular_dt(idx, minute, 'minute',
assets=[asset],
fields=[field])
def test_minute_sunday_midnight(self):
# Most trading calendars aren't open at midnight on Sunday.
sunday_midnight = pd.Timestamp('2015-01-09', tz='UTC')
# Find the closest prior minute when the trading calendar was
# open (note that if the calendar is open at `sunday_midnight`,
# this will be `sunday_midnight`).
trading_minutes = self.trading_calendar.all_minutes
last_minute = trading_minutes[trading_minutes <= sunday_midnight][-1]
sunday_midnight_bar_data = self.create_bardata(lambda: sunday_midnight)
last_minute_bar_data = self.create_bardata(lambda: last_minute)
# Ensure that we get the same results at midnight on Sunday as
# the last open minute.
with handle_non_market_minutes(sunday_midnight_bar_data):
for field in ALL_FIELDS:
np.testing.assert_array_equal(
sunday_midnight_bar_data.history(
self.ASSET2,
field,
30,
'1m',
),
last_minute_bar_data.history(self.ASSET2, field, 30, '1m')
)
def test_minute_after_asset_stopped(self):
# SHORT_ASSET's last day was 2015-01-06
# get some history windows that straddle the end
minutes = self.trading_calendars[Equity].minutes_for_session(
pd.Timestamp('2015-01-07', tz='UTC')
)[0:60]
for idx, minute in enumerate(minutes):
bar_data = self.create_bardata(
lambda: minute
)
check_internal_consistency(
bar_data, self.SHORT_ASSET, ALL_FIELDS, 30, '1m'
)
# Reset data portal because it has advanced past next test date.
data_portal = self.make_data_portal()
# close high low open price volume
# 2015-01-06 20:47:00+00:00 768 770 767 769 768 76800
# 2015-01-06 20:48:00+00:00 769 771 768 770 769 76900
# 2015-01-06 20:49:00+00:00 770 772 769 771 770 77000
# 2015-01-06 20:50:00+00:00 771 773 770 772 771 77100
# 2015-01-06 20:51:00+00:00 772 774 771 773 772 77200
# 2015-01-06 20:52:00+00:00 773 775 772 774 773 77300
# 2015-01-06 20:53:00+00:00 774 776 773 775 774 77400
# 2015-01-06 20:54:00+00:00 775 777 774 776 775 77500
# 2015-01-06 20:55:00+00:00 776 778 775 777 776 77600
# 2015-01-06 20:56:00+00:00 777 779 776 778 777 77700
# 2015-01-06 20:57:00+00:00 778 780 777 779 778 77800
# 2015-01-06 20:58:00+00:00 779 781 778 780 779 77900
# 2015-01-06 20:59:00+00:00 780 782 779 781 780 78000
# 2015-01-06 21:00:00+00:00 781 783 780 782 781 78100
# 2015-01-07 14:31:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:32:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:33:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:34:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:35:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:36:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:37:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:38:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:39:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:40:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:41:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:42:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:43:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:44:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:45:00+00:00 NaN NaN NaN NaN NaN 0
# 2015-01-07 14:46:00+00:00 NaN NaN NaN NaN NaN 0
# choose a window that contains the last minute of the asset
window_start = pd.Timestamp('2015-01-06 20:47', tz='UTC')
window_end = pd.Timestamp('2015-01-07 14:46', tz='UTC')
bar_data = BarData(
data_portal=data_portal,
simulation_dt_func=lambda: minutes[15],
data_frequency='minute',
restrictions=NoRestrictions(),
trading_calendar=self.trading_calendar,
)
bar_count = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
window = bar_data.history(
self.SHORT_ASSET,
ALL_FIELDS,
bar_count,
'1m',
)
# Window should start with 14 values and end with 16 NaNs/0s.
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(
range(76800, 78101, 100),
window['volume'][0:14]
)
np.testing.assert_array_equal(
np.zeros(16),
window['volume'][-16:]
)
else:
np.testing.assert_array_equal(
np.array(range(768, 782)) + MINUTE_FIELD_INFO[field],
window[field][0:14]
)
np.testing.assert_array_equal(
np.full(16, np.nan),
window[field][-16:]
)
# now do a smaller window that is entirely contained after the asset
# ends
window = bar_data.history(self.SHORT_ASSET, ALL_FIELDS, 5, '1m')
for field in ALL_FIELDS:
if field == 'volume':
np.testing.assert_array_equal(np.zeros(5), window['volume'])
else:
np.testing.assert_array_equal(np.full(5, np.nan),
window[field])
def test_minute_splits_and_mergers(self):
# self.SPLIT_ASSET and self.MERGER_ASSET had splits/mergers
# on 1/6 and 1/7
jan5 = pd.Timestamp('2015-01-05', tz='UTC')
# the assets' close column starts at 2 on the first minute of
# 1/5, then goes up one per minute forever
for asset in [self.SPLIT_ASSET, self.MERGER_ASSET]:
# before any of the adjustments, last 10 minutes of jan 5
equity_cal = self.trading_calendars[Equity]
window1 = self.data_portal.get_history_window(
[asset],
equity_cal.open_and_close_for_session(jan5)[1],
10,
'1m',
'close',
'minute',
)[asset]
np.testing.assert_array_equal(
np.array(range(8380, 8390)), window1)
# straddling the first event - begins with the last 5 equity
# minutes on 2015-01-05, ends with the first 5 on
# 2015-01-06.
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(self.trading_calendar.minutes_in_range(
window2_start,
window2_end,
))
window2 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-06 14:35', tz='UTC'),
window2_count,
'1m',
'close',
'minute',
)[asset]
# five minutes from 1/5 should be halved
np.testing.assert_array_equal(
[2096.25,
2096.5,
2096.75,
2097,
2097.25],
window2[:5],
)
# Split occurs. The value of the thousands place should
# match.
np.testing.assert_array_equal(
[2000,
2001,
2002,
2003,
2004],
window2[-5:],
)
# straddling both events! on the equities calendar this is 5
# minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5.
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:35', tz='UTC'),
window3_count,
'1m',
'close',
'minute',
)[asset]
# first five minutes should be 4385-4390, but eigthed
np.testing.assert_array_equal(
[1048.125, 1048.25, 1048.375, 1048.5, 1048.625],
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be
# 2000-2390, but halved
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_equal(
np.array(range(2000, 2390), dtype='float64') / 2,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# final 5 minutes should be 1000-1004
np.testing.assert_array_equal(range(1000, 1005), window3[-5:])
# after last event
window4 = self.data_portal.get_history_window(
[asset],
pd.Timestamp('2015-01-07 14:40', tz='UTC'),
5,
'1m',
'close',
'minute',
)[asset]
# should not be adjusted, should be 1005 to 1009
np.testing.assert_array_equal(range(1005, 1010), window4)
def test_minute_dividends(self):
# self.DIVIDEND_ASSET had dividends on 1/6 and 1/7
# before any of the dividends
window1 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
pd.Timestamp('2015-01-05 21:00', tz='UTC'),
10,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
np.testing.assert_array_equal(np.array(range(382, 392)), window1)
# straddling the first dividend (10 active equity minutes)
window2_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window2_end = pd.Timestamp('2015-01-06 14:35', tz='UTC')
window2_count = len(
self.trading_calendar.minutes_in_range(window2_start, window2_end)
)
window2 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window2_end,
window2_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first dividend is 2%, so the first five values should be 2% lower
# than before
np.testing.assert_array_almost_equal(
np.array(range(387, 392), dtype='float64') * 0.98,
window2[0:5]
)
# second half of window is unadjusted
np.testing.assert_array_equal(range(392, 397), window2[-5:])
# straddling both dividends (on the equities calendar, this is
# 5 minutes of 1/7, 390 of 1/6, and 5 minutes of 1/5).
window3_start = pd.Timestamp('2015-01-05 20:56', tz='UTC')
window3_end = pd.Timestamp('2015-01-07 14:35', tz='UTC')
window3_minutes = self.trading_calendar.minutes_in_range(
window3_start,
window3_end,
)
window3_count = len(window3_minutes)
window3 = self.data_portal.get_history_window(
[self.DIVIDEND_ASSET],
window3_end,
window3_count,
'1m',
'close',
'minute',
)[self.DIVIDEND_ASSET]
# first five minute from 1/7 should be hit by 0.9408 (= 0.98 * 0.96)
np.testing.assert_array_almost_equal(
np.around(np.array(range(387, 392), dtype='float64') * 0.9408, 3),
window3[0:5]
)
# next 390 minutes (the 2015-01-06 session) should be hit by 0.96
# (second dividend)
middle_day_open_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 14:31', tz='UTC')
)
middle_day_close_i = window3_minutes.searchsorted(
pd.Timestamp('2015-01-06 21:00', tz='UTC')
)
np.testing.assert_array_almost_equal(
np.array(range(392, 782), dtype='float64') * 0.96,
window3[middle_day_open_i:middle_day_close_i + 1]
)
# last 5 minutes should not be adjusted
np.testing.assert_array_equal(np.array(range(782, 787)), window3[-5:])
def test_passing_iterable_to_history_regular_hours(self):
# regular hours
current_dt = pd.Timestamp("2015-01-06 9:45", tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
bar_data.history(pd.Index([self.ASSET1, self.ASSET2]),
"high", 5, "1m")
def test_passing_iterable_to_history_bts(self):
# before market hours
current_dt = pd.Timestamp("2015-01-07 8:45", tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
with handle_non_market_minutes(bar_data):
bar_data.history(pd.Index([self.ASSET1, self.ASSET2]),
"high", 5, "1m")
def test_overnight_adjustments(self):
# Should incorporate adjustments on midnight 01/06
current_dt = pd.Timestamp('2015-01-06 8:45', tz='US/Eastern')
bar_data = self.create_bardata(
lambda: current_dt,
)
adj_expected = {
'open': np.arange(8381, 8391) / 4.0,
'high': np.arange(8382, 8392) / 4.0,
'low': np.arange(8379, 8389) / 4.0,
'close': np.arange(8380, 8390) / 4.0,
'volume': np.arange(8380, 8390) * 100 * 4.0,
'price': np.arange(8380, 8390) / 4.0,
}
expected = {
'open': np.arange(383, 393) / 2.0,
'high': np.arange(384, 394) / 2.0,
'low': np.arange(381, 391) / 2.0,
'close': np.arange(382, 392) / 2.0,
'volume': np.arange(382, 392) * 100 * 2.0,
'price': np.arange(382, 392) / 2.0,
}
# Use a window looking back to 3:51pm from 8:45am the following day.
# This contains the last ten minutes of the equity session for
# 2015-01-05.
window_start = pd.Timestamp('2015-01-05 20:51', tz='UTC')
window_end = pd.Timestamp('2015-01-06 13:44', tz='UTC')
window_length = len(
self.trading_calendar.minutes_in_range(window_start, window_end)
)
with handle_non_market_minutes(bar_data):
# Single field, single asset
for field in ALL_FIELDS:
values = bar_data.history(
self.SPLIT_ASSET,
field,
window_length,
'1m',
)
# The first 10 bars the `values` correspond to the last
# 10 minutes in the 2015-01-05 session.
np.testing.assert_array_equal(values.values[:10],
adj_expected[field],
err_msg=field)
# Multi field, single asset
values = bar_data.history(
self.SPLIT_ASSET, ['open', 'volume'], window_length, '1m'
)
np.testing.assert_array_equal(values.open.values[:10],
adj_expected['open'])
np.testing.assert_array_equal(values.volume.values[:10],
adj_expected['volume'])
# Single field, multi asset
values = bar_data.history(
[self.SPLIT_ASSET, self.ASSET2], 'open', window_length, '1m'
)
np.testing.assert_array_equal(values[self.SPLIT_ASSET].values[:10],
adj_expected['open'])
np.testing.assert_array_equal(values[self.ASSET2].values[:10],
expected['open'] * 2)
# Multi field, multi asset
values = bar_data.history(
[self.SPLIT_ASSET, self.ASSET2],
['open', 'volume'],
window_length,
'1m',
)
np.testing.assert_array_equal(
values.open[self.SPLIT_ASSET].values[:10],
adj_expected['open']
)
np.testing.assert_array_equal(
values.volume[self.SPLIT_ASSET].values[:10],
adj_expected['volume']
)
np.testing.assert_array_equal(
values.open[self.ASSET2].values[:10],
expected['open'] * 2
)
np.testing.assert_array_equal(
values.volume[self.ASSET2].values[:10],
expected['volume'] / 2
)
def test_minute_early_close(self):
# 2014-07-03 is an early close
# HALF_DAY_TEST_ASSET started trading on 2014-07-02, how convenient
#
# five minutes into the day after the early close, get 20 1m bars
cal = self.trading_calendar
window_start = pd.Timestamp('2014-07-03 16:46:00', tz='UTC')
window_end = pd.Timestamp('2014-07-07 13:35:00', tz='UTC')
bar_count = len(cal.minutes_in_range(window_start, window_end))
window = self.data_portal.get_history_window(
[self.HALF_DAY_TEST_ASSET],
window_end,
bar_count,
'1m',
'close',
'minute',
)[self.HALF_DAY_TEST_ASSET]
# 390 minutes for 7/2, 210 minutes for 7/3, 7/4-7/6 closed
# first minute of 7/7 is the 600th trading minute for this asset
# this asset's first minute had a close value of 2, so every value is
# 2 + (minute index)
expected = range(587, 607)
# First 15 bars on occur at the end of 2014-07-03.
np.testing.assert_array_equal(window[:15], expected[:15])
# Interim bars (only on other calendars) should all be nan.
np.testing.assert_array_equal(
window[15:-5],
np.full(len(window) - 20, np.nan),
)
# Last 5 bars occur at the start of 2014-07-07.
np.testing.assert_array_equal(window[-5:], expected[-5:])
self.assertEqual(
window.index[14],
pd.Timestamp('2014-07-03 17:00', tz='UTC')
)
self.assertEqual(
window.index[-5],
pd.Timestamp('2014-07-07 13:31', tz='UTC')
)
def test_minute_different_lifetimes(self):
cal = self.trading_calendar
equity_cal = self.trading_calendars[Equity]
# at trading start, only asset1 existed
day = self.trading_calendar.next_session_label(self.TRADING_START_DT)
# Range containing 100 equity minutes, possibly more on other
# calendars (i.e. futures).
window_start = pd.Timestamp('2014-01-03 19:22', tz='UTC')
window_end = pd.Timestamp('2014-01-06 14:31', tz='UTC')
bar_count = len(cal.minutes_in_range(window_start, window_end))
equity_cal = self.trading_calendars[Equity]
first_equity_open, _ = equity_cal.open_and_close_for_session(day)
asset1_minutes = equity_cal.minutes_for_sessions_in_range(
self.ASSET1.start_date,
self.ASSET1.end_date
)
asset1_idx = asset1_minutes.searchsorted(first_equity_open)
window = self.data_portal.get_history_window(
[self.ASSET1, self.ASSET2],
first_equity_open,
bar_count,
'1m',
'close',
'minute',
)
expected = range(asset1_idx - 97, asset1_idx + 3)
# First 99 bars occur on the previous day,
np.testing.assert_array_equal(
window[self.ASSET1][:99],
expected[:99],
)
# Any interim bars are not active equity minutes, so should all
# be nan.
np.testing.assert_array_equal(
window[self.ASSET1][99:-1],
np.full(len(window) - 100, np.nan),
)
# Final bar in the window is the first equity bar of `day`.
np.testing.assert_array_equal(
window[self.ASSET1][-1:],
expected[-1:],
)
# All NaNs for ASSET2, since it hasn't started yet.
np.testing.assert_array_equal(
window[self.ASSET2],
np.full(len(window), np.nan),
)
def test_history_window_before_first_trading_day(self):
# trading_start is 2/3/2014
# get a history window that starts before that, and ends after that
first_day_minutes = self.trading_calendar.minutes_for_session(
self.TRADING_START_DT
)
exp_msg = (
'History window extends before 2014-01-03. To use this history '
'window, start the backtest on or after 2014-01-06.'
)
for field in OHLCP:
with self.assertRaisesRegex(
HistoryWindowStartsBeforeData, exp_msg):
self.data_portal.get_history_window(
[self.ASSET1],
first_day_minutes[5],
15,
'1m',
field,
'minute',
)[self.ASSET1]
def test_daily_history_blended(self):
# daily history windows that end mid-day use minute values for the
# last day
# January 2015 has both daily and minute data for ASSET2
day = pd.Timestamp('2015-01-07', tz='UTC')
minutes = self.trading_calendar.minutes_for_session(day)
equity_cal = self.trading_calendars[Equity]
equity_minutes = equity_cal.minutes_for_session(day)
equity_open, equity_close = equity_minutes[0], equity_minutes[-1]
# minute data, baseline:
# Jan 5: 2 to 391
# Jan 6: 392 to 781
# Jan 7: 782 to 1172
for minute in minutes:
idx = equity_minutes.searchsorted(min(minute, equity_close))
for field in ALL_FIELDS:
window = self.data_portal.get_history_window(
[self.ASSET2],
minute,
3,
'1d',
field,
'minute',
)[self.ASSET2]
self.assertEqual(len(window), 3)
if field == 'open':
self.assertEqual(window[0], 3)
self.assertEqual(window[1], 393)
elif field == 'high':
self.assertEqual(window[0], 393)
self.assertEqual(window[1], 783)
elif field == 'low':
self.assertEqual(window[0], 1)
self.assertEqual(window[1], 391)
elif field == 'close':
self.assertEqual(window[0], 391)
self.assertEqual(window[1], 781)
elif field == 'volume':
self.assertEqual(window[0], 7663500)
self.assertEqual(window[1], 22873500)
last_val = -1
if minute < equity_open:
# If before the equity calendar open, we don't yet
# have data (but price is forward-filled).
if field == 'volume':
last_val = 0
elif field == 'price':
last_val = window[1]
else:
last_val = nan
elif field == 'open':
last_val = 783
elif field == 'high':
# since we increase monotonically, it's just the last
# value
last_val = 784 + idx
elif field == 'low':
# since we increase monotonically, the low is the first
# value of the day
last_val = 781
elif field == 'close' or field == 'price':
last_val = 782 + idx
elif field == 'volume':
# for volume, we sum up all the minutely volumes so far
# today
last_val = sum(np.array(range(782, 782 + idx + 1)) * 100)
np.testing.assert_equal(window[-1], last_val)
@parameterized.expand(ALL_FIELDS)
def test_daily_history_blended_gaps(self, field):
# daily history windows that end mid-day use minute values for the
# last day
# January 2015 has both daily and minute data for ASSET2
day = pd.Timestamp('2015-01-08', tz='UTC')
minutes = self.trading_calendar.minutes_for_session(day)
equity_cal = self.trading_calendars[Equity]
equity_minutes = equity_cal.minutes_for_session(day)
equity_open, equity_close = equity_minutes[0], equity_minutes[-1]
# minute data, baseline:
# Jan 5: 2 to 391
# Jan 6: 392 to 781
# Jan 7: 782 to 1172
for minute in minutes:
idx = equity_minutes.searchsorted(min(minute, equity_close))
window = self.data_portal.get_history_window(
[self.ASSET2],
minute,
3,
'1d',
field,
'minute',
)[self.ASSET2]
self.assertEqual(len(window), 3)
if field == 'open':
self.assertEqual(window[0], 393)
self.assertEqual(window[1], 783)
elif field == 'high':
self.assertEqual(window[0], 783)
self.assertEqual(window[1], 1173)
elif field == 'low':
self.assertEqual(window[0], 391)
self.assertEqual(window[1], 781)
elif field == 'close':
self.assertEqual(window[0], 781)
self.assertEqual(window[1], 1171)
elif field == 'price':
self.assertEqual(window[0], 781)
self.assertEqual(window[1], 1171)
elif field == 'volume':
self.assertEqual(window[0], 22873500)
self.assertEqual(window[1], 38083500)
last_val = -1
if minute < equity_open:
# If before the equity calendar open, we don't yet
# have data (but price is forward-filled).
if field == 'volume':
last_val = 0
elif field == 'price':
last_val = window[1]
else:
last_val = nan
elif field == 'open':
if idx == 0:
last_val = np.nan
else:
last_val = 1174.0
elif field == 'high':
# since we increase monotonically, it's just the last
# value
if idx == 0:
last_val = np.nan
elif idx == 389:
last_val = 1562.0
else:
last_val = 1174.0 + idx
elif field == 'low':
# since we increase monotonically, the low is the first
# value of the day
if idx == 0:
last_val = np.nan
else:
last_val = 1172.0
elif field == 'close':
if idx == 0:
last_val = np.nan
elif idx == 389:
last_val = 1172.0 + 388
else:
last_val = 1172.0 + idx
elif field == 'price':
if idx == 0:
last_val = 1171.0
elif idx == 389:
last_val = 1172.0 + 388
else:
last_val = 1172.0 + idx
elif field == 'volume':
# for volume, we sum up all the minutely volumes so far
# today
if idx == 0:
last_val = 0
elif idx == 389:
last_val = sum(
np.array(range(1173, 1172 + 388 + 1)) * 100)
else:
last_val = sum(
np.array(range(1173, 1172 + idx + 1)) * 100)
np.testing.assert_almost_equal(window[-1], last_val,
err_msg='field={0} minute={1}'.
format(field, minute))
@parameterized.expand([(("bar_count%s" % x), x) for x in [1, 2, 3]])
def test_daily_history_minute_gaps_price_ffill(self, test_name, bar_count):
# Make sure we use the previous day's value when there's been no volume
# yet today.
# January 5 2015 is the first day, and there is volume only every
# 10 minutes.
# January 6 has the same volume pattern and is used here to ensure we
# ffill correctly from the previous day when there is no volume yet
# today.
# January 12 is a Monday, ensuring we ffill correctly when the previous
# day is not a trading day.
for day_idx, day in enumerate([pd.Timestamp('2015-01-05', tz='UTC'),
pd.Timestamp('2015-01-06', tz='UTC'),
pd.Timestamp('2015-01-12', tz='UTC')]):
session_minutes = self.trading_calendar.minutes_for_session(day)
equity_cal = self.trading_calendars[Equity]
equity_minutes = equity_cal.minutes_for_session(day)
if day_idx == 0:
# dedupe when session_minutes are same as equity_minutes
minutes_to_test = OrderedDict([
(session_minutes[0], np.nan), # No volume yet on first day
(equity_minutes[0], np.nan), # No volume yet on first day
(equity_minutes[1], np.nan), # ...
(equity_minutes[8], np.nan), # Minute before > 0 volume
(equity_minutes[9], 11.0), # We have a price!
(equity_minutes[10], 11.0), # ffill
(equity_minutes[-2], 381.0), # ...
(equity_minutes[-1], 391.0), # Last minute of exchange
(session_minutes[-1], 391.0), # Last minute of day
])
elif day_idx == 1:
minutes_to_test = OrderedDict([
(session_minutes[0], 391.0), # ffill from yesterday
(equity_minutes[0], 391.0), # ...
(equity_minutes[8], 391.0), # ...
(equity_minutes[9], 401.0), # New price today
(equity_minutes[-1], 781.0), # Last minute of exchange
(session_minutes[-1], 781.0), # Last minute of day
])
else:
minutes_to_test = OrderedDict([
(session_minutes[0], 1951.0), # ffill from previous week
(equity_minutes[0], 1951.0), # ...
(equity_minutes[8], 1951.0), # ...
(equity_minutes[9], 1961.0), # New price today
])
for minute, expected in minutes_to_test.items():
window = self.data_portal.get_history_window(
[self.ASSET3],
minute,
bar_count,
'1d',
'price',
'minute',
)[self.ASSET3]
self.assertEqual(
len(window),
bar_count,
"Unexpected window length at {}. Expected {}, but was {}."
.format(minute, bar_count, len(window))
)
np.testing.assert_allclose(
window[-1],
expected,
err_msg=f"at minute {minute}",
)
class NoPrefetchMinuteEquityHistoryTestCase(MinuteEquityHistoryTestCase):
DATA_PORTAL_MINUTE_HISTORY_PREFETCH = 0
DATA_PORTAL_DAILY_HISTORY_PREFETCH = 0
class DailyEquityHistoryTestCase(WithHistory, zf.ZiplineTestCase):
CREATE_BARDATA_DATA_FREQUENCY = 'daily'
@classmethod
def make_equity_daily_bar_data(cls, country_code, sids):
yield 1, cls.create_df_for_asset(
cls.START_DATE,
pd.Timestamp('2016-01-30', tz='UTC')
)
yield 3, cls.create_df_for_asset(
| pd.Timestamp('2015-01-05', tz='UTC') | pandas.Timestamp |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 20 15:37:54 2018
@author: isaaclera
"""
import pandas as pd
import numpy as np
from sklearn import preprocessing
df = pd.read_csv("data/data2.csv")
## Normalizar
x = df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df = | pd.DataFrame(x_scaled) | pandas.DataFrame |
import numpy as np
import pandas as pd
import mlflow
import sys
import csv
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter, defaultdict
import random
from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_squared_log_error, accuracy_score, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold, GroupKFold, train_test_split
from typing import Optional, Tuple, Union
from takaggle.training.model import Model
from takaggle.training.util import Logger, Util
from takaggle.training.TimeSeriesSplitter import CustomTimeSeriesSplitter
# 定数
shap_sampling = 10000
corr_sampling = 10000
def rmse(val_y, val_pred):
return np.sqrt(mean_squared_error(val_y, val_pred))
def stratified_group_k_fold(X, y, groups, k, seed=None) -> (list, list):
"""StratifiedKFoldで分割する関数
Args:
X (pd.DataFrame): trainデータ
y (pd.DataFrame): 目的変数のDF
groups (pd.DataFrame): groupに指定するカラムのDF
k (int): k数
seed (int): seet. Defaults to None.
Yields:
list: trainデータのindexリスト
list: validarionのindexリスト
"""
labels_num = np.max(y) + 1
y_counts_per_group = defaultdict(lambda: np.zeros(labels_num))
y_distr = Counter()
for label, g in zip(y, groups):
y_counts_per_group[g][label] += 1
y_distr[label] += 1
y_counts_per_fold = defaultdict(lambda: np.zeros(labels_num))
groups_per_fold = defaultdict(set)
def eval_y_counts_per_fold(y_counts, fold):
y_counts_per_fold[fold] += y_counts
std_per_label = []
for label in range(labels_num):
label_std = np.std([y_counts_per_fold[i][label] / y_distr[label] for i in range(k)])
std_per_label.append(label_std)
y_counts_per_fold[fold] -= y_counts
return np.mean(std_per_label)
groups_and_y_counts = list(y_counts_per_group.items())
random.Random(seed).shuffle(groups_and_y_counts)
for g, y_counts in sorted(groups_and_y_counts, key=lambda x: -np.std(x[1])):
best_fold = None
min_eval = None
for i in range(k):
fold_eval = eval_y_counts_per_fold(y_counts, i)
if min_eval is None or fold_eval < min_eval:
min_eval = fold_eval
best_fold = i
y_counts_per_fold[best_fold] += y_counts
groups_per_fold[best_fold].add(g)
all_groups = set(groups)
for i in range(k):
train_groups = all_groups - groups_per_fold[i]
test_groups = groups_per_fold[i]
train_indices = [i for i, g in enumerate(groups) if g in train_groups]
test_indices = [i for i, g in enumerate(groups) if g in test_groups]
yield train_indices, test_indices
class Runner:
def __init__(self, run_name, model_cls, features, setting, params, cv, categoricals=[], is_add_pseudo=False, pseudo_label_file=''):
"""コンストラクタ
:run_name: runの名前
:model_cls: モデルのクラス
:features: 特徴量のリスト
:setting: 設定リスト
:params: ハイパーパラメータ
:cv: CVの設定
"""
# setting情報
self.target = setting.get('target') # 目的変数
self.calc_shap = setting.get('calc_shap') # shapを計算するか否か
self.save_train_pred = setting.get('save_train_pred') # 学習データでの予測値を保存するか否か
self.feature_dir_name = setting.get('feature_directory') # 学習データの読み込み先ディレクトリ
self.model_dir_name = setting.get('model_directory') # 学習データの読み込み先ディレクトリ
self.train_file_name = setting.get('train_file_name')
self.test_file_name = setting.get('test_file_name')
self.run_name = run_name # run名
self.model_cls = model_cls # モデルクラス
self.features = features # 使用する特徴量のリスト
self.params = params # モデルのハイパーパラメータ
self.out_dir_name = self.model_dir_name + run_name + '/'
self.logger = Logger(self.out_dir_name)
# 評価指標
self.metrics_name = setting.get('metrics')
self.logger.info(f'{self.run_name} - metrics is {self.metrics_name}')
if self.metrics_name == 'MSE':
self.metrics = mean_squared_error
elif self.metrics_name == 'RMSE':
self.metrics = rmse
elif self.metrics_name == 'RMSLE':
self.metrics = mean_squared_log_error
elif self.metrics_name == 'MAE':
self.metrics = mean_absolute_error
elif self.metrics_name == 'ACC':
self.metrics = accuracy_score
elif self.metrics_name == 'PR_AUC':
self.metrics = average_precision_score
elif self.metrics_name == 'CUSTOM':
self.metrics = None
else:
self.metrics = None
# データの読み込み
self.train_x = self.load_x_train() # 学習データの読み込み
self.train_y = self.load_y_train() # 学習データの読み込み
# cv情報
self.cv_method = cv.get('method') # CVのメソッド名
self.n_splits = cv.get('n_splits') # k数
self.random_state = cv.get('random_state') # seed
self.shuffle = cv.get('shuffle') # shffleの有無
self.cv_target_gr_column = cv.get('cv_target_gr') # GroupKFold or StratifiedGroupKFoldを使用する時に指定する
self.cv_target_sf_column = cv.get('cv_target_sf') # StratifiedKFold or StratifiedGroupKFoldを使用する時に指定する
self.test_size = cv.get('test_size') # train_test_split用
# ファイル出力用変数
# 各fold,groupのスコアをファイルに出力するための2次元リスト
self.score_list = []
self.score_list.append(['run_name', self.run_name])
self.fold_score_list = []
# その他の情報
self.remove_train_index = None # trainデータからデータを絞り込む際に使用する。除外するindexを保持。
if self.calc_shap:
self.shap_values = np.zeros(self.train_x.shape)
self.categoricals = categoricals # カテゴリ変数を指定する場合に使用する
self.is_add_pseudo = is_add_pseudo # pseudo_labelデータを学習に私用する場合に追加する
self.pseudo_label_file = pseudo_label_file # pseudo_labelデータを学習に私用する場合に追加する. 特徴量と同じディレクトリに存在するファイルを指定すること
if self.is_add_pseudo:
if self.pseudo_label_file == '':
print('pseudo_label_fileが設定されていません。処理を終了します')
sys.exit(0)
# ログにデータ件数を出力
self.logger.info(f'{self.run_name} - train_x shape: {self.train_x.shape}')
self.logger.info(f'{self.run_name} - train_y shape: {self.train_y.shape}')
# TimeSeriecSplits用
self.train_days = cv.get('train_days')
self.test_days = cv.get('test_days')
self.day_col = cv.get('day_col')
self.pred_days = cv.get('pred_days')
def visualize_corr(self):
"""相関係数を算出する
"""
fig, ax = plt.subplots(figsize=(30, 20))
plt.rcParams["font.size"] = 12 # 図のfontサイズ
plt.tick_params(labelsize=14) # 図のラベルのfontサイズ
plt.tight_layout()
# use a ranked correlation to catch nonlinearities
df = self.train_x.copy()
df[self.target] = self.train_y.copy()
corr = df.sample(corr_sampling).corr(method='spearman')
sns.heatmap(corr.round(3), annot=True,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values)
# 保存
plt.savefig(self.out_dir_name + self.run_name + '_corr.png', dpi=300, bbox_inches="tight")
plt.close()
del df, corr
def shap_feature_importance(self) -> None:
"""計算したshap値を可視化して保存する
"""
all_columns = self.train_x.columns.values.tolist() + [self.target]
ma_shap = pd.DataFrame(sorted(zip(abs(self.shap_values).mean(axis=0), all_columns), reverse=True),
columns=['Mean Abs Shapley', 'Feature']).set_index('Feature')
ma_shap = ma_shap.sort_values('Mean Abs Shapley', ascending=True)
fig = plt.figure(figsize=(8, 25))
plt.tick_params(labelsize=12) # 図のラベルのfontサイズ
ax = fig.add_subplot(1, 1, 1)
ax.set_title('shap value')
ax.barh(ma_shap.index, ma_shap['Mean Abs Shapley'], label='Mean Abs Shapley', align="center", alpha=0.8)
labels = ax.get_xticklabels()
plt.setp(labels, rotation=0, fontsize=10)
ax.legend(loc='upper left')
plt.savefig(self.out_dir_name + self.run_name + '_shap.png', dpi=300, bbox_inches="tight")
plt.close()
def get_feature_name(self):
""" 学習に使用する特徴量を返却
"""
return self.train_x.columns.values.tolist()
def train_fold(self, i_fold: Union[int, str]) -> Tuple[Model, Optional[np.array],
Optional[np.array], Optional[float]]:
"""クロスバリデーションでのfoldを指定して学習・評価を行う
他のメソッドから呼び出すほか、単体でも確認やパラメータ調整に用いる
:param i_fold: foldの番号(すべてのときには'all'とする)
:return: (モデルのインスタンス、レコードのインデックス、予測値、評価によるスコア)のタプル
"""
# 学習データの読込
validation = i_fold != 'all'
train_x = self.train_x.copy()
train_y = self.train_y.copy()
if validation:
# 学習データ・バリデーションデータのindexを取得
if self.cv_method == 'KFold':
tr_idx, va_idx = self.load_index_k_fold(i_fold)
elif self.cv_method == 'StratifiedKFold':
tr_idx, va_idx = self.load_index_sk_fold(i_fold)
elif self.cv_method == 'GroupKFold':
tr_idx, va_idx = self.load_index_gk_fold_shuffle(i_fold)
elif self.cv_method == 'StratifiedGroupKFold':
tr_idx, va_idx = self.load_index_sgk_fold(i_fold)
elif self.cv_method == 'TrainTestSplit':
tr_idx, va_idx = self.load_index_train_test_split()
elif self.cv_method == 'CustomTimeSeriesSplitter':
tr_idx, va_idx = self.load_index_custom_ts_fold(i_fold)
else:
print('CVメソッドが正しくないため終了します')
sys.exit(0)
tr_x, tr_y = train_x.iloc[tr_idx], train_y.iloc[tr_idx]
va_x, va_y = train_x.iloc[va_idx], train_y.iloc[va_idx]
# pseudo labelingを行う場合
if self.is_add_pseudo:
# pseudo labelingデータを追加する
pseudo_df = pd.read_pickle(self.feature_dir_name + self.pseudo_label_file)
pseudo_df_x = pseudo_df.drop(self.target, axis=1)[self.features]
if self.metrics_name == 'RMSLE':
pseudo_df[self.target] = np.log1p(pseudo_df[self.target])
pseudo_df_y = pseudo_df[self.target]
# 結合
tr_x = pd.concat([tr_x, pseudo_df_x], axis=0)
tr_y = pd.concat([tr_y, pseudo_df_y], axis=0)
# 学習を行う
model = self.build_model(i_fold)
model.train(tr_x, tr_y, va_x, va_y)
# TODO: shap値もちゃんと計算できるようにしたい
# バリデーションデータへの予測・評価を行う
if self.calc_shap:
va_pred, self.shap_values[va_idx[:shap_sampling]] = model.predict_and_shap(va_x, shap_sampling)
else:
va_pred = model.predict(va_x)
# 評価指標がRMSLEの場合、log1pで学習させているため、元に戻して計算する
if self.metrics_name == 'RMSLE':
va_pred = np.expm1(va_pred)
va_pred = np.where(va_pred < 0, 0, va_pred)
score = np.sqrt(self.metrics(np.expm1(va_y), va_pred))
elif self.metrics_name == 'ACC':
va_pred = np.round(va_pred)
score = self.metrics(va_y, va_pred)
else:
score = self.metrics(va_y, va_pred)
# foldごとのスコアをリストに追加
self.fold_score_list.append([f'fold{i_fold}', round(score, 4)])
# TODO: ここもconfigで良い感じに管理できると良いね
# 特定のカラム(グループ)ごとにスコアを算出したい場合
# カテゴリ変数で、予測が容易なものもあれば難しいものもある場合に、ここを追加することで
# そのカテゴリごとのスコアを確認できる
# 事前にクラス変数にlistを宣言する必要がある
"""
# 特別仕様: groupごとのスコアを算出
_temp_df = pd.read_pickle(self.feature_dir_name + 'X_train.pkl')[['chip_id', 'chip_exc_wl']]
_temp_df = _temp_df.iloc[va_idx].reset_index(drop=True)
_temp_df = pd.concat([_temp_df, va_y.reset_index(drop=True), pd.Series(va_pred, name='pred')], axis=1)
# chip_idの辞書
with open(self.feature_dir_name + 'chip_dic.pkl', 'rb') as f:
chip_dict = pickle.load(f)
for i in sorted(_temp_df['chip_id'].unique().tolist()):
chip_df = _temp_df.query('chip_id == @i')
chip_y = chip_df['target']
chip_pred = chip_df['pred']
chip_score = self.metrics(chip_y, chip_pred)
# chip_idごとのスコアをリストに追加
self.chip_score_list.append([chip_dict[i], round(chip_score, 4)])
for i in sorted(_temp_df['chip_exc_wl'].unique().tolist()):
chip_exc_wl_df = _temp_df.query('chip_exc_wl == @i')
chip_exc_wl_y = chip_exc_wl_df['target']
chip_exc_wl_pred = chip_exc_wl_df['pred']
chip_exc_wl_score = self.metrics(chip_exc_wl_y, chip_exc_wl_pred)
# chip_exc_wlごとのスコアをリストに追加
self.chip_exc_wl_score_list.append([i, round(chip_exc_wl_score, 4)])
"""
# モデル、インデックス、予測値、評価を返す
return model, va_idx, va_pred, score
else:
# 学習データ全てで学習を行う
model = self.build_model(i_fold)
model.train(train_x, train_y)
# モデルを返す
return model, None, None, None
def run_train_cv(self) -> None:
"""クロスバリデーションでの学習・評価を行う
学習・評価とともに、各foldのモデルの保存、スコアのログ出力についても行う
"""
self.logger.info(f'{self.run_name} - start training cv')
if self.cv_method in ['KFold', 'TrainTestSplit', 'CustomTimeSeriesSplitter']:
self.logger.info(f'{self.run_name} - cv method: {self.cv_method}')
else:
self.logger.info(f'{self.run_name} - cv method: {self.cv_method} - group: {self.cv_target_gr_column} - stratify: {self.cv_target_sf_column}')
scores = [] # 各foldのscoreを保存
va_idxes = [] # 各foldのvalidationデータのindexを保存
preds = [] # 各foldの推論結果を保存
# 各foldで学習を行う
for i_fold in range(self.n_splits):
# 学習を行う
self.logger.info(f'{self.run_name} fold {i_fold} - start training')
model, va_idx, va_pred, score = self.train_fold(i_fold)
self.logger.info(f'{self.run_name} fold {i_fold} - end training - score {score}')
# モデルを保存する
model.save_model(self.out_dir_name)
# 結果を保持する
va_idxes.append(va_idx)
scores.append(score)
preds.append(va_pred)
# 各foldの結果をまとめる
va_idxes = np.concatenate(va_idxes)
order = np.argsort(va_idxes)
preds = np.concatenate(preds, axis=0)
preds = preds[order]
# 全体のスコアを算出
if self.cv_method not in ['TrainTestSplit', 'CustomTimeSeriesSplitter']:
if self.metrics_name == 'RMSLE':
score_all_data = np.sqrt(self.metrics(np.expm1(self.train_y), preds))
else:
score_all_data = self.metrics(self.train_y, preds)
else:
score_all_data = None
# oofデータに対するfoldごとのscoreをcsvに書き込む(foldごとに分析する用)
self.score_list.append(['score_all_data', score_all_data])
self.score_list.append(['score_fold_mean', np.mean(scores)])
for i in self.fold_score_list:
self.score_list.append(i)
with open(self.out_dir_name + f'{self.run_name}_score.csv', 'a') as f:
writer = csv.writer(f)
writer.writerows(self.score_list)
# foldごとのスコアもmlflowでトラッキングする
def score_mean(df):
df = df.groupby('run_name').mean().round(4).reset_index().sort_values('run_name')
return df
_score_df = pd.read_csv(self.out_dir_name + f'{self.run_name}_score.csv')
_score_df = score_mean(_score_df)
_score_df = _score_df.T
_score_df.columns = _score_df.iloc[0]
_score_df = _score_df.drop(_score_df.index[0])
for col in _score_df.columns.tolist():
mlflow.log_metric(col, _score_df[col].values[0])
# 学習データでの予測結果の保存
if self.save_train_pred:
Util.dump_df_pickle(pd.DataFrame(preds), self.out_dir_name + f'.{self.run_name}_train.pkl')
# 評価結果の保存
self.logger.result_scores(self.run_name, scores, score_all_data)
# shap feature importanceデータの保存
if self.calc_shap:
self.shap_feature_importance()
def run_predict_cv(self) -> None:
"""クロスバリデーションで学習した各foldのモデルの平均により、テストデータの予測を行う
あらかじめrun_train_cvを実行しておく必要がある
"""
self.logger.info(f'{self.run_name} - start prediction cv')
test_x = self.load_x_test()
preds = []
# 各foldのモデルで予測を行う
for i_fold in range(self.n_splits):
self.logger.info(f'{self.run_name} - start prediction fold:{i_fold}')
model = self.build_model(i_fold)
model.load_model(self.out_dir_name)
if self.metrics_name == 'RMSLE':
pred = np.expm1(model.predict(test_x))
else:
pred = model.predict(test_x)
preds.append(pred)
self.logger.info(f'{self.run_name} - end prediction fold:{i_fold}')
# 予測の平均値を出力する
if self.metrics_name == 'ACC':
pred_avg = np.round(np.mean(preds, axis=0))
else:
pred_avg = np.mean(preds, axis=0)
# 推論結果の保存(submit対象データ)
Util.dump_df_pickle(pd.DataFrame(pred_avg), self.out_dir_name + f'{self.run_name}_pred.pkl')
self.logger.info(f'{self.run_name} - end prediction cv')
def run_train_all(self) -> None:
"""学習データすべてで学習し、そのモデルを保存する"""
self.logger.info(f'{self.run_name} - start training all')
# 学習データ全てで学習を行う
i_fold = 'all'
model, _, _, _ = self.train_fold(i_fold)
model.save_model(self.out_dir_name)
self.logger.info(f'{self.run_name} - end training all')
def run_predict_all(self) -> None:
"""学習データすべてで学習したモデルにより、テストデータの予測を行う
あらかじめrun_train_allを実行しておく必要がある
"""
self.logger.info(f'{self.run_name} - start prediction all')
test_x = self.load_x_test()
# 学習データ全てで学習したモデルで予測を行う
i_fold = 'all'
model = self.build_model(i_fold)
model.load_model(self.out_dir_name)
pred = model.predict(test_x)
# 予測結果の保存
Util.dump(pred, f'../model/pred/{self.run_name}-test.pkl')
self.logger.info(f'{self.run_name} - end prediction all')
def build_model(self, i_fold: Union[int, str]) -> Model:
"""クロスバリデーションでのfoldを指定して、モデルの作成を行う
:param i_fold: foldの番号
:return: モデルのインスタンス
"""
# ラン名、fold、モデルのクラスからモデルを作成する
run_fold_name = f'{self.run_name}-fold{i_fold}'
return self.model_cls(run_fold_name, self.params, self.categoricals)
def load_x_train(self, all_cols=False) -> pd.DataFrame:
"""学習データの特徴量を読み込む
列名で抽出する以上のことを行う場合、このメソッドの修正が必要
:return: 学習データの特徴量
"""
# 複数のpklファイルにデータが散らばっている場合 -----------
# dfs = [pd.read_pickle(self.feature_dir_name + f'{f}_train.pkl') for f in self.features]
# df = pd.concat(dfs, axis=1)
# -------------------------------------------------
# csv or pklにまとまっている場合 -----------
# df = pd.read_csv('../input/train.csv')[self.features]
# df = pd.read_pickle(self.feature_dir_name + 'X_train.pkl')
df = pd.read_pickle(self.feature_dir_name + f'{self.train_file_name}')
if all_cols:
# 全てのカラムを返却する場合
# stratify-cvなどで、目的変数を対象にしたい場合はこちらを呼ぶ
return df
df = df[self.features]
# -------------------------------------------------
# 特定のサンプルを除外して学習させる場合 -----------
# self.remove_train_index = df[(df['age']==64) | (df['age']==66) | (df['age']==67)].index
# df = df.drop(index = self.remove_train_index)
# df = df[self.features]
# -------------------------------------------------
return df
def load_y_train(self) -> pd.Series:
"""学習データの目的変数を読み込む
対数変換や使用するデータを削除する場合には、このメソッドの修正が必要
:return: 学習データの目的変数
"""
# train_y = pd.read_pickle(self.feature_dir_name + self.target + '_train.pkl')
df = | pd.read_pickle(self.feature_dir_name + f'{self.train_file_name}') | pandas.read_pickle |
import pandas as pd
import numpy as np
import re
import marcformat
class MarcExtractor(object):
tag_marc_file = 'MARC_FILE'
tag_filter_columns = 'FILTER_COLUMNS'
tag_marc_output_file = 'MARC_OUTPUT_FILE'
marcFile = ''
marcOutFile = ''
filteredColumns = []
df = pd.DataFrame()
df1 = pd.DataFrame()
df2 = pd.DataFrame()
chunkSize = 1000
count = 0
def __init__(self, config_file):
self.__processConfigFile(config_file)
pass
def processDataSet(self):
header = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
from asset_model import geometric_brownian_motion
#TODO
# class CcpiStrategy(InvestmentStrategy):
#
# def __init__(self, drawdown=None, multiplier=3):
# self.drawdown = drawdown
# self.multiplier = multiplier
#
# def update_portfolio_weighs(self, current_weights, account_value, returns):
# risky_weight = current_weights[0]
# safe_weight = current_weights[1]
# #TODO
# # if self.drawdown is not None:
# # peak = np.maximum(peak, account_value)
# # floor_value = peak * (1 - drawdown)
# cushion = (account_value - floor_value) / account_value
# risky_weight = self.multiplier * cushion
# risky_weight = np.minimum(risky_weight, 1)
# risky_weight = np.maximum(risky_weight, 0)
# safe_weight = 1 - risky_weight
# return np.array(risky_weight, safe_weight)
def backtest_cppi(risky_returns,
safe_returns=None,
risk_free_rate=0.03,
multiplier=3,
cushion_ratio=0.8,
drawdown=None,
start_value=1000):
"""
Run a backtest of the CPPI strategy, given a set of returns for the risky asset
Returns a dictionary containing: Asset Value History, Risk Budget History, Risky Weight History
:param risky_returns: history of risky returns
:param safe_returns: history of safe returns. If None, default to risk free rate
:param risk_free_rate: Rate of return of the risk free asset
:param multiplier: multiplier to allocate to risky asset = multiplier*(1-cushion)*wealth
:param cushion_ratio: ratio of the wealth to protect
:param drawdown: max drawdown allowed (as ratio)
:param start_value: Initial monetary value of the account
"""
# Ensure returns are a data frame
if isinstance(risky_returns, pd.Series):
risky_returns = pd.DataFrame(risky_returns, columns=["R"])
# If no safe asset is specified, default to the risk free rate
if safe_returns is None:
safe_returns = pd.DataFrame().reindex_like(risky_returns)
safe_returns.values[:] = risk_free_rate / 12 # fast way to set all values to a number
# set up the CPPI parameters
dates = risky_returns.index
steps = len(dates)
account_value = start_value
floor_value = start_value * cushion_ratio
peak = account_value
# set up some DataFrames for saving intermediate values
account_history = pd.DataFrame().reindex_like(risky_returns)
risky_w_history = | pd.DataFrame() | pandas.DataFrame |
import json
import pandas as pd
import plotly
import plotly.express as px
import plotly.graph_objs as go
from flask import Flask # Flask -> class name
from flask import jsonify
from flask import render_template
from web_app.funcs.db import Db
# declare application. initialize with Flask instance/class
app = Flask(__name__, template_folder='static/stylesheets')
# get MongoDB instance
db = Db()
# @app.route("/", methods=['GET'])
# @app.route('/wordclouds')
# def word_loud():
# return render_template('wordclouds.html')
#
# declare route # can also declare methods to accept. like -> 'GET'
@app.route("/", methods=['GET'])
@app.route("/home.html")
def index():
influencer_count_by_category_df = pd.read_csv("data_csv/statistics/influencers_count_by_category.csv", sep=',', header=0, skiprows=0)
fig = px.bar(influencer_count_by_category_df, x='_id', y='count', barmode='group')
fig = px.pie(influencer_count_by_category_df, values='count', names='_id', title='Percentage of influencers from each category')
graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return render_template("home.html", graph=graphJSON)
@app.route("/topic_modeling.html")
def topic_modeling():
return render_template("topic_modeling.html")
@app.route("/machine_learning.html")
def machine_learning():
f_importance_q = pd.read_csv("static/predictions/f_importance_questionnaire.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(data=[go.Bar(
x=f_importance_q['feature'],
y=f_importance_q['score'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='Feature Importance - Questionnaire', title_x=0.5,
xaxis=dict(title='Feautures', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Score', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
f_importance_q_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#-----------------------------------------------------------------------------------------
f_importance_m = pd.read_csv("static/predictions/f_importance_mongo.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(data=[go.Bar(
x=f_importance_m['feature'],
y=f_importance_m['score'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='Feature Importance - Crowdtangle', title_x=0.5,
xaxis=dict(title='Features', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Score', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
f_importance_m_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return render_template("machine_learning.html", f_importance_q=f_importance_q_graphjson, f_importance_mongo=f_importance_m_graphjson)
@app.route("/clustering.html")
def clustering():
return render_template("clustering.html")
@app.route("/wordclouds.html")
def wordclouds():
return render_template('wordclouds.html')
@app.route("/statistics.html")
def statistics():
# --------------- Number of influencer by category ---------------
influencer_count_by_category_df = pd.read_csv("data_csv/statistics/influencers_count_by_category.csv", sep=',', header=0, skiprows=0)
fig = go.Figure(data=[go.Pie(labels=influencer_count_by_category_df['_id'], values=influencer_count_by_category_df['count'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Percentage of influencers from each category', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
#colors = ['gold', 'mediumturquoise']
#fig.update_traces(hoverinfo='label+percent', marker=dict(colors=colors))
influencer_count_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# ----------------- Number of posts per day -----------------
day_freq_df = pd.read_csv("data_csv/statistics/post_frequency_per_day.csv", sep=',', header=0, skiprows=0)
fig = go.Figure(data=[go.Bar(
x=day_freq_df['day'],
y=day_freq_df['posts'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='No. of posts by day', title_x=0.5,
xaxis=dict(title='Day', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='No. of posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
'''fig = px.bar(day_freq_df, x="day", y="posts",
labels={
"day": "Day",
"posts": "No. of posts",
},
title="No. of posts by day")'''
day_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# ----------------- Number of posts per hour -----------------
hour_freq_df = pd.read_csv("data_csv/statistics/post_frequency_per_hour.csv", sep=',', header=0, skiprows=0)
fig = go.Figure(data=[go.Scatter(
x=hour_freq_df['time'],
y=hour_freq_df['posts'],
line_color='rgb(0, 179, 179)',
mode='lines+markers')
])
fig.update_layout(title='No. of posts by hour', title_x=0.5,
xaxis=dict(title='Time', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='No. of posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True, gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
'''fig = px.bar(hour_freq_df, x="time", y="posts",
labels={
"time": "Time",
"posts": "No. of posts",
},
title="No. of posts by hour")'''
hour_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
posts = db.myLeaderboardsNew_db.find()
df = pd.DataFrame(list(posts))
df['Likes'] = df['Likes'].str.replace(',', '').astype(float)
df['Views'] = df['Views'].str.replace(',', '').astype(float)
df['Total Posts'] = pd.to_numeric(df['Total Posts'], errors='coerce')
likes_per_category_sex = df.groupby(['category', 'sex'], as_index=False)['Likes'].sum()
# views_per_category_sex = df.groupby(['category', 'sex'], as_index=False)['Views'].sum()
posts_per_category_sex = df.groupby(['category', 'sex'], as_index=False)['Total Posts'].sum()
likes_per_category = df.groupby(['category'], as_index=False)['Likes'].sum()
# comments_per_category = df.groupby(['category'], as_index=False)['Comments'].sum()
# data = likes_per_category.copy()
# data['posts'] = posts_per_category_sex['posts']
# data['likes'] = likes_per_category['Likes']
# data['comments'] = comments_per_category
# data['calc'] = (data['likes'] + data['comments']) / df['Followers'] * data['posts']
category = df['category'].value_counts().to_frame().reset_index()
category.rename(columns={'index': 'category', 'category': 'frequency'}, inplace=True)
maritalStatus = df['marital_status'].value_counts().to_frame().reset_index()
maritalStatus.rename(columns={'index': 'marital_status', 'marital_status': 'frequency'}, inplace=True)
age = df['age'].value_counts().to_frame().reset_index()
age.rename(columns={'index': 'age', 'age': 'frequency'}, inplace=True)
sex = df['sex'].value_counts().to_frame().reset_index()
sex.rename(columns={'index': 'sex', 'sex': 'frequency'}, inplace=True)
# 1
fig = go.Figure(data=[go.Bar(
x=category['category'],
y=category['frequency'],
marker_color='rgb(0, 179, 179)')
])
# fig = px.bar(category, x='category', y='frequency')
fig.update_layout(title='Bar-chart-category', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Frequency', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
category_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# 2
#fig = px.bar(maritalStatus, x='marital_status', y='frequency')
fig = go.Figure(data=[go.Bar(
x=maritalStatus['marital_status'],
y=maritalStatus['frequency'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='Marital status', title_x=0.5,
xaxis=dict(title='Marital Status', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Frequency', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
maritalStatus_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# 3
# fig = px.bar(age, x='age', y='frequency')
fig = go.Figure(data=[go.Bar(
x=age['age'],
y=age['frequency'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='Age Distribution', title_x=0.5,
xaxis=dict(title='Age', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Frequency', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
age_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# 4
fig = go.Figure(data=[go.Bar(
x=sex['sex'],
y=sex['frequency'],
marker_color='rgb(0, 179, 179)')
])
# fig = px.bar(sex, x='sex', y='frequency')
fig.update_layout(title='Sex', title_x=0.5,
xaxis=dict(title='Sex', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Frequency', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
sex_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
fig = go.Figure(
data=[go.Pie(labels=likes_per_category['category'], values=likes_per_category['Likes'],
textinfo='percent',
insidetextorientation='radial',
title='Percentage of likes per category'
)])
# fig = px.pie(labels=likes_per_category['category'], values=likes_per_category['Likes'],
# title='Percentage of likes per category',
# )
fig.update_layout(title='No. of posts by day', title_x=0.5,
xaxis=dict(title='Day', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='No. of posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
likes_per_category_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# fig = px.Figure(data=[go.bar(likes_per_category_sex, x='category', y='Likes', color='sex')])
fig = px.bar(likes_per_category_sex, x="category", y="Likes",
color="sex", barmode='group')
fig.update_layout(title='likes_per_category_sex', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Likes', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
category_sex_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
fig = px.scatter(df, x='Total Posts', y='Likes', color='category', hover_name='sex', log_x=True, log_y=True)
fig.update_layout(title='Posts - Likes per user', title_x=0.5,
xaxis=dict(title='Total Posts', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Likes', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
posts_likes_scatter_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# fig = px.line(posts_per_category_sex, x='category', y='Total Posts', color='sex')
fig = px.bar(posts_per_category_sex, x="category", y="Total Posts",
color="sex", barmode='group')
fig.update_layout(title='Posts per category', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Total Posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
posts_per_category = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
fig = px.scatter(df, x='Views', y='Total Posts', color='category', hover_name='sex', log_x=True, log_y=True)
fig.update_layout(title='Video posts - Views per user', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Likes', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
views_posts_scatter_graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# fig = px.bar(likes_per_category_sex, x='category', y='Likes')
fig = go.Figure(data=[go.Bar(
x=likes_per_category_sex['category'],
y=likes_per_category_sex['Likes'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='likes-category-Bar-chart', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Likes', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True,
gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
likes_per_category_sex_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# fig = px.pie(likes_per_category['Likes'], values='Likes', names='category',
# title='Percentage of influencers from each category')
# likes_per_category_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
return render_template("statistics.html", influencerCountGraphJSON=influencer_count_graphjson,
dayGraph=day_graphjson, hourGraph=hour_graphjson, category_Graphjson=category_graphjson,
age_Graphjson=age_graphjson, maritalStatus_Graphjson=maritalStatus_graphjson,
sex_Graphjson=sex_graphjson,
likes_per_category_sex_Graphjson=likes_per_category_sex_graphjson,
likes_per_category_Graphjson=likes_per_category_graphjson,
category_sex_Graphjson=category_sex_graphjson,
posts_likes_scatter_Graphjson=posts_likes_scatter_json,
posts_per_category_Graphjson=posts_per_category,
views_posts_scatter_Graph_json=views_posts_scatter_graph_json,
likes_per_category_sex_Graphjson2=likes_per_category_sex_graphjson
)
@app.route("/questionaire_statistics.html")
def questionaire_statistics():
"""
df = read_questionnaire()
gender_df, willing_to_follow_male_df, willing_to_follow_female_df = cluster_by_gender(df)
fig = px.pie(gender_df, values='Counter', names='Sex', title='Annotators - Gender Percentage')
hour_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
fig = px.pie(willing_to_follow_male_df, values='Counter', names='Willing to follow', title='Male Annotators - Probability of Following')
willing_to_follow_male_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
fig = px.pie(willing_to_follow_female_df, values='Counter', names='Willing to follow', title='Female Annotators - Probability of Following')
willing_to_follow_female_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
"""
gender_graph_json = pd.read_csv("data_csv/gender_df.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(data=[go.Pie(values=gender_graph_json['Counter'], labels=gender_graph_json['Sex'],
textinfo='label+percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Annotators - Gender Percentage', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
colors = ['#198DE8', '#EF1453']
fig.update_traces(hoverinfo='label+percent', marker=dict(colors=colors))
gender_graph_json = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#-------------------------------------------------------------------------------------
colorsYesNo = ['#85e085', '#ffa64d']
willing_to_follow_male_df = pd.read_csv("data_csv/willing_to_follow_male_df.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(data=[go.Pie(values=willing_to_follow_male_df['Counter'], labels=willing_to_follow_male_df['Willing to follow'],
textinfo='label+percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Male Annotators - Probability of following', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_traces(hoverinfo='label+percent', marker=dict(colors=colorsYesNo))
willing_to_follow_male_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
willing_to_follow_female_df = pd.read_csv("data_csv/willing_to_follow_female_df.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=willing_to_follow_female_df['Counter'], labels=willing_to_follow_female_df['Willing to follow'],
textinfo='label+percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Female Annotators - Probability of following', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
fig.update_traces(hoverinfo='label+percent', marker=dict(colors=colorsYesNo))
willing_to_follow_female_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
########## pies with reasons
total_reasons = pd.read_csv("data_csv/total_reasons.csv", sep=',',
header=0, skiprows=0, nrows=10)
fig = go.Figure(
data=[go.Pie(values=total_reasons['Counter'],
labels=total_reasons['Reason'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='All categories - Top 10 reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=5,color="black")))
fig.update_traces(hoverinfo='label+percent')
total_reasons_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
athlete = pd.read_csv("data_csv/athlete.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=athlete['counts'],
labels=athlete['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Athlete category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=6, color="black")))
fig.update_traces(hoverinfo='label+percent')
athlete_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
dance = pd.read_csv("data_csv/dance.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=dance['counts'],
labels=dance['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Dance category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=5, color="black")))
fig.update_traces(hoverinfo='label+percent')
dance_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
ballet = pd.read_csv("data_csv/ballet.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=ballet['counts'],
labels=ballet['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Ballet category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=8, color="black")))
fig.update_traces(hoverinfo='label+percent')
ballet_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
nutrition = pd.read_csv("data_csv/nutrition.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=nutrition['counts'],
labels=nutrition['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Nutrition category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=6, color="black")))
fig.update_traces(hoverinfo='label+percent')
nutrition_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
pilates = pd.read_csv("data_csv/pilates.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=pilates['counts'],
labels=pilates['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Pilates category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=5, color="black")))
fig.update_traces(hoverinfo='label+percent')
pilates_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
body_building = pd.read_csv("data_csv/body_building.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=body_building['counts'],
labels=body_building['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Body building category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=5, color="black")))
fig.update_traces(hoverinfo='label+percent')
body_building_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
fitness_model = pd.read_csv("data_csv/fitness_model.csv", sep=',',
header=0, skiprows=0)
fig = go.Figure(
data=[go.Pie(values=fitness_model['counts'],
labels=fitness_model['reasons'],
textinfo='percent',
insidetextorientation='radial'
)])
fig.update_layout(title='Fitness model category - Reasons to follow', title_x=0.5,
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)', legend=dict(font=dict(size=5, color="black")))
fig.update_traces(hoverinfo='label+percent')
fitness_model_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
# -------------------------------------------------------------------------------------
#total_reasons_df = show_reasons(df)
return render_template("questionaire_statistics.html", hour_graph=gender_graph_json, willing_to_follow_male_graph=willing_to_follow_male_graphjson,
willing_to_follow_female_graph=willing_to_follow_female_graphjson,
athlete=athlete_graphjson, dance=dance_graphjson,ballet=ballet_graphjson, nutrition=nutrition_graphjson,
body_building=body_building_graphjson,pilates=pilates_graphjson, fitness_model=fitness_model_graphjson,
total_reasons=total_reasons_graphjson)
@app.route("/hashtags.html")
def hashtags():
# ----------------- Total number of hashtags from each category -----------------
df = pd.read_csv("data_csv/hashtags/no_of_hashtags_by_category.csv", sep=',', header=0, skiprows=0)
fig = go.Figure(data=[go.Bar(
x=df['category'],
y=df['hashtags_count'],
marker_color='rgb(175, 122, 197)')
])
fig.update_layout(title='No. of hashtags for each category', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='No. of total hashtags', showgrid=True, linecolor='rgb(204, 204, 204)',
showline=True, gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
no_of_hashtags_by_category_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#fig.write_image("images_for_report/no_of_hashtags_by_category.png")
# ----------------- Percentage of hashtags from each category -----------------
df = pd.read_csv("data_csv/hashtags/percentage_of_hashtags_by_category.csv", sep=',', header=0, skiprows=0)
df['hashtags_percentage'] = df['hashtags_percentage'] /100
fig = go.Figure(data=[go.Bar(
x=df['category'],
y=df['hashtags_percentage'],
marker_color='rgb(175, 122, 197)')
])
fig.update_layout(title='Percentage of hashtags by category', title_x=0.5,
xaxis=dict(title='Category', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='Percentage of hashtags', showgrid=True, linecolor='rgb(204, 204, 204)',
showline=True, gridcolor="rgb(204, 204, 204)", tickformat=".0%"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
percentage_of_hashtags_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#fig.write_image("images_for_report/percentage_of_hashtags_by_category.png")
# ----------------- Hashtags distribution -----------------
df = pd.read_csv("data_csv/hashtags/hashtag_distribution.csv", sep=',', header=0, skiprows=0).sort_values(by=['Number of Hashtags'], ascending=True)
fig = go.Figure(data=[go.Scatter(
x=df['Number of Hashtags'],
y=df['Number of Posts'],
line_color='rgb(0, 179, 179)',
mode='lines+markers')
])
fig.update_layout(title='Hashtags Distribution', title_x=0.5,
xaxis=dict(title='No. of hashtags', showgrid=False, linecolor='rgb(204, 204, 204)', zeroline=False),
yaxis=dict(title='No. of posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True, gridcolor="rgb(204, 204, 204)",
zeroline=True, zerolinecolor='rgb(204, 204, 204)', zerolinewidth=1),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
updatemenus=[
dict(
x=1.01,
xanchor="left",
buttons=list([
dict(label="Linear",
method="relayout",
args=[{"yaxis.type": "linear"}]),
dict(label="Log",
method="relayout",
args=[{"yaxis.type": "log"}])
]))]
)
hashtags_distribution_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#fig.write_image("images_for_report/hashtag_distribution.png")
# ----------------- Top hashtag frequencies -----------------
df = pd.read_csv("data_csv/hashtags/top_hashtag_frequency.csv", sep=',', header=0, skiprows=0).head(25)
fig = go.Figure(data=[go.Bar(
x=df['hashtag'],
y=df['count'],
marker_color='rgb(0, 179, 179)')
])
fig.update_layout(title='Most Popular Hashtags', title_x=0.5,
xaxis=dict(title='Hashtags', showgrid=False, linecolor='rgb(204, 204, 204)'),
yaxis=dict(title='No. of posts', showgrid=True, linecolor='rgb(204, 204, 204)', showline=True, gridcolor="rgb(204, 204, 204)"),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)'
)
hashtags_frequency_graphjson = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)
#fig.write_image("images_for_report/top_hashtag_frequency.png")
# ----------------- Hashtags engagement distribution -----------------
df = | pd.read_csv("data_csv/hashtags/hashtags_engagement_distribution.csv", sep=',', header=0, skiprows=0) | pandas.read_csv |
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, MaxPool2D, Flatten, GlobalAveragePooling2D, add, average, \
maximum
import tensorflow_addons as tfa
from tensorflow_docs import modeling
import tensorflow_datasets as tfds
import tensorflow_hub as hub
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import pathlib
import shutil
tfds.disable_progress_bar() # disable tqdm progress bar
print("TensorFlow Version: ", tf.__version__)
print("Number of GPU available: ", len(tf.config.experimental.list_physical_devices("GPU")))
def read_and_label(file_path):
img = tf.io.read_file(file_path)
img = decode_img(img)
label = get_label(file_path)
return img, label
def decode_img(img):
img = tf.image.decode_jpeg(img, channels=3)
img = tf.image.convert_image_dtype(img, tf.float32)
return tf.image.resize(img, [IMG_WIDTH, IMG_HEIGHT])
def get_label(file_path):
parts = tf.strings.split(file_path, os.path.sep)
return tf.reshape(tf.where(parts[-4] == CLASS_NAMES), [])
def augment(image, label):
image = tf.image.random_hue(image, max_delta=0.05, seed=5)
image = tf.image.random_contrast(image, 0.95, 1.05, seed=5) # tissue quality
image = tf.image.random_saturation(image, 0.95, 1.05, seed=5) # stain quality
image = tf.image.random_brightness(image, max_delta=0.05) # tissue thickness, glass transparency (clean)
image = tf.image.random_flip_left_right(image, seed=5) # cell orientation
image = tf.image.random_flip_up_down(image, seed=5) # cell orientation
image = tf.image.rot90(image, tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)) # cell orientation
return image, label
IMG_HEIGHT = 100
IMG_WIDTH = 100
BATCH_SIZE = 32
val_fraction = 10
# list location of all training images
data_dir = r'C:\Users\kuki\Desktop\Research\Skin\RCNN data\train'
data_dir = pathlib.Path(data_dir)
train_image_count = len(list(data_dir.glob('*\*\image\*.jpg')))
CLASS_NAMES = np.array(
[item.name for item in data_dir.glob('*') if item.name != "LICENSE.txt" and item.name != ".DS_store"])
list_ds = tf.data.Dataset.list_files(str(data_dir / '*/*/image/*'))
AUTOTUNE = tf.data.experimental.AUTOTUNE
labeled_ds = list_ds.map(read_and_label, num_parallel_calls=AUTOTUNE)
# plt.figure(figsize=(10,10))
# for idx,elem in enumerate(labeled_ds.take(25)):
# img = elem[0]
# label = elem[1]
# ax = plt.subplot(5,5,idx+1)
# plt.imshow(img)
# plt.title(CLASS_NAMES[label].title())
# plt.axis('off')
test_data_dir = r'C:\Users\kuki\Desktop\Research\Skin\RCNN data\test'
test_data_dir = pathlib.Path(test_data_dir)
test_image_count = len(list(test_data_dir.glob('*\*\image\*.jpg')))
test_list_ds = tf.data.Dataset.list_files(str(test_data_dir / '*\*\image\*'))
test_labeled_ds = test_list_ds.map(read_and_label, num_parallel_calls=AUTOTUNE)
val_image_count = test_image_count // 100 * val_fraction # // : floor division ex) 15/2 = 7.5 -> 7
STEPS_PER_EPOCH = train_image_count // BATCH_SIZE
TEST_STEPS = test_image_count // BATCH_SIZE
VALIDATION_STEPS = val_image_count // BATCH_SIZE
shuffle_buffer_size = 3000 # take first 100 from dataset and shuffle and pick one.
train_ds = (labeled_ds
# .skip(val_image_count)
.cache("./cache/fibro_train.tfcache")
.shuffle(buffer_size=shuffle_buffer_size)
.repeat()
.batch(BATCH_SIZE)
.map(augment, num_parallel_calls=AUTOTUNE) # always batch before mapping
.prefetch(buffer_size=AUTOTUNE)
)
# no shuffle, augment for validation and test dataset
val_ds = (test_labeled_ds
.shuffle(buffer_size=shuffle_buffer_size)
.take(val_image_count)
.cache("./cache/fibro_val.tfcache")
.repeat()
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE))
test_ds = (test_labeled_ds
.cache("./cache/fibro_test.tfcache")
.repeat()
.batch(BATCH_SIZE)
.prefetch(buffer_size=AUTOTUNE) # time it takes to produce next element
)
checkpoint_dir = "training_1"
shutil.rmtree(checkpoint_dir, ignore_errors=True)
def get_callbacks(name):
return [
modeling.EpochDots(),
tf.keras.callbacks.EarlyStopping(monitor='val_sparse_categorical_crossentropy',
patience=100, restore_best_weights=True),
# tf.keras.callbacks.TensorBoard(log_dir/name, histogram_freq=1),
tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_dir + "/{}/cp.ckpt".format(name),
verbose=0,
monitor='val_sparse_categorical_crossentropy',
save_weights_only=True,
save_best_only=True),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_sparse_categorical_crossentropy',
factor=0.1, patience=50, verbose=0, mode='auto',
min_delta=0.0001, cooldown=0, min_lr=0),
]
lr_schedule = tf.keras.optimizers.schedules.InverseTimeDecay(
1e-3,
decay_steps=STEPS_PER_EPOCH * 100,
decay_rate=1,
staircase=False)
def compilefit(model, name, lr, max_epochs=1000):
optimizer = tfa.optimizers.AdamW(lr)
model.compile(optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), 'accuracy'])
model_history = model.fit(train_ds,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=max_epochs,
verbose=0,
validation_data=val_ds,
callbacks=get_callbacks(name),
validation_steps=VALIDATION_STEPS,
use_multiprocessing=True
)
namename = os.path.dirname(name)
if not os.path.isdir(os.path.abspath(namename)):
os.mkdir(os.path.abspath(namename))
if not os.path.isdir(os.path.abspath(name)):
os.mkdir(os.path.abspath(name))
model.save(pathlib.Path(name) / 'full_model.h5')
return model_history
def plotdf(dfobj, condition, lr=None):
dfobj.pop('loss')
dfobj.pop('val_loss')
dfobj1 = dfobj.copy()
dfobj2 = dfobj.copy()
dfobj.pop('lr')
dfobj.pop('sparse_categorical_crossentropy')
dfobj.pop('val_sparse_categorical_crossentropy')
pd.DataFrame(dfobj).plot(title=condition)
dfobj1.pop('lr')
dfobj1.pop('accuracy')
dfobj1.pop('val_accuracy')
| pd.DataFrame(dfobj1) | pandas.DataFrame |
"""
Script that scans 3 band tiff files and creates csv file with columns:
image_id, width, height
"""
from __future__ import division
import tifffile as tiff
import os
from tqdm import tqdm
import pandas as pd
data_path = '../data'
three_band_path = os.path.join(data_path, 'three_band')
file_name_s = []
widths_3 = []
heights_3 = []
for file_name_ in tqdm(sorted(os.listdir(three_band_path))):
# TODO: crashes if there anything except tiff files in folder (for ex, QGIS creates a lot of aux files)
image_id = file_name_.split('.')[0]
image_3 = tiff.imread(os.path.join(three_band_path, file_name_))
file_name_s += [file_name_]
_, height_3, width_3 = image_3.shape
widths_3 += [width_3]
heights_3 += [height_3]
df = | pd.DataFrame({'file_name_': file_name_s, 'width': widths_3, 'height': heights_3}) | pandas.DataFrame |
from __future__ import division, print_function
import numpy as np
import pandas as pd
from astropy import units as u
__all__ = ['solid_angle', 'random_radec', 'check_random_state']
def solid_angle(ra_lim, dec_lim):
"""
Calculate solid angle with given ra & dec range.
All angles in degrees.
Parameters
----------
ra_lim : list-like, optional
ra limits.
dec_lim : list-like, optional
dec limits.
Returns
-------
area : float
Solid angle in square degrees.
"""
ra_lim = np.deg2rad(np.asarray(ra_lim))
dec_lim = np.deg2rad(np.asarray(dec_lim))
dsin_dec = np.sin(dec_lim[1]) - np.sin(dec_lim[0])
area = ra_lim.ptp() * dsin_dec * (180.0/np.pi)**2
return area
def random_radec(npoints, ra_lim=[0, 360], dec_lim=[-90, 90],
random_state=None, as_df=True):
"""
Generate random ra and dec points within a specified range.
All angles in degrees.
Parameters
----------
npoints : int
Number of random points to generate.
ra_lim : list-like, optional
ra limits.
dec_lim : list-like, optional
dec limits.
random_state : `None`, int, list of ints, or `numpy.random.RandomState`
If ``seed`` is `None`, return the `~numpy.random.RandomState`
singleton used by ``numpy.random``. If ``seed`` is an `int`,
return a new `~numpy.random.RandomState` instance seeded with
``seed``. If ``seed`` is already a `~numpy.random.RandomState`,
return it. Otherwise raise ``ValueError``.
as_df : bool
If True, return as pandas DataFrame.
Returns
-------
points : 2d ndarray of pandas DataFrame
Random ra and dec points in degrees.
"""
rng = check_random_state(random_state)
ra_lim = np.deg2rad(np.asarray(ra_lim))
dec_lim = np.deg2rad(np.asarray(dec_lim))
zlim = np.sin(dec_lim)
z = zlim[0] + zlim.ptp() * rng.uniform(size=int(npoints))
ra = ra_lim[0] + ra_lim.ptp() * rng.uniform(size=int(npoints))
dec = np.arcsin(z)
ra, dec = np.rad2deg(ra), np.rad2deg(dec)
points = np.array([ra, dec]).T
if as_df:
df = | pd.DataFrame(data=points, columns=['ra', 'dec']) | pandas.DataFrame |
import warnings
warnings.filterwarnings("ignore")
import logging
import os
from os.path import join
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.models import load_model, save_model, model_from_json
from keras.utils import multi_gpu_model
from utils import utils
import model_skeleton.featuristic as featuristic
import model_skeleton.malfusion as malfusion
import model_skeleton.echelon as echelon
from keras import optimizers
from trend import activation_trend_identification as ati
import config.settings as cnst
from .train_args import DefaultTrainArguments
from plots.plots import plot_partition_epoch_history
from predict import predict
from predict.predict_args import Predict as pObj, DefaultPredictArguments, QStats
import numpy as np
from sklearn.utils import class_weight
import pandas as pd
from plots.plots import display_probability_chart
from analyzers.collect_exe_files import get_partition_data, partition_pkl_files_by_count, partition_pkl_files_by_size
import gc
from shutil import copyfile
def train(args):
""" Function for training Tier-1 model with whole byte sequence data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t1_x_train) // args.t1_batch_size
args.t1_train_steps = train_steps - 1 if len(args.t1_x_train) % args.t1_batch_size == 0 else train_steps + 1
if args.t1_x_val is not None:
val_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = val_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else val_steps + 1
args.t1_ear = EarlyStopping(monitor='acc', patience=3)
args.t1_mcp = ModelCheckpoint(join(args.save_path, args.t1_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.direct_data_generator(args.t1_x_train, args.t1_y_train)
history = args.t1_model_base.fit(
data_gen,
class_weight=args.t1_class_weights,
steps_per_epoch=args.t1_train_steps,
epochs=args.t1_epochs,
verbose=args.t1_verbose,
callbacks=[args.t1_ear, args.t1_mcp]
# , validation_data=utils.data_generator(args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size,
# args.t1_shuffle) , validation_steps=val_steps
)
# plot_history(history, cnst.TIER1)
return history
def train_by_blocks(args):
""" Function for training Tier-2 model with top activation blocks data
Args:
args: An object containing all the required parameters for training
Returns:
history: Returns history object from keras training process
"""
train_steps = len(args.t2_x_train) // args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
data_gen = utils.data_generator(args.train_partition, args.t2_x_train, args.t2_y_train, args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=args.t2_train_steps,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def train_by_section(args):
''' Obsolete: For block-based implementation'''
train_steps = len(args.t2_x_train)//args.t2_batch_size
args.t2_train_steps = train_steps - 1 if len(args.t2_x_train) % args.t2_batch_size == 0 else train_steps + 1
if args.t2_x_val is not None:
val_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = val_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else val_steps + 1
args.t2_ear = EarlyStopping(monitor='acc', patience=3)
args.t2_mcp = ModelCheckpoint(join(args.save_path, args.t2_model_name),
monitor="acc", save_best_only=args.save_best, save_weights_only=False)
# Check MAX_LEN modification is needed - based on proportion of section vs whole file size
# args.max_len = cnst.MAX_FILE_SIZE_LIMIT + (cnst.CONV_WINDOW_SIZE * len(args.q_sections))
data_gen = utils.direct_data_generator_by_section(args.q_sections, args.t2_x_train, args.t2_y_train)
history = args.t2_model_base.fit(
data_gen,
class_weight=args.t2_class_weights,
steps_per_epoch=len(args.t2_x_train)//args.t2_batch_size + 1,
epochs=args.t2_epochs,
verbose=args.t2_verbose,
callbacks=[args.t2_ear, args.t2_mcp]
# , validation_data=utils.data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val
# , args.t2_max_len, args.t2_batch_size, args.t2_shuffle)
# , validation_steps=args.val_steps
)
# plot_history(history, cnst.TIER2)
return history
def change_model(model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = model_from_json(model.to_json())
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
def change_hydra(model, ech_model, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE)):
""" Function to transfer weights of pre-trained Malconv to the block based model with reduced input shape.
Args:
model: An object with required parameters/hyper-parameters for loading, configuring and compiling
new_input_shape: a value <= Tier-1 model's input shape. Typically, ( Num of Conv. Filters * Size of Conv. Stride )
Returns:
new_model: new model with reduced input shape and weights updated
"""
model._layers[0].batch_input_shape = new_input_shape
new_model = ech_model # model_from_json(model.to_json())
print("Updating Layer weights")
for layer in new_model.layers:
try:
layer.set_weights(model.get_layer(name=layer.name).get_weights())
logging.info("Loaded and weights set for layer {}".format(layer.name))
except Exception as e:
logging.exception("Could not transfer weights for layer {}".format(layer.name))
return new_model
def get_model1(args):
""" Function to prepare model required for Tier-1's training/prediction.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model1: Returns a Tier-1 model
"""
model1 = None
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER1:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER1 - " + args.pretrained_t1_model_name)
model1 = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model1.summary()))
model1 = change_model(model1, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model1.summary()))
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return multi_gpu_model1
else:
logging.info("[ CAUTION ] : Resuming with old model")
model1 = load_model(args.model_path + args.t1_model_name, compile=False)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
if cnst.NUM_GPU > 1:
multi_gpu_model1 = multi_gpu_model(model1, gpus=cnst.NUM_GPU)
# multi_gpu_model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
return multi_gpu_model1
else:
logging.info("[CAUTION]: Proceeding training with custom model skeleton")
if args.byte:
premodel = load_model(args.model_path + args.pretrained_t1_model_name, compile=False)
echmodel = echelon.model(args.t1_max_len, args.t1_win_size)
change_hydra(premodel, echmodel)
elif args.featuristic:
model1 = featuristic.model(args.total_features)
elif args.fusion:
model1 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model1.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# param_dict = {'lr': [0.00001, 0.0001, 0.001, 0.1]}
# model_gs = GridSearchCV(model, param_dict, cv=10)
# model1.summary()
return model1
def get_model2(args):
'''Obsolete: For block-based implementation'''
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
print("\n\n\nChanging model input ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n\n")
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def get_block_model2(args):
""" Function to prepare model required for Tier-2's training/prediction - For top activation block implementation.
Model's input shape is set to a reduced value specified in TIER2_NEW_INPUT_SHAPE parameter in settings.
Args:
args: An object with required parameters/hyper-parameters for loading, configuring and compiling
Returns:
model2: Returns a Tier-2 model
"""
model2 = None
optimizer = optimizers.Adam(lr=0.001)
if args.resume:
if cnst.USE_PRETRAINED_FOR_TIER2:
logging.info("[ CAUTION ] : Resuming with pretrained model for TIER2 - " + args.pretrained_t2_model_name)
model2 = load_model(args.model_path + args.pretrained_t2_model_name, compile=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
logging.info("[ CAUTION ] : Resuming with old model")
model2 = load_model(args.model_path + args.t1_model_name, compile=False)
logging.info(str(model2.summary()))
model2 = change_model(model2, new_input_shape=(None, cnst.TIER2_NEW_INPUT_SHAPE))
logging.info(str(model2.summary()))
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=["accuracy"])
if cnst.NUM_GPU > 1:
model2 = multi_gpu_model(model2, gpus=cnst.NUM_GPU)
else:
# logging.info("*************************** CREATING new model *****************************")
if args.byte:
model2 = echelon.model(args.t2_max_len, args.t2_win_size)
elif args.featuristic:
model2 = featuristic.model(len(args.selected_features))
elif args.fusion:
model2 = malfusion.model(args.max_len, args.win_size)
# optimizer = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
optimizer = optimizers.Adam(lr=0.001) # , beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
model2.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# model2.summary()
return model2
def train_tier1(args):
# logging.info("************************ TIER 1 TRAINING - STARTED ****************************
# Samples:", len(args.t1_x_train))
if args.tier1:
if args.byte:
return train(args)
# logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
def train_tier2(args):
# logging.info("************************ TIER 2 TRAINING - STARTED ****************************")
if args.tier2:
if args.byte:
return train_by_section(args)
# print("************************ TIER 2 TRAINING - ENDED ****************************")
def evaluate_tier1(args):
""" Function to evaluate the Tier-1 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-1 evaluation loss and accuracy
"""
eval_steps = len(args.t1_x_val) // args.t1_batch_size
args.t1_val_steps = eval_steps - 1 if len(args.t1_x_val) % args.t1_batch_size == 0 else eval_steps + 1
history = args.t1_model_base.evaluate_generator(
# utils.train_data_generator(args.val_partition, args.t1_x_val, args.t1_y_val, args.t1_max_len, args.t1_batch_size, args.t1_shuffle),
utils.direct_data_generator(args.t1_x_val, args.t1_y_val),
steps=args.t1_val_steps,
verbose=args.t1_verbose
)
# plot_history(history, cnst.TIER1)
return history
def evaluate_tier2(args):
""" Function to evaluate the Tier-2 model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with Tier-2 evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
# utils.train_data_generator_by_section(args.spartition, args.q_sections, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
utils.direct_data_generator_by_section(args.q_sections, args.t2_x_val, args.t2_y_val),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def evaluate_tier2_block(args):
""" Function to evaluate the Tier-2 block-based model being trained at the end of each Epoch. (Not after completing each partition !)
Args:
args: An object with evaluation parameters and data.
Returns:
history: Returns a history object with block-model evaluation loss and accuracy
"""
eval_steps = len(args.t2_x_val) // args.t2_batch_size
args.t2_val_steps = eval_steps - 1 if len(args.t2_x_val) % args.t2_batch_size == 0 else eval_steps + 1
history = args.t2_model_base.evaluate_generator(
utils.data_generator(args.val_partition, args.t2_x_val, args.t2_y_val, args.t2_max_len, args.t2_batch_size, args.t2_shuffle),
steps=args.t2_val_steps,
verbose=args.t2_verbose
)
# plot_history(history, cnst.TIER2)
return history
def init(model_idx, train_partitions, val_partitions, fold_index):
""" Module for Training and Validation
# ##################################################################################################################
# OBJECTIVES:
# 1) Train Tier-1 and select its decision threshold for classification using Training data
# 2) Perform ATI over training data and select influential (Qualified) sections to be used by Tier-2
# 3) Train Tier-2 on selected PE sections' top activation blocks
# 4) Save trained models for Tier-1 and Tier-2
# ##################################################################################################################
Args:
model_idx: Default 0 for byte sequence models. Do not change.
train_partitions: list of partition indexes to be used for Training
val_partitions: list of partition indexes to be used for evaluation and validation
fold_index: current fold of cross-validation
Returns:
None (Resultant data are stored in CSV for further use)
"""
t_args = DefaultTrainArguments()
if cnst.EXECUTION_TYPE[model_idx] == cnst.BYTE: t_args.byte = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FEATURISTIC: t_args.featuristic = True
elif cnst.EXECUTION_TYPE[model_idx] == cnst.FUSION: t_args.fusion = True
t_args.t1_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t2_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + ".h5"
t_args.t1_best_model_name = cnst.TIER1_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
t_args.t2_best_model_name = cnst.TIER2_MODELS[model_idx] + "_" + str(fold_index) + "_best.h5"
# logging.info("################################## TRAINING TIER-1 ###########################################")
# partition_tracker_df = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "partition_tracker_"+str(fold_index)+".csv")
if not cnst.SKIP_TIER1_TRAINING:
logging.info("************************ TIER 1 TRAINING - STARTED ****************************")
t_args.t1_model_base = get_model1(t_args)
best_val_loss = float('inf')
best_val_acc = 0
epochs_since_best = 0
mean_trn_loss = []
mean_trn_acc = []
mean_val_loss = []
mean_val_acc = []
cwy = []
for tp_idx in train_partitions:
cwdf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
cwy = np.concatenate([cwy, cwdf.iloc[:, 1].values])
t_args.t1_class_weights = class_weight.compute_class_weight('balanced', np.unique(cwy), cwy)
for epoch in range(cnst.EPOCHS): # External Partition Purpose
logging.info("[ PARTITION LEVEL TIER-1 EPOCH : %s ]", epoch+1)
cur_trn_loss = []
cur_trn_acc = []
for tp_idx in train_partitions:
logging.info("Training on partition: %s", tp_idx)
tr_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(tp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = tr_datadf.iloc[:, 0].values, None, tr_datadf.iloc[:, 1].values, None
# t_args.t1_class_weights = class_weight.compute_class_weight('balanced',
# np.unique(t_args.t1_y_train), t_args.t1_y_train) # Class Imbalance Tackling - Setting class weights
t_args.train_partition = get_partition_data(None, None, tp_idx, "t1")
t_history = train_tier1(t_args)
cur_trn_loss.append(t_history.history['loss'][0])
cur_trn_acc.append(t_history.history['accuracy'][0])
del t_args.train_partition
gc.collect()
cnst.USE_PRETRAINED_FOR_TIER1 = False
cur_val_loss = []
cur_val_acc = []
# Evaluating after each epoch for early stopping over validation loss
logging.info("Evaluating on validation data . . .")
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p" + str(vp_idx) + ".csv", header=None)
t_args.t1_x_train, t_args.t1_x_val, t_args.t1_y_train, t_args.t1_y_val = None, val_datadf.iloc[:, 0].values, None, val_datadf.iloc[:, 1].values
t_args.val_partition = get_partition_data(None, None, vp_idx, "t1")
v_history = evaluate_tier1(t_args)
cur_val_loss.append(v_history[0])
cur_val_acc.append(v_history[1])
del t_args.val_partition
gc.collect()
mean_trn_loss.append(np.mean(cur_trn_loss))
mean_trn_acc.append(np.mean(cur_trn_acc))
mean_val_loss.append(np.mean(cur_val_loss))
mean_val_acc.append(np.mean(cur_val_acc))
if mean_val_loss[epoch] < best_val_loss:
best_val_loss = mean_val_loss[epoch]
try:
copyfile(join(t_args.save_path, t_args.t1_model_name), join(t_args.save_path, t_args.t1_best_model_name))
except Exception as e:
logging.exception("Saving EPOCH level best model failed for Tier1")
epochs_since_best = 0
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s\tUpdating best loss: %s", str(mean_val_loss[epoch]).ljust(25), str(mean_val_acc[epoch]).ljust(25), best_val_loss)
else:
logging.info("Current Epoch Loss: %s\tCurrent Epoch Acc: %s", mean_val_loss[epoch], mean_val_acc[epoch])
epochs_since_best += 1
logging.info('{} epochs passed since best val loss of {}'.format(epochs_since_best, best_val_loss))
if cnst.EARLY_STOPPING_PATIENCE_TIER1 <= epochs_since_best:
logging.info('Triggering early stopping as no improvement found since last {} epochs! Best Loss: {}'.format(epochs_since_best, best_val_loss))
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1")
break
if epoch + 1 == cnst.EPOCHS:
try:
copyfile(join(t_args.save_path, t_args.t1_best_model_name), join(t_args.save_path, t_args.t1_model_name))
except Exception as e:
logging.exception("Retrieving EPOCH level best model failed for Tier1.")
del t_args.t1_model_base
gc.collect()
plot_partition_epoch_history(mean_trn_acc, mean_val_acc, mean_trn_loss, mean_val_loss, "Tier1_F" + str(fold_index+1))
logging.info("************************ TIER 1 TRAINING - ENDED ****************************")
else:
cnst.USE_PRETRAINED_FOR_TIER1 = False # Use model trained through Echelon
logging.info("SKIPPED: Tier-1 Training process")
if cnst.ONLY_TIER1_TRAINING:
return
# TIER-1 PREDICTION OVER TRAINING DATA [Select THD1]
min_boosting_bound = None
max_thd1 = None
b1val_partition_count = 0
if not cnst.SKIP_TIER1_VALIDATION:
logging.info("*** Prediction over Validation data in TIER-1 to select THD1 and Boosting Bound")
pd.DataFrame().to_csv(cnst.PROJECT_BASE_PATH + cnst.ESC + "data" + cnst.ESC + "b1_val_" + str(fold_index) + "_pkl.csv", header=None, index=None)
for vp_idx in val_partitions:
val_datadf = pd.read_csv(cnst.DATA_SOURCE_PATH + cnst.ESC + "p"+str(vp_idx)+".csv", header=None)
predict_t1_val_data = pObj(cnst.TIER1, cnst.TIER1_TARGET_FPR, val_datadf.iloc[:, 0].values, val_datadf.iloc[:, 1].values)
predict_t1_val_data.partition = get_partition_data(None, None, vp_idx, "t1")
predict_t1_val_data = predict.predict_tier1(model_idx, predict_t1_val_data, fold_index)
predict_t1_val_data = predict.select_thd_get_metrics_bfn_mfp(cnst.TIER1, predict_t1_val_data)
min_boosting_bound = predict_t1_val_data.boosting_upper_bound if min_boosting_bound is None or predict_t1_val_data.boosting_upper_bound < min_boosting_bound else min_boosting_bound
max_thd1 = predict_t1_val_data.thd if max_thd1 is None or predict_t1_val_data.thd > max_thd1 else max_thd1
del predict_t1_val_data.partition # Release Memory
gc.collect()
val_b1datadf = pd.concat([pd.DataFrame(predict_t1_val_data.xB1), | pd.DataFrame(predict_t1_val_data.yB1) | pandas.DataFrame |
"""Utilities related to pandas data frames."""
from pandas import merge
def get_colnames(df):
"""Get the column names of a data frame as a list."""
# Faster than using list(df)
return df.columns.get_values().tolist()
def split(df, by, include_by=True):
"""Split a data frame by a grouping column.
Parameters
----------
df: pandas.DataFrame
The input data frame.
by: str
The name of the column to split by.
include_by: bool
If True, the group column is conserved in the output tables, if False it
is removed.
Returns
-------
List[pandas.DataFrame]
"""
split_dfs = [rows for _, rows in df.groupby(by)]
if not include_by:
split_dfs = [x.drop(by, axis=1).reindex() for x in split_dfs]
return split_dfs
def df_subtract(df1, df2):
"""Remove rows in `df` which are also in `df2` (both DataFrames)."""
merged = | merge(df1, df2, how="outer", indicator=True) | pandas.merge |
import cv2
import numpy as np
import base64
import pandas as pd
import plotly.graph_objects as go
from datetime import datetime, time, timedelta, date
import wget
from zipfile import ZipFile
import os
import json
import plotly.express as px
import joblib
# pip install streamlit --upgrade
# pip install streamlit==0.78.0
class Inference:
def __init__(self,model_path="model/model.pkl"):
self.nomi_regioni = ['Abruzzo', 'Basilicata', 'Calabria', 'Campania', 'Emilia-Romagna', '<NAME>', 'Lazio', 'Liguria', 'Lombardia', 'Marche', 'Molise', '<NAME>', '<NAME>', 'Piemonte', 'Puglia', 'Sardegna', 'Sicilia', 'Toscana', 'Umbria', "Valle d'Aosta", 'Veneto']
dict_names = {"bianca":0,"gialla": 1, "arancione": 2, "rossa": 3}
self.names = list(dict_names)
self.model = joblib.load(model_path)
def predict(self,inputs, regione):
idx = self.nomi_regioni.index(regione)
v = [ 0 for i in range(0,len(self.nomi_regioni))]
v[idx] = 1
inputs.extend(v)
X = np.array(inputs,dtype=np.float).reshape(1,-1)
Y_hat = self.model.predict(X)
return self.names[int(Y_hat[0])]
def fig_stats_variation(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Variazione Giornaliera"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
# Script to aggregate data
# https://pandas.pydata.org/docs/reference/api/pandas.DataFrame.resample.html
dft = df.copy()
dft = dft.set_index("data")
dft["count"] = [1 for i in range(0,len(df))]
agg = {"count" : "size"}
for s in options:
agg[s] = "median"
dft = dft.resample('1D').agg(agg)
# Variation daily
df = {"data": dft.index[1:]}
for s in options:
start = dft[s][:-1].values
end = dft[s][1:].values
df[s] = ( end - start )
#df[s] = np.round( ( end / start -1 )*100,2)
df = pd.DataFrame(df)
df = df.set_index("data")
#dft.dropna()
#print(dft.head())
# Rolling average variation
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
def fig_stats(regione, data_inizio, data_fine,options):
#select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
df = None
title = "Andamento Cumulativo"
if regione=="Italia":
df = get_data_nazione()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df.set_index("data")
else:
df = get_data_regioni()
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
df = df[df["denominazione_regione"]==regione]
df = df.set_index("data")
#df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
fig = go.Figure()
for name in options:
fig.add_trace(go.Scatter(x=df.index, y=df[name],
mode='lines+markers',#mode='lines+markers',
name=name.replace("_"," "),
hoverlabel_namelength=-1))
fig.update_layout(
showlegend=True,
hovermode = "x",
yaxis_title = "Persone",
#paper_bgcolor = "rgb(0,0,0)" ,
#plot_bgcolor = "rgb(10,10,10)" ,
legend=dict(orientation="h",yanchor="bottom", y=1.02,xanchor="right", x=1,title_text=""),
dragmode="pan",
title=dict(
x = 0.5,
y = 0.05,
text = title,
font=dict(
size = 20,
color = "rgb(0,0,0)"
)
)
)
return fig
def get_stats(regione,data_inizio, data_fine):
select = ["deceduti","totale_casi","dimessi_guariti","variazione_totale_positivi"]
df = None
if regione=="Italia":
df = get_data_nazione()
else:
df = get_data_regioni()
df = df[df["denominazione_regione"]==regione]
df = df[ (df["data"]>=data_inizio) & (df["data"]<=data_fine) ]
incremento = ( df.iloc[-1,:][select] - df.iloc[-2,:][select] ) .to_dict()
data = ( df.iloc[-1,:][select]) .to_dict()
df = pd.DataFrame ([data,incremento],columns=select, index=["Situazione","Incremento"])
df = df.rename(columns={"deceduti": "Deceduti", "totale_casi": "Totale Casi", "dimessi_guariti": "Dimessi Guariti","variazione_totale_positivi" : "Var. Totale Positivi" })
return df
def get_nomi_regioni():
df = get_data_regioni()
#df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df["denominazione_regione"].unique().tolist()
def get_options():
select = ["deceduti","totale_casi","dimessi_guariti","terapia_intensiva","tamponi","isolamento_domiciliare"]
return select
def get_date():
df = get_data_nazione()
start = df["data"].tolist()[0]
end= df["data"].tolist()[-1]
d = end
date = []
date.append(d.strftime("%Y-%m-%d"))
while (d>start):
t = d -timedelta(days=0, weeks=1)
date.append(t.strftime("%Y-%m-%d"))
d = t
#date = [ d.strftime("%Y-%m-%d") for d in df["data"].dt.date]
return date
def get_data_nazione():
'''
Keys: ['data', 'stato', 'ricoverati_con_sintomi', 'terapia_intensiva',
'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi',
'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti',
'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening',
'totale_casi', 'tamponi', 'casi_testati', 'note',
'ingressi_terapia_intensiva', 'note_test', 'note_casi',
'totale_positivi_test_molecolare',
'totale_positivi_test_antigenico_rapido', 'tamponi_test_molecolare',
'tamponi_test_antigenico_rapido']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
url = "data/dpc-covid19-ita-andamento-nazionale.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def get_data_province():
'''
Keys: ['data', 'stato', 'codice_regione', 'denominazione_regione',
'codice_provincia', 'denominazione_provincia', 'sigla_provincia', 'lat',
'long', 'totale_casi', 'note', 'codice_nuts_1', 'codice_nuts_2',
'codice_nuts_3']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
url = "data/dpc-covid19-ita-province.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def get_data_regioni():
'''
Keys: ['data', 'stato', 'codice_regione', 'denominazione_regione', 'lat',
'long', 'ricoverati_con_sintomi', 'terapia_intensiva',
'totale_ospedalizzati', 'isolamento_domiciliare', 'totale_positivi',
'variazione_totale_positivi', 'nuovi_positivi', 'dimessi_guariti',
'deceduti', 'casi_da_sospetto_diagnostico', 'casi_da_screening',
'totale_casi', 'tamponi', 'casi_testati', 'note',
'ingressi_terapia_intensiva', 'note_test', 'note_casi',
'totale_positivi_test_molecolare',
'totale_positivi_test_antigenico_rapido', 'tamponi_test_molecolare',
'tamponi_test_antigenico_rapido', 'codice_nuts_1', 'codice_nuts_2']
'''
#url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
url = "data/dpc-covid19-ita-regioni.csv"
df = pd.read_csv(url)
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
return df
def create_dataset(df_p):
# Data string to datetime
# Le date sono codificate come stringhe. Le vogliamo come datetime
df["data"] = [ datetime.strptime(d, "%Y-%m-%dT%H:%M:%S") for d in df["data"]]
# Filtro emilia - romagna
df = df[df["denomiazione_regione"]=="Emilia-Romagna"]
# Dataset (ultime 4 settimane)
data_end = df["data"].tolist()[-1] # data di oggi
data_start = data_end - timedelta(days=0,weeks=2,hours=0,minutes=0)
df_f = df[ (df["data"]>=data_start) & (df["data"]<=data_end) ]
# Dataset (ultime 2 settimane)
data_end = df["data"].tolist()[-1] # data di oggi
data_start = data_end - timedelta(days=0,weeks=1,hours=0,minutes=0)
df_ff = df[ (df["data"]>=data_start) & (df["data"]<=data_end) ]
# Calcolo Indici Regionali Emilia Romagna
# id1 Totale casi ultime 2 settimate
i1 = df_f["totale_casi"]
#id2 Ricoverati con sentomi utlime 2 settimane
i2 = df_f["ricoverati_con_sintomi"]
#id3 Terapia intensiva ultime 2 settimate
i3 = df_f["terapia_intensiva"]
#id4 Isolamento dociciliare
i4 = df_f["isolamento_domiciliare"]
# id7 % tamponi positivi
i7 = ( df_f["totale_positivi_test_molecolare"] + df_f["totale_positivi_test_antigenico_rapido"] ) / df_f["tamponi"]
# Numero di deceduti nelle 2 settimane
e1 = df_f["deceduti"]
i12 = df_f["casi_da_sospetto_diagnostico"]
i13 = df_ff["totale_casi"]
def get_data_locally():
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-province/dpc-covid19-ita-province.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-province.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-regioni/dpc-covid19-ita-regioni.csv"
df = pd.read_csv(url)
df.to_csv("data/dpc-covid19-ita-regioni.csv")
url = "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv"
df = | pd.read_csv(url) | pandas.read_csv |
import pandas as pd
import numpy as np
import os
input_data_folder = "../labeled_logs_csv"
output_data_folder = "../labeled_logs_csv_processed"
filenames = ["BPIC15_%s_f%s.csv"%(municipality, formula) for municipality in range(1,6) for formula in range(2,3)]
case_id_col = "Case ID"
activity_col = "Activity"
resource_col = "org:resource"
timestamp_col = "time:timestamp"
label_col = "label"
pos_label = "deviant"
neg_label = "regular"
category_freq_threshold = 10
# features for classifier
dynamic_cat_cols = ["Activity", "monitoringResource", "question", "org:resource"]
static_cat_cols = ["Responsible_actor"]
dynamic_num_cols = []
static_num_cols = ["SUMleges"]
static_cols_base = static_cat_cols + static_num_cols + [case_id_col, label_col]
dynamic_cols = dynamic_cat_cols + dynamic_num_cols + [timestamp_col]
cat_cols = dynamic_cat_cols + static_cat_cols
def split_parts(group, parts_col="parts"):
return pd.Series(group[parts_col].str.split(',').values[0], name='vals')
def extract_timestamp_features(group):
group = group.sort_values(timestamp_col, ascending=False, kind='mergesort')
tmp = group[timestamp_col] - group[timestamp_col].shift(-1)
tmp = tmp.fillna(0)
group["timesincelastevent"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
tmp = group[timestamp_col] - group[timestamp_col].iloc[-1]
tmp = tmp.fillna(0)
group["timesincecasestart"] = tmp.apply(lambda x: float(x / np.timedelta64(1, 'm'))) # m is for minutes
group = group.sort_values(timestamp_col, ascending=True, kind='mergesort')
group["event_nr"] = range(1, len(group) + 1)
return group
def get_open_cases(date):
return sum((dt_first_last_timestamps["start_time"] <= date) & (dt_first_last_timestamps["end_time"] > date))
for filename in filenames:
data = pd.read_csv(os.path.join(input_data_folder, filename), sep=";", encoding='latin-1')
data = data[data["caseStatus"] == "G"] # G is closed, O is open
# switch labels (deviant/regular was set incorrectly before)
data = data.set_value(col=label_col, index=(data[label_col] == pos_label), value="normal")
data = data.set_value(col=label_col, index=(data[label_col] == neg_label), value=pos_label)
data = data.set_value(col=label_col, index=(data[label_col] == "normal"), value=neg_label)
# split the parts attribute to separate columns
ser = data.groupby(level=0).apply(split_parts)
dt_parts = pd.get_dummies(ser).groupby(level=0).apply(lambda group: group.max())
data = | pd.concat([data, dt_parts], axis=1) | pandas.concat |
import pandas as pd
# import numpy as np
from collections import namedtuple
class BasicIndicators:
def __init__(self,df,noon_peak,moon_peak,**kwargs):
self.df = df
self.noon_peak = noon_peak
self.moon_peak=moon_peak
if kwargs:
for name, value in kwargs.items():
setattr(self, name, value)
def _get_peak_indicators(self,df):
is_noon_peak = (df['order_time'] >= pd.to_datetime(self.noon_peak.begin)) & (df['order_time'] <= pd.to_datetime(self.noon_peak.end))
is_moon_peak = (df['order_time'] >= | pd.to_datetime(self.moon_peak.begin) | pandas.to_datetime |
# -*- coding: utf-8 -*-
import unittest
import platform
import pandas as pd
import numpy as np
import pyarrow.parquet as pq
import hpat
from hpat.tests.test_utils import (
count_array_REPs, count_parfor_REPs, count_array_OneDs, get_start_end)
from hpat.tests.gen_test_data import ParquetGenerator
from numba import types
from numba.config import IS_32BITS
from numba.errors import TypingError
_cov_corr_series = [(pd.Series(x), pd.Series(y)) for x, y in [
(
[np.nan, -2., 3., 9.1],
[np.nan, -2., 3., 5.0],
),
# TODO(quasilyte): more intricate data for complex-typed series.
# Some arguments make assert_almost_equal fail.
# Functions that yield mismaching results:
# _column_corr_impl and _column_cov_impl.
(
[complex(-2., 1.0), complex(3.0, 1.0)],
[complex(-3., 1.0), complex(2.0, 1.0)],
),
(
[complex(-2.0, 1.0), complex(3.0, 1.0)],
[1.0, -2.0],
),
(
[1.0, -4.5],
[complex(-4.5, 1.0), complex(3.0, 1.0)],
),
]]
min_float64 = np.finfo('float64').min
max_float64 = np.finfo('float64').max
test_global_input_data_float64 = [
[1., np.nan, -1., 0., min_float64, max_float64],
[np.nan, np.inf, np.NINF, np.NZERO]
]
min_int64 = np.iinfo('int64').min
max_int64 = np.iinfo('int64').max
max_uint64 = np.iinfo('uint64').max
test_global_input_data_integer64 = [
[1, -1, 0],
[min_int64, max_int64],
[max_uint64]
]
test_global_input_data_numeric = test_global_input_data_integer64 + test_global_input_data_float64
test_global_input_data_unicode_kind4 = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
test_global_input_data_unicode_kind1 = [
'ascii',
'12345',
'1234567890',
]
def _make_func_from_text(func_text, func_name='test_impl'):
loc_vars = {}
exec(func_text, {}, loc_vars)
test_impl = loc_vars[func_name]
return test_impl
def _make_func_use_binop1(operator):
func_text = "def test_impl(A, B):\n"
func_text += " return A {} B\n".format(operator)
return _make_func_from_text(func_text)
def _make_func_use_binop2(operator):
func_text = "def test_impl(A, B):\n"
func_text += " A {} B\n".format(operator)
func_text += " return A\n"
return _make_func_from_text(func_text)
def _make_func_use_method_arg1(method):
func_text = "def test_impl(A, B):\n"
func_text += " return A.{}(B)\n".format(method)
return _make_func_from_text(func_text)
GLOBAL_VAL = 2
class TestSeries(unittest.TestCase):
def test_create1(self):
def test_impl():
df = pd.DataFrame({'A': [1, 2, 3]})
return (df.A == 1).sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_unicode(self):
def test_impl():
S = pd.Series([
['abc', 'defg', 'ijk'],
['lmn', 'opq', 'rstuvwxyz']
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_integer(self):
def test_impl():
S = pd.Series([
[123, 456, -789],
[-112233, 445566, 778899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
@unittest.skip('Feature request: implement Series::ctor with list(list(type))')
def test_create_list_list_float(self):
def test_impl():
S = pd.Series([
[1.23, -4.56, 7.89],
[11.2233, 44.5566, -778.899]
])
return S
hpat_func = hpat.jit(test_impl)
result_ref = test_impl()
result = hpat_func()
pd.testing.assert_series_equal(result, result_ref)
def test_create2(self):
def test_impl(n):
df = pd.DataFrame({'A': np.arange(n)})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
self.assertEqual(hpat_func(n), test_impl(n))
def test_create_series1(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index1(self):
# create and box an indexed Series
def test_impl():
A = pd.Series([1, 2, 3], ['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index2(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index3(self):
def test_impl():
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name='A')
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_create_series_index4(self):
def test_impl(name):
A = pd.Series([1, 2, 3], index=['A', 'C', 'B'], name=name)
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func('A'), test_impl('A'))
def test_create_str(self):
def test_impl():
df = pd.DataFrame({'A': ['a', 'b', 'c']})
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(), test_impl())
def test_pass_df1(self):
def test_impl(df):
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_df_str(self):
def test_impl(df):
return (df.A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df), test_impl(df))
def test_pass_series1(self):
# TODO: check to make sure it is series type
def test_impl(A):
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series2(self):
# test creating dataframe from passed series
def test_impl(A):
df = pd.DataFrame({'A': A})
return (df.A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_str(self):
def test_impl(A):
return (A == 'a').sum()
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['a', 'b', 'c']})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_pass_series_index1(self):
def test_impl(A):
return A
hpat_func = hpat.jit(test_impl)
S = pd.Series([3, 5, 6], ['a', 'b', 'c'], name='A')
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_size(self):
def test_impl(S):
return S.size
hpat_func = hpat.jit(test_impl)
n = 11
for S, expected in [
(pd.Series(), 0),
(pd.Series([]), 0),
(pd.Series(np.arange(n)), n),
(pd.Series([np.nan, 1, 2]), 3),
(pd.Series(['1', '2', '3']), 3),
]:
with self.subTest(S=S, expected=expected):
self.assertEqual(hpat_func(S), expected)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_attr2(self):
def test_impl(A):
return A.copy().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr3(self):
def test_impl(A):
return A.min()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_series_attr4(self):
def test_impl(A):
return A.cumsum().values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_argsort1(self):
def test_impl(A):
return A.argsort()
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.random.ranf(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_attr6(self):
def test_impl(A):
return A.take([2, 3]).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_attr7(self):
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_getattr_ndim(self):
'''Verifies getting Series attribute ndim is supported'''
def test_impl(S):
return S.ndim
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_getattr_T(self):
'''Verifies getting Series attribute T is supported'''
def test_impl(S):
return S.T
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_str1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_copy_int1(self):
def test_impl(A):
return A.copy()
hpat_func = hpat.jit(test_impl)
S = pd.Series([1, 2, 3])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_series_copy_deep(self):
def test_impl(A, deep):
return A.copy(deep=deep)
hpat_func = hpat.jit(test_impl)
for S in [
pd.Series([1, 2]),
pd.Series([1, 2], index=["a", "b"]),
]:
with self.subTest(S=S):
for deep in (True, False):
with self.subTest(deep=deep):
actual = hpat_func(S, deep)
expected = test_impl(S, deep)
pd.testing.assert_series_equal(actual, expected)
self.assertEqual(actual.values is S.values, expected.values is S.values)
self.assertEqual(actual.values is S.values, not deep)
# Shallow copy of index is not supported yet
if deep:
self.assertEqual(actual.index is S.index, expected.index is S.index)
self.assertEqual(actual.index is S.index, not deep)
def test_series_astype_int_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts integer series to series of strings
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str2(self):
'''Verifies Series.astype implementation with a string literal dtype argument
handles string series not changing it
'''
def test_impl(S):
return S.astype('str')
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['d', 'e', 'f'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_to_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[1, 2, 3])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: requires str(datetime64) support in Numba')
def test_series_astype_dt_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts datetime series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series([pd.Timestamp('20130101 09:00:00'),
pd.Timestamp('20130101 09:00:02'),
pd.Timestamp('20130101 09:00:03')
])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('AssertionError: Series are different'
'[left]: [0.000000, 1.000000, 2.000000, 3.000000, ...'
'[right]: [0.0, 1.0, 2.0, 3.0, ...'
'TODO: needs alignment to NumPy on Numba side')
def test_series_astype_float_to_str1(self):
'''Verifies Series.astype implementation with function 'str' as argument
converts float series to series of strings
'''
def test_impl(A):
return A.astype(str)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int32_to_int64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series with dtype=int32 to series with dtype=int64
'''
def test_impl(A):
return A.astype(np.int64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n), dtype=np.int32)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_int_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts integer series to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_float_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support string literal as dtype arg')
def test_series_astype_literal_dtype1(self):
'''Verifies Series.astype implementation with a string literal dtype argument
converts float series to series of integers
'''
def test_impl(A):
return A.astype('int32')
hpat_func = hpat.jit(test_impl)
n = 11.0
S = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to int')
def test_series_astype_str_to_int32(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of integers
'''
import numba
def test_impl(A):
return A.astype(np.int32)
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series([str(x) for x in np.arange(n) - n // 2])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
@unittest.skip('TODO: needs Numba astype impl support converting unicode_type to float')
def test_series_astype_str_to_float64(self):
'''Verifies Series.astype implementation with NumPy dtype argument
converts series of strings to series of float
'''
def test_impl(A):
return A.astype(np.float64)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['3.24', '1E+05', '-1', '-1.3E-01', 'nan', 'inf'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_str(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=['a', 'b', 'c'])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_astype_str_index_int(self):
'''Verifies Series.astype implementation with function 'str' as argument
handles string series not changing it
'''
def test_impl(S):
return S.astype(str)
hpat_func = hpat.jit(test_impl)
S = pd.Series(['aa', 'bb', 'cc'], index=[2, 3, 5])
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_np_call_on_series1(self):
def test_impl(A):
return np.min(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values(self):
def test_impl(A):
return A.values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_values1(self):
def test_impl(A):
return (A == 2).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A), test_impl(df.A))
def test_series_shape1(self):
def test_impl(A):
return A.shape
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_static_setitem_series1(self):
def test_impl(A):
A[0] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A), test_impl(df.A))
def test_setitem_series1(self):
def test_impl(A, i):
A[i] = 2
return (A == 2).sum()
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A.copy(), 0), test_impl(df.A.copy(), 0))
def test_setitem_series2(self):
def test_impl(A, i):
A[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, 0)
test_impl(A2, 0)
pd.testing.assert_series_equal(A1, A2)
@unittest.skip("enable after remove dead in hiframes is removed")
def test_setitem_series3(self):
def test_impl(A, i):
S = pd.Series(A)
S[i] = 100
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)
A1 = A.copy()
A2 = A
hpat_func(A1, 0)
test_impl(A2, 0)
np.testing.assert_array_equal(A1, A2)
def test_setitem_series_bool1(self):
def test_impl(A):
A[A > 3] = 100
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1)
test_impl(A2)
pd.testing.assert_series_equal(A1, A2)
def test_setitem_series_bool2(self):
def test_impl(A, B):
A[A > 3] = B[A > 3]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n), 'B': np.arange(n)**2})
A1 = df.A.copy()
A2 = df.A
hpat_func(A1, df.B)
test_impl(A2, df.B)
pd.testing.assert_series_equal(A1, A2)
def test_static_getitem_series1(self):
def test_impl(A):
return A[0]
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
self.assertEqual(hpat_func(A), test_impl(A))
def test_getitem_series1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_getitem_series_str1(self):
def test_impl(A, i):
return A[i]
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'bb', 'cc']})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_iat1(self):
def test_impl(A):
return A.iat[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iat2(self):
def test_impl(A):
A.iat[3] = 1
return A
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
def test_series_iloc1(self):
def test_impl(A):
return A.iloc[3]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
self.assertEqual(hpat_func(S), test_impl(S))
def test_series_iloc2(self):
def test_impl(A):
return A.iloc[3:8]
hpat_func = hpat.jit(test_impl)
n = 11
S = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(
hpat_func(S), test_impl(S).reset_index(drop=True))
def test_series_op1(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op2(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
df = pd.DataFrame({'A': np.arange(1, n, dtype=np.int64)})
else:
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op3(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
def test_series_op4(self):
arithmetic_binops = ('+', '-', '*', '/', '//', '%', '**')
for operator in arithmetic_binops:
test_impl = _make_func_use_binop2(operator)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n)})
pd.testing.assert_series_equal(hpat_func(df.A, 1), test_impl(df.A, 1), check_names=False)
def test_series_op5(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(1, n), 'B': np.ones(n - 1)})
pd.testing.assert_series_equal(hpat_func(df.A, df.B), test_impl(df.A, df.B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', 'Series values are different (20.0 %)'
'[left]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, 3486784401, 10000000000]'
'[right]: [1, 1024, 59049, 1048576, 9765625, 60466176, 282475249, 1073741824, -808182895, 1410065408]')
def test_series_op5_integer_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
operand_series = pd.Series(np.arange(1, n, dtype=np.int64))
else:
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op5_float_scalar(self):
arithmetic_methods = ('add', 'sub', 'mul', 'div', 'truediv', 'floordiv', 'mod', 'pow')
for method in arithmetic_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op6(self):
def test_impl(A):
return -A
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
pd.testing.assert_series_equal(hpat_func(A), test_impl(A))
def test_series_op7(self):
comparison_binops = ('<', '>', '<=', '>=', '!=', '==')
for operator in comparison_binops:
test_impl = _make_func_use_binop1(operator)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
def test_series_op8(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'ne', 'eq')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B), check_names=False)
@unittest.skipIf(platform.system() == 'Windows', "Attribute dtype are different: int64, int32")
def test_series_op8_integer_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = 10
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_op8_float_scalar(self):
comparison_methods = ('lt', 'gt', 'le', 'ge', 'eq', 'ne')
for method in comparison_methods:
test_impl = _make_func_use_method_arg1(method)
hpat_func = hpat.jit(test_impl)
n = 11
operand_series = pd.Series(np.arange(1, n))
operand_scalar = .5
pd.testing.assert_series_equal(
hpat_func(operand_series, operand_scalar),
test_impl(operand_series, operand_scalar),
check_names=False)
def test_series_inplace_binop_array(self):
def test_impl(A, B):
A += B
return A
hpat_func = hpat.jit(test_impl)
n = 11
A = np.arange(n)**2.0 # TODO: use 2 for test int casting
B = pd.Series(np.ones(n))
np.testing.assert_array_equal(hpat_func(A.copy(), B), test_impl(A, B))
def test_series_fusion1(self):
def test_impl(A, B):
return A + B + 1
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 1)
def test_series_fusion2(self):
# make sure getting data var avoids incorrect single def assumption
def test_impl(A, B):
S = B + 2
if A[0] == 0:
S = A + 1
return S + B
hpat_func = hpat.jit(test_impl)
n = 11
if platform.system() == 'Windows' and not IS_32BITS:
A = pd.Series(np.arange(n), dtype=np.int64)
B = pd.Series(np.arange(n)**2, dtype=np.int64)
else:
A = pd.Series(np.arange(n))
B = pd.Series(np.arange(n)**2)
pd.testing.assert_series_equal(hpat_func(A, B), test_impl(A, B))
self.assertEqual(count_parfor_REPs(), 3)
def test_series_len(self):
def test_impl(A, i):
return len(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertEqual(hpat_func(df.A, 0), test_impl(df.A, 0))
def test_series_box(self):
def test_impl():
A = pd.Series([1, 2, 3])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_box2(self):
def test_impl():
A = pd.Series(['1', '2', '3'])
return A
hpat_func = hpat.jit(test_impl)
pd.testing.assert_series_equal(hpat_func(), test_impl())
def test_series_list_str_unbox1(self):
def test_impl(A):
return A.iloc[0]
hpat_func = hpat.jit(test_impl)
S = pd.Series([['aa', 'b'], ['ccc'], []])
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
# call twice to test potential refcount errors
np.testing.assert_array_equal(hpat_func(S), test_impl(S))
def test_np_typ_call_replace(self):
# calltype replacement is tricky for np.typ() calls since variable
# type can't provide calltype
def test_impl(i):
return np.int32(i)
hpat_func = hpat.jit(test_impl)
self.assertEqual(hpat_func(1), test_impl(1))
def test_series_ufunc1(self):
def test_impl(A, i):
return np.isinf(A).values
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
np.testing.assert_array_equal(hpat_func(df.A, 1), test_impl(df.A, 1))
def test_list_convert(self):
def test_impl():
df = pd.DataFrame({'one': np.array([-1, np.nan, 2.5]),
'two': ['foo', 'bar', 'baz'],
'three': [True, False, True]})
return df.one.values, df.two.values, df.three.values
hpat_func = hpat.jit(test_impl)
one, two, three = hpat_func()
self.assertTrue(isinstance(one, np.ndarray))
self.assertTrue(isinstance(two, np.ndarray))
self.assertTrue(isinstance(three, np.ndarray))
@unittest.skip("needs empty_like typing fix in npydecl.py")
def test_series_empty_like(self):
def test_impl(A):
return np.empty_like(A)
hpat_func = hpat.jit(test_impl)
n = 11
df = pd.DataFrame({'A': np.arange(n)})
self.assertTrue(isinstance(hpat_func(df.A), np.ndarray))
def test_series_fillna1(self):
def test_impl(A):
return A.fillna(5.0)
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
# test inplace fillna for named numeric series (obtained from DataFrame)
def test_series_fillna_inplace1(self):
def test_impl(A):
A.fillna(5.0, inplace=True)
return A
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': [1.0, 2.0, np.nan, 1.0]})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str1(self):
def test_impl(A):
return A.fillna("dd")
hpat_func = hpat.jit(test_impl)
df = pd.DataFrame({'A': ['aa', 'b', None, 'ccc']})
pd.testing.assert_series_equal(hpat_func(df.A),
test_impl(df.A), check_names=False)
def test_series_fillna_str_inplace1(self):
def test_impl(A):
A.fillna("dd", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
# TODO: handle string array reflection
# hpat_func(S1)
# test_impl(S2)
# np.testing.assert_array_equal(S1, S2)
def test_series_fillna_str_inplace_empty1(self):
def test_impl(A):
A.fillna("", inplace=True)
return A
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_str(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=['a', 'b', 'c', 'd'])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skip('Unsupported functionality: failed to handle index')
def test_series_fillna_index_int(self):
def test_impl(S):
return S.fillna(5.0)
hpat_func = hpat.jit(test_impl)
S = pd.Series([1.0, 2.0, np.nan, 1.0], index=[2, 3, 4, 5])
pd.testing.assert_series_equal(hpat_func(S),
test_impl(S), check_names=False)
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis1(self):
'''Verifies Series.dropna() implementation handles 'index' as axis argument'''
def test_impl(S):
return S.dropna(axis='index')
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis2(self):
'''Verifies Series.dropna() implementation handles 0 as axis argument'''
def test_impl(S):
return S.dropna(axis=0)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'No support of axis argument in old-style Series.dropna() impl')
def test_series_dropna_axis3(self):
'''Verifies Series.dropna() implementation handles correct non-literal axis argument'''
def test_impl(S, axis):
return S.dropna(axis=axis)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
axis_values = [0, 'index']
for value in axis_values:
pd.testing.assert_series_equal(hpat_func(S1, value), test_impl(S2, value))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index1(self):
'''Verifies Series.dropna() implementation for float series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
for data in test_global_input_data_float64:
S1 = pd.Series(data)
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_float_index2(self):
'''Verifies Series.dropna() implementation for float series with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index1(self):
'''Verifies Series.dropna() implementation for series of strings with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index2(self):
'''Verifies Series.dropna() implementation for series of strings with string index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], ['a', 'b', 'c', 'd', 'e'])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skipIf(hpat.config.config_pipeline_hpat_default,
'BUG: old-style dropna impl returns series without index')
def test_series_dropna_str_index3(self):
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''], index=[1, 2, 5, 7, 10])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_float_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for float series with default index and inplace argument True'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_float_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original float series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([1.0, 2.0, np.nan, 1.0, np.inf])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
@unittest.skip('BUG: old-style dropna impl returns series without index, in new-style inplace is unsupported')
def test_series_dropna_str_inplace_no_index1(self):
'''Verifies Series.dropna() implementation for series of strings
with default index and inplace argument True
'''
def test_impl(S):
S.dropna(inplace=True)
return S
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
@unittest.skip('TODO: add reflection support and check method return value')
def test_series_dropna_str_inplace_no_index2(self):
'''Verifies Series.dropna(inplace=True) results are reflected back in the original string series'''
def test_impl(S):
return S.dropna(inplace=True)
hpat_func = hpat.jit(test_impl)
S1 = pd.Series(['aa', 'b', None, 'cccd', ''])
S2 = S1.copy()
self.assertIsNone(hpat_func(S1))
self.assertIsNone(test_impl(S2))
pd.testing.assert_series_equal(S1, S2)
def test_series_dropna_str_parallel1(self):
'''Verifies Series.dropna() distributed work for series of strings with default index'''
def test_impl(A):
B = A.dropna()
return (B == 'gg').sum()
hpat_func = hpat.jit(distributed=['A'])(test_impl)
S1 = pd.Series(['aa', 'b', None, 'ccc', 'dd', 'gg'])
start, end = get_start_end(len(S1))
# TODO: gatherv
self.assertEqual(hpat_func(S1[start:end]), test_impl(S1))
self.assertEqual(count_array_REPs(), 0)
self.assertEqual(count_parfor_REPs(), 0)
self.assertTrue(count_array_OneDs() > 0)
@unittest.skip('AssertionError: Series are different\n'
'Series length are different\n'
'[left]: 3, Int64Index([0, 1, 2], dtype=\'int64\')\n'
'[right]: 2, Int64Index([1, 2], dtype=\'int64\')')
def test_series_dropna_dt_no_index1(self):
'''Verifies Series.dropna() implementation for datetime series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = pd.Series([pd.NaT, pd.Timestamp('1970-12-01'), pd.Timestamp('2012-07-25')])
S2 = S1.copy()
pd.testing.assert_series_equal(hpat_func(S1), test_impl(S2))
def test_series_dropna_bool_no_index1(self):
'''Verifies Series.dropna() implementation for bool series with default index'''
def test_impl(S):
return S.dropna()
hpat_func = hpat.jit(test_impl)
S1 = | pd.Series([True, False, False, True]) | pandas.Series |
import tempfile
import unittest
from typing import Iterable, Union
import pandas as pd
from exabel_data_sdk.services.csv_reader import CsvReader
class TestCsvReader(unittest.TestCase):
def _read_csv(self, content: str, string_columns: Iterable[Union[str, int]]):
with tempfile.TemporaryDirectory() as tmp:
file = f"{tmp}/file.csv"
with open(file, "w", encoding="utf-8") as f:
for line in content:
f.write(",".join(map(str, line)) + "\n")
return CsvReader.read_csv(
file, separator=",", string_columns=string_columns, keep_default_na=False
)
def test_read_csv(self):
content = [("Column A", "Column B"), ("a1", "a2"), ("b1", "b2")]
result = self._read_csv(content, string_columns=["Column A", "Column B"])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Column B": ["a2", "b2"]})
pd.testing.assert_frame_equal(expected, result)
def test_read_csv_with_obvious_string_columns(self):
content = [("Column A", "Column B"), ("a1", "a2"), ("b1", "b2")]
result = self._read_csv(content, string_columns=[])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Column B": ["a2", "b2"]})
pd.testing.assert_frame_equal(expected, result)
def test_read_csv_with_integer_column(self):
content = [("Column A", "Integer"), ("a1", 2), ("b1", 3)]
result = self._read_csv(content, string_columns=["Column A"])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Integer": [2, 3]})
pd.testing.assert_frame_equal(expected, result)
def test_read_csv_with_integer_columns_interpreted_as_string(self):
content = [("Column A", "Integer"), ("a1", 2), ("b1", 3)]
result = self._read_csv(content, string_columns=["Column A", "Integer"])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Integer": ["2", "3"]})
pd.testing.assert_frame_equal(expected, result)
def test_read_csv_with_integer_column_interpreted_as_string_referred_to_with_index(self):
content = [("Column A", "Integer"), ("a1", 2), ("b1", 3)]
result = self._read_csv(content, string_columns=["Column A", 1])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Integer": ["2", "3"]})
pd.testing.assert_frame_equal(expected, result)
def test_read_csv_with_empty_value(self):
content = [("Column A", "Column B"), ("a1", "a2"), ("b1", "")]
result = self._read_csv(content, string_columns=["Column A", "Column B"])
expected = pd.DataFrame({"Column A": ["a1", "b1"], "Column B": ["a2", ""]})
| pd.testing.assert_frame_equal(expected, result) | pandas.testing.assert_frame_equal |
from hetdesrun.component.registration import register
from hetdesrun.datatypes import DataType
import pandas as pd
import numpy as np
# ***** DO NOT EDIT LINES BELOW *****
# These lines may be overwritten if input/output changes.
@register(inputs={"data": DataType.Series}, outputs={"cum_sum": DataType.Series})
def main(*, data):
"""entrypoint function for this component
Usage example:
>>> main(
... data = pd.Series(
... {
... "2019-08-01T15:20:00": 0.0,
... "2019-08-01T15:20:01": 5.0,
... "2019-08-01T15:20:05": 1.0,
... "2019-08-01T15:20:09": 9.0,
... }
... )
... )["cum_sum"]
2019-08-01 15:20:00 0.0
2019-08-01 15:20:01 5.0
2019-08-01 15:20:05 6.0
2019-08-01 15:20:09 15.0
dtype: float64
"""
# ***** DO NOT EDIT LINES ABOVE *****
# write your code here.
if pd.api.types.is_numeric_dtype(data.index.dtype):
data_sort = data.sort_index()
else:
try:
data.index = | pd.to_datetime(data.index) | pandas.to_datetime |
import inspect
import os
import datetime
from collections import OrderedDict
import numpy as np
from numpy import nan, array
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal, assert_frame_equal
from numpy.testing import assert_allclose
from pvlib import tmy
from pvlib import pvsystem
from pvlib import clearsky
from pvlib import irradiance
from pvlib import atmosphere
from pvlib import solarposition
from pvlib.location import Location
from conftest import needs_numpy_1_10, requires_scipy
latitude = 32.2
longitude = -111
tus = Location(latitude, longitude, 'US/Arizona', 700, 'Tucson')
times = pd.date_range(start=datetime.datetime(2014,1,1),
end=datetime.datetime(2014,1,2), freq='1Min')
ephem_data = solarposition.get_solarposition(times,
latitude=latitude,
longitude=longitude,
method='nrel_numpy')
am = atmosphere.relativeairmass(ephem_data.apparent_zenith)
irrad_data = clearsky.ineichen(ephem_data['apparent_zenith'], am,
linke_turbidity=3)
aoi = irradiance.aoi(0, 0, ephem_data['apparent_zenith'],
ephem_data['azimuth'])
meta = {'latitude': 37.8,
'longitude': -122.3,
'altitude': 10,
'Name': 'Oakland',
'State': 'CA',
'TZ': -8}
pvlib_abspath = os.path.dirname(os.path.abspath(inspect.getfile(tmy)))
tmy3_testfile = os.path.join(pvlib_abspath, 'data', '703165TY.csv')
tmy2_testfile = os.path.join(pvlib_abspath, 'data', '12839.tm2')
tmy3_data, tmy3_metadata = tmy.readtmy3(tmy3_testfile)
tmy2_data, tmy2_metadata = tmy.readtmy2(tmy2_testfile)
def test_systemdef_tmy3():
expected = {'tz': -9.0,
'albedo': 0.1,
'altitude': 7.0,
'latitude': 55.317,
'longitude': -160.517,
'name': '"SAND POINT"',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy3_metadata, 0, 0, .1, 5, 5)
def test_systemdef_tmy2():
expected = {'tz': -5,
'albedo': 0.1,
'altitude': 2.0,
'latitude': 25.8,
'longitude': -80.26666666666667,
'name': 'MIAMI',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 0}
assert expected == pvsystem.systemdef(tmy2_metadata, 0, 0, .1, 5, 5)
def test_systemdef_dict():
expected = {'tz': -8, ## Note that TZ is float, but Location sets tz as string
'albedo': 0.1,
'altitude': 10,
'latitude': 37.8,
'longitude': -122.3,
'name': 'Oakland',
'strings_per_inverter': 5,
'modules_per_string': 5,
'surface_azimuth': 0,
'surface_tilt': 5}
assert expected == pvsystem.systemdef(meta, 5, 0, .1, 5, 5)
@needs_numpy_1_10
def test_ashraeiam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.ashraeiam(thetas, .05)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_ashraeiam():
module_parameters = pd.Series({'b': 0.05})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.ashraeiam(thetas)
expected = np.array([ nan, 0.9193437 , 0.97928932, 0.99588039, 1. ,
0.99588039, 0.97928932, 0.9193437 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_physicaliam():
thetas = np.linspace(-90, 90, 9)
iam = pvsystem.physicaliam(thetas, 1.526, 0.002, 4)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
@needs_numpy_1_10
def test_PVSystem_physicaliam():
module_parameters = pd.Series({'K': 4, 'L': 0.002, 'n': 1.526})
system = pvsystem.PVSystem(module_parameters=module_parameters)
thetas = np.linspace(-90, 90, 9)
iam = system.physicaliam(thetas)
expected = np.array([ nan, 0.8893998 , 0.98797788, 0.99926198, nan,
0.99926198, 0.98797788, 0.8893998 , nan])
assert_allclose(iam, expected, equal_nan=True)
# if this completes successfully we'll be able to do more tests below.
@pytest.fixture(scope="session")
def sam_data():
data = {}
data['cecmod'] = pvsystem.retrieve_sam('cecmod')
data['sandiamod'] = pvsystem.retrieve_sam('sandiamod')
data['cecinverter'] = pvsystem.retrieve_sam('cecinverter')
return data
@pytest.fixture(scope="session")
def sapm_module_params(sam_data):
modules = sam_data['sandiamod']
module = 'Canadian_Solar_CS5P_220M___2009_'
module_parameters = modules[module]
return module_parameters
@pytest.fixture(scope="session")
def cec_module_params(sam_data):
modules = sam_data['cecmod']
module = 'Example_Module'
module_parameters = modules[module]
return module_parameters
def test_sapm(sapm_module_params):
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = pvsystem.sapm(effective_irradiance, temp_cell, sapm_module_params)
expected = pd.DataFrame(np.array(
[[ -5.0608322 , -4.65037767, nan, nan,
nan, -4.91119927, -4.15367716],
[ 2.545575 , 2.28773882, 56.86182059, 47.21121608,
108.00693168, 2.48357383, 1.71782772],
[ 5.65584763, 5.01709903, 54.1943277 , 42.51861718,
213.32011294, 5.52987899, 3.48660728],
[ nan, nan, nan, nan,
nan, nan, nan],
[ nan, nan, nan, nan,
nan, nan, nan]]),
columns=['i_sc', 'i_mp', 'v_oc', 'v_mp', 'p_mp', 'i_x', 'i_xx'],
index=times)
assert_frame_equal(out, expected, check_less_precise=4)
out = pvsystem.sapm(1, 25, sapm_module_params)
expected = OrderedDict()
expected['i_sc'] = 5.09115
expected['i_mp'] = 4.5462909092579995
expected['v_oc'] = 59.260800000000003
expected['v_mp'] = 48.315600000000003
expected['p_mp'] = 219.65677305534581
expected['i_x'] = 4.9759899999999995
expected['i_xx'] = 3.1880204359100004
for k, v in expected.items():
assert_allclose(out[k], v, atol=1e-4)
# just make sure it works with a dict input
pvsystem.sapm(effective_irradiance, temp_cell,
sapm_module_params.to_dict())
def test_PVSystem_sapm(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=5, freq='12H')
effective_irradiance = pd.Series([-1, 0.5, 1.1, np.nan, 1], index=times)
temp_cell = pd.Series([10, 25, 50, 25, np.nan], index=times)
out = system.sapm(effective_irradiance, temp_cell)
@pytest.mark.parametrize('airmass,expected', [
(1.5, 1.00028714375),
(np.array([[10, np.nan]]), np.array([[0.999535, 0]])),
(pd.Series([5]), pd.Series([1.0387675]))
])
def test_sapm_spectral_loss(sapm_module_params, airmass, expected):
out = pvsystem.sapm_spectral_loss(airmass, sapm_module_params)
if isinstance(airmass, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_PVSystem_sapm_spectral_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
airmass = pd.Series([1, 10], index=times)
out = system.sapm_spectral_loss(airmass)
@pytest.mark.parametrize('aoi,expected', [
(45, 0.9975036250000002),
(np.array([[-30, 30, 100, np.nan]]),
np.array([[np.nan, 1.007572, 0, np.nan]])),
(pd.Series([80]), pd.Series([0.597472]))
])
def test_sapm_aoi_loss(sapm_module_params, aoi, expected):
out = pvsystem.sapm_aoi_loss(aoi, sapm_module_params)
if isinstance(aoi, pd.Series):
assert_series_equal(out, expected, check_less_precise=4)
else:
assert_allclose(out, expected, atol=1e-4)
def test_sapm_aoi_loss_limits():
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 5
module_parameters = {'B0': 5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters, upper=1) == 1
module_parameters = {'B0': -5, 'B1': 0, 'B2': 0, 'B3': 0, 'B4': 0, 'B5': 0}
assert pvsystem.sapm_aoi_loss(1, module_parameters) == 0
def test_PVSystem_sapm_aoi_loss(sapm_module_params):
system = pvsystem.PVSystem(module_parameters=sapm_module_params)
times = pd.DatetimeIndex(start='2015-01-01', periods=2, freq='12H')
aoi = pd.Series([45, 10], index=times)
out = system.sapm_aoi_loss(aoi)
@pytest.mark.parametrize('test_input,expected', [
([1000, 100, 5, 45, 1000], 1.1400510967821877),
([np.array([np.nan, 1000, 1000]),
np.array([100, np.nan, 100]),
np.array([1.1, 1.1, 1.1]),
np.array([10, 10, 10]),
1000],
np.array([np.nan, np.nan, 1.081157])),
([pd.Series([1000]), | pd.Series([100]) | pandas.Series |
# -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Date: 2021/7/12 15:47
Desc: 东方财富-沪深板块-概念板块
http://quote.eastmoney.com/center/boardlist.html#concept_board
"""
import requests
import pandas as pd
def stock_board_concept_name_em() -> pd.DataFrame:
"""
东方财富-沪深板块-概念板块-名称
http://quote.eastmoney.com/center/boardlist.html#concept_board
:return: 概念板块-名称
:rtype: pandas.DataFrame
"""
url = "http://79.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": "m:90 t:3 f:!50",
"fields": "f2,f3,f4,f8,f12,f14,f15,f16,f17,f18,f20,f21,f24,f25,f22,f33,f11,f62,f128,f124,f107,f104,f105,f136",
"_": "1626075887768",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"排名",
"最新价",
"涨跌幅",
"涨跌额",
"换手率",
"_",
"板块代码",
"板块名称",
"_",
"_",
"_",
"_",
"总市值",
"_",
"_",
"_",
"_",
"_",
"_",
"上涨家数",
"下跌家数",
"_",
"_",
"领涨股票",
"_",
"_",
"领涨股票-涨跌幅",
]
temp_df = temp_df[
[
"排名",
"板块名称",
"板块代码",
"最新价",
"涨跌额",
"涨跌幅",
"总市值",
"换手率",
"上涨家数",
"下跌家数",
"领涨股票",
"领涨股票-涨跌幅",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["总市值"] = pd.to_numeric(temp_df["总市值"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
temp_df["上涨家数"] = pd.to_numeric(temp_df["上涨家数"])
temp_df["下跌家数"] = pd.to_numeric(temp_df["下跌家数"])
temp_df["领涨股票-涨跌幅"] = pd.to_numeric(temp_df["领涨股票-涨跌幅"])
return temp_df
def stock_board_concept_hist_em(symbol: str = "数字货币", adjust: str = "") -> pd.DataFrame:
"""
东方财富-沪深板块-概念板块-历史行情
http://q.10jqka.com.cn/gn/detail/code/301558/
:param symbol: 板块名称
:type symbol: str
:param adjust: choice of {'': 不复权, "qfq": 前复权, "hfq": 后复权}
:type adjust: str
:return: 历史行情
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
adjust_map = {"": "0", "qfq": "1", "hfq": "2"}
url = "http://91.push2his.eastmoney.com/api/qt/stock/kline/get"
params = {
"secid": f"90.{stock_board_code}",
"ut": "fa5fd1943c7b386f172d6893dbfba10b",
"fields1": "f1,f2,f3,f4,f5,f6",
"fields2": "f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61",
"klt": "101",
"fqt": adjust_map[adjust],
"beg": "0",
"end": "20500101",
"smplmt": "10000",
"lmt": "1000000",
"_": "1626079488673",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame([item.split(",") for item in data_json["data"]["klines"]])
temp_df.columns = [
"日期",
"开盘",
"收盘",
"最高",
"最低",
"成交量",
"成交额",
"振幅",
"涨跌幅",
"涨跌额",
"换手率",
]
temp_df = temp_df[
[
"日期",
"开盘",
"收盘",
"最高",
"最低",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
]
]
temp_df["开盘"] = pd.to_numeric(temp_df["开盘"])
temp_df["收盘"] = pd.to_numeric(temp_df["收盘"])
temp_df["最高"] = pd.to_numeric(temp_df["最高"])
temp_df["最低"] = pd.to_numeric(temp_df["最低"])
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"])
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"])
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"])
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"])
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"])
temp_df["换手率"] = pd.to_numeric(temp_df["换手率"])
return temp_df
def stock_board_concept_cons_em(symbol: str = "车联网") -> pd.DataFrame:
"""
东方财富-沪深板块-概念板块-板块成份
http://quote.eastmoney.com/center/boardlist.html#boards-BK06551
:param symbol: 板块名称
:type symbol: str
:return: 板块成份
:rtype: pandas.DataFrame
"""
stock_board_concept_em_map = stock_board_concept_name_em()
stock_board_code = stock_board_concept_em_map[
stock_board_concept_em_map["板块名称"] == symbol
]["板块代码"].values[0]
url = "http://29.push2.eastmoney.com/api/qt/clist/get"
params = {
"pn": "1",
"pz": "2000",
"po": "1",
"np": "1",
"ut": "bd1d9ddb04089700cf9c27f6f7426281",
"fltt": "2",
"invt": "2",
"fid": "f3",
"fs": f"b:{stock_board_code} f:!50",
"fields": "f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f12,f13,f14,f15,f16,f17,f18,f20,f21,f23,f24,f25,f22,f11,f62,f128,f136,f115,f152,f45",
"_": "1626081702127",
}
r = requests.get(url, params=params)
data_json = r.json()
temp_df = pd.DataFrame(data_json["data"]["diff"])
temp_df.reset_index(inplace=True)
temp_df["index"] = range(1, len(temp_df) + 1)
temp_df.columns = [
"序号",
"_",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"换手率",
"市盈率-动态",
"_",
"_",
"代码",
"_",
"名称",
"最高",
"最低",
"今开",
"昨收",
"_",
"_",
"_",
"市净率",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
"_",
]
temp_df = temp_df[
[
"序号",
"代码",
"名称",
"最新价",
"涨跌幅",
"涨跌额",
"成交量",
"成交额",
"振幅",
"最高",
"最低",
"今开",
"昨收",
"换手率",
"市盈率-动态",
"市净率",
]
]
temp_df["最新价"] = pd.to_numeric(temp_df["最新价"], errors="coerce")
temp_df["涨跌幅"] = pd.to_numeric(temp_df["涨跌幅"], errors="coerce")
temp_df["涨跌额"] = pd.to_numeric(temp_df["涨跌额"], errors="coerce")
temp_df["成交量"] = pd.to_numeric(temp_df["成交量"], errors="coerce")
temp_df["成交额"] = pd.to_numeric(temp_df["成交额"], errors="coerce")
temp_df["振幅"] = pd.to_numeric(temp_df["振幅"], errors="coerce")
temp_df["最高"] = pd.to_numeric(temp_df["最高"], errors="coerce")
temp_df["最低"] = pd.t | o_numeric(temp_df["最低"], errors="coerce") | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 5 23:53:33 2018
@author: Fahim
"""
from tkinter import *
from tkinter import filedialog
import glob
import os
import cv2
from sklearn.neighbors import KNeighborsClassifier
import pandas as pd
from PIL import ImageTk,Image
import numpy as np
def extract_surf_feature(image):
surf = cv2.xfeatures2d.SURF_create()
keypoint = surf.detect(image)
keypoint, descriptors = surf.compute(image,keypoint)
descriptors = descriptors.flatten()
descrip = descriptors[0:9856]
return descrip
labels = []
imageLabel = open('./label.txt', 'r')
#print(imageLabel)
for label in imageLabel:
label = int(label.strip('\n'))
labels.append(label)
model = KNeighborsClassifier(n_neighbors=3)
master = Tk()
master.title('Classifier')
master.geometry('800x520')
msg = Message(master, text="")
label = Label(master, text="")
label1 = Label(master, text="")
var = '/*.jpg'
imagePaths = ''
def OpenFolder():
global imagePaths
global msg
global label
global label1
try:
msg.pack_forget()
label.pack_forget()
label1.pack_forget()
except Exception:
pass
dirname = filedialog.askdirectory(parent=master, initialdir=os.getcwd(), title="Select Folder")
imagePaths = dirname + var
label = Label(master, text="Folder Opening Done", bg="green", fg="black")
label.pack()
msg = Message(master, text=imagePaths)
msg.pack()
features = []
def Extract_Feature_and_store_in_Database():
global features
global imagePaths
global msg
global label
global label1
try:
label.pack_forget()
msg.pack_forget()
label1.pack_forget()
except Exception:
pass
imagefile = glob.glob(imagePaths)
for imagePath in imagefile:
#print(imagePath)
image = cv2.imread(imagePath)
surf = extract_surf_feature(image)
features.append(surf)
print(type(features))
print(features)
print(type(features))
df = | pd.DataFrame(data=features) | pandas.DataFrame |
import os
import cv2
import requests
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from utils import captha_segmentation
from sklearn.cluster import MiniBatchKMeans
def parse_args():
parser = ArgumentParser()
parser.add_argument('--data-size', '-d', help='size of dataset', default=40000, type=int)
parser.add_argument('--k-means', '-k', help='number of clusters', default=200, type=int)
parser.add_argument('--batch-size', '-b', help='size of minibatch', default=1000, type=int)
parser.add_argument('--load-captha', '-l', help='captha images path', default='captha_imgs.npy')
return parser.parse_args()
if __name__ == '__main__':
params = parse_args()
# download
if not os.path.isfile(params.load_captha):
captha_imgs = []
while True:
img_url = f'https://cos{np.random.randint(low=1, high=5)}s.ntnu.edu.tw/AasEnrollStudent/RandImage'
response = requests.get(img_url)
with open('tmp.jpg', 'wb') as f:
f.write(response.content)
img = cv2.imread('tmp.jpg', cv2.IMREAD_COLOR)
os.remove('tmp.jpg')
crop_imgs = captha_segmentation(img)
if crop_imgs is None:
continue
captha_imgs.append(crop_imgs)
print(len(captha_imgs)*4)
if len(captha_imgs) >= params.data_size//4:
break
captha_imgs = np.array(captha_imgs, dtype=np.uint8).reshape(-1, 20, 20)
np.save(params.load_captha, captha_imgs)
else:
captha_imgs = np.load(params.load_captha)
# K-Means
data = np.array(captha_imgs, dtype=np.float32).reshape(-1, 20*20) / 255.0
model = MiniBatchKMeans(n_clusters=params.k_means, batch_size=params.batch_size)
model.fit(data)
print(f'Number of clusters: {model.n_clusters}')
print(f'Inertia: {model.inertia_}')
# labeling
label_to_ch = np.zeros((params.k_means, ), dtype=np.str)
for k in range(params.k_means):
fig, ax = plt.subplots(1, 5)
ax_idx = 0
for idx, label in enumerate(model.labels_):
if label == k:
ax[ax_idx].imshow(captha_imgs[idx], cmap='gray')
ax_idx += 1
if ax_idx >= 5:
break
plt.show(block=False)
print(f'Input the label of the {k+1}-cluster:', end=' ')
while True:
ch = input()
if len(ch) != 1:
print('Incorrect!')
else:
break
label_to_ch[k] = ch
plt.close('all')
# save csv
captha_idx = 0
cols = ['label']
for i in range(20):
for j in range(20):
cols.append(f'{i+1}x{j+1}')
if not os.path.isfile('captha.csv'):
captha_df = pd.DataFrame(columns=cols)
captha_df.to_csv('captha.csv', header=True, index=False)
else:
captha_df = pd.read_csv('captha.csv')
captha_idx = captha_df.shape[0]
if not os.path.exists('captha'):
os.mkdir('captha')
new_captha = np.insert(captha_imgs.reshape(-1, 20*20).astype(np.str), 0, label_to_ch[model.labels_], axis=1)
captha_df = | pd.DataFrame(new_captha, columns=cols) | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 12:54:10 2021
this script is to join the grant level vocabulary indices and mathy keyword extraction indices
@author: sethschimmel
"""
import json
import pandas as pd
import numpy as np
import ast
cd /Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir
# this index has all vocabularies for all terms, unigrams on
vocabIndex = json.load(open("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/sbir_2008to2018_FULLINDEX_clean.json","r"))
# this index has all vocabularies but only for bigrams+ tokens, to save space and get rid of potentially junky single word keywords
vocabIndex = json.load(open("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/sbir_2008to2018_FULLINDEX_clean_BIGRAMSplus.json","r"))
mathyIndex = pd.read_csv("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/mathyTFIDF_TEXTRANK_sbir2008to2018.csv")
grants = pd.read_csv("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/sbir2008to2018_geoRefed_FINAL_03.csv")
grants = grants.sort_values(by=['index'])
g = grants.head()
## the word2vec preprocessing pipeline generates its own id column; using this to validate to ensure the join is correct
w2vKey = pd.read_csv("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/w2v_pipelineKey.csv")
## here I grab the processed abstract outputs so that we can join a wordcount column to each grant and do keyword stats if necessary later on
w2vParsed = pd.read_csv("/Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir/w2v_sbir_2008to2018_textFields.csv")
w2vParsed.columns
w2vDict = {}
for index,row in w2vParsed.iterrows():
w2vDict[str(row['_ref'])] = row['text']
# get the vocabs and create a new col for each
vocabs = list(vocabIndex['0'].keys())
vocabs
for v in vocabs:
grants[v] = ""
# ensure the ref id is the old index, use _ref to lookup vocab values
grants["_ref"] = grants['index']
g = grants.head()
for index,row in grants.iterrows():
for v in vocabs:
grants.at[index,v] = vocabIndex[str(row['_ref'])].get(v,[])
g = grants.head()
b = mathyIndex.head()
# reformat to dict
mathyDict = {}
for index,row in mathyIndex.iterrows():
mathyDict[str(row['_ref'])] = {"textrank":row["textrank"],\
"tfidf_keywords":row["tfidf_keywords"]}
# get the mathy keywords
grants["textrank"] = ""
grants["tfidf_keywords"]=""
for index,row in grants.iterrows():
grants.at[index,"textrank"] = mathyDict[str(row['_ref'])].get('textrank',[])
grants.at[index,"tfidf_keywords"] = mathyDict[str(row['_ref'])].get('tfidf_keywords',[])
g = grants.head()
# add the total word count for each grant from w2vParsed, also supply raw counts to textrank and tfidf keyword columns
grants["num_words"] = 0
for index,row in grants.iterrows():
grants.loc[index,"num_words"] = len(w2vDict[str(row['_ref'])].split(" "))
grants['textrank'] = [ast.literal_eval(x) for x in grants.textrank]
grants['tfidf_keywords'] = [ast.literal_eval(x) for x in grants.tfidf_keywords]
#cd /Users/sethschimmel/Documents/GitHub/CUNY-Capstone/data/sbir
#grants.to_csv("sbir2008to2018_geoRefed_FINAL_04_wKeywords.csv")
#grants.to_csv("sbir2008to2018_geoRefed_FINAL_04_wKeywordsBigrams.csv")
## to do so, we need to confirm the maximum ngram size in the vocabular
all_words = []
for index,row in grants.iterrows():
for v in vocabs:
#print(type(row[v]))
for entry in row[v]:
if type(entry) is dict:
#print(type(entry))
all_words.append(entry['t'])
tfKeys = row['tfidf_keywords']
#print(type(tfKeys))
if type(tfKeys) is dict:
for k,v in tfKeys.items():
all_words.append(k)
for t in row['textrank']:
all_words.append(t)
gramSize = []
for word in all_words:
w = word.replace("_abbr","")
if "_" in w:
gramSize.append(w.count("_")+ w.count(" ")+1)
if " " in w and "_" not in w:
gramSize.append(w.count(" ")+1)
# find the longest term...
max(gramSize)
gramLens = list(set(gramSize))
gramLens
# the average term...
import statistics
statistics.mean(gramSize)
statistics.median(gramSize)
uniqueGrams = list(set(all_words))
#gramSize.index(21)
vocabGrams = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import pandas as pd
import os
import datetime as dt
print(os.getcwd())
os.chdir(os.getcwd()+'/Fundamental')
tmp = pd.read_excel('DR3M.xlsx')
df_DR3M = pd.DataFrame(columns=['Date', 'DR3M'])
df_DR3M.loc[:,'Date'] = tmp.loc[:, '日期']
df_DR3M.loc[:, 'DR3M'] = tmp.loc[:, '收盘价']
df_DR3M.set_index('Date', inplace=True)
df_RB00 = pd.DataFrame(columns=['Date', 'RB00_p', 'RB00_v', 'RB00_po'])
df_RB01 = pd.DataFrame(columns=['Date', 'RB01_p', 'RB01_v', 'RB01_po'])
df_RB05 = pd.DataFrame(columns=['Date', 'RB05_p', 'RB05_v', 'RB05_po'])
df_RB10 = pd.DataFrame(columns=['Date', 'RB10_p', 'RB10_v', 'RB10_po'])
tmp00 = pd.read_excel('./Data/RB00.xlsx')
tmp01 = pd.read_excel('./Data/RB01.xlsx')
tmp05 = pd.read_excel('./Data/RB05.xlsx')
tmp10 = pd.read_excel('./Data/RB10.xlsx')
tmp00.dropna(inplace=True)
tmp01.dropna(inplace=True)
tmp05.dropna(inplace=True)
tmp10.dropna(inplace=True)
df_RB00.loc[:, 'Date'] = tmp00.loc[:, '日期']
df_RB00.loc[:, 'RB00_p'] = tmp00.loc[:, '收盘价(元)']
df_RB00.loc[:, 'RB00_v'] = tmp00.loc[:, '成交量']
df_RB00.loc[:, 'RB00_po'] = tmp00.loc[:, '持仓量']
df_RB00.set_index('Date', inplace=True)
df_RB01.loc[:, 'Date'] = tmp01.loc[:, '日期']
df_RB01.loc[:, 'RB01_p'] = tmp01.loc[:, '收盘价(元)']
df_RB01.loc[:, 'RB01_v'] = tmp01.loc[:, '成交量']
df_RB01.loc[:, 'RB01_po'] = tmp01.loc[:, '持仓量']
df_RB01.set_index('Date', inplace=True)
df_RB05.loc[:, 'Date'] = tmp05.loc[:, '日期']
df_RB05.loc[:, 'RB05_p'] = tmp05.loc[:, '收盘价(元)']
df_RB05.loc[:, 'RB05_v'] = tmp05.loc[:, '成交量']
df_RB05.loc[:, 'RB05_po'] = tmp05.loc[:, '持仓量']
df_RB05.set_index('Date', inplace=True)
df_RB10.loc[:, 'Date'] = tmp10.loc[:, '日期']
df_RB10.loc[:, 'RB10_p'] = tmp10.loc[:, '收盘价(元)']
df_RB10.loc[:, 'RB10_v'] = tmp10.loc[:, '成交量']
df_RB10.loc[:, 'RB10_po'] = tmp10.loc[:, '持仓量']
df_RB10.set_index('Date', inplace=True)
df_DR3M.to_csv('DR3M.csv')
df_RB00.to_csv('RB00.csv')
df_RB01.to_csv('RB01.csv')
df_RB05.to_csv('RB05.csv')
df_RB10.to_csv('RB10.csv')
df_RB = pd.merge(df_RB01, df_RB05, left_index=True, right_index=True, how='outer')
df_RB = | pd.merge(df_RB, df_RB10,left_index=True, right_index=True, how='outer') | pandas.merge |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
hbvpy.process
=============
**A package to process HBV-light simulation results.**
This package is intended to provide functions and methods to parse and process
the output of the different types HBV-light simulations (i.e. SingleRun,
BatchRun, GAPRun).
.. author:: <NAME>
"""
import os
import pandas as pd
from . import HBVdata
__all__ = ['BatchRun', 'GAPRun', 'MonteCarloRun', 'SingleRun']
class SingleRun(object):
"""
Process results from HBV-light single run simulations.
Attributes
----------
bsn_dir : str
Basin directory.
"""
def __init__(self, bsn_dir):
self.bsn_dir = bsn_dir
def load_results(self, results_folder='Results', sc=None):
"""
Load the results from a single HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the results folder, default is 'Results'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the model results.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the results filename.
if sc is not None:
filepath = path + 'Results_SubCatchment_' + str(sc) + '.txt'
else:
filepath = path + 'Results.txt'
# Check if the results file exists
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the results file.
return pd.read_csv(
filepath, sep='\t', index_col=0,
parse_dates=True, infer_datetime_format=True)
def load_dist_results(self, results_folder='Results', sc=None):
"""
Load the distributed results from a single HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the results folder, default is 'Results'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the distributed model results.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the results filename.
if sc is not None:
filepath = path + 'Dis_SubCatchment_' + str(sc) + '.txt'
else:
filepath = path + 'Dis.txt'
# Check if the results file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the results file.
return pd.read_csv(
filepath, sep='\t', index_col=0,
parse_dates=True, infer_datetime_format=True)
def load_summary(self, results_folder='Results'):
"""
Load the summary of the results from a single HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the results folder, default is 'Results'.
Returns
-------
Pandas.DataFrame
Data structure containing the distributed model results.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the summary filename.
filepath = path + 'Summary.txt'
# Check if the summary file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the summary file.
return pd.read_csv(filepath, sep='\t', index_col=0)
def load_peaks(self, results_folder='Results'):
"""
Load the list of peak flows from a single HBV-light run.
Following the documentation of HBV-light, a peak is defined as a data
point with a Qobs value that is at least three times the average Qobs.
Only a single peak is allowed in a window of 15 days.
Parameters
----------
results_folder : str, optional
Name of the results folder, default is 'Results'.
Returns
-------
Pandas.DataFrame
Data structure containing the peak flow dates and values.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the peaks filename.
filepath = path + 'Peaks.txt'
# Check if the peaks file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the peaks file.
return pd.read_csv(
filepath, sep='\t', index_col=0, parse_dates=True,
infer_datetime_format=True, squeeze=True)
def load_q_peaks(self, results_folder='Results'):
"""
Load the list of observed runoff and peak flows from a single
HBV-light run.
Following the documentation of HBV-light, a peak is defined as a data
point with a Qobs value that is at least three times the average Qobs.
Only a single peak is allowed in a window of 15 days.
Parameters
----------
results_folder : str, optional
Name of the results folder, default is 'Results'.
Returns
-------
Pandas.DataFrame
Data structure containing the observed discharge values as well
as the peak flow values.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the runoff peaks filename.
filepath = path + 'Q_Peaks.txt'
# Check if the runoff peaks file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the runoff peaks file.
return pd.read_csv(
filepath, sep='\t', index_col=0, parse_dates=True,
infer_datetime_format=True, squeeze=True)
class GAPRun(object):
"""
Process results from HBV-light GAP run simulations.
Attributes
----------
bsn_dir : str
Basin directory.
"""
def __init__(self, bsn_dir):
self.bsn_dir = bsn_dir
def load_results(self, results_folder='Results'):
"""
Load the results from an HBV-light GAP calibration run.
Parameters
----------
results_folder : str, optional
Name of the GAP results folder, default is 'Results'.
Returns
-------
Pandas.DataFrame
Data structure containing the GAP results.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the results filename.
filepath = path + 'GA_best1.txt'
# Check if the results file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the results file.
return pd.read_csv(filepath, sep='\t')
class BatchRun(object):
"""
Process results from HBV-light batch run simulations.
Attributes
----------
bsn_dir : str
Basin directory.
"""
def __init__(self, bsn_dir):
self.bsn_dir = bsn_dir
def load_results(self, results_folder='Results', sc=None):
"""
Load the results from a batch HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder,
default is 'Results'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the Batch Run results.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the results filename.
if sc is not None:
filepath = path + 'BatchRun_SubCatchment_' + str(sc) + '.txt'
else:
filepath = path + 'BatchRun.txt'
# Check if the results file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the results file.
return pd.read_csv(filepath, sep='\t')
def load_runoff(self, results_folder='Results', data='columns', sc=None):
"""
Load the time series of observed and simulated runoff from
a batch HBV-light Run.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder, default is 'Results'.
data : {'rows', 'columns'}, optional
Organisation of the data in the results file.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the Batch Run runoff time series.
Raises
------
ValueError
If the corresponding file does not exist.
ValueError
If the data structure is not recognised.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Load the data according to the predefined format of the results file.
if data == 'columns':
# Set the runoff results filename.
if sc is not None:
filepath = path + 'BatchQsim_(InColumns)_' + str(sc) + '.txt'
else:
filepath = path + 'BatchQsim_(InColumns).txt'
# Check if the runoff results file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the runoff results file.
return pd.read_csv(
filepath, sep='\t', parse_dates=True,
index_col=0, infer_datetime_format=True)
elif data == 'rows':
# Set the runoff results filename.
if sc is not None:
filepath = path + 'BatchQsim_' + str(sc) + '.txt'
else:
filepath = path + 'BatchQsim.txt'
# Check if the runoff results file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Parse the index.
dates = pd.read_csv(
filepath, sep='\t', header=None, nrows=1,
index_col=False, squeeze=True).transpose()
# Parse the data.
data = pd.read_csv(
filepath, sep='\t', header=None, index_col=False,
skiprows=1).transpose()
# Rename the index and convert it to datetime format.
dates.columns = ['Date']
dates = pd.to_datetime(dates['Date'], format='%Y%m%d')
# Merge the index and data into a Pandas.DataFrame structure.
df = pd.concat([data, dates], axis=1)
# Set the index and return the DataFrame
return df.set_index('Date')
else:
raise ValueError('Data organisation not recognised.')
def load_runoff_stats(self, results_folder='Results', sc=None):
"""
Load the time series of observed and simulated runoff statistics
from a batch HBV-light Run.
The statistics contain: Qobs, Qmedian, Qmean, Qp10, Qp90.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder, default is 'Results'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the Batch Run runoff statistics
time series.
Raises
------
ValueError
If the specified file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the runoff statistics filename.
if sc is not None:
filepath = path + 'BatchQsimSummary_' + str(sc) + '.txt'
else:
filepath = path + 'BatchQsimSummary.txt'
# Check if the runoff statistics file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the runoff statistics file.
return pd.read_csv(
filepath, sep='\t', parse_dates=True,
index_col=0, infer_datetime_format=True)
def load_runoff_component(
self, results_folder='Results', component='Snow', sc=None):
"""
Load the time series of a given runoff component from a batch
HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder, default is 'Results'.
component : {'Rain', 'Snow', 'Glacier', 'Q0', 'Q1', 'Q2'}
Name of the runoff component to load, default 'Snow'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the Batch Run runoff component
time series.
Raises
------
ValueError
If the provided runoff component is not recognised.
ValueError
If the specified file does not exist.
"""
# Check if the provided component is valid.
if component not in ['Rain', 'Snow', 'Glacier', 'Q0', 'Q1', 'Q2']:
raise ValueError('Provided runoff compoent not recognised.')
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the runoff component filename.
if sc is not None:
filepath = path + 'BatchQsim_' + component + '_' + str(sc) + '.txt'
else:
filepath = path + 'BatchQsim_' + component + '.txt.'
# Check if the runoff component file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Parse the index.
dates = pd.read_csv(
filepath, sep='\t', header=None, nrows=1,
index_col=False, squeeze=True).transpose()
# Parse the data.
data = pd.read_csv(
filepath, sep='\t', header=None, index_col=False,
skiprows=1).transpose()
# Rename the index and convert it to datetime format.
dates.columns = ['Date']
dates = pd.to_datetime(dates['Date'], format='%Y%m%d')
# Merge the index and data into a single Pandas.DataFrame structure.
df = pd.concat([data, dates], axis=1)
# Set the index.
return df.set_index('Date')
def load_monthly_runoff(self, results_folder='Results', sc=None):
"""
Load the monthly average simulated runoff from each parameter set
used for a batch HBV-light run.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder, default is 'Results'.
sc : int, optional
Sub-catchment number, in case there are more than one
sub-catchments, default is None.
Returns
-------
Pandas.DataFrame
Data structure containing the monthly average runoff values
from each parameter set.
Raises
------
ValueError
If the specifed file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the monthly runoff filename.
if sc is not None:
filepath = path + 'Qseasonal_' + str(sc) + '.txt'
else:
filepath = path + 'Qseasonal.txt.'
# Check if the monthly runoff file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Load the monthly runoff file.
return pd.read_csv(filepath, sep='\t')
def load_swe(self, results_folder='Results'):
"""
Load the time series of simulated snow water equivalent for each
elevation band and parameter set used for a batch HBV-light run.
NOTE: This method ONLY works if no additional precipitaiton,
temperature, or evaporation series are provided.
Parameters
----------
results_folder : str, optional
Name of the Batch Run results folder, default is 'Results'.
Returns
-------
swe : Pandas.DataFrame
Data structure containing the simulated snow water equivalent
data for each elevation band and parameter set.
Raises
------
ValueError
If the specifed file does not exist.
"""
# Set the results folder path.
path = self.bsn_dir + '\\' + results_folder + '\\'
# Set the snow water equivalent filename.
filepath = path + 'Simulated_SWE.txt.'
# Check if the snow water equivalent file exists.
if not os.path.exists(filepath):
raise ValueError('The file does not exist.')
# Parse the raw data
raw_data = pd.read_csv(filepath, sep='\t')
# Get the number of parameter sets
param_sets = raw_data['Parameterset'].unique()
# Initialise a dict to write the SWE data
swe = {ps: None for ps in param_sets}
# Parse the data
for ps in param_sets:
data_tmp = raw_data[raw_data['Parameterset'] == ps]
data_tmp.set_index('SWE serie', drop=True, inplace=True)
data = data_tmp.drop(
['Parameterset', 'Prec. serie',
'Temp. serie', 'Evap. serie'], axis=1).transpose()
data.index = pd.to_datetime(data.index, format='%Y%m%d')
swe[ps] = data
return swe
def calculate_swe_stats(self, results_folder, clarea=None):
"""
Calculate the catchment-wide statistics of snow water equivalent
taking into account all elevation bands and parameter sets used in
batch HBV-light run.
This method provides median and mean catchment snow water equivalent,
in addition to the 10th and 90th percentiles.
Parameters
----------
results_folder : str
Name of the Batch Run results folder.
clarea : str, optional
Custom name for the 'Clarea.xml' file. If no value is provided
the default file name is used, default is None.
Returns
-------
sim_swe : Pandas.Series
Data structure containing the simulated catchment avverage
snow water equivalent for each time step.
"""
# Instatiate the HBVdata class
hbv_data = HBVdata(self.bsn_dir)
# Use the default elevation distribution file name if no name is
# specified.
if clarea is None:
clarea = 'Clarea.xml'
# Check if the elevation distribution file exists.
if not os.path.exists(hbv_data.data_dir + clarea):
raise ValueError('The Clarea.xml file does not exist.')
# Load the area fraction of each elevation band in the catchment.
elev_dist = hbv_data.load_elev_dist(filename=clarea)
# Load the distributed simulated snow water equivalent.
dist_swe = self.load_swe(results_folder)
# Calculate the catchment average snow water equivalent for
# each of the parameter sets.
avg_swe = pd.DataFrame()
for p_set in dist_swe.keys():
for i, col in enumerate(dist_swe[p_set].columns):
area = elev_dist.iloc[i]
dist_swe[p_set][col] = dist_swe[p_set][col] * area
avg_swe[p_set] = dist_swe[p_set].sum(axis=1)
# Calculate the snow water equivalent statistics:
# median, mean, 10p, and 90p
swe_stats = | pd.DataFrame() | pandas.DataFrame |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Join outlier (RNA) and variant (DNA) info.
:Author: <NAME> <<EMAIL>>
:Date: 2018-05-20
:Copyright: 2018, <NAME>
:License: CC BY-SA
"""
import os
import sys
import re
import pandas as pd
from .utils import filter_variant_class
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class JoinedDNAandRNAError(Error):
"""Exception raised for errors in this module.
Attributes:
message -- explanation of the error
"""
def __init__(self, message):
"""Assign error explanation to object."""
self.message = message
class JoinedVarExpr(object):
"""Methods for creating the final DF."""
def __init__(self, var_loc, expr_outs_loc, dna_rna_df_loc,
variant_class, exon_class, refgene, ensgene, max_tss_dist,
annotations, contigs, logger):
"""Load and join variants and outliers.
Args:
var_loc (:obj:`str`): file location for the final set of variants
that are being joined with expression (contains a
string rep for chrom)
expr_outs_loc (:obj:`str`): file location of the outliers
dna_rna_df_loc (:obj:`str`): fully joined dataframe output file
variant_class (:obj:`str`): annovar variant class to filter
on (default None)
exon_class (:obj:`str`): annovar EXON class to filter
on (default None)
max_tss_dist (:obj:`int`): maximum distance from TSS
contigs (:obj:`list`): chromosomes that are in the VCF
logger (:obj:`logging object`): Current logger
Attributes:
df (:obj:`DataFrame`): variants and outliers in a single dataframe
"""
logger.info("Loading outliers...")
self.load_outliers(expr_outs_loc)
if annotations:
anno_list = annotations
anno_list = [re.sub("Conserved_TF_sites/", "Conserved_TF_", i)
for i in anno_list]
anno_list = [re.sub("TfbsClustered_split/", "TfbsClust_", i)
for i in anno_list]
anno_list = [re.sub("factorbookMotif/", "factorbookMotif_", i)
for i in anno_list]
rep_w_blank = ".*/|.merged.sorted|.sorted|.bed$|.bed.gz$|.txt$"
self.anno_list = [re.sub(rep_w_blank, "", i)
for i in anno_list]
else:
self.anno_list = None
if os.path.exists(dna_rna_df_loc):
logger.info("Already joined data, now loading from " +
dna_rna_df_loc)
dtype_specs = {
'dist_refgene': 'str', 'exon_func_refgene': 'str',
'dist_ensgene': 'str', 'exon_func_ensgene': 'str'}
self.df = pd.read_table(dna_rna_df_loc, dtype=dtype_specs)
# self.df = pd.read_table(dna_rna_df_loc)
self.df = filter_variant_class(self.df, variant_class, exon_class,
refgene, ensgene)
if variant_class:
logger.info("Considering vars in these ENSEMBL categories " +
", ".join(set(self.df.func_ensgene)))
if exon_class:
logger.info("Only vars in these ENSEMBL EXONIC categories " +
", ".join(set(self.df.exon_func_ensgene)))
else:
logger.info("Loading variants...")
self.load_vars(var_loc, contigs, variant_class, exon_class,
refgene, ensgene, max_tss_dist, logger)
logger.info("joining outliers with variants...")
# confirm there are overlapping IDs
# logger.debug(self.var_df.head())
# logger.debug(self.var_df.index[:10])
# logger.debug(self.var_df.shape)
# logger.debug(self.expr_outlier_df.head())
# logger.debug(self.expr_outlier_df.index[:10])
# logger.debug(self.expr_outlier_df.shape)
dna_ids = self.var_df.index.levels[1]
pheno_ids = self.expr_outlier_df.index.levels[1]
overlapped_ids = dna_ids.isin(pheno_ids)
if overlapped_ids.sum() == 0:
raise JoinedDNAandRNAError("No overlapping IDs between" +
"RNAseq and VCF")
self.df = self.var_df.join(self.expr_outlier_df, how='inner')
self.df.reset_index(inplace=True)
self.write_to_file(dna_rna_df_loc)
def load_vars(self, var_loc, contigs, variant_class, exon_class,
refgene, ensgene, max_tss_dist, logger):
"""Load and combine variant data.
Attributes:
var_df (:obj:`DataFrame`): all variants in a single dataframe
"""
self.var_df = pd.DataFrame()
list_ = []
# cols_to_keep does not include 'gene' and 'blinded_id'
# because these are set as the indices
cols_to_keep = ['var_id', 'tss_dist',
'gene_refgene', 'func_refgene', 'dist_refgene',
'exon_func_refgene',
'gene_ensgene', 'func_ensgene', 'dist_ensgene',
'exon_func_ensgene',
'popmax_af', 'VCF_af', 'var_id_count', 'var_id_freq']
if self.anno_list:
cols_to_keep.extend(self.anno_list)
dtype_specs = {
'dist_refgene': 'str', 'exon_func_refgene': 'str',
'dist_ensgene': 'str', 'exon_func_ensgene': 'str'}
for chrom in contigs:
logger.info("Current chrom: " + chrom) # "chr" +
var_df_per_chrom = pd.read_table(
var_loc % (chrom), dtype=dtype_specs)
if var_df_per_chrom.empty:
logger.info("Empty dataframe for chromosome " + chrom)
raise JoinedDNAandRNAError("Empty dataframe for " + chrom)
var_df_per_chrom.set_index(['gene', 'blinded_id'], inplace=True)
var_df_per_chrom = var_df_per_chrom.loc[
abs(var_df_per_chrom.tss_dist) <= max_tss_dist]
var_df_per_chrom = filter_variant_class(
var_df_per_chrom, variant_class, exon_class, refgene, ensgene)
"""# [18:118] [118:218] [218:-3]
# last one is regions_enh_E013, total length is 371
if len(cols_to_keep) == 14:
cols_to_keep.extend(list(var_df_per_chrom)[18:118])
logger.info(cols_to_keep)
logger.info("Keeping {} columns".format(len(cols_to_keep)))
# modification for summing accross annotations
if 'any_gata4' in cols_to_keep:
var_df_per_chrom = self.summarise_anno_cols(var_df_per_chrom)
"""
var_df_per_chrom = var_df_per_chrom.reindex(columns=cols_to_keep)
"""Keep only lines where any of the annotations are 1.
https://stackoverflow.com/a/34243246"""
if self.anno_list:
nzv = var_df_per_chrom[self.anno_list].any(axis=1)
# print(nzv.sum())
# print(var_df_per_chrom[nzv].shape)
var_df_per_chrom = var_df_per_chrom[nzv]
list_.append(var_df_per_chrom)
# list_ is just pointing to DF so doesn't take any memory
print(sys.getsizeof(var_df_per_chrom)/(1024**3), "Gb")
logger.info("All contigs/chromosomes loaded")
self.var_df = | pd.concat(list_) | pandas.concat |
import pandas as pd
import numpy as np
import gspread_dataframe as gd
import gspread as gs
import setup
import queries
# csv export of historical sales
sales_master = pd.read_csv('Inventory Manager/historical_sales.csv')
# dropping na values, filtering out samples
sales_master = sales_master.dropna()
sales_master = sales_master[sales_master['Sample'] == 'N']
# adding in datetime fields for segmentation
sales_master['Delivery Date'] = pd.to_datetime(sales_master['Delivery Date'])
sales_master['Month'] = sales_master['Delivery Date'].dt.month
sales_master['Year'] = sales_master['Delivery Date'].dt.year
sales_master['Week'] = sales_master['Delivery Date'].dt.isocalendar().week
# limiting data to only directly purchased and managed inventory
sales_master_no_dsw = sales_master[sales_master['Warehouse'] != 'DSW']
# global monthly sales
ind = ['Item Description: Product Family', 'Item Description: Size']
cols = ['Year', 'Month']
monthly_sales_global = | pd.pivot_table(sales_master_no_dsw, values='Cases Sold', index=ind, columns=cols, aggfunc=np.sum) | pandas.pivot_table |
import pandas as pd
from netstats.fsan import Fsan
class ListaDataframe:
def __init__(self, operacao):
self.operacao = operacao
def dataframe(self):
"""
Retorna uma lista de dataframes por FSAN, cada dataframe contém as operações realizadas
com a FSAN.
Returns
-------
list
"""
fsan = Fsan(self.operacao).lista_de_fsans()
sequencia = []
for i in fsan:
lista = []
for j in self.operacao.operacao:
if i in j or i+':' in j:
lista.append(j)
sequencia.append(lista)
lista_data = []
for i in sequencia:
lista_data.append( | pd.DataFrame(i) | pandas.DataFrame |
import pytest
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import pandas as pd
from generators import (
generate_votes,
row_maker,
)
def test_row_keys():
voting_machine = row_maker()
row = voting_machine()
assert list(row.keys()) == ["timestamp", "id", "region", "vote"]
@settings(deadline=None)
@given(st.integers(min_value=0, max_value=200))
def test_generated_as_many_votes_as_requested(length):
votes = generate_votes(length)
assert (length == 0 and votes.empty) or generate_votes(length).shape == (length, 4)
@settings(deadline=None)
@given(st.integers(min_value=0, max_value=200))
def test_votes_columns(length):
data = generate_votes(length)
assert (length == 0 and data.empty) or list(data.columns) == [
"timestamp",
"id",
"region",
"vote",
]
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_id_lengths(length):
string_lengths = generate_votes(length)["id"].apply(lambda x: len(x))
assert all(uid_len == 36 for uid_len in string_lengths)
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_ids_have_no_repetitions(length):
assert generate_votes(length)["id"].drop_duplicates().shape[0] == length
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_timestamps_have_constant_date(length):
dates = list(generate_votes(length)["timestamp"].dt.date.unique())
assert (length == 0 and not dates) or dates == [pd.Timestamp("2020-12-10")]
@settings(deadline=None)
@given(st.integers(min_value=1, max_value=200))
def test_timestamps_have_hours_within_range(length):
hours = generate_votes(length)["timestamp"].dt.hour.unique()
assert all(hour in range(8, 21) for hour in hours)
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1400))
def test_all_regions_appear(length):
expected_regions = set(pd.read_csv("data/region_data.csv").region)
actual_regions = set(generate_votes(length)["region"].unique())
assert expected_regions == actual_regions
@settings(deadline=None)
@given(st.integers(min_value=1000, max_value=1800))
def test_regions_distribution(length):
expected = pd.read_csv("data/region_data.csv", usecols=["region", "percent"])
regions = pd.DataFrame(generate_votes(length)["region"])
regions["cnt"] = 1
actual = (regions.groupby("region").agg("count") / length).reset_index()
joined = | pd.merge(expected, actual, on="region") | pandas.merge |
import numpy as np
import pandas as pd
from tensorflow.keras import Input
from keras.layers.core import Dropout, Dense
from keras.layers import LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from tensorflow.keras.preprocessing.text import Tokenizer
from src.utils import *
from model import (do_padding,get_extra,preprocess_text,convert_cities,convert_countries)
train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")
# Processing extra vars
keyword_bins = pd.read_csv("data/keyword_bins.csv", dtype={"keyword":str,
"keyword_bin":str})
locations = | pd.read_csv("data/locations.csv") | pandas.read_csv |
#!/usr/bin/python3
import sys
import numpy as np
import pandas as pd
import scipy.stats as ss
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from segnet import segnet
# specify model, datasets
prefix='weights/chr20.full.conv3.168'
if len(sys.argv) > 1:
dataset=sys.argv[1]
else:
dataset="test_10gen.no_OCE_WAS"
in_x="/scratch/users/magu/deepmix/data/simulated_chr20/numpy/"+dataset+".query.npz"
in_y="/scratch/users/magu/deepmix/data/simulated_chr20/label/"+dataset+".result.npz"
print(in_x)
print(in_y)
# consider proper variants
v = np.loadtxt(prefix+'.var_index.txt', dtype=int)
# declare model, compile, load weights -- perhaps make this automated with the file?
model=segnet(input_shape=(v.shape[0], 2), n_classes=5, n_blocks=4, n_filters=16, width=16)
model.compile(tf.keras.optimizers.Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights(prefix+'.h5')
# load data
anc=np.array(['AFR','EAS','EUR','NAT','SAS']) # fixed for all test datasets
x=np.load(in_x)
y=np.load(in_y)
X=x['G'][:,v,:]
S=x['S']
V=x['V'][v]
Y=y['L'][np.ix_(np.array([np.where(y['S']==s)[0][0] for s in S]),v)]-1
print(X.shape, Y.shape)
# preliminary evaluation
Yhat=model.predict(X)
Yhat_lab = np.argmax(Yhat, axis=-1)
loss, acc = model.evaluate(X, to_categorical(Y), batch_size=4, verbose=0)
print(loss)
print(acc)
# confusion matrix
cf=tf.math.confusion_matrix(Y.flatten(), Yhat_lab.flatten()).numpy()
print(np.sum(np.diag(cf))/np.sum(cf)) # verify accuracy
print("base pair confusion (rows are ground truth)")
print(pd.DataFrame(cf, index=anc, columns=anc).to_latex())
# specificity (column-normalized, diagonal is fraction of A_hat which is truly A)
print("specificity (column-normalized) confusion")
print(pd.DataFrame(cf, index=anc, columns=anc).divide(cf.sum(axis=0), axis=1).round(decimals=3).to_latex())
# sensitivity (row-normalized, diagonal is fraction of A which we say is A_hat)
print("sensitivity (row-normalized) confusion")
print(pd.DataFrame(cf, index=anc, columns=anc).divide(cf.sum(axis=1), axis=0).round(decimals=3).to_latex())
# rinse and repeat with mode filter
print("---[MODE FILTER: WIDTH=4000]---")
mfw=2000
yhat2=np.zeros(Yhat_lab.shape)
for j in range(Yhat_lab.shape[-1]):
yhat2[:,j]=ss.mode(Yhat_lab[:,max(0,j-mfw):min(Yhat_lab.shape[-1], j+mfw)], axis=1).mode.flatten()
print("accuracy")
cf2=tf.math.confusion_matrix(Y.flatten(), yhat2.flatten()).numpy()
print(np.sum(np.diag(cf2))/np.sum(cf2))
print("base pair confusion (rows are ground truth)")
print( | pd.DataFrame(cf2, index=anc, columns=anc) | pandas.DataFrame |
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
Categorical,
Index,
IntervalIndex,
Series,
Timedelta,
bdate_range,
date_range,
isna,
)
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from datetime import date
from decimal import Decimal
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
def test_alignment_doesnt_change_tz(self):
# GH#33671
dti = pd.date_range("2016-01-01", periods=10, tz="CET")
dti_utc = dti.tz_convert("UTC")
ser = Series(10, index=dti)
ser_utc = Series(10, index=dti_utc)
# we don't care about the result, just that original indexes are unchanged
ser * ser_utc
assert ser.index is dti
assert ser_utc.index is dti_utc
def test_arithmetic_with_duplicate_index(self):
# GH#8363
# integer ops with a non-unique index
index = [2, 2, 3, 3, 4]
ser = Series(np.arange(1, 6, dtype="int64"), index=index)
other = Series(np.arange(5, dtype="int64"), index=index)
result = ser - other
expected = Series(1, index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# GH#8363
# datetime ops with a non-unique index
ser = Series(date_range("20130101 09:00:00", periods=5), index=index)
other = Series(date_range("20130101", periods=5), index=index)
result = ser - other
expected = Series(Timedelta("9 hours"), index=[2, 2, 3, 3, 4])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
@pytest.mark.parametrize("axis", [0, None, "index"])
def test_comparison_flex_basic(self, axis, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
result = getattr(left, op)(right, axis=axis)
expected = getattr(operator, op)(left, right)
tm.assert_series_equal(result, expected)
def test_comparison_bad_axis(self, all_compare_operators):
op = all_compare_operators.strip("__")
left = Series(np.random.randn(10))
right = Series(np.random.randn(10))
msg = "No axis named 1 for object type"
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
@pytest.mark.parametrize(
"values, op",
[
([False, False, True, False], "eq"),
([True, True, False, True], "ne"),
([False, False, True, False], "le"),
([False, False, False, False], "lt"),
([False, True, True, False], "ge"),
([False, True, False, False], "gt"),
],
)
def test_comparison_flex_alignment(self, values, op):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"values, op, fill_value",
[
([False, False, True, True], "eq", 2),
([True, True, False, False], "ne", 2),
([False, False, True, True], "le", 0),
([False, False, False, True], "lt", 0),
([True, True, True, False], "ge", 0),
([True, True, False, False], "gt", 0),
],
)
def test_comparison_flex_alignment_fill(self, values, op, fill_value):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
result = getattr(left, op)(right, fill_value=fill_value)
expected = Series(values, index=list("abcd"))
tm.assert_series_equal(result, expected)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Invalid comparison between dtype=category and str"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self, all_compare_operators):
op = all_compare_operators
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
f = getattr(operator, op)
# test that comparisons work
val = ser[5]
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "__ne__":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: dont leave commented-out
# result = f(val, ser)
# expected = f(val, ser.dropna()).reindex(ser.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
@pytest.mark.parametrize(
"left, right",
[
(
Series([1, 2, 3], index=list("ABC"), name="x"),
Series([2, 2, 2], index=list("ABD"), name="x"),
),
(
Series([1, 2, 3], index=list("ABC"), name="x"),
Series([2, 2, 2, 2], index=list("ABCD"), name="x"),
),
],
)
def test_comp_ops_df_compat(self, left, right):
# GH 1134
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
right == left
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
right != left
with pytest.raises(ValueError, match=msg):
left < right
with pytest.raises(ValueError, match=msg):
right < left
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
left.to_frame() == right.to_frame()
with pytest.raises(ValueError, match=msg):
right.to_frame() == left.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() != right.to_frame()
with pytest.raises(ValueError, match=msg):
right.to_frame() != left.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
with pytest.raises(ValueError, match=msg):
right.to_frame() < left.to_frame()
def test_compare_series_interval_keyword(self):
# GH#25338
s = Series(["IntervalA", "IntervalB", "IntervalC"])
result = s == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestTimeSeriesArithmetic:
# TODO: De-duplicate with test below
def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
ser = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ser.tz_convert("Europe/Moscow")
result = ser + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ser
assert result.index.tz is pytz.utc
def test_series_add_tz_mismatch_converts_to_utc(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
perm = np.random.permutation(100)[:90]
ser1 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")
)
perm = np.random.permutation(100)[:90]
ser2 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")
)
result = ser1 + ser2
uts1 = ser1.tz_convert("utc")
uts2 = ser2.tz_convert("utc")
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
def test_series_add_aware_naive_raises(self):
rng = | date_range("1/1/2011", periods=10, freq="H") | pandas.date_range |
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
import xgboost as xgb
import operator
import matplotlib.pyplot as plt
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.svm import SVR
from sklearn.preprocessing import Imputer
from sklearn.ensemble import RandomForestRegressor
plt.rcParams['font.sans-serif'] = ['SimHei'] # 指定默认字体
plt.rcParams['axes.unicode_minus'] = False # 解决保存图像是负号'-'显示为方块的问题
def check_nan():
"""
缺失值含义分析与删除
"""
train = | pd.read_csv("./data/house/train.csv") | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
# Based on [olivier's script](https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283)
# In[1]:
MAX_ROUNDS = 400
OPTIMIZE_ROUNDS = False
LEARNING_RATE = 0.07
EARLY_STOPPING_ROUNDS = 50
# Note: I set EARLY_STOPPING_ROUNDS high so that (when OPTIMIZE_ROUNDS is set)
# I will get lots of information to make my own judgment. You should probably
# reduce EARLY_STOPPING_ROUNDS if you want to do actual early stopping.
# I recommend initially setting <code>MAX_ROUNDS</code> fairly high and using <code>OPTIMIZE_ROUNDS</code> to get an idea of the appropriate number of rounds (which, in my judgment, should be close to the maximum value of <code>best_ntree_limit</code> among all folds, maybe even a bit higher if your model is adequately regularized...or alternatively, you could set <code>verbose=True</code> and look at the details to try to find a number of rounds that works well for all folds). Then I would turn off <code>OPTIMIZE_ROUNDS</code> and set <code>MAX_ROUNDS</code> to the appropraite number of total rounds.
#
# The problem with "early stopping" by choosing the best round for each fold is that it overfits to the validation data. It's therefore liable not to produce the optimal model for predicting test data, and if it's used to produce validation data for stacking/ensembling with other models, it would cause this one to have too much weight in the ensemble. Another possibility (and the default for XGBoost, it seems) is to use the round where the early stop actually happens (with the lag that verifies lack of improvement) rather than the best round. That solves the overfitting problem (provided the lag is long enough), but so far it doesn't seem to have helped. (I got a worse validation score with 20-round early stopping per fold than with a constant number of rounds for all folds, so the early stopping actually seemed to underfit.)
#
# In[2]:
import numpy as np
import pandas as pd
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.preprocessing import LabelEncoder
from numba import jit
import time
import gc
# In[3]:
# Compute gini
# from CPMP's kernel https://www.kaggle.com/cpmpml/extremely-fast-gini-computation
@jit
def eval_gini(y_true, y_prob):
y_true = np.asarray(y_true)
y_true = y_true[np.argsort(y_prob)]
ntrue = 0
gini = 0
delta = 0
n = len(y_true)
for i in range(n-1, -1, -1):
y_i = y_true[i]
ntrue += y_i
gini += y_i * delta
delta += 1 - y_i
gini = 1 - 2 * gini / (ntrue * (n - ntrue))
return gini
# In[4]:
# Funcitons from olivier's kernel
# https://www.kaggle.com/ogrellier/xgb-classifier-upsampling-lb-0-283
def gini_xgb(preds, dtrain):
labels = dtrain.get_label()
gini_score = -eval_gini(labels, preds)
return [('gini', gini_score)]
def add_noise(series, noise_level):
return series * (1 + noise_level * np.random.randn(len(series)))
def target_encode(trn_series=None, # Revised to encode validation series
val_series=None,
tst_series=None,
target=None,
min_samples_leaf=1,
smoothing=1,
noise_level=0):
"""
Smoothing is computed like in the following paper by <NAME>
https://kaggle2.blob.core.windows.net/forum-message-attachments/225952/7441/high%20cardinality%20categoricals.pdf
trn_series : training categorical feature as a pd.Series
tst_series : test categorical feature as a pd.Series
target : target data as a pd.Series
min_samples_leaf (int) : minimum samples to take category average into account
smoothing (int) : smoothing effect to balance categorical average vs prior
"""
assert len(trn_series) == len(target)
assert trn_series.name == tst_series.name
temp = pd.concat([trn_series, target], axis=1)
# Compute target mean
averages = temp.groupby(by=trn_series.name)[target.name].agg(["mean", "count"])
# Compute smoothing
smoothing = 1 / (1 + np.exp(-(averages["count"] - min_samples_leaf) / smoothing))
# Apply average function to all target data
prior = target.mean()
# The bigger the count the less full_avg is taken into account
averages[target.name] = prior * (1 - smoothing) + averages["mean"] * smoothing
averages.drop(["mean", "count"], axis=1, inplace=True)
# Apply averages to trn and tst series
ft_trn_series = pd.merge(
trn_series.to_frame(trn_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=trn_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_trn_series.index = trn_series.index
ft_val_series = pd.merge(
val_series.to_frame(val_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=val_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_val_series.index = val_series.index
ft_tst_series = pd.merge(
tst_series.to_frame(tst_series.name),
averages.reset_index().rename(columns={'index': target.name, target.name: 'average'}),
on=tst_series.name,
how='left')['average'].rename(trn_series.name + '_mean').fillna(prior)
# pd.merge does not keep the index so restore it
ft_tst_series.index = tst_series.index
return add_noise(ft_trn_series, noise_level), add_noise(ft_val_series, noise_level), add_noise(ft_tst_series, noise_level)
# In[5]:
# Read data
train_df = | pd.read_csv('../input/train.csv', na_values="-1") | pandas.read_csv |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 16:49:36 2020
@author: Yumen
"""
import os
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
import Preprocessing as pp
import Data_Cleaning as dc
def DataCleaning(SELECTED_BEACON,time_zone,log_version,time_bins_size,
members_metadata_filename, beacons_metadata_filename,
attendees_metadata_filename,data_dir):
#set global variables
SELECTED_BEACON = 12
time_zone = 'US/Eastern'
log_version = '2.0'
time_bins_size = '1min'
proximity_data_filenames = []
for i in range(1, 18):
if i < 10:
filename = 'CTSIserver{:02d}_proximity_2019-06-01.txt'.format(i)
else:
filename = 'CTSIserver{}_proximity_2019-06-01.txt'.format(i)
proximity_data_filenames.append(filename)
#Pre-processing on data
'''
First, we load two lists that will help us with some of the analysis: list of
membmers and list of location beacons
'''
members_metadata = pd.read_csv(data_dir+members_metadata_filename)
beacons_metadata = pd.read_excel(data_dir+beacons_metadata_filename, sheet_name='Sheet1')
#beacon data peprocessing
'''
We create a translation table between the badge ID and member key. This is done
based on the data itself, since it should contain data from all the badges that
take part in the study.
Note that we create a <id,member key> pair for ever time bin. While this is not
necessary at this point, it allows this mapping to change (for example, if a
badge is re-assigned to a different member).
'''
idmaps = []
for proximity_data_filename in proximity_data_filenames:
with open(os.path.join(data_dir, proximity_data_filename), 'r') as f:
idmaps.append(pp.id_to_member_mapping(f, time_bins_size, tz=time_zone))
tmp_idmaps = idmaps[0]
for i in range(1, len(idmaps)):
tmp_idmaps = | pd.concat([tmp_idmaps, idmaps[i]]) | pandas.concat |
from flask import Flask, render_template,request,redirect,flash,url_for
from flask import *
from flaskext.mysql import MySQL
from flask import request
import pandas as pd
from sklearn.model_selection import train_test_split
import missingno as msno
import warnings
warnings.filterwarnings('ignore')
import pickle
mysql = MySQL()
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb' ))
app.secret_key = 'your_secret_key'
@app.route("/")
def home():
return render_template("index.html")
@app.route("/register")
def register():
#Loading the quiz data
#reading data
df0 = pd.read_csv('data/LWR1.csv')
df1 = pd.read_csv('data/MOR1.csv')
df2 = pd.read_csv('data/PCR1.csv')
df3 = pd.read_csv('data/PVR1.csv')
df4 = pd.read_csv('data/SRR1.csv')
#spelling data
df5 = pd.read_csv('data/LWS1.csv')
df6 = pd.read_csv('data/PVS1.csv')
df7 = pd.read_csv('data/SOSS1.csv')
#memory data
df8 = pd.read_csv('data/LWM1.csv')
df9 = pd.read_csv('data/LSM1.csv')
df10 = pd.read_csv('data/MOM1.csv')
df11 = pd.read_csv('data/PCM1.csv')
df12 = pd.read_csv('data/PVM1.csv')
df13 = pd.read_csv('data/SOM1.csv')
#parent's feedback
df14 = pd.read_csv('data/PFR1.csv')
#In[3]:
dl = pd.merge(df0,df1, left_on='id ', right_index=True)
# In[4]:
dd = pd.merge(dl,df2, left_on='id ', right_index=True)
# In[5]:
de = pd.merge(dd,df3, left_on='id ', right_index=True)
# In[6]:
d = pd.merge(de,df4, left_on='id ', right_index=True)
# In[7]:
dz = pd.merge(d,df5, left_on='id ', right_index=True)
# In[8]:
dx = pd.merge(dz,df6, left_on='id ', right_index=True)
# In[9]:
dc = pd.merge(dx,df7, left_on='id ', right_index=True)
# In[10]:
dv = pd.merge(dx,df8, left_on='id ', right_index=True)
# In[11]:
db = pd.merge(dv,df9, left_on='id ', right_index=True)
# In[12]:
dn = pd.merge(db,df10, left_on='id ', right_index=True)
# In[13]:
dm = | pd.merge(dn,df11, left_on='id ', right_index=True) | pandas.merge |
import datetime
import fiona
import geopandas as gpd
import jinja2
import logging
import numpy as np
import pandas as pd
import random
import requests
import sqlite3
import sys
import time
import yaml
from collections import ChainMap, defaultdict
from operator import attrgetter, itemgetter
from osgeo import ogr, osr
from pathlib import Path
from shapely.geometry import LineString, Point
from sqlalchemy import create_engine, exc as sqlalchemy_exc
from sqlalchemy.engine.base import Engine
from tqdm import tqdm
from tqdm.auto import trange
from typing import Any, Dict, List, Type, Union
# Set logger.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.setFormatter(logging.Formatter("%(asctime)s - %(levelname)s: %(message)s", "%Y-%m-%d %H:%M:%S"))
logger.addHandler(handler)
# Enable ogr exceptions.
ogr.UseExceptions()
# Define globally accessible variables.
filepath = Path(__file__).resolve()
distribution_format_path = filepath.parent / "distribution_format.yaml"
field_domains_path = {lang: filepath.parent / f"field_domains_{lang}.yaml" for lang in ("en", "fr")}
class Timer:
"""Tracks stage runtime."""
def __init__(self) -> None:
"""Initializes the Timer class."""
self.start_time = None
def __enter__(self) -> None:
"""Starts the timer."""
logger.info("Started.")
self.start_time = time.time()
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None:
"""
Computes and returns the elapsed time.
:param Any exc_type: required parameter for __exit__.
:param Any exc_val: required parameter for __exit__.
:param Any exc_tb: required parameter for __exit__.
"""
total_seconds = time.time() - self.start_time
delta = datetime.timedelta(seconds=total_seconds)
logger.info(f"Finished. Time elapsed: {delta}.")
def apply_domain(series: pd.Series, domain: dict, default: Any) -> pd.Series:
"""
Applies a domain restriction to the given Series based on a domain dictionary.
Replaces missing or invalid values with the default value.
Non-dictionary domains are treated as Null. Values are left as-is excluding Null types and empty strings, which are
replaced with the default value.
:param pd.Series series: Series.
:param dict domain: dictionary of acceptable domain values.
:param Any default: default value.
:return pd.Series: Series with enforced domain restriction.
"""
# Validate against domain dictionary.
if isinstance(domain, dict):
# Convert keys to lowercase strings.
domain = {str(k).lower(): v for k, v in domain.items()}
# Configure lookup function, convert invalid values to default.
def get_value(val: Any) -> Any:
"""
Retrieves a domain dictionary value for a given key, non-matches return the default value.
:param Any val: lookup key.
:return Any: corresponding domain value or the default value.
"""
try:
return domain[str(val).lower()]
except KeyError:
return default
# Get values.
return series.map(get_value)
else:
# Convert empty strings and null types to default.
series.loc[(series.map(str).isin(["", "nan", "-2147483648"])) | (series.isna())] = default
return series
def cast_dtype(val: Any, dtype: Type, default: Any) -> Any:
"""
Casts the value to the given numpy dtype.
Returns the default parameter for invalid or Null values.
:param Any val: value.
:param Type dtype: numpy type object to be casted to.
:param Any default: value to be returned in case of error.
:return Any: casted or default value.
"""
try:
if pd.isna(val) or val == "":
return default
else:
return itemgetter(0)(np.array([val]).astype(dtype))
except (TypeError, ValueError):
return default
def compile_default_values(lang: str = "en") -> dict:
"""
Compiles the default value for each field in each NRN dataset.
:param str lang: output language: 'en', 'fr'.
:return dict: dictionary of default values for each attribute of each NRN dataset.
"""
dft_vals = load_yaml(field_domains_path[lang])["default"]
dist_format = load_yaml(distribution_format_path)
defaults = dict()
try:
# Iterate tables.
for name in dist_format:
defaults[name] = dict()
# Iterate fields.
for field, dtype in dist_format[name]["fields"].items():
# Configure default value.
key = "label" if dtype[0] == "str" else "code"
defaults[name][field] = dft_vals[key]
except (AttributeError, KeyError, ValueError):
logger.exception(f"Invalid schema definition for one or more yamls:"
f"\nDefault values: {dft_vals}"
f"\nDistribution format: {dist_format}")
sys.exit(1)
return defaults
def compile_domains(mapped_lang: str = "en") -> dict:
"""
Compiles the acceptable domain values for each field in each NRN dataset. Each domain will consist of the following
keys:
1) 'values': all English and French values and keys flattened into a single list.
2) 'lookup': a lookup dictionary mapping each English and French value and key to the value of the given map
language. Integer keys and their float-equivalents are both added to accommodate incorrectly casted data.
:param str mapped_lang: output language: 'en', 'fr'.
:return dict: dictionary of domain values and lookup dictionary for each attribute of each NRN dataset.
"""
# Compile field domains.
domains = defaultdict(dict)
# Load domain yamls.
domain_yamls = {lang: load_yaml(field_domains_path[lang]) for lang in ("en", "fr")}
# Iterate tables and fields with domains.
for table in domain_yamls["en"]["tables"]:
for field in domain_yamls["en"]["tables"][table]:
try:
# Compile domains.
domain_en = domain_yamls["en"]["tables"][table][field]
domain_fr = domain_yamls["fr"]["tables"][table][field]
# Configure mapped and non-mapped output domain.
domain_mapped = domain_en if mapped_lang == "en" else domain_fr
domain_non_mapped = domain_en if mapped_lang != "en" else domain_fr
# Compile all domain values and domain lookup table, separately.
if domain_en is None:
domains[table][field] = {"values": None, "lookup": None}
elif isinstance(domain_en, list):
domains[table][field] = {
"values": sorted(list({*domain_en, *domain_fr}), reverse=True),
"lookup": dict([*zip(domain_en, domain_mapped), *zip(domain_fr, domain_mapped)])
}
elif isinstance(domain_en, dict):
domains[table][field] = {
"values": sorted(list({*domain_en.values(), *domain_fr.values()}), reverse=True),
"lookup": {**domain_mapped,
**{v: v for v in domain_mapped.values()},
**{v: domain_mapped[k] for k, v in domain_non_mapped.items()}}
}
# Add integer keys as floats to accommodate incorrectly casted data.
for k, v in domain_mapped.items():
try:
domains[table][field]["lookup"].update({str(float(k)): v})
except ValueError:
continue
else:
raise TypeError
except (AttributeError, KeyError, TypeError, ValueError):
yaml_paths = ", ".join(str(field_domains_path[lang]) for lang in ("en", "fr"))
logger.exception(f"Unable to compile domains from config yamls: {yaml_paths}. Invalid schema "
f"definition for table: {table}, field: {field}.")
sys.exit(1)
return domains
def compile_dtypes(length: bool = False) -> dict:
"""
Compiles the dtype for each field in each NRN dataset. Optionally includes the field length.
:param bool length: includes the length of the field in the returned data.
:return dict: dictionary of dtypes and, optionally, length for each attribute of each NRN dataset.
"""
dist_format = load_yaml(distribution_format_path)
dtypes = dict()
try:
# Iterate tables.
for name in dist_format:
dtypes[name] = dict()
# Iterate fields.
for field, dtype in dist_format[name]["fields"].items():
# Compile dtype and field length.
dtypes[name][field] = dtype if length else dtype[0]
except (AttributeError, KeyError, ValueError):
logger.exception(f"Invalid schema definition: {dist_format}.")
sys.exit(1)
return dtypes
def create_db_engine(url: str) -> Engine:
"""
:param str url: NRN database connection URL.
:return sqlalchemy.engine.base.Engine: SQLAlchemy database engine.
"""
logger.info(f"Creating NRN database engine.")
# Create database engine.
try:
engine = create_engine(url)
except sqlalchemy_exc.SQLAlchemyError as e:
logger.exception(f"Unable to create engine for NRN database.")
logger.exception(e)
sys.exit(1)
return engine
def explode_geometry(gdf: gpd.GeoDataFrame) -> gpd.GeoDataFrame:
"""
Explodes MultiLineStrings and MultiPoints to LineStrings and Points, respectively.
:param gpd.GeoDataFrame gdf: GeoDataFrame.
:return gpd.GeoDataFrame: GeoDataFrame containing only single-part geometries.
"""
logger.info("Exploding multi-type geometries.")
multi_types = {"MultiLineString", "MultiPoint"}
if len(set(gdf.geom_type.unique()).intersection(multi_types)):
# Separate multi- and single-type records.
multi = gdf.loc[gdf.geom_type.isin(multi_types)]
single = gdf.loc[~gdf.index.isin(multi.index)]
# Explode multi-type geometries.
multi_exploded = multi.explode().reset_index(drop=True)
# Merge all records.
merged = gpd.GeoDataFrame(pd.concat([single, multi_exploded], ignore_index=True), crs=gdf.crs)
return merged.copy(deep=True)
else:
return gdf.copy(deep=True)
def export(dfs: Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]], dst: Path, driver: str = "GPKG",
name_schemas: Union[None, dict] = None, merge_schemas: bool = False, keep_uuid: bool = True,
outer_pbar: Union[tqdm, trange, None] = None) -> None:
"""
Exports one or more (Geo)DataFrames as a specified OGR driver file / layer.
:param Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]] dfs: dictionary of NRN dataset names and (Geo)DataFrames.
:param Path dst: output path.
:param str driver: OGR driver short name, default='GPKG'.
:param Union[None, dict] name_schemas: optional dictionary mapping of dataset and field names for each provided
dataset. Expected dictionary format:
{
<dataset_name>:
name: <new_dataset_name>
fields:
<field_name>: <new_field_name>
...
...
}
:param bool merge_schemas: optional flag to merge type and name schemas such that attributes from any dataset can
exist on each provided dataset, default False.
:param bool keep_uuid: optional flag to preserve the uuid column, default True.
:param Union[tqdm, trange, None] outer_pbar: optional pre-existing tqdm progress bar.
"""
try:
# Validate / create driver.
driver = ogr.GetDriverByName(driver)
# Create directory structure and data source (only create source for layer-based drivers).
dst = Path(dst).resolve()
if dst.suffix:
dst.parent.mkdir(parents=True, exist_ok=True)
if dst.exists():
source = driver.Open(str(dst), update=1)
else:
source = driver.CreateDataSource(str(dst))
else:
dst.mkdir(parents=True, exist_ok=True)
source = None
# Compile type schemas, conditionally merge.
type_schemas = load_yaml(distribution_format_path)
if merge_schemas:
merged = {"spatial": any(type_schemas[table]["spatial"] for table in dfs),
"fields": dict(ChainMap(*[type_schema["fields"] for table, type_schema in type_schemas.items()]))}
type_schemas = {table: merged for table in type_schemas}
# Compile name schemas (filter datasets and fields within the existing type schemas and dataframe columns).
if not name_schemas:
name_schemas = {table: {"name": table, "fields": dict(zip(table_schema["fields"], table_schema["fields"]))}
for table, table_schema in type_schemas.items()}
# Iterate dataframes.
for table, df in dfs.items():
name_schema, type_schema = name_schemas[table], type_schemas[table]
schema = {"name": str(name_schema["name"]),
"spatial": type_schema["spatial"],
"fields": {field: {"name": name_schema["fields"][field],
"type": type_schema["fields"][field][0],
"width": type_schema["fields"][field][1]}
for field in set(name_schema["fields"]).intersection(set(df.columns))}}
# Conditionally add uuid to schema.
if keep_uuid and "uuid" in df.columns:
schema["fields"]["uuid"] = {"name": "uuid", "type": "str", "width": 32}
# Configure layer geometry type and spatial reference system.
spatial = schema["spatial"]
srs = None
geom_type = ogr.wkbNone
if schema["spatial"]:
srs = osr.SpatialReference()
srs.ImportFromEPSG(df.crs.to_epsg())
geom_type = attrgetter(f"wkb{df.geom_type.iloc[0]}")(ogr)
# Create source (non-layer-based drivers only) and layer.
if dst.suffix:
layer = source.CreateLayer(name=schema["name"], srs=srs, geom_type=geom_type, options=["OVERWRITE=YES"])
else:
source = driver.CreateDataSource(str(dst / schema["name"]))
layer = source.CreateLayer(name=Path(schema["name"]).stem, srs=srs, geom_type=geom_type)
# Set field definitions from schema.
ogr_field_map = {"float": ogr.OFTReal, "int": ogr.OFTInteger, "str": ogr.OFTString}
for field, specs in schema["fields"].items():
field_defn = ogr.FieldDefn(specs["name"], ogr_field_map[specs["type"]])
field_defn.SetWidth(specs["width"])
layer.CreateField(field_defn)
# Reorder and rename columns to match schema.
df = df[[*schema["fields"], "geometry"] if spatial else [*schema["fields"]]].copy(deep=True)
df.rename(columns={field: specs["name"] for field, specs in schema["fields"].items()}, inplace=True)
# Write layer.
layer.StartTransaction()
for feat in tqdm(df.itertuples(index=False), total=len(df),
desc=f"Writing to file={source.GetName()}, layer={table}",
bar_format="{desc}: |{bar}| {percentage:3.0f}% {r_bar}", leave=not bool(outer_pbar)):
# Instantiate feature.
feature = ogr.Feature(layer.GetLayerDefn())
# Compile feature properties.
properties = feat._asdict()
# Set feature geometry, if spatial.
if spatial:
geom = ogr.CreateGeometryFromWkb(properties.pop("geometry").wkb)
feature.SetGeometry(geom)
# Iterate and set feature properties (attributes).
for field_index, prop in enumerate(properties.items()):
feature.SetField(field_index, prop[-1])
# Create feature.
layer.CreateFeature(feature)
# Clear pointer for next iteration.
feature = None
layer.CommitTransaction()
# Update outer progress bar.
if outer_pbar:
outer_pbar.update(1)
except FileExistsError as e:
logger.exception(f"Invalid output directory - already exists.")
logger.exception(e)
sys.exit(1)
except (KeyError, ValueError, sqlite3.Error) as e:
logger.exception(f"Error raised when writing output: {dst}.")
logger.exception(e)
sys.exit(1)
def extract_nrn(url: str, source_code: int) -> Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]]:
"""
Extracts NRN database records for the source into (Geo)DataFrames.
:param str url: NRN database connection URL.
:param int source_code: code for the source province / territory.
:return Dict[str, Union[gpd.GeoDataFrame, pd.DataFrame]]: dictionary of NRN dataset names and (Geo)DataFrames.
"""
logger.info(f"Extracting NRN datasets for source code: {source_code}.")
# Connect to database.
con = create_db_engine(url)
# Compile field defaults, domains, and dtypes.
defaults = compile_default_values(lang="en")
domains = compile_domains(mapped_lang="en")
dtypes = compile_dtypes()
# Load and execute database queries for NRN datasets.
dfs = dict()
for sql_file in (filepath.parent / "sql/extract").glob("*.sql"):
logger.info(f"Extracting NRN dataset: {sql_file.stem}.")
try:
# Resolve layer name.
layer = sql_file.stem
# Load document as jinja template.
with open(sql_file, "r") as doc:
template = jinja2.Template(doc.read())
# Update template.
query = template.render(
source_code=source_code,
metacover=f"'{defaults[layer]['metacover']}'" if
isinstance(defaults[layer]["metacover"], str) else defaults[layer]["metacover"],
specvers=2.0,
muniquad=f"'{defaults['strplaname']['muniquad']}'" if
isinstance(defaults['strplaname']["muniquad"], str) else defaults['strplaname']["muniquad"]
)
# Execute query.
df = gpd.read_postgis(query, con, geom_col="geometry")
# Store non-empty dataset.
if len(df):
dfs[layer] = df.copy(deep=True)
except (jinja2.TemplateError, jinja2.TemplateAssertionError, jinja2.UndefinedError) as e:
logger.exception(f"Unable to load SQL from: {sql_file}.")
logger.exception(e)
# Separate individual datasets from extracted data.
logger.info("Separating individual datasets from extracted data.")
nrn = dict()
# Separate dataset: addrange.
logger.info("Separating dataset: addrange.")
# Separate records.
addrange = dfs["roadseg"].loc[dfs["roadseg"]["segment_type"] == 1, [
"addrange_acqtech", "metacover", "addrange_credate", "datasetnam", "accuracy", "addrange_provider",
"addrange_revdate", "specvers", "l_altnanid", "r_altnanid", "addrange_l_digdirfg", "addrange_r_digdirfg",
"addrange_l_hnumf", "addrange_r_hnumf", "addrange_l_hnumsuff", "addrange_r_hnumsuff", "addrange_l_hnumtypf",
"addrange_r_hnumtypf", "addrange_l_hnumstr", "addrange_r_hnumstr", "addrange_l_hnuml", "addrange_r_hnuml",
"addrange_l_hnumsufl", "addrange_r_hnumsufl", "addrange_l_hnumtypl", "addrange_r_hnumtypl", "addrange_nid",
"segment_id_left", "segment_id_right", "addrange_l_rfsysind", "addrange_r_rfsysind"]
].rename(columns={
"addrange_acqtech": "acqtech", "addrange_credate": "credate", "addrange_provider": "provider",
"addrange_revdate": "revdate", "addrange_l_digdirfg": "l_digdirfg", "addrange_r_digdirfg": "r_digdirfg",
"addrange_l_hnumf": "l_hnumf", "addrange_r_hnumf": "r_hnumf", "addrange_l_hnumsuff": "l_hnumsuff",
"addrange_r_hnumsuff": "r_hnumsuff", "addrange_l_hnumtypf": "l_hnumtypf", "addrange_r_hnumtypf": "r_hnumtypf",
"addrange_l_hnumstr": "l_hnumstr", "addrange_r_hnumstr": "r_hnumstr", "addrange_l_hnuml": "l_hnuml",
"addrange_r_hnuml": "r_hnuml", "addrange_l_hnumsufl": "l_hnumsufl", "addrange_r_hnumsufl": "r_hnumsufl",
"addrange_l_hnumtypl": "l_hnumtypl", "addrange_r_hnumtypl": "r_hnumtypl", "addrange_nid": "nid",
"segment_id_left": "l_offnanid", "segment_id_right": "r_offnanid", "addrange_l_rfsysind": "l_rfsysind",
"addrange_r_rfsysind": "r_rfsysind"}
).copy(deep=True)
addrange.reset_index(drop=True, inplace=True)
# Store dataset.
nrn["addrange"] = | pd.DataFrame(addrange) | pandas.DataFrame |
# # Planning
# ## Challenge
# This is an open-ended challenge to find something interesting and useful (with a business case!) from a dataset of New York City's restaurant health inspections. The inspections are performed by the Department of Health and Mental Hygiene (DOHMH). Some suggestions include identifying trends or actionable insights, or providing recommendations. The audience could be restaurant customers, inspectors, or restauranteurs.
# I came up with some questions I was interested in answering:
# 1. What factors contribute to inspection failures?
# 2. Is there any evidence of geographic bias in inspections?
# 3. Is there any evidence of cuisine bias in inspections?
# 4. Is there any evidence of inspection timing affecting results?
# ## Approach
# I cleaned, plotted, and examined the data. Documentation describing the inspection process suggested two possible outcome variables to look into: 1) initial inspection failure and 2) closure after reinspection. I wanted to investigate both, but started with initial inspection failure.
# I investigated both logistic regression and random forest classification models. I chose to focus on the logistic regression results because I wanted to be able to interpret the coefficients and odds ratios. I tuned hyperparameters and evaluated the model using AUC ROC, because it is a good overall summary of model performance, considering all cells of the confusion matrix. A logistic regression model with L2 (ridge) regression and a penalty of 0.1 classifies initial inspection failures with an AUC of 0.932.
# ## Results
# ### 1. What factors contribute to inspection failures?
# Looking at the odds ratios for each of the features in the logistic regression model, here are some of the most important factors affecting initial inspection failure.
# - Features associated with lower odds of passing initial inspection:
# - Violation codes related to the presence of mice, rats, cockroaches, or flies
# - Violation codes related to lack of washing facilities, lack of food safety plan, improper food storage temperature, and lack of a required certificate
# - The borough Queens
# - Many kinds of cuisine, including Bangladeshi, Indian, Moroccan, Asian, Malaysian, Spanish, African, Turkish, Latin, Chinese, Mediterranean, Hawaiian, Egyptian, Thai, etc.
# - The number of violations cited
# - Features associated with higher odds of passing initial inspection:
# - Violation codes with lower stakes issues, such as violation of a recently-introduced ban on styrofoam, improper lighting or ventilation, or reuse of single use items
# - The borough Staten Island
# - Many kinds of cuisine including ice cream, hot dogs, donuts, soups/sandwiches, hamburgers, Continental, cafe/coffee/tea shops, juices/smoothies, Ethiopian, steak, sandwiches, bakeries, bagel/pretzel shops, etc. Many of these seem to be shops that would have less food prep and smaller facilities to maintain, so they make sense.
# - Increasing day of the week
# ### 2. Is there any evidence of geographic bias in inspections?
# Yes, there is some evidence for Queens establishments having lower odds of passing the initial inspection and for Staten Island establishments having higher odds of passing. It's difficult to answer this question without a more sophisticated version of logistic regression to use.
# ### 3. Is there any evidence of cuisine bias in inspections?
# Yes, the cuisine types with the lowest odds of passing the initial inspection include many of the "ethnic" cuisines. Other information is needed to determine if this is a cause or an effect.
# ### 4. Is there any evidence of inspection timing affecting results?
# There might be a slight increase in odds of passing the initial inspection for inspections happening later in the week, but it was slight and of unknown significance. There is no evidence of any effect of the time of year (month) on the odds of passing inspection.
# ## Takeaways
# - Restauranteurs in Queens or those running establishments serving at-risk cuisines (e.g. Bangladeshi, Indian, Moroccan, Malaysian, etc.) should be extra vigilant before inspections.
# - Restauranteurs should pay special attention to the violations most associated with lower odds of passing the inspection, such as presence of vermin, lack of washing facilities, improper food storage temperature, and lack of required certficiations or food safety plans.
# - NYC food inspectors should carefully examine their inspection process to see if it is being affected by bias against certain cuisines.
# - Aspiring restauranteurs could open an ice cream, hot dog, donut, soup & sandwich, or coffee & tea shop to start out with lower odds of failing the initial food saftey inspection.
# +
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import pickle
import seaborn as sns
from datetime import datetime
from IPython.display import display
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve
from sklearn.model_selection import cross_validate, GridSearchCV, train_test_split, StratifiedKFold
from sklearn.preprocessing import MultiLabelBinarizer, OneHotEncoder
from treeinterpreter import treeinterpreter as ti
sns.set(style="whitegrid", font_scale=1.25)
plt.figure(figsize=(12.8, 9.6), dpi=400)
# -
# +
data_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/data'
output_dir = '~/devel/insight-data-challenges/05-nyc-restaurant-inspections/output'
# -
# ## Read in and clean the user data
# +
inspections = pd.read_csv(
os.path.join(os.path.expanduser(data_dir), 'DOHMH_New_York_City_Restaurant_Inspection_Results.csv'),
parse_dates=['INSPECTION DATE', 'GRADE DATE', 'RECORD DATE']
)
display(inspections.info())
display(inspections.head(15))
# -
# ### Fix data types
# Find the categorical variables
# +
# Are there any that look categorical based on number of unique values?
values_per_variable = inspections.apply('nunique', 0)
variable_dtypes = inspections.dtypes.apply(lambda x: x.name)
variable_info = pd.DataFrame({'n_categories': values_per_variable,
'dtype': variable_dtypes,
'variable': values_per_variable.index}).reset_index(drop=True)
display(variable_info)
# Convert columns to categorical
cat_threshold = 110 # If n unique values is below this, it's probably categorical
known_cat_cols = [
'ACTION', 'BORO', 'GRADE', 'INSPECTION TYPE', 'CRITICAL FLAG', 'CUISINE DESCRIPTION',
'VIOLATION CODE', 'VIOLATION DESCRIPTION', 'Community Board', 'Council District'
]
variable_info['to_category'] = (variable_info['n_categories'] < cat_threshold)\
& (~variable_info['dtype'].isin(('datetime64[ns]', )))
display(variable_info)
# Are there any known categorical variables missing? Or vice versa?
set(variable_info['variable'].loc[variable_info['to_category']].to_list()) - set(known_cat_cols)
set(known_cat_cols) - set(variable_info['variable'].loc[variable_info['to_category']].to_list())
for v in variable_info['variable'].loc[variable_info['to_category']]:
inspections[v] = inspections[v].astype('category')
display(inspections.info())
variable_info['dtype'] = inspections.dtypes.apply(lambda x: x.name).to_numpy()
# -
# ### Convert zipcode to an int
# +
display(inspections['ZIPCODE'].describe())
display(inspections['ZIPCODE'].isna().sum()) # 5500 NaN values, which is why it's not an int. Leave it for now.
# -
# ### Fix missing value codes
# +
inspections['BORO'] = inspections['BORO'].replace('0', np.NaN)
for v in inspections.select_dtypes(include='category').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
new_establishment_inspection_date = datetime(1900, 1, 1)
inspections['INSPECTION DATE'] = inspections['INSPECTION DATE'].replace(new_establishment_inspection_date, pd.NaT)
for v in inspections.select_dtypes(include='datetime').columns:
print('_' * 20)
print(v)
display(inspections[v].value_counts(dropna=False))
display(inspections.select_dtypes(include='number').describe())
variable_info['n_missing'] = inspections.apply(lambda x: x.isna().sum()).to_numpy()
# -
# ### Make a map from violation code to violation description
# +
# Check if there's more than one description per violation code, to see if it will work to select the first one
display(
inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('nunique')['VIOLATION DESCRIPTION'].value_counts()
)
# -
# There are 15 violation codes without any matching description.
# +
inspections['VIOLATION CODE'].nunique()
violation_descriptions = inspections[['VIOLATION CODE', 'VIOLATION DESCRIPTION']].groupby(
'VIOLATION CODE').aggregate('first')
with pd.option_context('display.max_rows', 200):
display(violation_descriptions)
# -
# ## Add some derived variables
# ### Use documentation instructions to label gradeable/ungradeable inspections
# +
gradeable_inspection_types = (
'Cycle Inspection / Initial Inspection',
'Cycle Inspection / Re-Inspection',
'Pre-Permit (Operational) / Initial Inspection',
'Pre-Permit (Operational)/Re-Inspection',
)
gradeable_actions = (
'Violations were cited in the following area(s).',
'No violations were recorded at the time of this inspection.',
'Establishment Closed by DOHMH.',
)
gradeable_inspection_date_min = datetime(2010, 7, 27)
inspections['INSPECTION TYPE'].isin(gradeable_inspection_types).sum()
inspections['ACTION'].isin(gradeable_actions).sum()
np.sum(inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
inspections['is_gradeable'] = ((inspections['INSPECTION TYPE'].isin(gradeable_inspection_types))
& (inspections['ACTION'].isin(gradeable_actions))
& (inspections['INSPECTION DATE'] >= gradeable_inspection_date_min)
)
display(inspections['is_gradeable'].value_counts(dropna=False))
# -
# ### Add variables for what kind of inspection it was
# +
inspections['INSPECTION TYPE'].value_counts()
inspections['is_cycle_inspection'] = inspections['INSPECTION TYPE'].str.contains('Cycle')
inspections['is_opening_inspection'] = inspections['INSPECTION TYPE'].str.contains(
'Pre-permit (Operational)', regex=False)
inspections['is_initial_inspection'] = inspections['INSPECTION TYPE'].str.contains('Initial')
inspections['is_reinspection'] = inspections['INSPECTION TYPE'].str.contains('Re-inspection')
inspections['is_compliance_inspection'] = inspections['INSPECTION TYPE'].str.contains('Compliance')
# -
# ### Add variables for date components
# +
inspections['inspection_year'] = inspections['INSPECTION DATE'].dt.year.astype('category')
inspections['inspection_month'] = inspections['INSPECTION DATE'].dt.month.astype('category')
inspections['inspection_day'] = inspections['INSPECTION DATE'].dt.day
inspections['inspection_dayofyear'] = inspections['INSPECTION DATE'].dt.dayofyear
inspections['inspection_dayofweek'] = inspections['INSPECTION DATE'].dt.dayofweek.astype('category')
inspections['inspection_isweekday'] = inspections['inspection_dayofweek'].isin(range(5))
inspections['inspection_week'] = inspections['INSPECTION DATE'].dt.week.astype('category')
display(inspections.info())
# -
# ## Plot everything
# +
# Try the Pandas built in histogram function, even though it's mediocre
inspections.select_dtypes(exclude='bool').hist(figsize=(20, 15))
plt.show()
# And it fails on boolean columns!
# -
# ### Histograms of the numeric variables
# +
g = sns.FacetGrid(
inspections.select_dtypes(include='number').melt(), col='variable', col_wrap=4,
sharex=False, sharey=False, height=4
)
g.map(plt.hist, 'value', color='steelblue', bins=20)
plt.show()
# -
# ### Barplots of the categorical & boolean variables
# Individual plots for variables with too many categories
# +
cat_col_n_values = inspections.select_dtypes(include='category').apply('nunique', 0)
many_values_cat_vars = cat_col_n_values.loc[cat_col_n_values > 20].index
other_cat_vars = cat_col_n_values.loc[cat_col_n_values <= 20].index
# for v in many_values_cat_vars:
# g = sns.countplot(data=inspections, x=v)
# g.set_xticklabels(g.get_xticklabels(), rotation=60, horizontalalignment='right')
# plt.tight_layout()
# plt.show()
# The best is really just a sorted table of value counts.
for v in many_values_cat_vars:
print('_' * 20)
print(v)
with | pd.option_context('display.max_rows', cat_threshold) | pandas.option_context |
import math
import os
import shutil
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from estimator.delay_estimator import estimate_delays
from generator.generator import generator
from simulator.shard_allocator import calculate_manhattan_vector_module
from simulator.shard_allocator import diff_list
from simulator.shard_allocator import shard_allocator
from simulator.simulator import simulator
CLOUD_LOAD_LEVEL = "cloud_load_lvl"
LOAD_VARIATION_RATIO = "load_variation_ratio"
SHARDS_PER_NODE_RATIO = "shards_per_node_ratio"
class ExperimentExecutor:
def __init__(self):
self.num_of_shards = 0
self.num_of_samples = 100
self.period = 5.0
self.shape = 2.0
self.scale = 0
self.parallel_requests = 5
self.num_of_nodes = 0
self.experiments = []
self.algorithms = []
self.load_vectors = []
self.shard_on_nodes = pd.DataFrame(columns=["shard", "node"])
self.requests_completed = pd.DataFrame()
self.current_algorithm = "random"
self.delays_df = | pd.DataFrame(columns=['algorithm', 'nodes', 'sum_of_delay', 'delay_percentage']) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 25 12:09:05 2018
原始fitness:0.26 ;;四工况:1.012
@author: cwktu
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import subprocess
from math import sqrt
import os
##%%求曲线斜率最大点(即着火燃烧点)
def combustion_time(data,m):
slope_max = 0
time = 0
if m==1:
for i in range(len(data)-1):
timex1 = data['Time_Soln#1_(sec)'][i]
timex2 = data['Time_Soln#1_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#1_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#1_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==2:
for i in range(len(data)-1):
timex1 = data['Time_Soln#2_(sec)'][i]
timex2 = data['Time_Soln#2_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#2_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#2_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==3:
for i in range(len(data)-1):
timex1 = data['Time_Soln#3_(sec)'][i]
timex2 = data['Time_Soln#3_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#3_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#3_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==4:
for i in range(len(data)-1):
timex1 = data['Time_Soln#4_(sec)'][i]
timex2 = data['Time_Soln#4_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#4_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#4_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==5:
for i in range(len(data)-1):
timex1 = data['Time_Soln#5_(sec)'][i]
timex2 = data['Time_Soln#5_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#5_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#5_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==6:
for i in range(len(data)-1):
timex1 = data['Time_Soln#6_(sec)'][i]
timex2 = data['Time_Soln#6_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#6_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#6_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==7:
for i in range(len(data)-1):
timex1 = data['Time_Soln#7_(sec)'][i]
timex2 = data['Time_Soln#7_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#7_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#7_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==8:
for i in range(len(data)-1):
timex1 = data['Time_Soln#8_(sec)'][i]
timex2 = data['Time_Soln#8_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#8_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#8_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==9:
for i in range(len(data)-1):
timex1 = data['Time_Soln#9_(sec)'][i]
timex2 = data['Time_Soln#9_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#9_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#9_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==10:
for i in range(len(data)-1):
timex1 = data['Time_Soln#10_(sec)'][i]
timex2 = data['Time_Soln#10_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#10_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#10_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==11:
for i in range(len(data)-1):
timex1 = data['Time_Soln#11_(sec)'][i]
timex2 = data['Time_Soln#11_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#11_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#11_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==12:
for i in range(len(data)-1):
timex1 = data['Time_Soln#12_(sec)'][i]
timex2 = data['Time_Soln#12_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#12_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#12_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
elif m==13:
for i in range(len(data)-1):
timex1 = data['Time_Soln#13_(sec)'][i]
timex2 = data['Time_Soln#13_(sec)'][i+1]
tempy1 = data[' Mole_fraction_OH_Soln#13_()'][i]
tempy2 = data[' Mole_fraction_OH_Soln#13_()'][i+1]
k = (tempy2-tempy1)/(timex2-timex1)
if k>slope_max:
slope_max = k
time= i
return slope_max,time
def error(a,b):
error=[]
for i in range(len(a)):
error.append(abs((a[i]-b[i])/b[i]))
me = sum(error)/len(error)
return me
def Mean_squared_error(a,b):
error=[]
for i in range(len(a)):
error.append((a[i]-b[i])*(a[i]-b[i]))
mse = sum(error)/len(error)
rmse = sqrt(mse)
return rmse
def mechanism_computation(path):
data1 = pd.read_csv(path+"/CKSoln_solution_no_1.csv")
data2 = pd.read_csv(path+"/CKSoln_solution_no_2.csv")
data3 = pd.read_csv(path+"/CKSoln_solution_no_3.csv")
data4 = pd.read_csv(path+"/CKSoln_solution_no_4.csv")
data5 = pd.read_csv(path+"/CKSoln_solution_no_5.csv")
data6 = pd.read_csv(path+"/CKSoln_solution_no_6.csv")
data7 = pd.read_csv(path+"/CKSoln_solution_no_7.csv")
data8 = pd.read_csv(path+"/CKSoln_solution_no_8.csv")
data9 = pd.read_csv(path+"/CKSoln_solution_no_9.csv")
data10 = | pd.read_csv(path+"/CKSoln_solution_no_10.csv") | pandas.read_csv |
import pymongo
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
import os
import sys
from datetime import date
client = pymongo.MongoClient('mongodb://localhost:27017')
db = client["simplii"]
p_details = db["tasks"] # profile details
records = p_details.find()
list_record = list(records)
df = pd.DataFrame(list_record)
start = df['startdate'].tolist()
due = df['duedate'].tolist()
email = df['email'].tolist()
format = '%Y-%m-%d'
st, dt = [], []
for i in start:
st.append(datetime.datetime.strptime(i, format).date())
for i in due:
dt.append(datetime.datetime.strptime(i, format).date())
in_prog = df.loc[df['status'] == 'In Progress']
in_prog = in_prog.drop(['_id', 'status'], axis=1)
blocked = df.loc[df['status'] == 'Blocked']
blocked = blocked.drop(['_id', 'status'], axis=1)
sip, dip, sb, db = [], [], [], []
start1 = in_prog['startdate'].tolist()
due1 = in_prog['duedate'].tolist()
start2 = blocked['startdate'].tolist()
due2 = blocked['duedate'].tolist()
for i in start1:
sip.append(datetime.datetime.strptime(i, format).date())
for i in start2:
sb.append(datetime.datetime.strptime(i, format).date())
for i in due1:
dip.append(datetime.datetime.strptime(i, format).date())
for i in due2:
db.append(datetime.datetime.strptime(i, format).date())
in_prog['startdate'] = pd.to_datetime(in_prog['startdate'])
in_prog['duedate'] = pd.to_datetime(in_prog['duedate'])
blocked['startdate'] = pd.to_datetime(blocked['startdate'])
blocked['duedate'] = pd.to_datetime(blocked['duedate'])
df1 = in_prog.loc[(in_prog['startdate'] <= | pd.to_datetime('today') | pandas.to_datetime |
from Modules.appLogger import application_logger
from Modules.DataLoader import predictionDataLoader
from Modules.SaveLoadModel import saveLoadModel
from Modules.DataPreprocessor import dataPreprocessor
import pandas as pd
class predictData:
"""
Class Name: predictData
Description: Predicts the rating of a restaurant based on the inputs.
Input: None
Output: CSV file containing the ratings of the restaurants given in the input file.
On Failure: Raise Exception
Written By: <NAME>
Version: 1.0
Revisions: None
"""
def __init__(self):
try:
self.prediction_logs = pd.read_csv('Logs\\Prediction Logs\\prediction_logs.csv')
self.prediction_logs.drop('Unnamed :0', axis = 1, inplace= True)
except:
self.prediction_logs = pd.DataFrame(columns=['date','time','logs'])
self.loggerObj = application_logger.logger()
self.data_loaderObj = predictionDataLoader.predictionDataLoader(logger_obj= self.loggerObj, log_file = self.prediction_logs)
self.load_modelObj = saveLoadModel.saveLoadModel(loggerObj= self.loggerObj, log_file = self.prediction_logs)
self.preprocessObj = dataPreprocessor.processData(logger_object= self.loggerObj, log_file = self.prediction_logs)
def predict_data(self, filename):
"""
Class Name: predict_data
Description: Predicts the rating of a restaurant based on the inputs.
Input: None
Output: CSV file containing the ratings of the restaurants given in the input file.
On Failure: Raise Exception
Written By: <NAME>
Version: 1.0
Revisions: None
"""
try:
self.prediction_logs = self.loggerObj.write_log(self.prediction_logs, "Prediction of data has started")
self.prediction_logs = self.loggerObj.write_log(self.prediction_logs,"Entered predict_data of predictData class")
prediction_data = self.data_loaderObj.load_prediction_data(filename)
#preprocess the data before loading the model
preprocessed_prediction_data = self.preprocessObj.preprocess_prediction_data(prediction_data)
sku_ids = preprocessed_prediction_data['sku']
preprocessed_prediction_data.drop('sku', axis=1, inplace= True)
#loading the model.
model = self.load_modelObj.load_model()
#predciting using the loaded model.
predictions = model.predict(preprocessed_prediction_data)
predictions_dataframe = pd.DataFrame(predictions,columns= ['went_on_backorder'])
sku_ids_dataframe = pd.DataFrame(sku_ids)
# concatenating ratings and dataframes.
sku_ids_dataframe.reset_index(inplace=True)
predictions_dataframe.reset_index(inplace=True)
#concatenating ratings and dataframes.
predictions_csv = | pd.concat([sku_ids_dataframe['sku'],predictions_dataframe['went_on_backorder']], axis=1) | pandas.concat |
"""
SIR 3S Logfile Utilities (short: Lx)
"""
__version__='192.168.3.11.dev1'
import os
import sys
import logging
logger = logging.getLogger(__name__)
import argparse
import unittest
import doctest
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
from nbconvert.preprocessors.execute import CellExecutionError
import timeit
import xml.etree.ElementTree as ET
import re
import struct
import collections
import zipfile
import py7zr
import pandas as pd
import h5py
import subprocess
import csv
import glob
import warnings
#warnings.simplefilter(action='ignore', category=PerformanceWarning)
# pd.set_option("max_rows", None)
# pd.set_option("max_columns", None)
# pd.reset_option('max_rows')
# ...
class LxError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def fTCCast(x):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
v=x
try:
if x in ['true','True']:
v=1
elif x in ['false','False','']:
v=0
else:
try:
v = float(x)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float schlaegt fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
try:
v = pd.to_numeric(x,errors='raise',downcast='float')
#logStrTmp="{:s}{!s:s}: Konvertierung mit pd.to_numeric liefert: {!s:s}".format(logStr,x,v)
#logger.debug(logStrTmp)
except Exception as e:
#logStrTmp="{:s}{!s:s}: Konvertierung zu float mit pd.to_numeric schlaegt auch fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
#logger.debug(logStrTmp)
#x='2021-04-20 10:56:12.000'
#t = pd.Timestamp(x)
#t # Timestamp('2021-04-20 10:56:12')
#i=int(t.to_datetime64())/1000000000
#i # 1618916172.0
#pd.to_datetime(i,unit='s',errors='coerce'): Timestamp('2021-04-20 10:56:12')
try:
t = pd.Timestamp(x)
i=int(t.to_datetime64())/1000000000
v=pd.to_numeric(i,errors='raise',downcast='float')
except Exception as e:
logStrTmp="{:s}{!s:s}: Konvertierung zu float (mit pd.to_numeric) schlaegt (auch nach Annahme vaulue=Zeitstring) fehl! - Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,x,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.debug(logStrTmp)
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return v
def getTCsOPCDerivative(TCsOPC,col,shiftSize,windowSize,fct=None):
"""
returns a df
index: ProcessTime
cols:
col
dt
dValue
dValueDt
dValueDtRollingMean
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
mDf=pd.DataFrame()
try:
s=TCsOPC[col].dropna()
mDf=pd.DataFrame(s)
dt=mDf.index.to_series().diff(periods=shiftSize)
mDf['dt']=dt
mDf['dValue']=mDf[col].diff(periods=shiftSize)
mDf=mDf.iloc[shiftSize:]
mDf['dValueDt']=mDf.apply(lambda row: row['dValue']/row['dt'].total_seconds(),axis=1)
if fct != None:
mDf['dValueDt']=mDf['dValueDt'].apply(fct)
mDf['dValueDtRollingMean']=mDf['dValueDt'].rolling(window=windowSize).mean()
mDf=mDf.iloc[windowSize-1:]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return mDf
logFilenamePattern='([0-9]+)(_)+([0-9]+)(\.log)' # group(3) ist Postfix und Nr.
logFilenameHeadPattern='([0-9,_]+)(\.log)' # group(1) ist Head und H5-Key
# nicht alle IDs werden von RE pID erfasst
# diese werden mit pID2, getDfFromODIHelper und in getDfFromODI "nachbehandelt"
pID=re.compile('(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)\.(?P<B>[a-z,A-Z,0-9,_]+)\.(?P<C1>[a-z,A-Z,0-9]+)_(?P<C2>[a-z,A-Z,0-9]+)_(?P<C3>[a-z,A-Z,0-9]+)_(?P<C4>[a-z,A-Z,0-9]+)_(?P<C5>[a-z,A-Z,0-9]+)(?P<C6>_[a-z,A-Z,0-9]+)?(?P<C7>_[a-z,A-Z,0-9]+)?\.(?P<D>[a-z,A-Z,0-9,_]+)\.(?P<E>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?')
pID2='(?P<Prae>IMDI\.)?(?P<A>[a-z,A-Z,0-9,_]+)(?P<Post>\.[a-z,A-Z,0-9,_]+)?'
def getDfFromODIHelper(row,col,colCheck,pID2=pID2):
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
try:
if not pd.isnull(row[colCheck]):
res= row[col]
resStr='ColCheckOk'
elif pd.isnull(row[col]):
res=re.search(pID2,row['ID']).group(col)
if res != None:
resStr='ColNowOk'
else:
resStr='ColStillNotOk'
else:
res = row[col]
resStr='ColWasOk'
except:
res = row[col]
resStr='ERROR'
finally:
if resStr not in ['ColCheckOk','ColNowOk']:
logger.debug("{:s}col: {:s} resStr: {:s} row['ID']: {:s} res: {:s}".format(logStr,col, resStr,row['ID'],str(res)))
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return res
def getDfFromODI(ODIFile,pID=pID):
"""
returns a defined df from ODIFile
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfID=None
try:
df=pd.read_csv(ODIFile,delimiter=';')
s = pd.Series(df['ID'].unique())
dfID=s.str.extract(pID.pattern,expand=True)
dfID['ID']=s
dfC=dfID['C1']+'_'+dfID['C2']+'_'+dfID['C3']+'_'+dfID['C4']+'_'+dfID['C5']+'_'+dfID['C6']#+'_'+dfID['C7']
dfID.loc[:,'C']=dfC.values
dfID['C']=dfID.apply(lambda row: row['C']+'_'+row['C7'] if not pd.isnull(row['C7']) else row['C'],axis=1)
dfID=dfID[['ID','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
for col in ['Prae','Post','A']:
dfID[col]=dfID.apply(lambda row: getDfFromODIHelper(row,col,'A'),axis=1)
dfID.sort_values(by=['ID'], axis=0,ignore_index=True,inplace=True)
dfID.set_index('ID',verify_integrity=True,inplace=True)
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','Post']='.EIN'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','A']='Objects'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','B']='3S_XYZ_PUMPE'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','C']='3S_XYZ_GSI_01'
dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN','D']='Out'
#dfID.loc['Objects.3S_XYZ_PUMPE.3S_XYZ_GSI_01.Out.EIN',:]
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','Post']='.SOLLW'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','A']='Objects'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','B']='3S_XYZ_RSCHIEBER'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','C']='3S_XYZ_PCV_01'
dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW','D']='Out'
#dfID.loc['Objects.3S_XYZ_RSCHIEBER.3S_XYZ_PCV_01.Out.SOLLW',:]
dfID['yUnit']=dfID.apply(lambda row: getDfFromODIHelperyUnit(row),axis=1)
dfID['yDesc']=dfID.apply(lambda row: getDfFromODIHelperyDesc(row),axis=1)
dfID=dfID[['yUnit','yDesc','Prae','A','B','C','C1','C2','C3','C4','C5','C6','C7','D','E','Post']]
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfID
def addInitvalueToDfFromODI(INITFile,dfID):
"""
returns dfID extended with new Cols Initvalue and NumOfInits
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDext=dfID
try:
df=pd.read_csv(INITFile,delimiter=';',header=None,names=['ID','Value'])#,index_col=0)
dfGrped=df.groupby(by=['ID'])['Value'].agg(['count','min','max','mean','last'])
dfIDext=dfID.merge(dfGrped,left_index=True,right_index=True,how='left').filter(items=dfID.columns.to_list()+['last','count']).rename(columns={'last':'Initvalue','count':'NumOfInits'})
except Exception as e:
logStrFinal="{:s}Exception: Line: {:d}: {!s:s}: {:s}".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))
logger.error(logStrFinal)
raise LxError(logStrFinal)
finally:
logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return dfIDext
def fODIMatch(dfODI,TYPE=None,OBJTYPE=None,NAME1=None,NAME2=None):
df=dfODI
if TYPE != None:
df=df[df['TYPE']==TYPE]
if OBJTYPE != None:
df=df[df['OBJTYPE']==OBJTYPE]
if NAME1 != None:
df=df[df['NAME1']==NAME1]
if NAME2 != None:
df=df[df['NAME2']==NAME2]
return df
def fODIFindAllSchieberSteuerungsIDs(dfODI,NAME1=None,NAME2=None): # dfODI: pd.read_csv(ODI,delimiter=';')
df=fODIMatch(dfODI,TYPE='OL_2',OBJTYPE='VENT',NAME1=NAME1,NAME2=NAME2)
return sorted(list(df['ID'].unique())+[ID for ID in df['REF_ID'].unique() if not pd.isnull(ID)])
def fODIFindAllZeilenWithIDs(dfODI,IDs):
return dfODI[dfODI['ID'].isin(IDs) | dfODI['REF_ID'].isin(IDs)]
def getDfFromODIHelperyUnit(row):
"""
returns Unit
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
unit=None
try:
if row['E'] in ['AL_S','SB_S']:
unit='[-]'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
unit='[Nm³/h]'
elif row['E'] in ['AC_AV','LR_AV']:
unit='[mm/s²]'
else:
unit='TBD in Lx'
except:
unit='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return unit
def getDfFromODIHelperyDesc(row):
"""
returns Desc
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
desc=None
try:
if row['E'] in ['AL_S','SB_S']:
desc='Status'
elif row['E'] in ['LR_AV','LP_AV','QD_AV','SD_AV','AM_AV','FZ_AV','MZ_AV','NG_AV']:
desc='Fluss'
elif row['E'] in ['AC_AV','LR_AV']:
desc='Beschleunigung'
else:
desc='TBD in Lx'
except:
desc='ERROR'
finally:
#logger.debug("{0:s}{1:s}".format(logStr,'_Done.'))
return desc
def getDfIDUniqueCols(dfID):
"""
returns df with uniques
"""
logStr = "{0:s}.{1:s}: ".format(__name__, sys._getframe().f_code.co_name)
#logger.debug("{0:s}{1:s}".format(logStr,'Start.'))
dfIDUniqueCols=pd.DataFrame()
try:
# Spalte mit der groessten Anzahl von Auspraegungen feststellen
lenMax=0
colMax=''
# ueber alle Spalten
for idx,col in enumerate(dfID):
s=pd.Series(dfID[col].unique())
if len(s) > lenMax:
lenMax=len(s)
colMax=col
s=pd.Series(dfID[colMax].unique(),name=colMax)
s.sort_values(inplace=True)
s=pd.Series(s.values,name=colMax)
dfIDUniqueCols=pd.DataFrame(s)
# ueber alle weiteren Spalten
for idx,col in enumerate([col for col in dfID.columns if col != colMax]):
# s unique erzeugen
s=pd.Series(dfID[col].unique(),name=col)
# s sortieren
s.sort_values(inplace=True)
s= | pd.Series(s.values,name=col) | pandas.Series |
import os
import numpy as np
import pandas as pd
import torch
from torch.utils.data import Dataset, DataLoader
# from sklearn.preprocessing import StandardScaler
from utils.tools import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
class Dataset_ETT_hour(Dataset):
def __init__(self, root_path, flag='train', size=None,
features='S', data_path='ETTh1.csv',
target='OT', scale=True, inverse=False, timeenc=0, freq='h', cols=None):
# size [seq_len, label_len, pred_len]
# info
if size == None:
self.seq_len = 24*4*4
self.label_len = 24*4
self.pred_len = 24*4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ['train', 'test', 'val']
type_map = {'train':0, 'val':1, 'test':2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path,
self.data_path))
border1s = [0, 12*30*24 - self.seq_len, 12*30*24+4*30*24 - self.seq_len]
border2s = [12*30*24, 12*30*24+4*30*24, 12*30*24+8*30*24]
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features=='M' or self.features=='MS':
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features=='S':
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0]:border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[['date']][border1:border2]
df_stamp['date'] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
df_stamp.rename(columns={'date': 'ds'}, inplace=1)
df_stamp = df_stamp.reset_index(drop=True)
cols = [self.target] if self.features == 'S' else cols_data
self.df_scaled = pd.concat([df_stamp, | pd.DataFrame(self.data_x, columns=cols) | pandas.DataFrame |
from __future__ import division
import socket # for socket
import sys
import time
import re
import os
from base64 import b64decode
from typing import Sized
import pandas as pd
import random
import string
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
cache_type= ''
#Function to setup connection to cache server
def setup_connection(ip, port) :
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print ("Socket successfully created")
except socket.error as err:
print ("socket creation failed with error %s" %(err))
try:
host_ip = socket.gethostbyname(ip)
except socket.gaierror:
print ("there was an error resolving the host")
sys.exit()
s.connect((host_ip, port))
print ("the socket has successfully connected to server")
return s
#Function to calculate statistics
def set_up_experiment(ip, port, loss, delay):
data = pd.read_csv("source.csv")
source_data = data["website"].tolist()
iteration_num = 100 # number of iterations for each probe
experiment_num = 50
miss =[]
hit =[]
#initiate empty array to store delay or throughput
average_rtt=[]
average_size=[]
average_th=[]
for _ in range(experiment_num):
rtt = []
res_size = []
hitCount = 0
missCount = 0
for i in range(iteration_num):
req = random.choice(source_data)
print("experiment "+str(_)+" iteration "+str(i))
print(req)
r,s,t = send_request_to_cache(ip, port, req)
print("size", s)
r_number = np.random.randint(10)
r_number = r_number/10
print(r_number, loss)
if r_number < loss :
t= 2 # default loss
s = 1 # default size
missCount = missCount +1
else:
if r == 'HIT':
hitCount = hitCount+1
print(r, hitCount)
elif r == 'MISS':
missCount = missCount +1
t = t+ delay
rtt.append(t)
res_size.append(s)
average_rtt.append(sum(rtt)/iteration_num)
average_size.append(sum(res_size)/iteration_num)
average_th.append(sum(res_size)/sum(rtt))
print("MISS ", missCount/iteration_num)
miss.append(missCount/iteration_num)
hit.append(hitCount/iteration_num)
print("Miss avg ",sum(miss)/experiment_num)
print("Hit avg ",sum(hit)/experiment_num)
rtt_df = | pd.DataFrame() | pandas.DataFrame |
# Copyright (c) 2020 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Builds a table with SMILES and yield information."""
import collections
import dataclasses
import pandas as pd
from rdkit import Chem
from rdkit.Chem import AllChem
def location_to_row_col(location, block, plate):
"""Converts a block location to (row, col) on the plate.
Args:
location: Text location in the block, e.g. "1:A".
block: Integer block number.
plate: Integer plate number.
Returns:
Tuple of (row, col) integers; the location on the plate.
"""
if plate == 3:
row_letter, col = location.split(':')
else:
col, row_letter = location.split(':')
col = int(col)
row = ord(row_letter) - 64
if block == 2:
col += 24
elif block == 3:
row += 16
elif block == 4:
row += 16
col += 24
return row, col
def read_yield_data():
"""Reads location/yield data from the yield_data/ directory.
Returns:
DataFrame with the following columns:
* plate
* row
* col
* yield
"""
data = []
for plate in [1, 2, 3]:
for block in [1, 2, 3, 4]:
filename = f'yield_data/plate{plate}.{block}.csv'
print(filename)
df = pd.read_csv(filename)
mask = df.Location.isna()
if mask.any():
print(df[mask])
df = df[~mask]
locations = df.apply(
lambda x: location_to_row_col(x.Location, block, plate), axis=1,
result_type='expand')
locations.rename(columns={0: 'row', 1: 'col'}, inplace=True)
df = pd.concat([df, locations], axis=1)
df['plate'] = plate
df = df[['plate', 'row', 'col', 'product_scaled']]
df.rename(columns={'product_scaled': 'yield'}, inplace=True)
data.append(df)
return pd.concat(data, ignore_index=True)
def read_compound_data():
"""Reads location/compound data from the layout/ and smiles/ directories.
Returns:
DataFrame with the following columns:
* plate
* row
* col
* additive
* additive_number
* additive_smiles
* aryl_halide
* aryl_halide_number
* aryl_halide_smiles
* base
* base_cas_number
* base_smiles
* ligand
* ligand_cas_number
* ligand_smiles
* product_smiles
"""
rows = pd.read_csv('layout/Table_S1.csv')
cols = | pd.read_csv('layout/Table_S2.csv') | pandas.read_csv |
#!/usr/bin/env python
# coding: utf-8
"""
This module preprocess the grades files
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
def read_grades(dir1='../../data/intermediate_grades.xlsx',
dir2='../../data/final_grades.xlsx'):
"""
Read grades.xlsx files
Parameters
----------
dir1: intermidiate grades path
dir2: final grades path
"""
# Error meassages
if not isinstance(dir1, str) is True:
raise ValueError("'grades' should be should be a string (directory).")
if not isinstance(dir2, str) is True:
raise ValueError("'grades' should be should be a string (directory).")
# Get intermediate grades
mid_grades = pd.read_excel(dir1)
# Get final grades
xl_file = pd.ExcelFile(dir2)
sheet_name = xl_file.sheet_names
final_1st = | pd.read_excel(dir2, sheet_name=sheet_name[0]) | pandas.read_excel |
import sys
import os
sys.path.append(os.path.abspath("."))
sys.dont_write_bytecode = True
__author__ = "COSAL"
from bson import ObjectId
import pandas as pd
import pymongo
import rpy2
from utils import logger, lib
from store import mongo_driver
from misconceptions.common import helper, differences
FILE_STMT_COLLECTION = "file_stmts"
STMT_COLLECTION = "stmts"
STMT_NORMALIZED_COLLECTION = "normalized_stmts"
INPUTS_COLLECTIONS = "inputs"
DIFFERENCES_COLLECTIONS = "differences"
SELF_SYNTACTIC_DIFFERENCES_COLLECTION = "self_syntactic_differences"
STMT_FILE_COLLECTION = "stmts_file"
PRIMITIVES = {int, float, str, bool, type(None), list, set, dict, tuple, unicode}
DOT_ESCAPE_CHAR = "|@|"
DOLLAR_ESCAPE_CHAR = "|#|"
LOGGER = logger.get_logger(os.path.basename(__file__.split(".")[0]))
def mongo_escape(s):
return s.replace(".", DOT_ESCAPE_CHAR).replace("$", DOLLAR_ESCAPE_CHAR)
def mongo_de_escape(s):
return s.replace(DOT_ESCAPE_CHAR, ".").replace(DOLLAR_ESCAPE_CHAR, "$")
class MongoStore(lib.O):
def __init__(self, dataset, **kwargs):
self.dataset = dataset
lib.O.__init__(self, **kwargs)
# File Statements
def store_file_stmts(self, file_name, snippets, language):
collection = mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "file_name")
collection.insert({
"file_name": file_name,
"snippets": snippets,
"language": language
})
def load_stmts_for_file_name(self, file_name):
try:
return mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION).find_one({"file_name": file_name})
except Exception:
LOGGER.critical("Failed to load file name : %s" % file_name)
return None
def load_file_stmts(self, language=None):
if language:
return mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION).find({"language": language})
else:
return mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION).find()
def delete_file_stmts(self, language=None):
if language:
mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION).delete_many({"language": language})
else:
mongo_driver.get_collection(self.dataset, FILE_STMT_COLLECTION).drop()
# Statements
def store_stmt(self, snippet, language, variables):
collection = mongo_driver.get_collection(self.dataset, STMT_COLLECTION)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "snippet", "language")
collection.insert({
"snippet": snippet,
"language": language,
"variables": variables
})
def update_stmt_outputs(self, stmt_id, outputs):
collection = mongo_driver.get_collection(self.dataset, STMT_COLLECTION)
stmt = collection.find_one({'_id': stmt_id})
stmt['outputs'] = outputs
try:
collection.update_one({'_id': stmt_id}, {"$set": stmt}, upsert=False)
except Exception:
stmt['outputs'] = None
try:
collection.update_one({'_id': stmt_id}, {"$set": stmt}, upsert=False)
except Exception as e:
# import pprint
# pprint.pprint(outputs[outputs.keys()[0]])
raise e
def update_stmt(self, stmt_id, updates):
collection = mongo_driver.get_collection(self.dataset, STMT_COLLECTION)
collection.update_one({"_id": stmt_id}, {"$set": updates})
def load_stmts(self, language=None, is_valid=True, has_output=False, limit=None, use_normalized=False):
collection_name = STMT_NORMALIZED_COLLECTION if use_normalized else STMT_COLLECTION
if language:
stmts = mongo_driver.get_collection(self.dataset, collection_name).find({"language": language})
else:
stmts = mongo_driver.get_collection(self.dataset, collection_name).find()
formatted = {}
for stmt in stmts:
if (not is_valid or (is_valid and stmt.get('variables', None))) \
and (not has_output or (has_output and stmt.get('outputs', None))):
formatted[(stmt['snippet'], stmt['language'])] = stmt
if limit and len(formatted) == limit:
return formatted
return formatted
def load_stmt(self, mongo_id, projections=None, use_normalized=False):
collection_name = STMT_NORMALIZED_COLLECTION if use_normalized else STMT_COLLECTION
if not isinstance(mongo_id, ObjectId):
mongo_id = ObjectId(mongo_id)
return mongo_driver.get_collection(self.dataset, collection_name).find_one({"_id": mongo_id}, projection=projections)
def load_raw_stmts(self, language=None, use_normalized=False):
collection_name = STMT_NORMALIZED_COLLECTION if use_normalized else STMT_COLLECTION
if language:
return mongo_driver.get_collection(self.dataset, collection_name).find({"language": language})
else:
return mongo_driver.get_collection(self.dataset, collection_name).find()
def load_valid_snippets(self, language=None, use_normalized=False):
collection_name = STMT_NORMALIZED_COLLECTION if use_normalized else STMT_COLLECTION
projection = {"outputs": False}
if language:
stmts = mongo_driver.get_collection(self.dataset, collection_name).find({"language": language}, projection)
else:
stmts = mongo_driver.get_collection(self.dataset, collection_name).find({}, projection)
valids = []
for stmt in stmts:
if stmt.get('variables', None):
valids.append(stmt)
return valids
# Normalized Statements
def store_normalized_stmt(self, stmt_dict):
collection = mongo_driver.get_collection(self.dataset, STMT_NORMALIZED_COLLECTION)
if not mongo_driver.is_collection_exists(collection):
mongo_driver.create_unique_index_for_collection(collection, "snippet", "language")
try:
collection.insert(stmt_dict, continue_on_error=True)
except pymongo.errors.DuplicateKeyError as e:
pass
# Inputs
def save_inputs(self, inps):
collection = mongo_driver.get_collection(self.dataset, INPUTS_COLLECTIONS)
for inp in inps:
arg_set = [arg.to_dict(orient='records') for arg in inp]
collection.insert({
"args": arg_set
})
def delete_inputs(self):
collection = mongo_driver.get_collection(self.dataset, INPUTS_COLLECTIONS)
if collection:
collection.drop()
def load_inputs(self, column_names):
collection = mongo_driver.get_collection(self.dataset, INPUTS_COLLECTIONS)
inps = []
for inp in collection.find():
args = []
for arg in inp["args"]:
df = | pd.DataFrame(arg) | pandas.DataFrame |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.10.3
# kernelspec:
# display_name: wikirecs
# language: python
# name: wikirecs
# ---
# # WikiRecs
# A project to recommend the next Wikipedia article you might like to edit
# + init_cell=true
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import logging
import wikipedia
import requests
import os
import wikirecs as wr
import implicit
from scipy.sparse import csr_matrix, csc_matrix, lil_matrix, coo_matrix
from tqdm.auto import tqdm
import umap
import pickle
import collections
import recommenders
import plotly.express as px
from pyarrow import feather
import itertools
from itables import show
import matplotlib
from implicit.nearest_neighbours import (
bm25_weight)
# -
from itables.javascript import load_datatables
load_datatables()
# + init_cell=true
pd.set_option('display.max_rows', 100)
pd.set_option('display.min_rows', 100)
# + init_cell=true
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
# -
# # Assemble the complete histories
import os
all_histories = []
for fname in os.listdir('edit_histories_2021-05-28'):
if 'feather' in fname:
all_histories.append(feather.read_feather('edit_histories_2021-05-28/{}'.format(fname)))
all_histories = pd.concat(all_histories, ignore_index=True)
feather.write_feather(all_histories, "all_histories_2021-05-28.feather")
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
all_histories.columns
len(all_histories.pageid.unique())
# # Load all_histories (raw data), transform and split
# +
# %%time
all_histories = feather.read_feather("all_histories_2021-05-28.feather")
print("Length raw edit history data: {}".format(len(all_histories)))
# +
from pull_edit_histories import get_edit_history
## Add one particular user
cols = ['userid', 'user', 'pageid', 'title',
'timestamp', 'sizediff']
with open("../username.txt", "r") as file:
for username in file:
oneuser = get_edit_history(user=username.strip(),
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
all_histories = pd.concat([all_histories, oneuser], ignore_index=True)
print("Length after adding users: {}".format(len(all_histories)))
# -
# ## EDA on raw histories
# Look at the distribution of edit counts
edit_counts = all_histories.groupby('userid').userid.count().values
plt.figure(figsize=(20,8))
plt.subplot(1,2,1)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,20000,200))
plt.xlabel('Number of edits by user')
plt.subplot(1,2,2)
sns.distplot(edit_counts,kde=False,bins=np.arange(0,200,1))
plt.xlim([0,200])
plt.xlabel('Number of edits by user')
num_counts = len(edit_counts)
print("Median edit counts: %d" % np.median(edit_counts))
thres = 5
over_thres = np.sum(edit_counts > thres)
print("Number over threshold %d: %d (%.f%%)" % (thres, over_thres, 100*over_thres/num_counts))
# Most edits by user
all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False)
# Find the elbow in number of edits
plt.plot(all_histories.groupby(['userid','user']).userid.count().sort_values(ascending=False).values)
# plt.ylim([0,20000])
# +
# What are the most popular pages (edited by the most users)
page_popularity = all_histories.drop_duplicates(subset=['title','user']).groupby('title').count().user.sort_values()
pd.set_option('display.max_rows', 1000)
page_popularity.iloc[-1000:].iloc[::-1]
# -
# ## Clean data
# ### Remove consecutive edits and summarize runs
# +
# %%time
def remove_consecutive_edits(df):
c = dict(zip(df.columns, range(len(df.columns))))
keyfunc = lambda x: (x[c['userid']],x[c['pageid']])
first_and_last = lambda run: [run[0][c['userid']],
run[0][c['user']],
run[0][c['pageid']],
run[0][c['title']],
run[-1][c['timestamp']],
run[0][c['timestamp']],
sum([abs(r[c['sizediff']]) for r in run]),
len(run)]
d = df.values.tolist()
return pd.DataFrame([first_and_last(list(g)) for k,g in itertools.groupby(d, key=keyfunc)],
columns=['userid', 'user', 'pageid', 'title', 'first_timestamp', 'last_timestamp','sum_sizediff','consecutive_edits'])
clean_histories = remove_consecutive_edits(all_histories)
# -
# ### Remove top N most popular pages
# +
# Get the top most popular pages
TOPN = 20
popularpages = all_histories.drop_duplicates(subset=['title','pageid','userid']).groupby(['title','pageid']).count().user.sort_values()[-TOPN:]
before_count = len(all_histories)
# -
popularpages
# Remove those popular pages
popular_pageids = popularpages.index.get_level_values(level='pageid').values
is_popular_page_edit = clean_histories.pageid.isin(popular_pageids)
clean_histories = clean_histories.loc[~is_popular_page_edit].copy()
all_histories = None
after_count = len(clean_histories)
print("%d edits (%.1f%%) were in top %d popular pages. Length after removing: %d" % (np.sum(is_popular_page_edit),
100* np.sum(is_popular_page_edit)/before_count,
TOPN,
after_count)
)
print("Number of unique page ids: {}".format(len(clean_histories.pageid.unique())))
# ### Remove users with too many or too few edits
MIN_EDITS = 5
MAX_EDITS = 10000
# Get user edit counts
all_user_edit_counts = clean_histories.groupby(['userid','user']).userid.count()
# +
# Remove users with too few edits
keep_user = all_user_edit_counts.values >= MIN_EDITS
# Remove users with too many edits
keep_user = keep_user & (all_user_edit_counts.values <= MAX_EDITS)
# Remove users with "bot" in the name
is_bot = ['bot' in username.lower() for username in all_user_edit_counts.index.get_level_values(1).values]
keep_user = keep_user & ~np.array(is_bot)
print("Keep %d users out of %d (%.1f%%)" % (np.sum(keep_user), len(all_user_edit_counts), 100*float(np.sum(keep_user))/len(all_user_edit_counts)))
# +
# Remove those users
userids_to_keep = all_user_edit_counts.index.get_level_values(0).values[keep_user]
clean_histories = clean_histories.loc[clean_histories.userid.isin(userids_to_keep)]
clean_histories = clean_histories.reset_index(drop=True)
# -
print("Length after removing users: {}".format(len(clean_histories)))
# %%time
# Save cleaned histories
feather.write_feather(clean_histories, '../clean_histories_2021-05-28.feather')
# ## Build lookup tables
# %%time
clean_histories = feather.read_feather('../clean_histories_2021-05-28.feather')
# +
# Page id to title and back
lookup = clean_histories.drop_duplicates(subset=['pageid']).loc[:,['pageid','title']]
p2t = dict(zip(lookup.pageid, lookup.title))
t2p = dict(zip(lookup.title, lookup.pageid))
# User id to name and back
lookup = clean_histories.drop_duplicates(subset=['userid']).loc[:,['userid','user']]
u2n = dict(zip(lookup.userid, lookup.user))
n2u = dict(zip(lookup.user, lookup.userid))
# +
# Page id and userid to index in cooccurence matrix and back
pageids = np.sort(clean_histories.pageid.unique())
userids = np.sort(clean_histories.userid.unique())
p2i = {pageid:i for i, pageid in enumerate(pageids)}
u2i = {userid:i for i, userid in enumerate(userids)}
i2p = {v: k for k, v in p2i.items()}
i2u = {v: k for k, v in u2i.items()}
# +
# User name and page title to index and back
n2i = {k:u2i[v] for k, v in n2u.items() if v in u2i}
t2i = {k:p2i[v] for k, v in t2p.items() if v in p2i}
i2n = {v: k for k, v in n2i.items()}
i2t = {v: k for k, v in t2i.items()}
# -
wr.save_pickle((p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t), '../lookup_tables_2021-05-28.pickle')
wr.save_pickle((userids, pageids), '../users_and_pages_2021-05-28.pickle')
#
# ## Build test and training set
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
# Make a test set from the most recent edit by each user
histories_test = clean_histories.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the training set
histories_train = wr.dataframe_set_subtract(clean_histories, histories_test)
histories_train.reset_index(drop=True, inplace=True)
# Make a dev set from the second most recent edit by each user
histories_dev = histories_train.groupby(['userid','user'],as_index=False).first()
# Subtract it from the rest to make the final training set
histories_train = wr.dataframe_set_subtract(histories_train, histories_dev)
histories_train.reset_index(drop=True, inplace=True)
print("Length of test set: {}".format(len(histories_test)))
print("Length of dev set: {}".format(len(histories_dev)))
print("Length of training after removal of test: {}".format(len(histories_train)))
print("Number of pages in training set: {}".format(len(histories_train.pageid.unique())))
print("Number of users in training set: {}".format(len(histories_train.userid.unique())))
print("Number of pages with > 1 user editing: {}".format(np.sum(histories_train.drop_duplicates(subset=['title','user']).groupby('title').count().user > 1)))
feather.write_feather(histories_train, '../histories_train_2021-05-28.feather')
feather.write_feather(histories_dev, '../histories_dev_2021-05-28.feather')
feather.write_feather(histories_test, '../histories_test_2021-05-28.feather')
# +
resurface_userids, discovery_userids = wr.get_resurface_discovery(histories_train, histories_dev)
print("%d out of %d userids are resurfaced (%.1f%%)" % (len(resurface_userids), len(userids), 100*float(len(resurface_userids))/len(userids)))
print("%d out of %d userids are discovered (%.1f%%)" % (len(discovery_userids), len(userids), 100*float(len(discovery_userids))/len(userids)))
# -
wr.save_pickle((resurface_userids, discovery_userids), '../resurface_discovery_users_2021-05-28.pickle')
# # FIG Rama and other examples
print("Number of edits by Rama in a year: {}".format(len(all_histories.loc[all_histories.user == 'Rama'])))
print("Number of pages edited: {}".format(len(all_histories.loc[all_histories.user == 'Rama'].drop_duplicates(subset=['pageid']))))
# +
from pull_edit_histories import get_edit_history
oneuser = get_edit_history(user="Thornstrom",
latest_timestamp="2021-05-28T22:02:09Z",
earliest_timestamp="2020-05-28T22:02:09Z")
oneuser = pd.DataFrame(oneuser).loc[:,cols]
# -
wr.print_user_history(all_histories, user="Rama")
wr.print_user_history(all_histories, user="Meow")
# # Build matrix for implicit collaborative filtering
# +
# %%time
# Get the user/page edit counts
for_implicit = histories_train.groupby(["userid","pageid"]).count().first_timestamp.reset_index().rename(columns={'first_timestamp':'edits'})
for_implicit.loc[:,'edits'] = for_implicit.edits.astype(np.int32)
# +
row = np.array([p2i[p] for p in for_implicit.pageid.values])
col = np.array([u2i[u] for u in for_implicit.userid.values])
implicit_matrix_coo = coo_matrix((for_implicit.edits.values, (row, col)))
implicit_matrix = csc_matrix(implicit_matrix_coo)
# -
# %%time
wr.save_pickle(implicit_matrix,'../implicit_matrix_2021-05-28.pickle')
# ### Test the matrix and indices
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
# +
# Crude item to item recs by looking for items edited by the same editors (count how many editors overlap)
veditors = np.flatnonzero(implicit_matrix[t2i['Hamburger'],:].toarray())
indices = np.flatnonzero(np.sum(implicit_matrix[:,veditors] > 0,axis=1))
totals = np.asarray(np.sum(implicit_matrix[:,veditors] > 0 ,axis=1)[indices])
sorted_order = np.argsort(totals.squeeze())
[i2t.get(i, "") + " " + str(total[0]) for i,total in zip(indices[sorted_order],totals[sorted_order])][::-1]
# -
# Histories of editors who had that item
for ved in veditors:
print("\n\n\n" + i2n[ved])
wr.print_user_history(all_histories, user=i2n[ved])
# # Implicit recommendation
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
bm25_matrix = bm25_weight(implicit_matrix, K1=100, B=0.25)
num_factors =200
regularization = 0.01
os.environ["OPENBLAS_NUM_THREADS"] = "1"
model = implicit.als.AlternatingLeastSquares(
factors=num_factors, regularization=regularization
)
model.fit(bm25_matrix)
wr.save_pickle(model,'../als%d_bm25_model.pickle' % num_factors)
model = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], bm25_matrix.tocsc(), N=1000, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score) + ' %d' % (implicit_matrix[ind,:]>0).sum()
for ind, score in recommendations]
# ## Grid search results
grid_search_results = wr.load_pickle("../implicit_grid_search.pickle")
pd.DataFrame(grid_search_results)
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results],
columns = ['num_factors','regularization'] + list(grid_search_results[0]['metrics'].keys()))
grid_search_results_bm25 = wr.load_pickle("../implicit_grid_search_bm25.pickle")
pd.DataFrame([[i['num_factors'], i['regularization']] + list(i['metrics'].values()) for i in grid_search_results_bm25],
columns = ['num_factors','regularization'] + list(grid_search_results_bm25[0]['metrics'].keys()))
# # B25 Recommendation
from implicit.nearest_neighbours import BM25Recommender
# +
bm25_matrix = bm25_weight(implicit_matrix, K1=20, B=1)
bm25_matrix = bm25_matrix.tocsc()
sns.distplot(implicit_matrix[implicit_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
sns.distplot(bm25_matrix[bm25_matrix.nonzero()],bins = np.arange(0,100,1),kde=False)
# -
K1 = 100
B = 0.25
model = BM25Recommender(K1, B)
model.fit(implicit_matrix)
wr.save_pickle(model, '../bm25_model_2021-05-28.pkl')
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
a = ['Steven Universe 429.4746',
'List of Steven Universe episodes 178.4544',
'Demon Bear 128.7237',
'Legion of Super Heroes (TV series) 128.7237',
'The Amazing World of Gumball 126.3522',
'Steven Universe Future 123.9198']
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['<NAME>'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
results = model.similar_items(t2i['Hamburger'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
u = n2u["Rama"]
recommendations = model.recommend(u2i[u], implicit_matrix.astype(np.float32), N=1000, filter_already_liked_items=True)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
plt.plot([ score for i,(ind, score) in enumerate(recommendations) if implicit_matrix[ind,u2i[u]]==0])
wr.save_pickle(model, "b25_model.pickle")
model = wr.load_pickle("b25_model.pickle")
# # Evaluate models
# ## Item to item recommendation
results = model.similar_items(t2i['Steven Universe'],20)
['%s %.4f' % (i2t[ind], score) for ind, score in results]
# ## User to item recommendations
# +
# Check out a specific example
u = n2u["HyprMarc"]
wr.print_user_history(clean_histories, userid=u)
# -
u = n2u["HyprMarc"]
recommendations = model.recommend(u2i[u], implicit_matrix, N=100, filter_already_liked_items=False)
[ ("*" if implicit_matrix[ind,u2i[u]]>0 else "") +
'%s %.4f' % (i2t[ind], score)
for ind, score in recommendations]
# # Visualize implicit embeddings
model = wr.load_pickle('../als150_model.pickle')
# +
# Only plot the ones with over 3 entries
indices = np.squeeze(np.asarray(np.sum(implicit_matrix[nonzero,:],axis=1))) > 3
indices = nonzero[indices]
# -
len(indices)
# Visualize the collaborative filtering item vectors, embedding into 2D space with UMAP
# nonzero = np.flatnonzero(implicit_matrix.sum(axis=1))
# indices = nonzero[::100]
embedding = umap.UMAP().fit_transform(model.item_factors[indices,:])
plt.figure(figsize=(10,10))
plt.plot(embedding[:,0], embedding[:,1],'.')
# _ = plt.axis('square')
# ## Visualize actors in the embeddings space
# +
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[indices,:],axis=1))))
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
# -
actors = ['<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME> (actor)',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>',
'<NAME>']
actor_indices = [t2i[a] for a in actors]
edit_counts = np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1)))
log_edit_counts = np.log10(np.squeeze(np.asarray(np.sum(implicit_matrix[actor_indices,:],axis=1))))
embedding = umap.UMAP().fit_transform(model.item_factors[actor_indices,:])
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in actor_indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
key = np.zeros(len(actors))
key[:8] = 1
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color=key,
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# +
# Full embedding plotly interactive visualization
emb_df = pd.DataFrame({'dim1':embedding[:,0].squeeze(),
'dim2':embedding[:,1].squeeze(),
'title':[i2t[i] for i in indices],
'edit_count':edit_counts,
'log_edit_count':log_edit_counts
})
fig = px.scatter(data_frame=emb_df,
x='dim1',
y='dim2',
hover_name='title',
color='log_edit_count',
hover_data=['edit_count'])
fig.update_layout(
autosize=False,
width=600,
height=600,)
fig.show()
# -
# # Evaluate on test set
# +
# Load the edit histories in the training set and the test set
histories_train = feather.read_feather('../histories_train_2021-05-28.feather')
histories_test = feather.read_feather('../histories_test_2021-05-28.feather')
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
implicit_matrix = wr.load_pickle('../implicit_matrix_2021-05-28.pickle')
p2t, t2p, u2n, n2u, p2i, u2i, i2p, i2u, n2i, t2i, i2n, i2t = wr.load_pickle('../lookup_tables_2021-05-28.pickle')
userids, pageids = wr.load_pickle('../users_and_pages_2021-05-28.pickle')
resurface_userids, discovery_userids = wr.load_pickle('../resurface_discovery_users_2021-05-28.pickle')
results = {}
# -
wr.display_recs_with_history(
recs,
userids[:100],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# ## Most popular
# +
# %%time
K=20
rec_name = "Popularity"
prec = recommenders.PopularityRecommender(histories_train)
precs = prec.recommend_all(userids, K)
wr.save_pickle(precs, "../" + rec_name +"_recs.pickle")
# +
results[rec_name] = wr.get_recs_metrics(
histories_dev, precs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# -
# ## Most recent
# %%time
# Most recent
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
rrecs = rrec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Recent"
wr.save_pickle(rrecs, "../" + rec_name +"_recs.pickle")
len(resurface_userids)
results ={}
results[rec_name] = wr.get_recs_metrics(
histories_dev, rrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## Most frequent
# %%time
# Sorted by frequency of edits
K=20
frec = recommenders.MostFrequentRecommender(histories_train)
frecs = frec.recommend_all(userids, K, interactions=histories_train)
rec_name = "Frequent"
wr.save_pickle(frecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, frecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## BM25
# %%time
K=20
brec = recommenders.MyBM25Recommender(model, implicit_matrix)
brecs = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=False)
rec_name = "bm25"
wr.save_pickle(brecs, "../" + rec_name +"_recs.pickle")
# filter_already_liked_items = False
results[rec_name] = wr.get_recs_metrics(
histories_dev, brecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# filter_already_liked_items = True
rec_name = "bm25_filtered"
brecs_filtered = brec.recommend_all(userids, K, u2i=u2i, n2i=n2i, i2p=i2p, filter_already_liked_items=True)
wr.save_pickle(brecs_filtered, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, recs['bm25_filtered'], K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
results[rec_name] = wr.get_recs_metrics(
histories_dev, recs['bm25_filtered'], K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# ## ALS Implicit collaborative filtering
model_als = wr.load_pickle('../als200_bm25_model_2021-05-28.pickle')
# %%time
rec_name = "als"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=False)
wr.save_pickle(irecs, "../" + rec_name +"_recs.pickle")
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
rec_name = "als_filtered"
K=20
irec = recommenders.ImplicitCollaborativeRecommender(model_als, bm25_matrix.tocsc())
irecs_filtered = irec.recommend_all(userids, K, i2p=i2p, filter_already_liked_items=True)
results[rec_name] = wr.get_recs_metrics(
histories_dev, irecs_filtered, K, discovery_userids, resurface_userids, bm25_matrix.tocsc(), i2p, u2i)
results[rec_name]
wr.save_pickle(irecs_filtered, "../" + rec_name +"_recs.pickle")
show(pd.DataFrame(results).T)
# ## Jaccard
# %%time
# Sorted by Jaccard
K=20
rrec = recommenders.MostRecentRecommender(histories_train)
recent_pages_dict = rrec.all_recent_only(K, userids, interactions=histories_train)
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids,
K,
num_lookpage_pages=1,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
wr.save_pickle(jrecs,"jaccard-1_recs.pickle")
rec_name = "Jaccard"
results[rec_name] = wr.get_recs_metrics(
histories_dev, jrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
wr.display_recs_with_history(
jrecs,
userids[:30],
histories_test,
histories_train,
p2t,
u2n,
recs_to_display=5,
hist_to_display=10,
)
# %%time
# Sorted by Jaccard
K=5
jrec = recommenders.JaccardRecommender(implicit_matrix, p2i=p2i, t2i=t2i, i2t=i2t, i2p=i2p, n2i=n2i, u2i=u2i, i2u=i2u)
jrecs = jrec.recommend_all(userids[:1000],
10,
num_lookpage_pages=50,
recent_pages_dict=recent_pages_dict,
interactions=histories_train)
print("Jaccard")
print("Recall @ %d: %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K)))
print("Prop resurfaced: %.1f%%" % (100*wr.prop_resurface(jrecs, K, implicit_matrix, i2p, u2i)))
print("Recall @ %d (discovery): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=discovery_userids)))
print("Recall @ %d (resurface): %.1f%%" % (K, 100*wr.recall(histories_test, jrecs, K, userid_subset=resurface_userids)))
# ## Interleaved
recs.keys()
# +
# Interleaved jaccard and recent
K=20
rec_name = "Interleaved"
print(rec_name)
intrec = recommenders.InterleaveRecommender()
intrecs = intrec.recommend_all(K, [recs['Recent'], recs['bm25_filtered']])
wr.save_pickle(intrecs, "../" + rec_name +"_recs.pickle")
# -
results[rec_name] = wr.get_recs_metrics(
histories_dev, intrecs, K, discovery_userids, resurface_userids, implicit_matrix, i2p, u2i)
results[rec_name]
# # Report on evaluations results
# ## Hard coded metrics
# +
results = {}
results["Popularity"] = {'recall': 0.16187274312040842,
'ndcg': 0.0005356797596941751,
'resurfaced': 0.6213422985929523,
'recall_discover': 0.11947959996459864,
'recall_resurface': 0.2624396388830569,
'ndcg_discover': 0.000410354483750028,
'ndcg_resurface': 0.0008329819416998272}
results["Recent"] = {'recall': 22.618602913709378,
'ndcg': 0.14306080818547054,
'resurfaced': 71.13808990163118,
'recall_discover': 0.03982653332153288,
'recall_resurface': 76.18097837497375,
'ndcg_discover': 0.00011494775493754298,
'ndcg_resurface': 0.4821633227780786}
results["Frequent"] = {'recall': 20.834889802017184,
'ndcg': 0.11356953338215306,
'resurfaced': 76.10353629684971,
'recall_discover': 0.035401362952473675,
'recall_resurface': 70.17635943732941,
'ndcg_discover': 9.90570471847343e-05,
'ndcg_resurface': 0.38274923359395385}
results["ALS"] = {'recall': 5.488108579255385,
'ndcg': 0.026193145556306998,
'resurfaced': 16.251556468683848,
'recall_discover': 1.146119125586335,
'recall_resurface': 15.788368675204703,
'ndcg_discover': 0.004817135435898367,
'ndcg_resurface': 0.0769022655123215}
results["ALS_filtered"] = {'recall': 0.9027518366330469,
'ndcg': 0.003856703716094881,
'resurfaced': 0.0,
'recall_discover': 1.2832994070271706,
'recall_resurface': 0.0,
'ndcg_discover': 0.005482465270193466,
'ndcg_resurface': 0.0}
results["BM25"] = {'recall': 18.945336819823186,
'ndcg': 0.1015175508656068,
'resurfaced': 74.0469742248786,
'recall_discover': 1.3939286662536507,
'recall_resurface': 60.581566239764854,
'ndcg_discover': 0.004204510293040833,
'ndcg_resurface': 0.332367864833573}
results["BM25_filtered"] = {'recall': 1.8148424853691942,
'ndcg': 0.008622285155255174,
'resurfaced': 0.14848711243929774,
'recall_discover': 2.522347110363749,
'recall_resurface': 0.1364686122191896,
'ndcg_discover': 0.011740495141426633,
'ndcg_resurface': 0.0012251290280766518}
results["Interleaved"] = {'recall': 21.382766778732414,
'ndcg': 0.12924273396038563,
'resurfaced': 42.478676379031256,
'recall_discover': 1.8364457031595716,
'recall_resurface': 67.75141717404996,
'ndcg_discover': 0.006943981897312752,
'ndcg_resurface': 0.4193652616867473}
results_df = pd.DataFrame(results).T
results_df.reset_index(inplace=True)
# -
# ## Table of results
results_df
# ### FIG Table for post
# +
def scatter_text(x, y, text_column, data, title, xlabel, ylabel):
"""Scatter plot with country codes on the x y coordinates
Based on this answer: https://stackoverflow.com/a/54789170/2641825"""
# Create the scatter plot
p1 = sns.scatterplot(x, y, data=data, size = 8, legend=False)
# Add text besides each point
for line in range(0,data.shape[0]):
p1.text(data[x][line]+0.01, data[y][line],
data[text_column][line], horizontalalignment='left',
size='medium', color='black', weight='semibold')
# Set title and axis labels
plt.title(title)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
return p1
def highlight_max(s):
'''
highlight the maximum in a Series yellow.
'''
is_max = s == s.max()
return ['background-color: yellow' if v else '' for v in is_max]
results_df.sort_values("recall", ascending=False).style.apply(highlight_max, subset=["recall",
"ndcg",
"resurfaced",
"recall_discover",
"recall_resurface",
"ndcg_discover",
"ndcg_resurface",]).format({"recall": "{:.1f}%",
"ndcg": "{:.3f}",
"resurfaced": "{:.1f}%",
"recall_discover": "{:.1f}%",
"recall_resurface": "{:.1f}%",
"ndcg_discover": "{:.3f}",
"ndcg_resurface": "{:.3f}",
})
# -
colnames = ["Recommender", "Recall@20", "nDCG@20","Resurfaced","Recall@20 discovery","Recall@20 resurface","nDCG@20 discovery","nDCG@20 resurface"]
#apply(highlight_max, subset=colnames[1:]).
results_df.columns = colnames
results_df.sort_values("Recall@20", ascending=False).style.\
format({"Recall@20": "{:.1f}%",
"nDCG@20": "{:.3f}",
"Resurfaced": "{:.1f}%",
"Recall@20 discovery": "{:.1f}%",
"Recall@20 resurface": "{:.1f}%",
"nDCG@20 discovery": "{:.3f}",
"nDCG@20 resurface": "{:.3f}",
})
# ## Scatter plots (resurface vs discover)
fig = px.scatter(data_frame=results_df,
x='ndcg_discover',
y='ndcg_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
fig = px.scatter(data_frame=results_df,
x='recall_discover',
y='recall_resurface',
hover_name='index')
# hover_name='title',)
fig.show()
# ### FIG Scatterplot for post
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
y = [0, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
# +
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 48, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(12,7))
A = results_df.loc[:,'Recall@20 discovery']
B = results_df.loc[:,'Recall@20 resurface']
x = 2*[results_df.loc[results_df.Recommender == "Interleaved","Recall@20 discovery"].values[0]]
y = [-1, results_df.loc[results_df.Recommender == "Interleaved","Recall@20 resurface"].values[0]]
plt.plot(x,y,":k")
x[0] = 0
y[0] = y[1]
# plt.rcParams.update({'font.size': 48})
plt.rc('xtick', labelsize=3)
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 22}
matplotlib.rc('font', **font)
plt.plot(x,y,":k")
plt.plot(A,B,'.', MarkerSize=15)
for xyz in zip(results_df.Recommender, A, B): # <--
plt.gca().annotate('%s' % xyz[0], xy=np.array(xyz[1:])+(0.05,0), textcoords='data', fontsize=18) # <--
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
plt.xlabel("Recall@20 discovery (%)",fontsize=20)
plt.ylabel("Recall@20 resurface (%)",fontsize=20)
plt.xlim([0,3])
plt.ylim([-2,85])
axes = plt.gca()
# -
# ## Read recs in from files
recommender_names = ['Popularity', 'Recent', 'Frequent', 'ALS', 'ALS_filtered', 'BM25', 'BM25_filtered', 'Interleaved']
recs = {rname:wr.load_pickle("../" + rname + "_recs.pickle") for rname in recommender_names}
# ## Recall curves
histories_dev = feather.read_feather('../histories_dev_2021-05-28.feather')
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20)
# print(recall_curve[-1])
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
plt.figure(figsize=(15,10))
for rname in recommender_names:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, resurface_userids)
plt.plot(recall_curve,'.-')
plt.legend(recommender_names)
# ### FIG Implicit vs BM25 figure
sns.set_theme(style="darkgrid")
matplotlib.rcParams.update({'font.size': 18, 'figure.figsize':(8,5), 'legend.edgecolor':'k'})
plt.figure(figsize=(10,6))
for rname in ["ALS","BM25"]:
recall_curve = wr.recall_curve(histories_dev, recs[rname], 20, discovery_userids)
plt.plot(np.array(recall_curve)*100,'.-',markersize=12)
plt.legend( ["ALS","BM25"],title="Algorithm", fontsize=16, title_fontsize=16, facecolor="w")
plt.xlabel("@N",fontsize=20)
plt.ylabel("Discovery recall (%)",fontsize=20)
_ = plt.xticks(np.arange(0,20,2),np.arange(0,20,2)+1)
# plt.gca().legend(prop=dict(size=20))
for tick in plt.gca().xaxis.get_major_ticks():
tick.label.set_fontsize(20)
for tick in plt.gca().yaxis.get_major_ticks():
tick.label.set_fontsize(20)
# # User recommendation comparison
recs_subset = ["Recent","Frequent","Popularity","Implicit","bm25","interleaved"]
print("Next edit: " + histories_dev.loc[histories_dev.userid == userid].title.values[0])
# ## FIG Rama table
# +
def bold_viewed(val, viewed_pages):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
weight = 'bold' if val in viewed_pages else 'normal'
return 'font-weight: %s' % weight
def color_target(val, target_page):
"""
Takes a scalar and returns a string with
the css property `'color: red'` for negative
strings, black otherwise.
"""
color = 'red' if val == target_page else 'black'
return 'color: %s' % color
def display_user_recs_comparison(user_name, recs, recs_subset, train_set, test_set, N=20):
userid = n2u[user_name]
recs_table = | pd.DataFrame({rec_name: [p2t[r] for r in recs[rec_name][userid][:N]] for rec_name in recs_subset}) | pandas.DataFrame |
import streamlit as st
import pandas as pd
import numpy as np
import altair as alt
import random
from ..data import load_cuba_data
@st.cache
def get_events(data):
events = []
for i, d in data.iterrows():
person_id = d["Cons"]
try:
age = int(d["Edad"])
sex = "FEMALE" if d["Sexo"] == "F" else "MALE"
except ValueError:
continue
state = "L"
if d["Asintomatico"]:
events.append(
dict(
from_state="L",
to_state="A",
duration=0,
age=age,
sex=sex,
id=person_id,
)
)
if d["Evolución"] == "Alta":
events.append(
dict(
from_state="A",
to_state="R",
duration=(
pd.to_datetime(d["Fecha Alta"])
- pd.to_datetime(d["F. Conf"])
).days,
age=age,
sex=sex,
id=person_id,
)
)
continue
events.append(
dict(
from_state="L",
to_state="I",
duration=0,
age=age,
sex=sex,
id=person_id,
)
)
events.append(
dict(
from_state="I",
to_state="Is",
duration=0,
age=age,
sex=sex,
id=person_id,
)
)
try:
symptoms_start = pd.to_datetime(d["FIS"], format="%m/%d/%Y", errors="raise")
admission_start = pd.to_datetime(d["FI"], format="%m/%d/%Y", errors="raise")
except:
continue
events.append(
dict(
from_state="Is",
to_state="H",
duration=(admission_start - symptoms_start).days,
age=age,
sex=sex,
id=person_id,
)
)
try:
alta = pd.to_datetime(d["Fecha Alta"], format="%m/%d/%Y", errors="raise")
except:
continue
if d["Evolución"] == "Fallecido":
events.append(
dict(
from_state="H",
to_state="D",
duration=(alta - admission_start).days,
age=age,
sex=sex,
id=person_id,
)
)
elif d["Evolución"] == "Alta":
events.append(
dict(
from_state="H",
to_state="R",
duration=(alta - admission_start).days,
age=age,
sex=sex,
id=person_id,
)
)
return pd.DataFrame(events)
@st.cache
def get_daily_values(data, asympt_length):
day_states = []
fend = data["Fecha Alta"].max() + pd.Timedelta(days=1)
for i, row in data.iterrows():
fs: pd.Timestamp = row["FIS"]
fi: pd.Timestamp = row["FI"]
fc: pd.Timestamp = row["F. Conf"]
fa: pd.Timestamp = row["Fecha Alta"]
contacts = row['# de contactos']
if pd.isna(contacts):
contacts = 0
if pd.isna(fa):
fa = fend
if pd.isna(fc):
continue
day_states.append(dict(day=fc, id=row["Cons"], status="nuevo-confirmado"))
if fa < fend:
day_states.append(dict(day=fa, id=row["Cons"], status="nuevo-alta"))
for day in range((fa - fc).days):
day_states.append(
dict(
day=fc + pd.Timedelta(days=day), id=row["Cons"], status="activo"
)
)
if not | pd.isna(fi) | pandas.isna |
import os
import sys
import numpy as np
import pandas as pd
import xlwings as xw
from logzero import logger
from openpyxl import load_workbook
from openpyxl.styles import Alignment
from spareparts.lib.colors import Colors
from spareparts.lib.filters import (
trash_assemblies,
trash_description,
trash_fastener,
trash_file_name,
trash_item_number,
trash_parts_ending_P1_or_A1,
trash_prp,
trash_prp1,
trash_robot,
)
from spareparts.lib.settings import (
JDEPATH,
blue,
dict_header,
excel_headers,
headers_bg_hue,
mauve,
orange,
splname,
temp_jde,
template1,
template2,
tempo_local,
)
from yaspin import Spinner, yaspin
sp = Spinner([
"[ ]",
"[= ]",
"[== ]",
"[=== ]",
"[ ===]",
"[ ==]",
"[ =]",
"[ ]",
"[ =]",
"[ ==]",
"[ ===]",
"[====]",
"[=== ]",
"[== ]",
"[= ]"
], 80)
class Spareparts:
"""Generate spareparts list."""
JDE_TEMP = os.path.join(tempo_local, temp_jde)
def __init__(self):
self.jde = self.load_jde_data()
self.db = pd.DataFrame()
self.spl = pd.DataFrame()
self.asm = pd.DataFrame()
self.elec = pd.DataFrame()
self.garbage = pd.DataFrame()
self.nuts = pd.DataFrame()
self.plates = pd.DataFrame()
self.gearbox = | pd.DataFrame() | pandas.DataFrame |
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import pytest
from pandas.api.types import is_categorical_dtype
from _helpers import assert_array_nan_equal
from cellrank.tools import Lineage
from cellrank.tools._utils import (
_one_hot,
_process_series,
_fuzzy_to_discrete,
_merge_categorical_series,
_series_from_one_hot_matrix,
)
from cellrank.tools._colors import _map_names_and_colors
class TestToolsUtils:
def test_merge_not_categorical(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"])
with pytest.raises(TypeError):
_ = _merge_categorical_series(x, y)
def test_merge_different_index(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, np.nan, "d", "a"], index=[5, 4, 3, 2, 1]).astype(
"category"
)
with pytest.raises(ValueError):
_ = _merge_categorical_series(x, y)
def test_merge_normal_run(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
expected = pd.Series(["b", "b", "a", "d", "a"]).astype("category")
res = _merge_categorical_series(x, y, inplace=False)
np.testing.assert_array_equal(res.values, expected.values)
def test_merge_normal_run_inplace(self):
x = pd.Series(["a", "b", np.nan, "b", np.nan]).astype("category")
y = pd.Series(["b", np.nan, "a", "d", "a"]).astype("category")
expected = pd.Series(["b", "b", "a", "d", "a"]).astype("category")
_ = _merge_categorical_series(x, y, inplace=True)
assert _ is None
np.testing.assert_array_equal(x.values, expected.values)
def test_merge_normal_run_completely_different_categories(self):
x = pd.Series(["a", "a", "a"]).astype("category")
y = | pd.Series(["b", "b", "b"]) | pandas.Series |
# -*- coding: utf-8 -*-
"""
Created on 8/5/17
Author: <NAME>
"""
import numpy as np
import pandas as pd
import config
features = ['symboling',
'normalized_losses',
'make',
'fuel_type',
'aspiration',
'num_of_doors',
'body_style',
'drive_wheels',
'engine_location',
'wheel_base',
'length',
'width',
'height',
'curb_weight',
'engine_type',
'num_of_cylinders',
'engine_size',
'fuel_system',
'bore',
'stroke',
'compression_ratio',
'horsepower',
'peak_rpm',
'city_mpg',
'highway_mpg',
'price']
def load_data():
"""Returns dataset replaced with null values('?') to np.NaN"""
data = pd.read_csv(config.DATA_DIR+'imports-85.data', names=features)
data = data.replace('?', np.NaN)
return data
def split_X_y(data):
y = | pd.to_numeric(data["normalized_losses"]) | pandas.to_numeric |
import keras
from keras.models import Model
from keras.layers import Input,Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import multi_gpu_model
from keras.utils import plot_model
from keras import losses
import os
import tensorflow as tf
from keras import backend as K
import DataGenerator as dg
import get_modelv2_3
import get_model
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from keras.callbacks import ModelCheckpoint
from keras.callbacks import EarlyStopping
import pandas as pd
import numpy as np
from keras.models import load_model
import re
import seaborn as sns
from sklearn.linear_model import LinearRegression
import scipy
import warnings
import sys
from sklearn.metrics import roc_curve, auc
import time
warnings.filterwarnings('ignore')
os.environ['CUDA_VISIBLE_DEVICES'] = '0, 1, 2'
class multi_task_training_respect:
def __init__(self):
self.model = keras.Model()
self.model_class_task= keras.Model()
self.model_reg_task= keras.Model()
self.lr1 = 0.0001
self.lr2 = 0.0001
self.alpha = 0.5
self.patience_class = 6
self.patience_reg = 6
self.font1 = {
'weight': 'normal',
'size': 16,
}
self.font2 = {
'weight': 'normal',
'size': 23,
}
def get_batch_data(self,prot, comp, y, batch_count, batch_size, batch_count_per_epoch):
batch_count = batch_count % batch_count_per_epoch
batch_prot = prot[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
batch_comp = comp[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
batch_y = y[batch_size * batch_count:min(batch_size * (batch_count + 1), len(prot))]
return batch_prot, batch_comp, batch_y
def draw_loss_and_accuracy_curve(self,history_class, history_class_vali, model_name, save_dir):
train_loss = []
vali_loss = []
train_accuracy = []
vali_accuracy = []
for tmp in history_class:
train_loss.append(tmp[0])
train_accuracy.append(tmp[1])
for tmp in history_class_vali:
vali_loss.append(tmp[0])
vali_accuracy.append(tmp[1])
epochs = range(1, len(history_class) + 1)
##---------------draw loss curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_loss, 'b', label='Classification training loss')
plt.plot(epochs, vali_loss, 'r', label='Classification validation loss')
plt.title('Classification Training and Validation Loss', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Loss', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_class_training_validation_loss.png' % model_name)
##---------------draw accuracy curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_accuracy, 'b', label='Classification training accuracy')
plt.plot(epochs, vali_accuracy, 'r', label='Classification validation accuracy')
plt.title('Training and Validation Accuracy', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Accuracy', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_class_training_validation_accuracy.png' % model_name)
def draw_loss_and_mse_curve(self,history_reg, history_reg_vali, model_name, save_dir):
train_loss = []
vali_loss = []
train_mse = []
vali_mse = []
for tmp in history_reg:
train_loss.append(tmp[0])
train_mse.append(tmp[1])
for tmp in history_reg_vali:
vali_loss.append(tmp[0])
vali_mse.append(tmp[1])
epochs = range(1, len(history_reg) + 1)
##---------------draw loss curve------------------##
plt.figure(figsize=(10.3, 10))
plt.plot(epochs, train_loss, 'b', label='Regression training loss')
plt.plot(epochs, vali_loss, 'r', label='Regression validation loss')
plt.title('Regression Training and Validation Loss', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('Loss', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_loss.png' % model_name)
##---------------draw accuracy curve------------------##
plt.figure(figsize=(10, 10))
plt.plot(epochs, train_mse, 'b', label='Regression training mse')
plt.plot(epochs, vali_mse, 'r', label='Regression validation mse')
plt.title('Regression Training and Validation MSE', self.font2)
plt.xlabel('Epochs', self.font2)
plt.ylabel('MSE', self.font2)
plt.legend(prop=self.font1)
plt.savefig(save_dir + '/%s_reg_training_validation_mse.png' % model_name)
def mean_squared_error_l2(self,y_true, y_pred, lmbda=0.01):
cost = K.mean(K.square(y_pred - y_true))
# weights = self.model.get_weights()
weights = []
for layer in self.model_reg_task.layers:
# print(layer)
weights = weights + layer.get_weights()
# print (weights)
result = tf.reduce_sum([tf.reduce_sum(tf.pow(wi, 2)) for wi in weights])
l2 = lmbda * result # K.sum([K.square(wi) for wi in weights])
return cost + l2
def train_model(self,class_training_file,class_validation_file,reg_training_file,reg_validation_file,model_name,
reg_batch_size=128,class_batch_size=128,class_epoch = 50,reg_epoch = 100,
pro_branch_switch1 = 'inception_block', pro_branch_switch2 = 'inception_block',
pro_branch_switch3='inception_block_b', pro_add_attention = False,
comp_branch_switch1 = 'inception_block', comp_branch_switch2 = 'inception_block',
comp_branch_switch3 = 'inception_block_b', comp_add_attention = False):#reg_size=256
##2.get_model
save_dir = os.path.join(os.getcwd(), 'models',model_name)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
self.model_class_task, self.model_reg_task = get_model.get_multi_model(save_dir, self.alpha,
pro_branch_switch1=pro_branch_switch1,pro_branch_switch2=pro_branch_switch2,
pro_branch_switch3=pro_branch_switch3,pro_add_attention=pro_add_attention,
comp_branch_switch1=comp_branch_switch1,comp_branch_switch2=comp_branch_switch2,
comp_branch_switch3=comp_branch_switch3,comp_add_attention=comp_add_attention)
optimizer1 = keras.optimizers.Adam(lr=self.lr1)
self.model_reg_task.compile(optimizer=optimizer1,
loss=self.mean_squared_error_l2,#'mean_squared_error'
metrics=['mse','mae'])
optimizer2 = keras.optimizers.Adam(lr=self.lr2)
self.model_class_task.compile(optimizer=optimizer2,loss='binary_crossentropy',metrics=['accuracy'])
##1.read data
print("Starting read reg training data:")
reg_train_generator = dg.read_reg_generator(reg_training_file, reg_batch_size)
reg_vali_prot, reg_vali_comp, reg_vali_value = dg.read_reg(reg_validation_file)
print('regression validation data shape:', len(reg_vali_prot))
class_train_generator = dg.read_class_generator(class_training_file, class_batch_size)
class_vali_prot, class_vali_comp, class_vali_label = dg.read_class(class_validation_file)
print('classification validation data shape:', len(class_vali_prot))
##3.training model
#before train prepare
batch_count_of_class=0
batch_count_per_epoch_class=189109//class_batch_size
batch_count_of_reg = 0
batch_count_per_epoch_reg = 18071 // reg_batch_size
epoch_class = 0
epoch_reg=0
history_class=[]
history_class_vali=[]
history_reg=[]
history_reg_vali=[]
class_erally_stop_flag=1
reg_erally_stop_flag = 1
class_batch_count = class_epoch * batch_count_per_epoch_class
reg_batch_count = reg_epoch * batch_count_per_epoch_reg
K = reg_batch_count/class_batch_count
total_batch_count=class_batch_count+reg_batch_count
#start train
reg_min_loss = float('inf')
reg_min_loss_index = 0
class_min_loss=float('inf')
class_min_loss_index=0
best_reg_model = None
best_class_model = None
best_reg_file = save_dir + "/%s_best_reg_model.hdf5" % model_name
best_class_file = save_dir + "/%s_best_class_model.hdf5" % model_name
reg_loss=[]
class_loss=[]
for i in range(total_batch_count):
#regression
if np.random.rand() * (1+K) >= 1 and reg_erally_stop_flag and epoch_reg<reg_epoch:
print('batch %d(reg):'%i)
reg_batch_prot, reg_batch_comp, reg_batch_value = next(reg_train_generator)
tmp_loss=self.model_reg_task.train_on_batch([reg_batch_prot, reg_batch_comp], reg_batch_value)
reg_loss.append(tmp_loss)
batch_count_of_reg+=1
if batch_count_of_reg % batch_count_per_epoch_reg==0 and batch_count_of_reg>0:
epoch_reg += 1
print("regression epoch %d:"%epoch_reg)
#train performance:loss, mse, mae
print(' regression training loss=',np.mean(reg_loss,axis=0))
history_reg.append(np.mean(reg_loss,axis=0))
reg_loss=[]
#validation performance
score=self.model_reg_task.evaluate([reg_vali_prot,reg_vali_comp],reg_vali_value)
print(' regression evaluation loss=',score)
history_reg_vali.append(score)
#checkpoint and earlly stop
if epoch_reg-reg_min_loss_index>=self.patience_reg:
reg_erally_stop_flag=0
if score[0]<reg_min_loss:
reg_min_loss_index=epoch_reg
reg_min_loss=score[0]
#checkpoint
best_reg_model = self.model_reg_task
# classification
else:
if class_erally_stop_flag and epoch_class<class_epoch:
print('batch %d(class):' % i)
class_batch_prot, class_batch_comp, class_batch_label = next(class_train_generator)
tmp_loss=self.model_class_task.train_on_batch([class_batch_prot, class_batch_comp], class_batch_label)
class_loss.append(tmp_loss)
batch_count_of_class += 1
if batch_count_of_class % batch_count_per_epoch_class == 0 and batch_count_of_class>0:
epoch_class += 1
print("classification epoch %d:"%epoch_class)
# train performance:loss, mse, mae
print(' classification training loss=',np.mean(class_loss,axis=0))
history_class.append(np.mean(class_loss,axis=0))
class_loss=[]#
accuracy = self.model_class_task.evaluate([class_vali_prot, class_vali_comp], class_vali_label)
# validation performance
print(' classification evaluation loss=',accuracy)
history_class_vali.append(accuracy)
# checkpoint and earlly stop
if epoch_class - class_min_loss_index >= self.patience_class:
class_erally_stop_flag = 0
if accuracy[0] < class_min_loss:
class_min_loss_index = epoch_class
class_min_loss = accuracy[0]
# checkpoint
best_class_model = self.model_class_task
##5.save model
#(1).class model
model_path = os.path.join(save_dir,model_name+'_class.h5')
best_class_model.save(model_path)
#(2).reg model
model_path = os.path.join(save_dir,model_name+'_reg.h5')
best_reg_model.save(model_path)
print("save model!")
def save_predict_result(self,predict_result,real_label_or_value,model_name,class_or_reg,type):
if predict_result.shape[1] == 1:
if class_or_reg=='class':
df = predict_result
df.columns = ['predict_label']
else:
df = predict_result
df.columns = ['predict_value']
else:
df = predict_result
df.columns = ['predict_label','predict_value']
if class_or_reg=='class':
df['real_lable'] = real_label_or_value
else:
df['real_value'] = real_label_or_value
df['set']=type
if not os.path.exists('predict_value'):
os.mkdir('predict_value')
df.to_csv('predict_value/multi-task_model_%s_%s_%s_predict_result.csv' % (model_name,class_or_reg,type),
index=False)
print('predict_value/multi-task_model_%s_%s_%s_predict_result.csv has been saved!' % (model_name,class_or_reg,type))
return df
def computer_parameter_draw_scatter_plot(self,predictions, model_name):
sns.set(context='paper', style='white')
sns.set_color_codes()
set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple'}
for set_name, table in predictions.groupby('set'):
rmse = ((table['predict_value'] - table['real_value']) ** 2).mean() ** 0.5
mae = (np.abs(table['predict_value'] - table['real_value'])).mean()
corr = scipy.stats.pearsonr(table['predict_value'], table['real_value'])
lr = LinearRegression()
lr.fit(table[['predict_value']], table['real_value'])
y_ = lr.predict(table[['predict_value']])
sd = (((table["real_value"] - y_) ** 2).sum() / (len(table) - 1)) ** 0.5
print("%10s set: RMSE=%.3f, MAE=%.3f, R=%.2f (p=%.2e), SD=%.3f" %
(set_name, rmse, mae, *corr, sd))
grid = sns.jointplot('real_value', 'predict_value', data=table, stat_func=None, color=set_colors[set_name],
space=0, size=4, ratio=4, s=20, edgecolor='w', ylim=(0, 16), xlim=(0, 16)) # (0.16)
grid.set_axis_labels('real', 'predicted')#, fontsize=16
grid.ax_joint.set_xticks(range(0, 16, 5))
grid.ax_joint.set_yticks(range(0, 16, 5))
a = {'train': 'training', 'validation': 'validation', 'test': 'test'}
set_name=a[set_name]
grid.ax_joint.text(1, 14, set_name + ' set', fontsize=14) # 调整标题大小
grid.ax_joint.text(16, 19.5, 'RMSE: %.3f' % (rmse), fontsize=9)
grid.ax_joint.text(16, 18.5, 'MAE: %.3f ' % mae, fontsize=9)
grid.ax_joint.text(16, 17.5, 'R: %.2f ' % corr[0], fontsize=9)
grid.ax_joint.text(16, 16.5, 'SD: %.3f ' % sd, fontsize=9)
grid.fig.savefig('%s_%s_scatter_plot.jpg' %(model_name,set_name), dpi=400)
def draw_ROC_curve(self,predictions, model_name):
set_colors = {'train': 'b', 'validation': 'green', 'test': 'purple','independent test':'r'}
for set_name, table in predictions.groupby('set'):
fpr, tpr, threshold = roc_curve(table['real_lable'],table['predict_label'])
roc_auc = auc(fpr, tpr)
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(fpr, tpr, color=set_colors[set_name],
lw=lw, label='ROC curve (auc = %0.3f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'b--', lw=lw,
label='Random guess (auc = 0.5)')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.tick_params(labelsize=20)
plt.xlabel('False Positive Rate', self.font2)
plt.ylabel('True Positive Rate', self.font2)
# plt.title('ROC curv')
plt.legend(loc="lower right", prop=self.font1)
plt.savefig("%s_%s_ROC_curve.png" %(model_name,set_name))
def test_model(self,model_file,class_test_file,class_train_file,class_vali_file,
reg_test_file,reg_train_file,reg_vali_file):
##read data
print('starting read data!')
#1.train data
class_train_prot, class_train_comp, class_train_label=dg.multi_process_read_pro_com_file(class_train_file)
reg_train_prot, reg_train_comp,_, reg_train_value=dg.multi_process_read_pro_com_file_regression(reg_train_file)
#2.validation data
class_vali_prot, class_vali_comp, class_vali_label = dg.multi_process_read_pro_com_file(class_vali_file)
reg_vali_prot, reg_vali_comp, _,reg_vali_value = dg.multi_process_read_pro_com_file_regression(reg_vali_file)
#3.test data
class_test_prot, class_test_comp, class_test_label = dg.multi_process_read_pro_com_file(class_test_file)
reg_test_prot, reg_test_comp,_, reg_test_value = dg.multi_process_read_pro_com_file_regression(reg_test_file)
print('classification data size:', len(class_train_prot), len(class_vali_prot), len(class_test_prot))
print('regression data size:', len(reg_train_prot),len(reg_vali_prot),len(reg_test_prot))
##load_model
print('loading modle!')
model = load_model(model_file)
tmp = model_file.split('/')[-1]
model_name = re.findall(r"(.+?).h5", tmp)[0]
## saving predict value
#predict value
#1.train
class_train_predict_value = model.predict([class_train_prot, class_train_comp])
class_train_predict_value_df=pd.DataFrame(class_train_predict_value[0],columns=['label'])
class_train_predict_value_df['value']=class_train_predict_value[1]
reg_train_predict_value = model.predict([reg_train_prot, reg_train_comp])
reg_train_predict_value_df=pd.DataFrame(reg_train_predict_value[0],columns=['label'])
reg_train_predict_value_df['value']=reg_train_predict_value[1]
#2.vali
class_vali_predict_value = model.predict([class_vali_prot, class_vali_comp])
class_vali_predict_value_df = pd.DataFrame(class_vali_predict_value[0])
class_vali_predict_value_df['value']=class_vali_predict_value[1]
reg_vali_predict_value = model.predict([reg_vali_prot, reg_vali_comp])
reg_vali_predict_value_df = pd.DataFrame(reg_vali_predict_value[0])
reg_vali_predict_value_df['value']=reg_vali_predict_value[1]
#3.test
class_test_predict_value = model.predict([class_test_prot, class_test_comp])
class_test_predict_value_df = pd.DataFrame(class_test_predict_value[0])
class_test_predict_value_df['value']=class_test_predict_value[1]
reg_test_predict_value=model.predict([reg_test_prot, reg_test_comp])
reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value[0])
reg_test_predict_value_df['value']=reg_test_predict_value[1]
# save predicted value
#1
class_train_df = self.save_predict_result(class_train_predict_value_df, class_train_label, model_name, 'class', 'train')
reg_train_df = self.save_predict_result(reg_train_predict_value_df, reg_train_value, model_name, 'reg', 'train')
#2
class_vali_df = self.save_predict_result(class_vali_predict_value_df, class_vali_label, model_name, 'class', 'validation')
reg_vali_df = self.save_predict_result(reg_vali_predict_value_df, reg_vali_value, model_name, 'reg', 'validation')
#3
class_test_df = self.save_predict_result(class_test_predict_value_df, class_test_label, model_name, 'class', 'test')
reg_test_df = self.save_predict_result(reg_test_predict_value_df, reg_test_value, model_name, 'reg', 'test')
## computing parameters and drawing scatter plot
self.computer_parameter_draw_scatter_plot(reg_train_df, model_name)
self.computer_parameter_draw_scatter_plot(reg_vali_df, model_name)
self.computer_parameter_draw_scatter_plot(reg_test_df, model_name)
self.draw_ROC_curve(class_train_df, model_name)
self.draw_ROC_curve(class_vali_df, model_name)
self.draw_ROC_curve(class_test_df, model_name)
def reg_test_model(self,model_file,reg_test_file,reg_train_file=None,reg_vali_file=None):
##load_model
print('loading modle!')
self.model_reg_task = load_model(model_file,
custom_objects={'mean_squared_error_l2': self.mean_squared_error_l2})
tmp = model_file.split('/')[-1]
if tmp.find('.h5')!=-1:
model_name = re.findall(r"(.+?).h5", tmp)[0]
else:
model_name = re.findall(r"(.+?).hdf5", tmp)[0]
##1.read data
print('starting read data!')
reg_test_prot, reg_test_comp,_, reg_test_value = dg.read_pro_com_file_regression(reg_test_file)#multi_process_read_pro_com_file_regression(reg_test_file)
print('test data size:',len(reg_test_prot))
reg_test_predict_value=self.model_reg_task.predict([reg_test_prot, reg_test_comp])
if model_name[-3:]=='reg':#reg_model
reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value,columns=['value'])
else:#total model
reg_test_predict_value_df = pd.DataFrame(reg_test_predict_value[0], columns=['label'])
reg_test_predict_value_df['value']=reg_test_predict_value[1]
reg_test_df = self.save_predict_result(reg_test_predict_value_df, reg_test_value, model_name, 'reg', 'test')
self.computer_parameter_draw_scatter_plot(reg_test_df, model_name)
if reg_train_file!=None:
reg_train_prot, reg_train_comp,_, reg_train_value = dg.multi_process_read_pro_com_file_regression(reg_train_file)
reg_train_predict_value = self.model_reg_task.predict([reg_train_prot, reg_train_comp])
reg_train_predict_value=pd.DataFrame(reg_train_predict_value)
reg_train_df = self.save_predict_result(reg_train_predict_value, reg_train_value, model_name, 'reg', 'train')
self.computer_parameter_draw_scatter_plot(reg_train_df, model_name)
if reg_vali_file!=None:
reg_vali_prot, reg_vali_comp,_, reg_vali_value = dg.multi_process_read_pro_com_file_regression(reg_vali_file)
#predict value
reg_vali_predict_value = self.model_reg_task.predict([reg_vali_prot, reg_vali_comp])
reg_vali_predict_value=pd.DataFrame(reg_vali_predict_value)
reg_vali_df = self.save_predict_result(reg_vali_predict_value, reg_vali_value, model_name, 'reg', 'validation')
self.computer_parameter_draw_scatter_plot(reg_vali_df, model_name)
def class_test_model(self,model_file,class_test_file,class_train_file=None,class_vali_file=None):
##load_model
print('loading modle!')
self.model_class_task = load_model(model_file)
tmp = model_file.split('/')[-1]
if tmp.find('.h5')!=-1:
model_name = re.findall(r"(.+?).h5", tmp)[0]
else:
model_name = re.findall(r"(.+?).hdf5", tmp)[0]
# 1. data
##read data
print('starting read data!')
if class_test_file.split('/')[2]=='one-hot_dataset4':
print("对dataset4进行特殊对待!")
class_test_prot, class_test_comp, class_test_label,class_test_value = dg.multi_process_read_pro_com_file_regression(class_test_file)
# df_tmp = pd.DataFrame(class_test_value, columns=['real_value'])
# df_tmp.to_csv('dataset4_real_value.csv')
else:
class_test_prot, class_test_comp, class_test_label = dg.multi_process_read_pro_com_file(class_test_file)
print('test data size:', len(class_test_prot))
# 2.predict value
class_test_predict_value = self.model_class_task.predict([class_test_prot, class_test_comp])
print(model_name[-9:-4] )
if model_name[-9:-4] == 'class': # class_model
class_test_predict_value_df = pd.DataFrame(class_test_predict_value, columns=['label'])
else: # total model
class_test_predict_value_df = pd.DataFrame(class_test_predict_value[0], columns=['label'])
class_test_predict_value_df['value'] = class_test_predict_value[1]
# 3.save predicted value
test_file_name = re.findall(r"(.+?).txt", class_test_file)[0].split('/')[-1]
class_test_df = self.save_predict_result(class_test_predict_value_df, class_test_label, model_name+test_file_name, 'class', 'test')
# 4.computing parameters and drawing auc plot
self.draw_ROC_curve(class_test_df, model_name+test_file_name)
if class_train_file!=None:
# 1.read data
class_train_prot, class_train_comp, class_train_label = dg.multi_process_read_pro_com_file(class_train_file)
print('train data size:',len(class_train_prot))
# 2.predict value
class_train_predict_value = self.model_class_task.predict([class_train_prot, class_train_comp])
if model_name[-4:] == 'class': # reg_model
class_train_predict_value_df = pd.DataFrame(class_train_predict_value, columns=['label'])
else: # total model
class_train_predict_value_df = pd.DataFrame(class_train_predict_value[0], columns=['label'])
class_train_predict_value_df['value'] = class_train_predict_value[1]
# 3.save predicted value
class_train_df = self.save_predict_result(class_train_predict_value_df, class_train_label, model_name, 'class', 'train')
# 4.computing parameters and drawing auc plot
self.draw_ROC_curve(class_train_df, model_name)
if class_vali_file != None:
# 1.read data
class_vali_prot, class_vali_comp, class_vali_label = dg.multi_process_read_pro_com_file(class_vali_file)
print('validation data size:', len(class_vali_prot))
# 2.predict value
class_vali_predict_value = self.model_class_task.predict([class_vali_prot, class_vali_comp])
if model_name[-4:] == 'class': # reg_model
class_vali_predict_value_df = pd.DataFrame(class_vali_predict_value, columns=['label'])
else: # total model
class_vali_predict_value_df = pd.DataFrame(class_vali_predict_value[0], columns=['label'])
class_vali_predict_value_df['value'] = class_vali_predict_value[1]
# 3.save predicted value
class_vali_df = self.save_predict_result(class_vali_predict_value_df, class_vali_label, model_name, 'class',
'validation')
# 4.computing parameters and drawing auc plot
self.draw_ROC_curve(class_vali_df, model_name)
def load_model_predict(self,model_file,file):
##read data
print('starting read data!')
x_prot, x_comp, y_label = dg.read_pro_com_file(file)
# class_test_prot, class_test_comp, class_test_label = dg.multi_process_read_pro_com_file(class_test_file)
print('data size:', len(x_prot), len(x_comp), len(y_label))
##load_model
print('loading modle!')
model = load_model(model_file)
tmp = model_file.split('/')[-1]
model_name = re.findall(r"(.+?).h5", tmp)[0]
## saving predict value
#predict value
predict_value = model.predict([x_prot, x_comp])
predict_value_df = | pd.DataFrame(predict_value[0]) | pandas.DataFrame |
# %%
import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# %%
#
# Input
#
try:
result_file = snakemake.input["result_file"]
json_file = snakemake.input["config_file"]
output_file = snakemake.output["output_file"]
output_zoomed_file = snakemake.output["output_zoomed_file"]
except NameError:
result_file = "../data/lfr-benchmark/results/results.csv"
json_file = "../data/lfr-benchmark/lfr-config.json"
output_file = "../figs/result-lfr.pdf"
output_zoomed_file = "../figs/result-zoomed-lfr.pdf"
# %%
#
# Load
#
result_table = pd.read_csv(result_file)
with open(json_file, "r") as f:
config_file = json.load(f)
config_table = pd.DataFrame(config_file)
config_table["param_id"] = np.arange(config_table.shape[0])
result_table = | pd.merge(result_table, config_table, on="param_id", how="left") | pandas.merge |
## 5. Generating Regression Data ##
from sklearn.datasets import make_regression
import pandas as pd
data = make_regression(n_samples=100, n_features=3, random_state=1)
features = | pd.DataFrame(data[0]) | pandas.DataFrame |
from __future__ import division
from datetime import timedelta
from functools import partial
import itertools
from nose.tools import assert_true
from parameterized import parameterized
import numpy as np
from numpy.testing import assert_array_equal, assert_almost_equal
import pandas as pd
from toolz import merge
from zipline.pipeline import SimplePipelineEngine, Pipeline, CustomFactor
from zipline.pipeline.common import (
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME,
)
from zipline.pipeline.data import DataSet
from zipline.pipeline.data import Column
from zipline.pipeline.domain import EquitySessionDomain
import platform
if platform.system() != 'Windows':
from zipline.pipeline.loaders.blaze.estimates import (
BlazeNextEstimatesLoader,
BlazeNextSplitAdjustedEstimatesLoader,
BlazePreviousEstimatesLoader,
BlazePreviousSplitAdjustedEstimatesLoader,
)
from zipline.pipeline.loaders.earnings_estimates import (
INVALID_NUM_QTRS_MESSAGE,
NextEarningsEstimatesLoader,
NextSplitAdjustedEarningsEstimatesLoader,
normalize_quarters,
PreviousEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader,
split_normalized_quarters,
)
from zipline.testing.fixtures import (
WithAdjustmentReader,
WithTradingSessions,
ZiplineTestCase,
)
from zipline.testing.predicates import assert_equal, assert_raises_regex
from zipline.testing.predicates import assert_frame_equal
from zipline.utils.numpy_utils import datetime64ns_dtype
from zipline.utils.numpy_utils import float64_dtype
import platform
import unittest
class Estimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate = Column(dtype=float64_dtype)
class MultipleColumnsEstimates(DataSet):
event_date = Column(dtype=datetime64ns_dtype)
fiscal_quarter = Column(dtype=float64_dtype)
fiscal_year = Column(dtype=float64_dtype)
estimate1 = Column(dtype=float64_dtype)
estimate2 = Column(dtype=float64_dtype)
def QuartersEstimates(announcements_out):
class QtrEstimates(Estimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def MultipleColumnsQuartersEstimates(announcements_out):
class QtrEstimates(MultipleColumnsEstimates):
num_announcements = announcements_out
name = Estimates
return QtrEstimates
def QuartersEstimatesNoNumQuartersAttr(num_qtr):
class QtrEstimates(Estimates):
name = Estimates
return QtrEstimates
def create_expected_df_for_factor_compute(start_date,
sids,
tuples,
end_date):
"""
Given a list of tuples of new data we get for each sid on each critical
date (when information changes), create a DataFrame that fills that
data through a date range ending at `end_date`.
"""
df = pd.DataFrame(tuples,
columns=[SID_FIELD_NAME,
'estimate',
'knowledge_date'])
df = df.pivot_table(columns=SID_FIELD_NAME,
values='estimate',
index='knowledge_date')
df = df.reindex(
pd.date_range(start_date, end_date)
)
# Index name is lost during reindex.
df.index = df.index.rename('knowledge_date')
df['at_date'] = end_date.tz_localize('utc')
df = df.set_index(['at_date', df.index.tz_localize('utc')]).ffill()
new_sids = set(sids) - set(df.columns)
df = df.reindex(columns=df.columns.union(new_sids))
return df
class WithEstimates(WithTradingSessions, WithAdjustmentReader):
"""
ZiplineTestCase mixin providing cls.loader and cls.events as class
level fixtures.
Methods
-------
make_loader(events, columns) -> PipelineLoader
Method which returns the loader to be used throughout tests.
events : pd.DataFrame
The raw events to be used as input to the pipeline loader.
columns : dict[str -> str]
The dictionary mapping the names of BoundColumns to the
associated column name in the events DataFrame.
make_columns() -> dict[BoundColumn -> str]
Method which returns a dictionary of BoundColumns mapped to the
associated column names in the raw data.
"""
# Short window defined in order for test to run faster.
START_DATE = pd.Timestamp('2014-12-28')
END_DATE = pd.Timestamp('2015-02-04')
@classmethod
def make_loader(cls, events, columns):
raise NotImplementedError('make_loader')
@classmethod
def make_events(cls):
raise NotImplementedError('make_events')
@classmethod
def get_sids(cls):
return cls.events[SID_FIELD_NAME].unique()
@classmethod
def make_columns(cls):
return {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
def make_engine(self, loader=None):
if loader is None:
loader = self.loader
return SimplePipelineEngine(
lambda x: loader,
self.asset_finder,
default_domain=EquitySessionDomain(
self.trading_days, self.ASSET_FINDER_COUNTRY_CODE,
),
)
@classmethod
def init_class_fixtures(cls):
cls.events = cls.make_events()
cls.ASSET_FINDER_EQUITY_SIDS = cls.get_sids()
cls.ASSET_FINDER_EQUITY_SYMBOLS = [
's' + str(n) for n in cls.ASSET_FINDER_EQUITY_SIDS
]
# We need to instantiate certain constants needed by supers of
# `WithEstimates` before we call their `init_class_fixtures`.
super(WithEstimates, cls).init_class_fixtures()
cls.columns = cls.make_columns()
# Some tests require `WithAdjustmentReader` to be set up by the time we
# make the loader.
cls.loader = cls.make_loader(cls.events, {column.name: val for
column, val in
cls.columns.items()})
class WithOneDayPipeline(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_columns(cls):
return {
MultipleColumnsEstimates.event_date: 'event_date',
MultipleColumnsEstimates.fiscal_quarter: 'fiscal_quarter',
MultipleColumnsEstimates.fiscal_year: 'fiscal_year',
MultipleColumnsEstimates.estimate1: 'estimate1',
MultipleColumnsEstimates.estimate2: 'estimate2'
}
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate1': [1., 2.],
'estimate2': [3., 4.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def make_expected_out(cls):
raise NotImplementedError('make_expected_out')
@classmethod
def init_class_fixtures(cls):
super(WithOneDayPipeline, cls).init_class_fixtures()
cls.sid0 = cls.asset_finder.retrieve_asset(0)
cls.expected_out = cls.make_expected_out()
def test_load_one_day(self):
# We want to test multiple columns
dataset = MultipleColumnsQuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=pd.Timestamp('2015-01-15', tz='utc'),
end_date=pd.Timestamp('2015-01-15', tz='utc'),
)
assert_frame_equal(results, self.expected_out)
class PreviousWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-10'),
'estimate1': 1.,
'estimate2': 3.,
FISCAL_QUARTER_FIELD_NAME: 1.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
class NextWithOneDayPipeline(WithOneDayPipeline, ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_out(cls):
return pd.DataFrame(
{
EVENT_DATE_FIELD_NAME: pd.Timestamp('2015-01-20'),
'estimate1': 2.,
'estimate2': 4.,
FISCAL_QUARTER_FIELD_NAME: 2.,
FISCAL_YEAR_FIELD_NAME: 2015.,
},
index=pd.MultiIndex.from_tuples(
((pd.Timestamp('2015-01-15', tz='utc'), cls.sid0),)
)
)
dummy_df = pd.DataFrame({SID_FIELD_NAME: 0},
columns=[SID_FIELD_NAME,
TS_FIELD_NAME,
EVENT_DATE_FIELD_NAME,
FISCAL_QUARTER_FIELD_NAME,
FISCAL_YEAR_FIELD_NAME,
'estimate'],
index=[0])
class WithWrongLoaderDefinition(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
events : pd.DataFrame
A simple DataFrame with columns needed for estimates and a single sid
and no other data.
Tests
------
test_wrong_num_announcements_passed()
Tests that loading with an incorrect quarter number raises an error.
test_no_num_announcements_attr()
Tests that the loader throws an AssertionError if the dataset being
loaded has no `num_announcements` attribute.
"""
@classmethod
def make_events(cls):
return dummy_df
def test_wrong_num_announcements_passed(self):
bad_dataset1 = QuartersEstimates(-1)
bad_dataset2 = QuartersEstimates(-2)
good_dataset = QuartersEstimates(1)
engine = self.make_engine()
columns = {c.name + str(dataset.num_announcements): c.latest
for dataset in (bad_dataset1,
bad_dataset2,
good_dataset)
for c in dataset.columns}
p = Pipeline(columns)
with self.assertRaises(ValueError) as e:
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
assert_raises_regex(e, INVALID_NUM_QTRS_MESSAGE % "-1,-2")
def test_no_num_announcements_attr(self):
dataset = QuartersEstimatesNoNumQuartersAttr(1)
engine = self.make_engine()
p = Pipeline({c.name: c.latest for c in dataset.columns})
with self.assertRaises(AttributeError):
engine.run_pipeline(
p,
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
class PreviousWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that previous quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
class NextWithWrongNumQuarters(WithWrongLoaderDefinition,
ZiplineTestCase):
"""
Tests that next quarter loader correctly breaks if an incorrect
number of quarters is passed.
"""
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
options = ["split_adjustments_loader",
"split_adjusted_column_names",
"split_adjusted_asof"]
class WrongSplitsLoaderDefinition(WithEstimates, ZiplineTestCase):
"""
Test class that tests that loaders break correctly when incorrectly
instantiated.
Tests
-----
test_extra_splits_columns_passed(SplitAdjustedEstimatesLoader)
A test that checks that the loader correctly breaks when an
unexpected column is passed in the list of split-adjusted columns.
"""
@classmethod
def init_class_fixtures(cls):
super(WithEstimates, cls).init_class_fixtures()
@parameterized.expand(itertools.product(
(NextSplitAdjustedEarningsEstimatesLoader,
PreviousSplitAdjustedEarningsEstimatesLoader),
))
def test_extra_splits_columns_passed(self, loader):
columns = {
Estimates.event_date: 'event_date',
Estimates.fiscal_quarter: 'fiscal_quarter',
Estimates.fiscal_year: 'fiscal_year',
Estimates.estimate: 'estimate'
}
with self.assertRaises(ValueError):
loader(dummy_df,
{column.name: val for column, val in
columns.items()},
split_adjustments_loader=self.adjustment_reader,
split_adjusted_column_names=["estimate", "extra_col"],
split_adjusted_asof=pd.Timestamp("2015-01-01"))
class WithEstimatesTimeZero(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events as a class level fixture and
defining a test for all inheritors to use.
Attributes
----------
cls.events : pd.DataFrame
Generated dynamically in order to test inter-leavings of estimates and
event dates for multiple quarters to make sure that we select the
right immediate 'next' or 'previous' quarter relative to each date -
i.e., the right 'time zero' on the timeline. We care about selecting
the right 'time zero' because we use that to calculate which quarter's
data needs to be returned for each day.
Methods
-------
get_expected_estimate(q1_knowledge,
q2_knowledge,
comparable_date) -> pd.DataFrame
Retrieves the expected estimate given the latest knowledge about each
quarter and the date on which the estimate is being requested. If
there is no expected estimate, returns an empty DataFrame.
Tests
------
test_estimates()
Tests that we get the right 'time zero' value on each day for each
sid and for each column.
"""
# Shorter date range for performance
END_DATE = pd.Timestamp('2015-01-28')
q1_knowledge_dates = [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-04'),
pd.Timestamp('2015-01-07'),
pd.Timestamp('2015-01-11')]
q2_knowledge_dates = [pd.Timestamp('2015-01-14'),
pd.Timestamp('2015-01-17'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-23')]
# We want to model the possibility of an estimate predicting a release date
# that doesn't match the actual release. This could be done by dynamically
# generating more combinations with different release dates, but that
# significantly increases the amount of time it takes to run the tests.
# These hard-coded cases are sufficient to know that we can update our
# beliefs when we get new information.
q1_release_dates = [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-14')] # One day late
q2_release_dates = [pd.Timestamp('2015-01-25'), # One day early
pd.Timestamp('2015-01-26')]
@classmethod
def make_events(cls):
"""
In order to determine which estimate we care about for a particular
sid, we need to look at all estimates that we have for that sid and
their associated event dates.
We define q1 < q2, and thus event1 < event2 since event1 occurs
during q1 and event2 occurs during q2 and we assume that there can
only be 1 event per quarter. We assume that there can be multiple
estimates per quarter leading up to the event. We assume that estimates
will not surpass the relevant event date. We will look at 2 estimates
for an event before the event occurs, since that is the simplest
scenario that covers the interesting edge cases:
- estimate values changing
- a release date changing
- estimates for different quarters interleaving
Thus, we generate all possible inter-leavings of 2 estimates per
quarter-event where estimate1 < estimate2 and all estimates are < the
relevant event and assign each of these inter-leavings to a
different sid.
"""
sid_estimates = []
sid_releases = []
# We want all permutations of 2 knowledge dates per quarter.
it = enumerate(
itertools.permutations(cls.q1_knowledge_dates +
cls.q2_knowledge_dates,
4)
)
for sid, (q1e1, q1e2, q2e1, q2e2) in it:
# We're assuming that estimates must come before the relevant
# release.
if (q1e1 < q1e2 and
q2e1 < q2e2 and
# All estimates are < Q2's event, so just constrain Q1
# estimates.
q1e1 < cls.q1_release_dates[0] and
q1e2 < cls.q1_release_dates[0]):
sid_estimates.append(cls.create_estimates_df(q1e1,
q1e2,
q2e1,
q2e2,
sid))
sid_releases.append(cls.create_releases_df(sid))
return pd.concat(sid_estimates +
sid_releases).reset_index(drop=True)
@classmethod
def get_sids(cls):
sids = cls.events[SID_FIELD_NAME].unique()
# Tack on an extra sid to make sure that sids with no data are
# included but have all-null columns.
return list(sids) + [max(sids) + 1]
@classmethod
def create_releases_df(cls, sid):
# Final release dates never change. The quarters have very tight date
# ranges in order to reduce the number of dates we need to iterate
# through when testing.
return pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-26')],
'estimate': [0.5, 0.8],
FISCAL_QUARTER_FIELD_NAME: [1.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0],
SID_FIELD_NAME: sid
})
@classmethod
def create_estimates_df(cls,
q1e1,
q1e2,
q2e1,
q2e2,
sid):
return pd.DataFrame({
EVENT_DATE_FIELD_NAME: cls.q1_release_dates + cls.q2_release_dates,
'estimate': [.1, .2, .3, .4],
FISCAL_QUARTER_FIELD_NAME: [1.0, 1.0, 2.0, 2.0],
FISCAL_YEAR_FIELD_NAME: [2015.0, 2015.0, 2015.0, 2015.0],
TS_FIELD_NAME: [q1e1, q1e2, q2e1, q2e2],
SID_FIELD_NAME: sid,
})
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
return pd.DataFrame()
def test_estimates(self):
dataset = QuartersEstimates(1)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline({c.name: c.latest for c in dataset.columns}),
start_date=self.trading_days[1],
end_date=self.trading_days[-2],
)
for sid in self.ASSET_FINDER_EQUITY_SIDS:
sid_estimates = results.xs(sid, level=1)
# Separate assertion for all-null DataFrame to avoid setting
# column dtypes on `all_expected`.
if sid == max(self.ASSET_FINDER_EQUITY_SIDS):
assert_true(sid_estimates.isnull().all().all())
else:
ts_sorted_estimates = self.events[
self.events[SID_FIELD_NAME] == sid
].sort_values(TS_FIELD_NAME)
q1_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 1
]
q2_knowledge = ts_sorted_estimates[
ts_sorted_estimates[FISCAL_QUARTER_FIELD_NAME] == 2
]
all_expected = pd.concat(
[self.get_expected_estimate(
q1_knowledge[q1_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
q2_knowledge[q2_knowledge[TS_FIELD_NAME] <=
date.tz_localize(None)],
date.tz_localize(None),
).set_index([[date]]) for date in sid_estimates.index],
axis=0)
assert_equal(all_expected[sid_estimates.columns],
sid_estimates)
class NextEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# If our latest knowledge of q1 is that the release is
# happening on this simulation date or later, then that's
# the estimate we want to use.
if (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q1_knowledge.iloc[-1:]
# If q1 has already happened or we don't know about it
# yet and our latest knowledge indicates that q2 hasn't
# happened yet, then that's the estimate we want to use.
elif (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] >=
comparable_date):
return q2_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateLoaderTestCase(NextEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class PreviousEstimate(WithEstimatesTimeZero, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
def get_expected_estimate(self,
q1_knowledge,
q2_knowledge,
comparable_date):
# The expected estimate will be for q2 if the last thing
# we've seen is that the release date already happened.
# Otherwise, it'll be for q1, as long as the release date
# for q1 has already happened.
if (not q2_knowledge.empty and
q2_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q2_knowledge.iloc[-1:]
elif (not q1_knowledge.empty and
q1_knowledge[EVENT_DATE_FIELD_NAME].iloc[-1] <=
comparable_date):
return q1_knowledge.iloc[-1:]
return pd.DataFrame(columns=q1_knowledge.columns,
index=[comparable_date])
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateLoaderTestCase(PreviousEstimate):
"""
Run the same tests as EventsLoaderTestCase, but using a BlazeEventsLoader.
"""
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class WithEstimateMultipleQuarters(WithEstimates):
"""
ZiplineTestCase mixin providing cls.events, cls.make_expected_out as
class-level fixtures and self.test_multiple_qtrs_requested as a test.
Attributes
----------
events : pd.DataFrame
Simple DataFrame with estimates for 2 quarters for a single sid.
Methods
-------
make_expected_out() --> pd.DataFrame
Returns the DataFrame that is expected as a result of running a
Pipeline where estimates are requested for multiple quarters out.
fill_expected_out(expected)
Fills the expected DataFrame with data.
Tests
------
test_multiple_qtrs_requested()
Runs a Pipeline that calculate which estimates for multiple quarters
out and checks that the returned columns contain data for the correct
number of quarters out.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 2,
TS_FIELD_NAME: [pd.Timestamp('2015-01-01'),
pd.Timestamp('2015-01-06')],
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-10'),
pd.Timestamp('2015-01-20')],
'estimate': [1., 2.],
FISCAL_QUARTER_FIELD_NAME: [1, 2],
FISCAL_YEAR_FIELD_NAME: [2015, 2015]
})
@classmethod
def init_class_fixtures(cls):
super(WithEstimateMultipleQuarters, cls).init_class_fixtures()
cls.expected_out = cls.make_expected_out()
@classmethod
def make_expected_out(cls):
expected = pd.DataFrame(columns=[cls.columns[col] + '1'
for col in cls.columns] +
[cls.columns[col] + '2'
for col in cls.columns],
index=cls.trading_days)
for (col, raw_name), suffix in itertools.product(
cls.columns.items(), ('1', '2')
):
expected_name = raw_name + suffix
if col.dtype == datetime64ns_dtype:
expected[expected_name] = pd.to_datetime(
expected[expected_name]
)
else:
expected[expected_name] = expected[
expected_name
].astype(col.dtype)
cls.fill_expected_out(expected)
return expected.reindex(cls.trading_days)
def test_multiple_qtrs_requested(self):
dataset1 = QuartersEstimates(1)
dataset2 = QuartersEstimates(2)
engine = self.make_engine()
results = engine.run_pipeline(
Pipeline(
merge([{c.name + '1': c.latest for c in dataset1.columns},
{c.name + '2': c.latest for c in dataset2.columns}])
),
start_date=self.trading_days[0],
end_date=self.trading_days[-1],
)
q1_columns = [col.name + '1' for col in self.columns]
q2_columns = [col.name + '2' for col in self.columns]
# We now expect a column for 1 quarter out and a column for 2
# quarters out for each of the dataset columns.
assert_equal(sorted(np.array(q1_columns + q2_columns)),
sorted(results.columns.values))
assert_equal(self.expected_out.sort_index(axis=1),
results.xs(0, level=1).sort_index(axis=1))
class NextEstimateMultipleQuarters(
WithEstimateMultipleQuarters, ZiplineTestCase
):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-11'),
raw_name + '1'
] = cls.events[raw_name].iloc[0]
expected.loc[
pd.Timestamp('2015-01-11'):pd.Timestamp('2015-01-20'),
raw_name + '1'
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
# We only have an estimate and event date for 2 quarters out before
# Q1's event happens; after Q1's event, we know 1 Q out but not 2 Qs
# out.
for col_name in ['estimate', 'event_date']:
expected.loc[
pd.Timestamp('2015-01-06'):pd.Timestamp('2015-01-10'),
col_name + '2'
] = cls.events[col_name].iloc[1]
# But we know what FQ and FY we'd need in both Q1 and Q2
# because we know which FQ is next and can calculate from there
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-09'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 2
expected.loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20'),
FISCAL_QUARTER_FIELD_NAME + '2'
] = 3
expected.loc[
pd.Timestamp('2015-01-01'):pd.Timestamp('2015-01-20'),
FISCAL_YEAR_FIELD_NAME + '2'
] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextEstimateMultipleQuarters(NextEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class PreviousEstimateMultipleQuarters(
WithEstimateMultipleQuarters,
ZiplineTestCase
):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def fill_expected_out(cls, expected):
# Fill columns for 1 Q out
for raw_name in cls.columns.values():
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-19')
] = cls.events[raw_name].iloc[0]
expected[raw_name + '1'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[raw_name].iloc[1]
# Fill columns for 2 Q out
for col_name in ['estimate', 'event_date']:
expected[col_name + '2'].loc[
pd.Timestamp('2015-01-20'):
] = cls.events[col_name].iloc[0]
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 4
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-12'):pd.Timestamp('2015-01-20')] = 2014
expected[
FISCAL_QUARTER_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 1
expected[
FISCAL_YEAR_FIELD_NAME + '2'
].loc[pd.Timestamp('2015-01-20'):] = 2015
return expected
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateMultipleQuarters(PreviousEstimateMultipleQuarters):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class WithVaryingNumEstimates(WithEstimates):
"""
ZiplineTestCase mixin providing fixtures and a test to ensure that we
have the correct overwrites when the event date changes. We want to make
sure that if we have a quarter with an event date that gets pushed back,
we don't start overwriting for the next quarter early. Likewise,
if we have a quarter with an event date that gets pushed forward, we want
to make sure that we start applying adjustments at the appropriate, earlier
date, rather than the later date.
Methods
-------
assert_compute()
Defines how to determine that results computed for the `SomeFactor`
factor are correct.
Tests
-----
test_windows_with_varying_num_estimates()
Tests that we create the correct overwrites from 2015-01-13 to
2015-01-14 regardless of how event dates were updated for each
quarter for each sid.
"""
@classmethod
def make_events(cls):
return pd.DataFrame({
SID_FIELD_NAME: [0] * 3 + [1] * 3,
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13')] * 2,
EVENT_DATE_FIELD_NAME: [pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-13'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-20')],
'estimate': [11., 12., 21.] * 2,
FISCAL_QUARTER_FIELD_NAME: [1, 1, 2] * 2,
FISCAL_YEAR_FIELD_NAME: [2015] * 6
})
@classmethod
def assert_compute(cls, estimate, today):
raise NotImplementedError('assert_compute')
def test_windows_with_varying_num_estimates(self):
dataset = QuartersEstimates(1)
assert_compute = self.assert_compute
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = 3
def compute(self, today, assets, out, estimate):
assert_compute(estimate, today)
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=pd.Timestamp('2015-01-13', tz='utc'),
# last event date we have
end_date=pd.Timestamp('2015-01-14', tz='utc'),
)
class PreviousVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([np.NaN, np.NaN, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 12, 12]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([12, 12, 12]))
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousVaryingNumEstimates(PreviousVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(
bz.data(events),
columns,
)
class NextVaryingNumEstimates(
WithVaryingNumEstimates,
ZiplineTestCase
):
def assert_compute(self, estimate, today):
if today == pd.Timestamp('2015-01-13', tz='utc'):
assert_array_equal(estimate[:, 0],
np.array([11, 12, 12]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, np.NaN, 21]))
else:
assert_array_equal(estimate[:, 0],
np.array([np.NaN, 21, 21]))
assert_array_equal(estimate[:, 1],
np.array([np.NaN, 21, 21]))
@classmethod
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazeNextVaryingNumEstimates(NextVaryingNumEstimates):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazeNextEstimatesLoader(
bz.data(events),
columns,
)
class WithEstimateWindows(WithEstimates):
"""
ZiplineTestCase mixin providing fixures and a test to test running a
Pipeline with an estimates loader over differently-sized windows.
Attributes
----------
events : pd.DataFrame
DataFrame with estimates for 2 quarters for 2 sids.
window_test_start_date : pd.Timestamp
The date from which the window should start.
timelines : dict[int -> pd.DataFrame]
A dictionary mapping to the number of quarters out to
snapshots of how the data should look on each date in the date range.
Methods
-------
make_expected_timelines() -> dict[int -> pd.DataFrame]
Creates a dictionary of expected data. See `timelines`, above.
Tests
-----
test_estimate_windows_at_quarter_boundaries()
Tests that we overwrite values with the correct quarter's estimate at
the correct dates when we have a factor that asks for a window of data.
"""
END_DATE = pd.Timestamp('2015-02-10')
window_test_start_date = pd.Timestamp('2015-01-05')
critical_dates = [pd.Timestamp('2015-01-09', tz='utc'),
pd.Timestamp('2015-01-15', tz='utc'),
pd.Timestamp('2015-01-20', tz='utc'),
pd.Timestamp('2015-01-26', tz='utc'),
pd.Timestamp('2015-02-05', tz='utc'),
pd.Timestamp('2015-02-10', tz='utc')]
# Starting date, number of announcements out.
window_test_cases = list(itertools.product(critical_dates, (1, 2)))
@classmethod
def make_events(cls):
# Typical case: 2 consecutive quarters.
sid_0_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-02-10'),
# We want a case where we get info for a later
# quarter before the current quarter is over but
# after the split_asof_date to make sure that
# we choose the correct date to overwrite until.
pd.Timestamp('2015-01-18')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-04-01')],
'estimate': [100., 101.] + [200., 201.] + [400],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2 + [4],
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 0,
})
# We want a case where we skip a quarter. We never find out about Q2.
sid_10_timeline = pd.DataFrame({
TS_FIELD_NAME: [pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-12'),
pd.Timestamp('2015-01-09'),
pd.Timestamp('2015-01-15')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-22'), pd.Timestamp('2015-01-22'),
pd.Timestamp('2015-02-05'), pd.Timestamp('2015-02-05')],
'estimate': [110., 111.] + [310., 311.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [3] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 10
})
# We want to make sure we have correct overwrites when sid quarter
# boundaries collide. This sid's quarter boundaries collide with sid 0.
sid_20_timeline = pd.DataFrame({
TS_FIELD_NAME: [cls.window_test_start_date,
pd.Timestamp('2015-01-07'),
cls.window_test_start_date,
pd.Timestamp('2015-01-17')],
EVENT_DATE_FIELD_NAME:
[pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-01-20'),
pd.Timestamp('2015-02-10'),
pd.Timestamp('2015-02-10')],
'estimate': [120., 121.] + [220., 221.],
FISCAL_QUARTER_FIELD_NAME: [1] * 2 + [2] * 2,
FISCAL_YEAR_FIELD_NAME: 2015,
SID_FIELD_NAME: 20
})
concatted = pd.concat([sid_0_timeline,
sid_10_timeline,
sid_20_timeline]).reset_index()
np.random.seed(0)
return concatted.reindex(np.random.permutation(concatted.index))
@classmethod
def get_sids(cls):
sids = sorted(cls.events[SID_FIELD_NAME].unique())
# Add extra sids between sids in our data. We want to test that we
# apply adjustments to the correct sids.
return [sid for i in range(len(sids) - 1)
for sid in range(sids[i], sids[i+1])] + [sids[-1]]
@classmethod
def make_expected_timelines(cls):
return {}
@classmethod
def init_class_fixtures(cls):
super(WithEstimateWindows, cls).init_class_fixtures()
cls.create_expected_df_for_factor_compute = partial(
create_expected_df_for_factor_compute,
cls.window_test_start_date,
cls.get_sids()
)
cls.timelines = cls.make_expected_timelines()
@parameterized.expand(window_test_cases)
def test_estimate_windows_at_quarter_boundaries(self,
start_date,
num_announcements_out):
dataset = QuartersEstimates(num_announcements_out)
trading_days = self.trading_days
timelines = self.timelines
# The window length should be from the starting index back to the first
# date on which we got data. The goal is to ensure that as we
# progress through the timeline, all data we got, starting from that
# first date, is correctly overwritten.
window_len = (
self.trading_days.get_loc(start_date) -
self.trading_days.get_loc(self.window_test_start_date) + 1
)
class SomeFactor(CustomFactor):
inputs = [dataset.estimate]
window_length = window_len
def compute(self, today, assets, out, estimate):
today_idx = trading_days.get_loc(today)
today_timeline = timelines[
num_announcements_out
].loc[today].reindex(
trading_days[:today_idx + 1]
).values
timeline_start_idx = (len(today_timeline) - window_len)
assert_almost_equal(estimate,
today_timeline[timeline_start_idx:])
engine = self.make_engine()
engine.run_pipeline(
Pipeline({'est': SomeFactor()}),
start_date=start_date,
# last event date we have
end_date=pd.Timestamp('2015-02-10', tz='utc'),
)
class PreviousEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
def make_loader(cls, events, columns):
return PreviousEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_previous = pd.concat([
pd.concat([
cls.create_expected_df_for_factor_compute([
(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)
], end_date)
for end_date in pd.date_range('2015-01-09', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-20')
),
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, np.NaN, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-20'))],
pd.Timestamp('2015-01-21')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 111, pd.Timestamp('2015-01-22')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-01-22', '2015-02-04')
]),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-01-20')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-01-20'))],
end_date
) for end_date in pd.date_range('2015-02-05', '2015-02-09')
]),
cls.create_expected_df_for_factor_compute(
[(0, 201, pd.Timestamp('2015-02-10')),
(10, 311, pd.Timestamp('2015-02-05')),
(20, 221, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
),
])
twoq_previous = pd.concat(
[cls.create_expected_df_for_factor_compute(
[(0, np.NaN, cls.window_test_start_date),
(10, np.NaN, cls.window_test_start_date),
(20, np.NaN, cls.window_test_start_date)],
end_date
) for end_date in pd.date_range('2015-01-09', '2015-02-09')] +
# We never get estimates for S1 for 2Q ago because once Q3
# becomes our previous quarter, 2Q ago would be Q2, and we have
# no data on it.
[cls.create_expected_df_for_factor_compute(
[(0, 101, pd.Timestamp('2015-02-10')),
(10, np.NaN, pd.Timestamp('2015-02-05')),
(20, 121, pd.Timestamp('2015-02-10'))],
pd.Timestamp('2015-02-10')
)]
)
return {
1: oneq_previous,
2: twoq_previous
}
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
class BlazePreviousEstimateWindows(PreviousEstimateWindows):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
import blaze as bz
return BlazePreviousEstimatesLoader(bz.data(events), columns)
class NextEstimateWindows(WithEstimateWindows, ZiplineTestCase):
@classmethod
@unittest.skipIf(platform.system() == 'Windows', "Don't run test on windows")
def make_loader(cls, events, columns):
return NextEarningsEstimatesLoader(events, columns)
@classmethod
def make_expected_timelines(cls):
oneq_next = pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
pd.Timestamp('2015-01-09')
),
pd.concat([
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, pd.Timestamp('2015-01-12')),
(20, 120, cls.window_test_start_date),
(20, 121, pd.Timestamp('2015-01-07'))],
end_date
) for end_date in pd.date_range('2015-01-12', '2015-01-19')
]),
cls.create_expected_df_for_factor_compute(
[(0, 100, cls.window_test_start_date),
(0, 101, pd.Timestamp('2015-01-20')),
(10, 110, pd.Timestamp('2015-01-09')),
(10, 111, | pd.Timestamp('2015-01-12') | pandas.Timestamp |
import pandas as pd
from business_rules.operators import (DataframeType, StringType,
NumericType, BooleanType, SelectType,
SelectMultipleType, GenericType)
from . import TestCase
from decimal import Decimal
import sys
import pandas
class StringOperatorTests(TestCase):
def test_operator_decorator(self):
self.assertTrue(StringType("foo").equal_to.is_operator)
def test_string_equal_to(self):
self.assertTrue(StringType("foo").equal_to("foo"))
self.assertFalse(StringType("foo").equal_to("Foo"))
def test_string_not_equal_to(self):
self.assertTrue(StringType("foo").not_equal_to("Foo"))
self.assertTrue(StringType("foo").not_equal_to("boo"))
self.assertFalse(StringType("foo").not_equal_to("foo"))
def test_string_equal_to_case_insensitive(self):
self.assertTrue(StringType("foo").equal_to_case_insensitive("FOo"))
self.assertTrue(StringType("foo").equal_to_case_insensitive("foo"))
self.assertFalse(StringType("foo").equal_to_case_insensitive("blah"))
def test_string_starts_with(self):
self.assertTrue(StringType("hello").starts_with("he"))
self.assertFalse(StringType("hello").starts_with("hey"))
self.assertFalse(StringType("hello").starts_with("He"))
def test_string_ends_with(self):
self.assertTrue(StringType("hello").ends_with("lo"))
self.assertFalse(StringType("hello").ends_with("boom"))
self.assertFalse(StringType("hello").ends_with("Lo"))
def test_string_contains(self):
self.assertTrue(StringType("hello").contains("ell"))
self.assertTrue(StringType("hello").contains("he"))
self.assertTrue(StringType("hello").contains("lo"))
self.assertFalse(StringType("hello").contains("asdf"))
self.assertFalse(StringType("hello").contains("ElL"))
def test_string_matches_regex(self):
self.assertTrue(StringType("hello").matches_regex(r"^h"))
self.assertFalse(StringType("hello").matches_regex(r"^sh"))
def test_non_empty(self):
self.assertTrue(StringType("hello").non_empty())
self.assertFalse(StringType("").non_empty())
self.assertFalse(StringType(None).non_empty())
class NumericOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, err_string):
NumericType("foo")
def test_numeric_type_validates_and_casts_decimal(self):
ten_dec = Decimal(10)
ten_int = 10
ten_float = 10.0
if sys.version_info[0] == 2:
ten_long = long(10)
else:
ten_long = int(10) # long and int are same in python3
ten_var_dec = NumericType(ten_dec) # this should not throw an exception
ten_var_int = NumericType(ten_int)
ten_var_float = NumericType(ten_float)
ten_var_long = NumericType(ten_long)
self.assertTrue(isinstance(ten_var_dec.value, Decimal))
self.assertTrue(isinstance(ten_var_int.value, Decimal))
self.assertTrue(isinstance(ten_var_float.value, Decimal))
self.assertTrue(isinstance(ten_var_long.value, Decimal))
def test_numeric_equal_to(self):
self.assertTrue(NumericType(10).equal_to(10))
self.assertTrue(NumericType(10).equal_to(10.0))
self.assertTrue(NumericType(10).equal_to(10.000001))
self.assertTrue(NumericType(10.000001).equal_to(10))
self.assertTrue(NumericType(Decimal('10.0')).equal_to(10))
self.assertTrue(NumericType(10).equal_to(Decimal('10.0')))
self.assertFalse(NumericType(10).equal_to(10.00001))
self.assertFalse(NumericType(10).equal_to(11))
def test_numeric_not_equal_to(self):
self.assertTrue(NumericType(10).not_equal_to(10.00001))
self.assertTrue(NumericType(10).not_equal_to(11))
self.assertTrue(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.1')))
self.assertFalse(NumericType(10).not_equal_to(10))
self.assertFalse(NumericType(10).not_equal_to(10.0))
self.assertFalse(NumericType(Decimal('10.0')).not_equal_to(Decimal('10.0')))
def test_other_value_not_numeric(self):
error_string = "10 is not a valid numeric type"
with self.assertRaisesRegexp(AssertionError, error_string):
NumericType(10).equal_to("10")
def test_numeric_greater_than(self):
self.assertTrue(NumericType(10).greater_than(1))
self.assertFalse(NumericType(10).greater_than(11))
self.assertTrue(NumericType(10.1).greater_than(10))
self.assertFalse(NumericType(10.000001).greater_than(10))
self.assertTrue(NumericType(10.000002).greater_than(10))
def test_numeric_greater_than_or_equal_to(self):
self.assertTrue(NumericType(10).greater_than_or_equal_to(1))
self.assertFalse(NumericType(10).greater_than_or_equal_to(11))
self.assertTrue(NumericType(10.1).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000001).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10.000002).greater_than_or_equal_to(10))
self.assertTrue(NumericType(10).greater_than_or_equal_to(10))
def test_numeric_less_than(self):
self.assertTrue(NumericType(1).less_than(10))
self.assertFalse(NumericType(11).less_than(10))
self.assertTrue(NumericType(10).less_than(10.1))
self.assertFalse(NumericType(10).less_than(10.000001))
self.assertTrue(NumericType(10).less_than(10.000002))
def test_numeric_less_than_or_equal_to(self):
self.assertTrue(NumericType(1).less_than_or_equal_to(10))
self.assertFalse(NumericType(11).less_than_or_equal_to(10))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.1))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000001))
self.assertTrue(NumericType(10).less_than_or_equal_to(10.000002))
self.assertTrue(NumericType(10).less_than_or_equal_to(10))
class BooleanOperatorTests(TestCase):
def test_instantiate(self):
err_string = "foo is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType("foo")
err_string = "None is not a valid boolean type"
with self.assertRaisesRegexp(AssertionError, err_string):
BooleanType(None)
def test_boolean_is_true_and_is_false(self):
self.assertTrue(BooleanType(True).is_true())
self.assertFalse(BooleanType(True).is_false())
self.assertFalse(BooleanType(False).is_true())
self.assertTrue(BooleanType(False).is_false())
class SelectOperatorTests(TestCase):
def test_contains(self):
self.assertTrue(SelectType([1, 2]).contains(2))
self.assertFalse(SelectType([1, 2]).contains(3))
self.assertTrue(SelectType([1, 2, "a"]).contains("A"))
def test_does_not_contain(self):
self.assertTrue(SelectType([1, 2]).does_not_contain(3))
self.assertFalse(SelectType([1, 2]).does_not_contain(2))
self.assertFalse(SelectType([1, 2, "a"]).does_not_contain("A"))
class SelectMultipleOperatorTests(TestCase):
def test_contains_all(self):
self.assertTrue(SelectMultipleType([1, 2]).
contains_all([2, 1]))
self.assertFalse(SelectMultipleType([1, 2]).
contains_all([2, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
contains_all([2, 1, "A"]))
def test_is_contained_by(self):
self.assertTrue(SelectMultipleType([1, 2]).
is_contained_by([2, 1, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
is_contained_by([2, 3, 4]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
is_contained_by([2, 1, "A"]))
def test_shares_at_least_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_at_least_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_at_least_one_element_with([4, "A"]))
def test_shares_exactly_one_element_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_exactly_one_element_with([4, 3]))
self.assertTrue(SelectMultipleType([1, 2, "a"]).
shares_exactly_one_element_with([4, "A"]))
self.assertFalse(SelectMultipleType([1, 2, 3]).
shares_exactly_one_element_with([2, 3, "a"]))
def test_shares_no_elements_with(self):
self.assertTrue(SelectMultipleType([1, 2]).
shares_no_elements_with([4, 3]))
self.assertFalse(SelectMultipleType([1, 2]).
shares_no_elements_with([2, 3]))
self.assertFalse(SelectMultipleType([1, 2, "a"]).
shares_no_elements_with([4, "A"]))
class DataframeOperatorTests(TestCase):
def test_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ],
})
result: pd.Series = DataframeType({"value": df}).exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_not_exists(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, ],
"var2": [3, 5, 6, ]
})
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "invalid"})
self.assertTrue(result.equals(pd.Series([True, True, True, ])))
result: pd.Series = DataframeType({"value": df}).not_exists({"target": "var1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
result: pd.Series = DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_exists({"target": "--r1"})
self.assertTrue(result.equals(pd.Series([False, False, False, ])))
def test_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1, 2, 4, "", 7, ],
"var2": [3, 5, 6, "", 2, ],
"var3": [1, 3, 8, "", 7, ],
"var4": ["test", "issue", "one", "", "two", ]
})
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to({
"target": "--r1",
"comparator": "--r3"
}).equals(pandas.Series([True, False, False, False, True, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var1",
"comparator": 20
}).equals(pandas.Series([False, False, False, False, False, ])))
self.assertTrue(DataframeType({"value": df}).equal_to({
"target": "var4",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False, False, ])))
def test_not_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).not_equal_to({
"target": "--r1",
"comparator": 20
}).equals(pandas.Series([True, True, True])))
def test_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "", "new", "val"],
"var2": ["WORD", "", "test", "VAL"],
"var3": ["LET", "", "GO", "read"]
})
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "NEW"
}).equals(pandas.Series([False, False, True, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": ""
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).equal_to_case_insensitive({
"target": "--r1",
"comparator": "--r2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([True, False, False, True])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False, False])))
self.assertTrue(DataframeType({"value": df}).equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([False, False, False, False])))
def test_not_equal_to_case_insensitive(self):
df = pandas.DataFrame.from_dict({
"var1": ["word", "new", "val"],
"var2": ["WORD", "test", "VAL"],
"var3": ["LET", "GO", "read"],
"var4": ["WORD", "NEW", "VAL"]
})
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var2"
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).not_equal_to_case_insensitive({
"target": "var1",
"comparator": "var1",
"value_is_literal": True
}).equals(pandas.Series([True, True, True])))
def test_less_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than({
"target": "--r1",
"comparator": "var3"
}).equals(pandas.Series([False, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than({
"target": "var1",
"comparator": 3
}).equals(pandas.Series([True, True, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_less_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).less_than_or_equal_to({
"target": "--r1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var1"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).less_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([False, False, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 5, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).less_than_or_equal_to({
"target": "LBDY",
"comparator": 5
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_greater_than(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than({
"target": "var1",
"comparator": "--r3"
}).equals(pandas.Series([False, False, False])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than({
"target": "var1",
"comparator": 5000
}).equals(pandas.Series([False, False, False])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, None, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, False, False, False, False, ])))
def test_greater_than_or_equal_to(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4]
})
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var1",
"comparator": "var4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).greater_than_or_equal_to({
"target": "var1",
"comparator": "--r4"
}).equals(pandas.Series([True, True, True])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": "var3"
}).equals(pandas.Series([True, True, False])))
self.assertTrue(DataframeType({"value": df}).greater_than_or_equal_to({
"target": "var2",
"comparator": 2
}).equals(pandas.Series([True, True, True])))
another_df = pandas.DataFrame.from_dict(
{
"LBDY": [4, 3, None, None, None]
}
)
self.assertTrue(DataframeType({"value": another_df}).greater_than_or_equal_to({
"target": "LBDY",
"comparator": 3
}).equals(pandas.Series([True, True, False, False, False, ])))
def test_contains(self):
df = pandas.DataFrame.from_dict({
"var1": [1,2,4],
"var2": [3,5,6],
"var3": [1,3,8],
"var4": [1,2,4],
"string_var": ["hj", "word", "c"],
"var5": [[1,3,5],[1,3,5], [1,3,5]]
})
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": 2
}).equals(pandas.Series([False, True, False])))
self.assertTrue(DataframeType({"value": df}).contains({
"target": "var1",
"comparator": "var3"
}).equals(pandas.Series([True, False, False])))
self.assertTrue(DataframeType({"value": df, "column_prefix_map": {"--": "va"}}).contains({
"target": "var1",
"comparator": "--r3"
}).equals( | pandas.Series([True, False, False]) | pandas.Series |
# -*- coding: utf-8 -*-
import sys
import numpy as np
import pandas as pd
import itertools
import logging
import inspect
from result import AlgoResult, ListResult
from scipy.misc import comb
import tools
class Algo(object):
""" Base class for algorithm calculating weights for online portfolio.
You have to subclass either step method to calculate weights sequentially
or weights method, which does it at once. weights method might be useful
for better performance when using matrix calculation, but be careful about
look-ahead bias.
Upper case letters stand for matrix and lower case for vectors (such as
B and b for weights).
"""
# if true, replace missing values by last values
REPLACE_MISSING = False
# type of prices going into weights or step function
# ratio: pt / pt-1
# log: log(pt / pt-1)
# raw: pt
PRICE_TYPE = 'ratio'
def __init__(self, min_history=None):
""" Subclass to define algo specific parameters here.
:param min_history: If not None, use initial weights for first min_window days. Use
this if the algo needs some history for proper parameter estimation.
"""
self.min_history = min_history or 0
def init_weights(self, m):
""" Set initial weights.
:param m: Number of assets.
"""
return np.zeros(m)
def init_step(self, X):
""" Called before step method. Use to initialize persistent variables.
:param X: Entire stock returns history.
"""
pass
def step(self, x, last_b, history):
""" Calculate new portfolio weights. If history parameter is omited, step
method gets passed just parameters `x` and `last_b`. This significantly
increases performance.
:param x: Last returns.
:param last_b: Last weights.
:param history: All returns up to now. You can omit this parameter to increase
performance.
"""
raise NotImplementedError('Subclass must implement this!')
def weights(self, X, log_progress=True):
""" Return weights. Call step method to update portfolio sequentially. Subclass
this method only at your own risk. """
# init
B = X.copy() * 0.
last_b = self.init_weights(X.shape[1])
# use history parameter in step method?
step_args = inspect.getargspec(self.step)[0]
use_history = len(step_args) >= 4
# run algo
self.init_step(X)
for t, (_, x) in enumerate(X.iterrows()):
# save weights
B.ix[t] = last_b
# keep initial weights for min_history
if t < self.min_history:
continue
# predict for t+1
if use_history:
history = X.iloc[:t+1]
last_b = self.step(x, last_b, history)
else:
last_b = self.step(x, last_b)
# convert last_b to suitable format if needed
if type(last_b) == np.matrix:
# remove dimension
last_b = np.squeeze(np.array(last_b))
# show progress by 10 pcts
if log_progress:
tools.log_progress(t, len(X), by=10)
return B
def run(self, S, log_progress=True):
""" Run algorithm and get weights.
:params S: Absolute stock prices. DataFrame with stocks in columns.
:param show_progress: Log computation progress. Works only for algos with
defined step method.
"""
if log_progress:
logging.debug('Running {}...'.format(self.__class__.__name__))
if isinstance(S, ListResult):
P = S.to_dataframe()
else:
P = S
# get weights
X = self._convert_prices(P, self.PRICE_TYPE, self.REPLACE_MISSING)
try:
B = self.weights(X, log_progress=log_progress)
except TypeError: # weights are missing log_progress parameter
B = self.weights(X)
# cast to dataframe if weights return numpy array
if not isinstance(B, pd.DataFrame):
B = | pd.DataFrame(B, index=P.index, columns=P.columns) | pandas.DataFrame |
# coding: utf-8
# In[3]:
import pandas as pd
import pybedtools
import pyBigWig
import numpy as np
import os
# In[1]:
# Develop a function that takes the top N intervals from an SV, and averages those, instead of just doing max.
def topUsage(df,n):
df['topUsage'] = df.sort_values('avgUsage',ascending=False).iloc[0:n,:]['avgUsage'].mean()
df['topExp'] = df.sort_values('avgExp',ascending=False).iloc[0:n,:]['avgExp'].mean()
return df
# In[2]:
# Define two functions that determine how far into the amino acids the SV begins
# assumes exon rank ordered.
def cdsRank(df):
l = df.shape[0]
dff = df.iloc[0,:] # get the first entry
# first separate by strand
if dff['strand'] == 1:
if dff['start'] - dff['cStart'] < 0: # if sv begins before the first exon
df['early'] = dff['cdsCount']
return df
else:
df['early'] = dff['start'] - dff['cStart'] + dff['cdsCount']
return df
# reverse strand
if dff['strand'] == -1:
if dff['cStop'] - dff['stop'] < 0: # if sv begins before the first exon
df['early'] = dff['cdsCount']
return df
else:
df['early'] = dff['cStop'] - dff['stop'] + dff['cdsCount']
return df
# assumes reverse exon rank ordered.
def cdsEnd(df):
l = df.shape[0]
dff = df.iloc[0,:]
# first separate by strand
if dff['strand'] == 1:
if dff['stop'] > dff['cStop']: # if sv begins before the first exon
df['late'] = dff['cdsCount'] + dff['size']
return df
else:
df['late'] = dff['stop'] - dff['cStart'] + dff['cdsCount']
return df
# reverse strand
if dff['strand'] == -1:
if dff['start'] < dff['cStart']: # if sv ends after the last overlapped exon
df['late'] = dff['cdsCount'] + dff['size']
return df
else:
df['late'] = dff['cStop'] - dff['start'] + dff['cdsCount']
return df
# In[2]:
# for def:
def annotateSVs(inpath, outpath, phylopPath, tempdir):
# read csv file into dataframe
df = pd.read_csv(inpath)
# Do all exon-level and gene-level features
exons = pybedtools.BedTool('data/exons_Appris_featurized_transcript_Chr1-Y_loeuf.sorted.bed')
df['ID'] = 'sv' + pd.Series(df.index.values).apply(str)
df[['chrom','start','end','ID']].to_csv(os.path.join(tempdir,'df.bed'),sep='\t', index=False,header=False)
a = pybedtools.BedTool(os.path.join(tempdir,'df.bed'))
b = a.intersect(exons, wa=True, wb=True).saveas(os.path.join(tempdir,'dfExonOverlap.bed'))
del a
del exons
del b
exonOverlap = pd.read_csv(os.path.join(tempdir,'dfExonOverlap.bed'), sep='\t', header=None,
names=['chrom', 'start', 'stop', 'ID', 'eChrom', 'eStart', 'eStop', 'gene', 'exonRank', 'skippable', 'exonsInGene', 'const','pLI','loeuf'])
exonOverlap['numExonsFinal'] = exonOverlap.groupby(by='ID').eStart.transform('size')
exonOverlap['allSkippable'] = exonOverlap.groupby(by='ID').skippable.transform(lambda x: all(x))
exonOverlap['lowestExonRank'] = exonOverlap.groupby(by='ID').exonRank.transform('min')
exonOverlap['lowestExonsInGene'] = exonOverlap.groupby(by='ID').exonsInGene.transform('min')
exonOverlap['anyConstExon'] = exonOverlap.groupby(by='ID').const.transform('max')
exonOverlap['pLIMax'] = exonOverlap.groupby(by='ID').pLI.transform('max')
exonOverlap['loeufMin'] = exonOverlap.groupby(by='ID').loeuf.transform('min')
exonOverlap.drop_duplicates(subset='ID', inplace=True)
df = df.merge(exonOverlap[['ID', 'numExonsFinal', 'allSkippable', 'lowestExonRank', 'lowestExonsInGene', 'anyConstExon','pLIMax','loeufMin']], how='left', on='ID')
del exonOverlap
# numExons = the total number of exons that an SV overlaps, across all genes
# allSkippable = 1 if all exons overlapped start and end in same phase, 0 otherwise
# lowestExonRank = the minimum rank of all exons overlapped
# lowestExonsInGene = the number of exons in the gene overlapped, minimum if multiple genes
# anyConstExon = 1 if any exon overlapped is constitutive, 0 otherwise
# pLIMax = the maximum pLI of all overlapped genes (high pLI is more intolerant)
# loeufMin = the minimum LOEUF of all overlapped genes (low loeuf is more intolerant)
# Calculate conservation feature using phyloP as average of top 400 most conserved position
size = 400
with open("data/hg38chromsizes.tsv") as f:
chrms = dict((k, v) for k,v in (line.split() for line in f))
consBW = pyBigWig.open(phylopPath)
# get phyloP value for each position in the SV
x = []
for i in range(df.shape[0]):
if int(df.loc[i,'end']) - int(df.loc[i,'start']) > 1000000:
x.append(np.array([15.0]))
else:
try:
x.append(np.nan_to_num(np.array(consBW.values(df.loc[i,'chrom'], int(df.loc[i,'start']), int(df.loc[i,'end'])))))
except:
print(df.loc[i,'start'])
x.append(np.array([0.5]))
del consBW
x = np.asarray(x)
# get the mean of the top 100 most conserved positions
cons = [np.mean(y[np.argsort(y)[-size:]]) for y in x]
del x
df['phyloP'] = pd.Series(cons)
del cons
# Add TAD features
tads = pybedtools.BedTool('data/rep12tadsMergedhg38.bed')
df[['chrom','start','end','ID']].to_csv(os.path.join(tempdir,'df.bed'),sep='\t', index=False,header=False)
a = pybedtools.BedTool(os.path.join(tempdir,'df.bed'))
b = a.intersect(tads, wa=True, wb=True).saveas(os.path.join(tempdir,'dfTadOverlap.bed'))
tadOverlap = pd.read_csv(os.path.join(tempdir,'dfTadOverlap.bed'), sep='\t', header=None,
names=['chrom', 'start', 'stop', 'ID', 'tChrom', 'tStart', 'tStop', 'strength'])
tadOverlap['maxStrength'] = tadOverlap.groupby(by='ID').strength.transform('max')
tadOverlap.drop_duplicates(subset='ID', inplace=True)
df = df.merge(tadOverlap[['ID', 'maxStrength']], how='left', on='ID')
df['maxStrength'].fillna(value=0, inplace=True)
del tads
del tadOverlap
## Add amino acid features
cds = pybedtools.BedTool('data/exons_CDS_Chr1-Y.sorted.bed')
df[['chrom','start','end','ID']].to_csv(os.path.join(tempdir,'df.bed'),sep='\t', index=False,header=False)
a = pybedtools.BedTool(os.path.join(tempdir,'df.bed'))
b = a.intersect(cds, wa=True, wb=True).saveas(os.path.join(tempdir,'dfCDSOverlap.bed'))
cdsOverlap = pd.read_csv(os.path.join(tempdir,'dfCDSOverlap.bed'), sep='\t', header=None,
names=['chrom', 'start', 'stop', 'ID', 'cChrom', 'cStart', 'cStop', 'CDSLength', 'size', 'exonRank', 'strand','gene', 'cdsCount', 'pLI','loeuf'])
del cds
# use if statement to address possible scenario in which all given variants are in UTR and don't overlap a CDS
if cdsOverlap.shape[0] != 0:
# apply above functions to the SVs that were previously intersected with coding exons
out = cdsOverlap.sort_values('exonRank').groupby(['ID','gene']).apply(cdsRank)
out = out.sort_values('exonRank', ascending=False).groupby(['ID','gene']).apply(cdsEnd)
# get shape, but don't drop duplicates (not in place)
out.drop_duplicates(subset='ID').shape[0]
# Featurize above information into features normalized by cds length
out['cdsFracStart'] = out['early']/out['CDSLength']
out['cdsFracEnd'] = out['late']/out['CDSLength']
out['cdsFrac'] = (out['late'] - out['early'])/out['CDSLength']
# This is an experimental feature, which gives the max pLI and loeuf of the genes which are signficantly disrupted by the SV.
out['pLI_max25'] = out[(out['cdsFracStart'] == 0) | (out['cdsFrac'] > 0.25)].groupby('ID')['pLI'].transform('max')
#out['pLI_max25'].fillna(value=0, inplace=True)
out['loeuf_min25']= out[(out['cdsFracStart'] == 0) | (out['cdsFrac'] > 0.25)].groupby('ID')['loeuf'].transform('min')
#out['loeuf_min25'].fillna(value=0, inplace=True)
# but we now need to fill in all the cells with the max loeuf_max25 in their ID
out['pLI_max25_ID'] = out.groupby('ID')['pLI_max25'].transform('max')
out['loeuf_min25_ID'] = out.groupby('ID')['loeuf_min25'].transform('max')
out['cdsFracMax'] = out.groupby('ID')['cdsFrac'].transform('max')
out['cdsFracStartMin'] = out.groupby('ID')['cdsFracStart'].transform('min')
out['cdsFracEndMax'] = out.groupby('ID')['cdsFracEnd'].transform('max')
out.drop_duplicates(subset='ID', inplace=True)
final = df.merge(out[['ID', 'cdsFracStartMin', 'cdsFracEndMax', 'cdsFracMax', 'pLI_max25_ID', 'loeuf_min25_ID']], how='left')
del out
else:
final = df.copy()
del df
final['cdsFracStartMin'] = float('NaN')
final['cdsFracEndMax'] = float('NaN')
final['cdsFracMax'] = float('NaN')
final['pLI_max25_ID'] = float('NaN')
final['loeuf_min25_ID'] = float('NaN')
del cdsOverlap
final['cdsFracStartMin'].fillna(value=2, inplace=True)
final['cdsFracEndMax'].fillna(value=-1, inplace=True)
final['cdsFracMax'].fillna(value=-1, inplace=True)
final['pLI_max25_ID'].fillna(value=-1, inplace=True)
final['loeuf_min25_ID'].fillna(value=3, inplace=True)
# Add exon inclusion features
usage = pybedtools.BedTool('data/summary_exon_usage_hg38.sorted.bed')
final[['chrom','start','end','ID']].to_csv(os.path.join(tempdir,'df.bed'),sep='\t', index=False,header=False)
a = pybedtools.BedTool(os.path.join(tempdir,'df.bed'))
b = a.intersect(usage, wa=True, wb=True).saveas(os.path.join(tempdir,'dfUsageOverlap.bed'))
usageOverlap = pd.read_csv(os.path.join(tempdir,'dfUsageOverlap.bed'), sep='\t', header=None,
names=['chrom', 'start', 'stop', 'ID', 'uChrom', 'uStart', 'uStop', 'avgUsage', 'avgExp'])
out = usageOverlap.groupby('ID').apply(topUsage,n=size)
out.drop_duplicates(subset='ID', inplace=True)
del a
del b
if out.shape[0] == 0:
# try looking for features that are nearby, using bedtools window, and window size of 1000
b = a.window(usage, w=1000).saveas(os.path.join(tempdir,'dfUsageOverlap.bed'))
usageOverlap = pd.read_csv(os.path.join(tempdir,'dfUsageOverlap.bed'), sep='\t', header=None,
names=['chrom', 'start', 'stop', 'ID', 'uChrom', 'uStart', 'uStop', 'avgUsage', 'avgExp'])
pd.read_csv('data/summary_exon_usage_hg38.sorted.bed')
out = usageOverlap.groupby('ID').apply(topUsage,n=size)
out.drop_duplicates(subset='ID', inplace=True)
if out.shape[0] == 0:
# if still zero, assume too far from exon to be found, give the median values of all exons
exp = | pd.read_csv('data/summary_exon_usage_hg38.sorted.bed', names=['uChrom', 'uStart', 'uStop','Usage','Exp'],sep='\t') | pandas.read_csv |
import os
from datetime import date
from typing import List, Union, Tuple
import pandas as pd
from code_base.excess_mortality.cov_mort import GetFullCovidMortality
from code_base.pyll.decode_vars import LIST_LIFE_EXP_DT_COUNTRIES
from code_base.pyll.folder_constants import output_pyll_bg, output_pyll_cz, source_le_countries_data
from code_base.pyll.get_life_data import FullLifeExpectancy
from code_base.utils.file_utils import SaveFileMixin
class MergeMortalityLifeExpectancy(SaveFileMixin):
# Class only applicable for Bulgaria and Czechia currently.
def __init__(self, country: str):
self.countries = LIST_LIFE_EXP_DT_COUNTRIES
if country not in self.countries:
raise TypeError(f'Incorrect country entered. Only acceptable options are: {", ".join(self.countries)}.')
self.country = country
self.directory = {
'Bulgaria': output_pyll_bg,
'Czechia': output_pyll_cz
}
self.cov_mort_file = GetFullCovidMortality(self.country).get_covid_mortality
self.life_expectancy_file = FullLifeExpectancy(self.country).get_life_tables()
self.work_life_ex = os.path.join(source_le_countries_data, 'work_life_expectancy.csv')
def __get_cov_mort_and_lf_expectancy(self,
start_date: Union[List, Tuple],
end_date: Union[List, Tuple],
start_age: int,
end_age: int,
sheet_name: str,
mode: str = 'PYLL') -> str:
if not mode == 'PYLL':
lf_ex = pd.read_csv(self.work_life_ex)
else:
lf_ex = pd.read_csv(self.life_expectancy_file)
if sheet_name:
cov_mort = pd.read_excel(self.cov_mort_file, sheet_name=sheet_name)
else:
cov_mort = | pd.read_csv(self.cov_mort_file) | pandas.read_csv |
######################################################################
# (c) Copyright EFC of NICS, Tsinghua University. All rights reserved.
# Author: <NAME>
# Email : <EMAIL>
#
# Create Date : 2020.08.16
# File Name : read_results.py
# Description : read the config of train and test accuracy data from
# log file and show on one screen to compare
# Dependencies:
######################################################################
import os
import sys
import h5py
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def check_column(configs, column_label):
''' check if there is already column named column_label '''
if column_label in configs.columns.values.tolist():
return True
else:
return False
def add_line(configs, count, wordlist, pos):
''' add info in one line of one file into dataframe configs
count is the line index
wordlist is the word list of this line
pos=1 means first level configs and pos=3 means second
'''
# first level configs
if pos == 1:
column_label = wordlist[0]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[2] \
if column_label != 'output_dir' else wordlist[2][-17:]
# second level configs
elif pos == 3:
# deal with q_cfg
if wordlist[2] == 'q_cfg':
for i in range(4, len(wordlist)):
if wordlist[i].endswith("':"):
column_label = wordlist[i]
data_element = wordlist[i+1]
for j in range(i+2, len(wordlist)):
if wordlist[j].endswith("':"): break
else: data_element += wordlist[j]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# len > 5 means list configs
elif len(wordlist) > 5:
column_label = wordlist[0]+wordlist[2]
data_element = wordlist[4]
for i in range(5, len(wordlist)):
data_element += wordlist[i]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = data_element
else:
configs[column_label] = None
configs.loc[count,(column_label)] = data_element
# !len > 5 means one element configs
else:
column_label = wordlist[0]+wordlist[2]
if check_column(configs, column_label):
configs.loc[count,(column_label)] = wordlist[4]
else:
configs[column_label] = None
configs.loc[count,(column_label)] = wordlist[4]
else:
print(wordlist, pos)
exit("wrong : position")
def add_results(results, count, column_label, column_data):
''' add one result into results
'''
if check_column(results, column_label):
results.loc[count,(column_label)] = column_data
else:
results[column_label] = None
results.loc[count,(column_label)] = column_data
def process_file(filepath, configs, results, count):
''' process one file line by line and add all configs
and values into dataframe
'''
with open(filepath) as f:
temp_epoch = 0
train_acc = 0
train_loss = 0
test_loss = 0
for line in f: # check line by line
wordlist = line.split() # split one line to a list
# process long config lines with : at position 3
if len(wordlist) >= 5 and wordlist[0] != 'accuracy'\
and wordlist[0] != 'log':
if wordlist[3]==':':
add_line(configs, count, wordlist, 3) # add this line to configs
# process long config lines with : at position 1
elif len(wordlist) >= 3 and wordlist[0] != 'gpu':
if wordlist[1]==':':
add_line(configs, count, wordlist, 1) # add this line to configs
# process best result
if len(wordlist) > 1:
# add best acc
if wordlist[0] == 'best':
add_results(results, count, 'bestacc', wordlist[2])
add_results(results, count, 'bestepoch', wordlist[5])
# add train loss and acc
elif wordlist[0] == 'epoch:':
train_acc = wordlist[13][1:-1]
train_loss = wordlist[10][1:-1]
# add test loss
elif wordlist[0] == 'test:':
test_loss = wordlist[7][1:-1]
# add test acc and save all results in this epoch to results
elif wordlist[0] == '*':
add_results(results, count, str(temp_epoch)+'trainacc', train_acc)
add_results(results, count, str(temp_epoch)+'trainloss', train_loss)
add_results(results, count, str(temp_epoch)+'testloss', test_loss)
add_results(results, count, str(temp_epoch)+'testacc', wordlist[2])
add_results(results, count, str(temp_epoch)+'test5acc', wordlist[4])
temp_epoch += 1
return temp_epoch
def main(argv):
print(argparse)
print(type(argparse))
parser = argparse.argumentparser()
# required arguments:
parser.add_argument(
"type",
help = "what type of mission are you going to do.\n\
supported: compare loss_curve acc_curve data_range"
)
parser.add_argument(
"output_dir",
help = "the name of output dir to store the results."
)
parser.add_argument(
"--results_name",
help = "what results are you going to plot or compare.\n \
supported: best_acc test_acc train_acc test_loss train_loss"
)
parser.add_argument(
"--config_name",
help = "what configs are you going to show.\n \
example: all bw group hard "
)
parser.add_argument(
"--file_range",
nargs='+',
help = "the date range of input file to read the results."
)
args = parser.parse_args()
print(args.file_range)
dirlist = os.listdir('./')
print(dirlist)
configs = pd.dataframe()
print(configs)
results = | pd.dataframe() | pandas.dataframe |
import os
from tempfile import TemporaryFile
import joblib
import pandas as pd
from google.cloud import storage
from superwise import Superwise
CLIENT_ID = os.getenv("SUPERWISE_CLIENT_ID")
SECRET = os.getenv("SUPERWISE_SECRET")
SUPERWISE_MODEL_ID = os.getenv("SUPERWISE_MODEL_ID")
SUPERWISE_VERSION_ID = os.getenv("SUPERWISE_VERSION_ID")
class DiamondPricePredictor(object):
def __init__(self, model_gcs_path):
self._model = self._set_model(model_gcs_path)
self._sw = Superwise(
client_id=os.getenv("SUPERWISE_CLIENT_ID"),
secret=os.getenv("SUPERWISE_SECRET")
)
def _send_monitor_data(self, predictions):
"""
send predictions and input data to Superwise
:param pd.Serie prediction
:return str transaction_id
"""
transaction_id = self._sw.transaction.log_records(
model_id=int(os.getenv("SUPERWISE_MODEL_ID")),
version_id=int(os.getenv("SUPERWISE_VERSION_ID")),
records=predictions
)
return transaction_id
def _set_model(self, model_gcs_path):
"""
download file from gcs to temp file and deserialize it to sklearn object
:param str model_gcs_path: Path to gcs file
:return sklearn.Pipeline model: Deserialized pipeline ready for production
"""
storage_client = storage.Client()
bucket_name = os.environ["BUCKET_NAME"]
print(f"Loading from bucket {bucket_name} model {model_gcs_path}")
bucket = storage_client.get_bucket(bucket_name)
# select bucket file
blob = bucket.blob(model_gcs_path)
with TemporaryFile() as temp_file:
# download blob into temp file
blob.download_to_file(temp_file)
temp_file.seek(0)
# load into joblib
model = joblib.load(temp_file)
print(f"Finished loading model from GCS")
return model
def predict(self, instances):
"""
apply predictions on instances and log predictions to Superwise
:param list instances: [{record1}, {record2} ... {record-N}]
:return dict api_output: {[predicted_prices: prediction, transaction_id: str]}
"""
input_df = | pd.DataFrame(instances) | pandas.DataFrame |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from os.path import join as pjoin
import datetime
import io
import os
import json
import pytest
from pyarrow.compat import guid, u
from pyarrow.filesystem import LocalFileSystem
import pyarrow as pa
from .pandas_examples import dataframe_with_arrays, dataframe_with_lists
import numpy as np
import pandas as pd
import pandas.util.testing as tm
# Ignore these with pytest ... -m 'not parquet'
parquet = pytest.mark.parquet
def _write_table(table, path, **kwargs):
import pyarrow.parquet as pq
if isinstance(table, pd.DataFrame):
table = pa.Table.from_pandas(table)
pq.write_table(table, path, **kwargs)
return table
def _read_table(*args, **kwargs):
import pyarrow.parquet as pq
return pq.read_table(*args, **kwargs)
@parquet
def test_single_pylist_column_roundtrip(tmpdir):
for dtype in [int, float]:
filename = tmpdir.join('single_{}_column.parquet'
.format(dtype.__name__))
data = [pa.array(list(map(dtype, range(5))))]
table = pa.Table.from_arrays(data, names=('a', 'b'))
_write_table(table, filename.strpath)
table_read = _read_table(filename.strpath)
for col_written, col_read in zip(table.itercolumns(),
table_read.itercolumns()):
assert col_written.name == col_read.name
assert col_read.data.num_chunks == 1
data_written = col_written.data.chunk(0)
data_read = col_read.data.chunk(0)
assert data_written.equals(data_read)
def alltypes_sample(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
# TODO(wesm): Test other timestamp resolutions now that arrow supports
# them
'datetime': np.arange("2016-01-01T00:00:00.001", size,
dtype='datetime64[ms]'),
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
return df
@parquet
def test_pandas_parquet_2_0_rountrip(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
assert b'pandas' in table_read.schema.metadata
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_custom_metadata(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
assert b'pandas' in arrow_table.schema.metadata
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
md = pq.read_metadata(filename.strpath).metadata
assert b'pandas' in md
js = json.loads(md[b'pandas'].decode('utf8'))
assert js['index_columns'] == ['__index_level_0__']
@parquet
def test_pandas_parquet_2_0_rountrip_read_pandas_no_index_written(tmpdir):
import pyarrow.parquet as pq
df = alltypes_sample(size=10000)
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, preserve_index=False)
js = json.loads(arrow_table.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = pq.read_pandas(filename.strpath)
js = json.loads(table_read.schema.metadata[b'pandas'].decode('utf8'))
assert not js['index_columns']
assert arrow_table.schema.metadata == table_read.schema.metadata
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_1_0_rountrip(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'str': [str(x) for x in range(size)],
'str_with_nulls': [None] + [str(x) for x in range(size - 2)] + [None],
'empty_str': [''] * size
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath, version="1.0")
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
# We pass uint32_t as int64_t if we write Parquet version 1.0
df['uint32'] = df['uint32'].values.astype(np.int64)
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_column_selection(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16)
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
_write_table(arrow_table, filename.strpath)
table_read = _read_table(filename.strpath, columns=['uint8'])
df_read = table_read.to_pandas()
tm.assert_frame_equal(df[['uint8']], df_read)
def _random_integers(size, dtype):
# We do not generate integers outside the int64 range
platform_int_info = np.iinfo('int_')
iinfo = np.iinfo(dtype)
return np.random.randint(max(iinfo.min, platform_int_info.min),
min(iinfo.max, platform_int_info.max),
size=size).astype(dtype)
def _test_dataframe(size=10000, seed=0):
np.random.seed(seed)
df = pd.DataFrame({
'uint8': _random_integers(size, np.uint8),
'uint16': _random_integers(size, np.uint16),
'uint32': _random_integers(size, np.uint32),
'uint64': _random_integers(size, np.uint64),
'int8': _random_integers(size, np.int8),
'int16': _random_integers(size, np.int16),
'int32': _random_integers(size, np.int32),
'int64': _random_integers(size, np.int64),
'float32': np.random.randn(size).astype(np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': [tm.rands(10) for i in range(size)],
'all_none': [None] * size,
'all_none_category': [None] * size
})
# TODO(PARQUET-1015)
# df['all_none_category'] = df['all_none_category'].astype('category')
return df
@parquet
def test_pandas_parquet_native_file_roundtrip(tmpdir):
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_read_pandas_column_subset(tmpdir):
import pyarrow.parquet as pq
df = _test_dataframe(10000)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = pq.read_pandas(reader, columns=['strings', 'uint8']).to_pandas()
tm.assert_frame_equal(df[['strings', 'uint8']], df_read)
@parquet
def test_pandas_parquet_empty_roundtrip(tmpdir):
df = _test_dataframe(0)
arrow_table = pa.Table.from_pandas(df)
imos = pa.BufferOutputStream()
_write_table(arrow_table, imos, version="2.0")
buf = imos.get_result()
reader = pa.BufferReader(buf)
df_read = _read_table(reader).to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_pyfile_roundtrip(tmpdir):
filename = tmpdir.join('pandas_pyfile_roundtrip.parquet').strpath
size = 5
df = pd.DataFrame({
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0,
'strings': ['foo', 'bar', None, 'baz', 'qux']
})
arrow_table = pa.Table.from_pandas(df)
with open(filename, 'wb') as f:
_write_table(arrow_table, f, version="1.0")
data = io.BytesIO(open(filename, 'rb').read())
table_read = _read_table(data)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_pandas_parquet_configuration_options(tmpdir):
size = 10000
np.random.seed(0)
df = pd.DataFrame({
'uint8': np.arange(size, dtype=np.uint8),
'uint16': np.arange(size, dtype=np.uint16),
'uint32': np.arange(size, dtype=np.uint32),
'uint64': np.arange(size, dtype=np.uint64),
'int8': np.arange(size, dtype=np.int16),
'int16': np.arange(size, dtype=np.int16),
'int32': np.arange(size, dtype=np.int32),
'int64': np.arange(size, dtype=np.int64),
'float32': np.arange(size, dtype=np.float32),
'float64': np.arange(size, dtype=np.float64),
'bool': np.random.randn(size) > 0
})
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df)
for use_dictionary in [True, False]:
_write_table(arrow_table, filename.strpath,
version="2.0",
use_dictionary=use_dictionary)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
for compression in ['NONE', 'SNAPPY', 'GZIP']:
_write_table(arrow_table, filename.strpath,
version="2.0",
compression=compression)
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
def make_sample_file(df):
import pyarrow.parquet as pq
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, compression='SNAPPY', version='2.0',
coerce_timestamps='ms')
buf.seek(0)
return pq.ParquetFile(buf)
@parquet
def test_parquet_metadata_api():
df = alltypes_sample(size=10000)
df = df.reindex(columns=sorted(df.columns))
fileh = make_sample_file(df)
ncols = len(df.columns)
# Series of sniff tests
meta = fileh.metadata
repr(meta)
assert meta.num_rows == len(df)
assert meta.num_columns == ncols + 1 # +1 for index
assert meta.num_row_groups == 1
assert meta.format_version == '2.0'
assert 'parquet-cpp' in meta.created_by
# Schema
schema = fileh.schema
assert meta.schema is schema
assert len(schema) == ncols + 1 # +1 for index
repr(schema)
col = schema[0]
repr(col)
assert col.name == df.columns[0]
assert col.max_definition_level == 1
assert col.max_repetition_level == 0
assert col.max_repetition_level == 0
assert col.physical_type == 'BOOLEAN'
assert col.logical_type == 'NONE'
with pytest.raises(IndexError):
schema[ncols + 1] # +1 for index
with pytest.raises(IndexError):
schema[-1]
# Row group
rg_meta = meta.row_group(0)
repr(rg_meta)
assert rg_meta.num_rows == len(df)
assert rg_meta.num_columns == ncols + 1 # +1 for index
@parquet
def test_compare_schemas():
df = alltypes_sample(size=10000)
fileh = make_sample_file(df)
fileh2 = make_sample_file(df)
fileh3 = make_sample_file(df[df.columns[::2]])
assert fileh.schema.equals(fileh.schema)
assert fileh.schema.equals(fileh2.schema)
assert not fileh.schema.equals(fileh3.schema)
assert fileh.schema[0].equals(fileh.schema[0])
assert not fileh.schema[0].equals(fileh.schema[1])
@parquet
def test_column_of_arrays(tmpdir):
df, schema = dataframe_with_arrays()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_coerce_timestamps(tmpdir):
# ARROW-622
df, schema = dataframe_with_arrays()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='us')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
df_expected = df.copy()
for i, x in enumerate(df_expected['datetime64']):
if isinstance(x, np.ndarray):
df_expected['datetime64'][i] = x.astype('M8[us]')
tm.assert_frame_equal(df_expected, df_read)
with pytest.raises(ValueError):
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='unknown')
@parquet
def test_column_of_lists(tmpdir):
df, schema = dataframe_with_lists()
filename = tmpdir.join('pandas_rountrip.parquet')
arrow_table = pa.Table.from_pandas(df, schema=schema)
_write_table(arrow_table, filename.strpath, version="2.0",
coerce_timestamps='ms')
table_read = _read_table(filename.strpath)
df_read = table_read.to_pandas()
tm.assert_frame_equal(df, df_read)
@parquet
def test_date_time_types():
t1 = pa.date32()
data1 = np.array([17259, 17260, 17261], dtype='int32')
a1 = pa.Array.from_pandas(data1, type=t1)
t2 = pa.date64()
data2 = data1.astype('int64') * 86400000
a2 = pa.Array.from_pandas(data2, type=t2)
t3 = pa.timestamp('us')
start = pd.Timestamp('2000-01-01').value / 1000
data3 = np.array([start, start + 1, start + 2], dtype='int64')
a3 = pa.Array.from_pandas(data3, type=t3)
t4 = pa.time32('ms')
data4 = np.arange(3, dtype='i4')
a4 = pa.Array.from_pandas(data4, type=t4)
t5 = pa.time64('us')
a5 = pa.Array.from_pandas(data4.astype('int64'), type=t5)
t6 = pa.time32('s')
a6 = pa.Array.from_pandas(data4, type=t6)
ex_t6 = pa.time32('ms')
ex_a6 = pa.Array.from_pandas(data4 * 1000, type=ex_t6)
t7 = pa.timestamp('ns')
start = pd.Timestamp('2001-01-01').value
data7 = np.array([start, start + 1000, start + 2000],
dtype='int64')
a7 = pa.Array.from_pandas(data7, type=t7)
t7_us = pa.timestamp('us')
start = pd.Timestamp('2001-01-01').value
data7_us = np.array([start, start + 1000, start + 2000],
dtype='int64') // 1000
a7_us = pa.Array.from_pandas(data7_us, type=t7_us)
table = pa.Table.from_arrays([a1, a2, a3, a4, a5, a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
# date64 as date32
# time32[s] to time32[ms]
# 'timestamp[ns]' to 'timestamp[us]'
expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7_us],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
_check_roundtrip(table, expected=expected, version='2.0')
# date64 as date32
# time32[s] to time32[ms]
# 'timestamp[ns]' is saved as INT96 timestamp
expected = pa.Table.from_arrays([a1, a1, a3, a4, a5, ex_a6, a7],
['date32', 'date64', 'timestamp[us]',
'time32[s]', 'time64[us]',
'time32_from64[s]',
'timestamp[ns]'])
_check_roundtrip(table, expected=expected, version='2.0',
use_deprecated_int96_timestamps=True)
# Unsupported stuff
def _assert_unsupported(array):
table = pa.Table.from_arrays([array], ['unsupported'])
buf = io.BytesIO()
with pytest.raises(NotImplementedError):
_write_table(table, buf, version="2.0")
t7 = pa.time64('ns')
a7 = pa.Array.from_pandas(data4.astype('int64'), type=t7)
_assert_unsupported(a7)
@parquet
def test_fixed_size_binary():
t0 = pa.binary(10)
data = [b'fooooooooo', None, b'barooooooo', b'quxooooooo']
a0 = pa.array(data, type=t0)
table = pa.Table.from_arrays([a0],
['binary[10]'])
_check_roundtrip(table)
def _check_roundtrip(table, expected=None, **params):
buf = io.BytesIO()
_write_table(table, buf, **params)
buf.seek(0)
if expected is None:
expected = table
result = _read_table(buf)
assert result.equals(expected)
@parquet
def test_multithreaded_read():
df = alltypes_sample(size=10000)
table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(table, buf, compression='SNAPPY', version='2.0')
buf.seek(0)
table1 = _read_table(buf, nthreads=4)
buf.seek(0)
table2 = _read_table(buf, nthreads=1)
assert table1.equals(table2)
@parquet
def test_min_chunksize():
data = pd.DataFrame([np.arange(4)], columns=['A', 'B', 'C', 'D'])
table = pa.Table.from_pandas(data.reset_index())
buf = io.BytesIO()
_write_table(table, buf, chunk_size=-1)
buf.seek(0)
result = _read_table(buf)
assert result.equals(table)
with pytest.raises(ValueError):
_write_table(table, buf, chunk_size=0)
@parquet
def test_pass_separate_metadata():
import pyarrow.parquet as pq
# ARROW-471
df = alltypes_sample(size=10000)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, compression='snappy', version='2.0')
buf.seek(0)
metadata = pq.read_metadata(buf)
buf.seek(0)
fileh = pq.ParquetFile(buf, metadata=metadata)
tm.assert_frame_equal(df, fileh.read().to_pandas())
@parquet
def test_read_single_row_group():
import pyarrow.parquet as pq
# ARROW-471
N, K = 10000, 4
df = alltypes_sample(size=N)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, row_group_size=N / K,
compression='snappy', version='2.0')
buf.seek(0)
pf = pq.ParquetFile(buf)
assert pf.num_row_groups == K
row_groups = [pf.read_row_group(i) for i in range(K)]
result = pa.concat_tables(row_groups)
tm.assert_frame_equal(df, result.to_pandas())
@parquet
def test_read_single_row_group_with_column_subset():
import pyarrow.parquet as pq
N, K = 10000, 4
df = alltypes_sample(size=N)
a_table = pa.Table.from_pandas(df)
buf = io.BytesIO()
_write_table(a_table, buf, row_group_size=N / K,
compression='snappy', version='2.0')
buf.seek(0)
pf = pq.ParquetFile(buf)
cols = df.columns[:2]
row_groups = [pf.read_row_group(i, columns=cols) for i in range(K)]
result = pa.concat_tables(row_groups)
tm.assert_frame_equal(df[cols], result.to_pandas())
@parquet
def test_parquet_piece_read(tmpdir):
import pyarrow.parquet as pq
df = _test_dataframe(1000)
table = pa.Table.from_pandas(df)
path = tmpdir.join('parquet_piece_read.parquet').strpath
_write_table(table, path, version='2.0')
piece1 = pq.ParquetDatasetPiece(path)
result = piece1.read()
assert result.equals(table)
@parquet
def test_parquet_piece_basics():
import pyarrow.parquet as pq
path = '/baz.parq'
piece1 = pq.ParquetDatasetPiece(path)
piece2 = pq.ParquetDatasetPiece(path, row_group=1)
piece3 = pq.ParquetDatasetPiece(
path, row_group=1, partition_keys=[('foo', 0), ('bar', 1)])
assert str(piece1) == path
assert str(piece2) == '/baz.parq | row_group=1'
assert str(piece3) == 'partition[foo=0, bar=1] /baz.parq | row_group=1'
assert piece1 == piece1
assert piece2 == piece2
assert piece3 == piece3
assert piece1 != piece3
@parquet
def test_partition_set_dictionary_type():
import pyarrow.parquet as pq
set1 = pq.PartitionSet('key1', [u('foo'), u('bar'), u('baz')])
set2 = pq.PartitionSet('key2', [2007, 2008, 2009])
assert isinstance(set1.dictionary, pa.StringArray)
assert isinstance(set2.dictionary, pa.IntegerArray)
set3 = pq.PartitionSet('key2', [datetime.datetime(2007, 1, 1)])
with pytest.raises(TypeError):
set3.dictionary
@parquet
def test_read_partitioned_directory(tmpdir):
fs = LocalFileSystem.get_instance()
base_path = str(tmpdir)
_partition_test_for_filesystem(fs, base_path)
@pytest.yield_fixture
def s3_example():
access_key = os.environ['PYARROW_TEST_S3_ACCESS_KEY']
secret_key = os.environ['PYARROW_TEST_S3_SECRET_KEY']
bucket_name = os.environ['PYARROW_TEST_S3_BUCKET']
import s3fs
fs = s3fs.S3FileSystem(key=access_key, secret=secret_key)
test_dir = guid()
bucket_uri = 's3://{0}/{1}'.format(bucket_name, test_dir)
fs.mkdir(bucket_uri)
yield fs, bucket_uri
fs.rm(bucket_uri, recursive=True)
@pytest.mark.s3
@parquet
def test_read_partitioned_directory_s3fs(s3_example):
from pyarrow.filesystem import S3FSWrapper
import pyarrow.parquet as pq
fs, bucket_uri = s3_example
wrapper = S3FSWrapper(fs)
_partition_test_for_filesystem(wrapper, bucket_uri)
# Check that we can auto-wrap
dataset = pq.ParquetDataset(bucket_uri, filesystem=fs)
dataset.read()
def _partition_test_for_filesystem(fs, base_path):
import pyarrow.parquet as pq
foo_keys = [0, 1]
bar_keys = ['<KEY>']
partition_spec = [
['foo', foo_keys],
['bar', bar_keys]
]
N = 30
df = pd.DataFrame({
'index': np.arange(N),
'foo': np.array(foo_keys, dtype='i4').repeat(15),
'bar': np.tile(np.tile(np.array(bar_keys, dtype=object), 5), 2),
'values': np.random.randn(N)
}, columns=['index', 'foo', 'bar', 'values'])
_generate_partition_directories(fs, base_path, partition_spec, df)
dataset = pq.ParquetDataset(base_path, filesystem=fs)
table = dataset.read()
result_df = (table.to_pandas()
.sort_values(by='index')
.reset_index(drop=True))
expected_df = (df.sort_values(by='index')
.reset_index(drop=True)
.reindex(columns=result_df.columns))
expected_df['foo'] = pd.Categorical(df['foo'], categories=foo_keys)
expected_df['bar'] = pd.Categorical(df['bar'], categories=bar_keys)
assert (result_df.columns == ['index', 'values', 'foo', 'bar']).all()
tm.assert_frame_equal(result_df, expected_df)
def _generate_partition_directories(fs, base_dir, partition_spec, df):
# partition_spec : list of lists, e.g. [['foo', [0, 1, 2],
# ['bar', ['a', 'b', 'c']]
# part_table : a pyarrow.Table to write to each partition
DEPTH = len(partition_spec)
def _visit_level(base_dir, level, part_keys):
name, values = partition_spec[level]
for value in values:
this_part_keys = part_keys + [(name, value)]
level_dir = pjoin(base_dir, '{0}={1}'.format(name, value))
fs.mkdir(level_dir)
if level == DEPTH - 1:
# Generate example data
file_path = pjoin(level_dir, 'data.parq')
filtered_df = _filter_partition(df, this_part_keys)
part_table = pa.Table.from_pandas(filtered_df)
with fs.open(file_path, 'wb') as f:
_write_table(part_table, f)
else:
_visit_level(level_dir, level + 1, this_part_keys)
_visit_level(base_dir, 0, [])
@parquet
def test_read_common_metadata_files(tmpdir):
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
base_path = str(tmpdir)
data_path = pjoin(base_path, 'data.parquet')
table = pa.Table.from_pandas(df)
_write_table(table, data_path)
metadata_path = pjoin(base_path, '_metadata')
pq.write_metadata(table.schema, metadata_path)
dataset = pq.ParquetDataset(base_path)
assert dataset.metadata_path == metadata_path
common_schema = pq.read_metadata(data_path).schema
assert dataset.schema.equals(common_schema)
# handle list of one directory
dataset2 = pq.ParquetDataset([base_path])
assert dataset2.schema.equals(dataset.schema)
@parquet
def test_read_schema(tmpdir):
import pyarrow.parquet as pq
N = 100
df = pd.DataFrame({
'index': np.arange(N),
'values': np.random.randn(N)
}, columns=['index', 'values'])
data_path = pjoin(str(tmpdir), 'test.parquet')
table = pa.Table.from_pandas(df)
_write_table(table, data_path)
assert table.schema.equals(pq.read_schema(data_path))
def _filter_partition(df, part_keys):
predicate = np.ones(len(df), dtype=bool)
to_drop = []
for name, value in part_keys:
to_drop.append(name)
predicate &= df[name] == value
return df[predicate].drop(to_drop, axis=1)
@parquet
def test_read_multiple_files(tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(dirpath, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
paths.append(path)
# Write a _SUCCESS.crc file
with open(pjoin(dirpath, '_SUCCESS.crc'), 'wb') as f:
f.write(b'0')
def read_multiple_files(paths, columns=None, nthreads=None, **kwargs):
dataset = pq.ParquetDataset(paths, **kwargs)
return dataset.read(columns=columns, nthreads=nthreads)
result = read_multiple_files(paths)
expected = pa.concat_tables(test_data)
assert result.equals(expected)
# Read with provided metadata
metadata = pq.read_metadata(paths[0])
result2 = read_multiple_files(paths, metadata=metadata)
assert result2.equals(expected)
result3 = pa.localfs.read_parquet(dirpath, schema=metadata.schema)
assert result3.equals(expected)
# Read column subset
to_read = [result[0], result[2], result[6], result[result.num_columns - 1]]
result = pa.localfs.read_parquet(
dirpath, columns=[c.name for c in to_read])
expected = pa.Table.from_arrays(to_read, metadata=result.schema.metadata)
assert result.equals(expected)
# Read with multiple threads
pa.localfs.read_parquet(dirpath, nthreads=2)
# Test failure modes with non-uniform metadata
bad_apple = _test_dataframe(size, seed=i).iloc[:, :4]
bad_apple_path = tmpdir.join('{0}.parquet'.format(guid())).strpath
t = pa.Table.from_pandas(bad_apple)
_write_table(t, bad_apple_path)
bad_meta = pq.read_metadata(bad_apple_path)
with pytest.raises(ValueError):
read_multiple_files(paths + [bad_apple_path])
with pytest.raises(ValueError):
read_multiple_files(paths, metadata=bad_meta)
mixed_paths = [bad_apple_path, paths[0]]
with pytest.raises(ValueError):
read_multiple_files(mixed_paths, schema=bad_meta.schema)
with pytest.raises(ValueError):
read_multiple_files(mixed_paths)
@parquet
def test_dataset_read_pandas(tmpdir):
import pyarrow.parquet as pq
nfiles = 5
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = np.arange(i * size, (i + 1) * size)
df.index.name = 'index'
path = pjoin(dirpath, '{0}.parquet'.format(i))
table = pa.Table.from_pandas(df)
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
dataset = pq.ParquetDataset(dirpath)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
expected = pd.concat([x[columns] for x in frames])
tm.assert_frame_equal(result, expected)
@parquet
def test_dataset_read_pandas_common_metadata(tmpdir):
# ARROW-1103
import pyarrow.parquet as pq
nfiles = 5
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
frames = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
df.index = pd.Index(np.arange(i * size, (i + 1) * size))
df.index.name = 'index'
path = pjoin(dirpath, '{0}.parquet'.format(i))
df_ex_index = df.reset_index(drop=True)
df_ex_index['index'] = df.index
table = pa.Table.from_pandas(df_ex_index,
preserve_index=False)
# Obliterate metadata
table = table.replace_schema_metadata(None)
assert table.schema.metadata is None
_write_table(table, path)
test_data.append(table)
frames.append(df)
paths.append(path)
# Write _metadata common file
table_for_metadata = pa.Table.from_pandas(df)
pq.write_metadata(table_for_metadata.schema,
pjoin(dirpath, '_metadata'))
dataset = pq.ParquetDataset(dirpath)
columns = ['uint8', 'strings']
result = dataset.read_pandas(columns=columns).to_pandas()
expected = pd.concat([x[columns] for x in frames])
tm.assert_frame_equal(result, expected)
@parquet
def test_ignore_private_directories(tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
dirpath = tmpdir.join(guid()).strpath
os.mkdir(dirpath)
test_data = []
paths = []
for i in range(nfiles):
df = _test_dataframe(size, seed=i)
path = pjoin(dirpath, '{0}.parquet'.format(i))
test_data.append(_write_table(df, path))
paths.append(path)
# private directory
os.mkdir(pjoin(dirpath, '_impala_staging'))
dataset = pq.ParquetDataset(dirpath)
assert set(paths) == set(x.path for x in dataset.pieces)
@parquet
def test_multiindex_duplicate_values(tmpdir):
num_rows = 3
numbers = list(range(num_rows))
index = pd.MultiIndex.from_arrays(
[['foo', 'foo', 'bar'], numbers],
names=['foobar', 'some_numbers'],
)
df = | pd.DataFrame({'numbers': numbers}, index=index) | pandas.DataFrame |
from sklearn.cluster import KMeans
import plotly.express as px
from sklearn.decomposition import PCA
from sklearn.preprocessing import RobustScaler
import pyarrow.parquet as pq
import os
from pathlib import Path
from hdbscan import HDBSCAN
import pandas as pd
def hdbscan_cluster(
df: pd.DataFrame,
min_cluster_size: int = 10,
gen_min_span_tree: bool = True
):
clusterer = HDBSCAN(
min_cluster_size=min_cluster_size,
gen_min_span_tree=gen_min_span_tree)
clusterer.fit(df)
return clusterer.labels_, clusterer.probabilities_
run_id = '4d8ddb41e7f340c182a6a62699502d9f'
score_path = os.path.join("data/output/score", run_id)
cols = ['mask_press', 'resp_flow', 'delivered_volum',
'mask_press_mu', 'resp_flow_mu', 'delivered_volum_mu',
'mask_press_se', 'resp_flow_se', 'delivered_volum_se',
'epoch_id']
scores = []
for f in Path(score_path).iterdir():
df = pq.read_table(f, columns=cols).to_pandas()
df["file_name"] = os.path.basename(f)[:15]
scores.append(df)
scores = | pd.concat(scores) | pandas.concat |
import numpy as np
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
class EventMatrix(object):
def __init__(self, datetimes, symbols):
'''
:param datetimes:
:param symbols:
Constructs A pandas dataframe indexed by datetimes and with columns for each symbol.
The constructor fills this with all NANs and an abstract base method exists to be customized.
'''
# Build an empty event matrix with an index of all the datetimes and columns for each symbol.
# Fill with NANs
self.event_matrix = pd.DataFrame({'Date': datetimes})
self.event_matrix = self.event_matrix.set_index('Date')
self.event_matrix.tz_localize(tz='America/New_York')
self.event_matrix = self.event_matrix.sort_index()
self.event_matrix = self.event_matrix.loc[~self.event_matrix.index.duplicated(keep='first')]
# Prints True is sorted
#print(self.event_matrix.index.is_monotonic)
self.symbols = symbols
for symbol in self.symbols:
self.event_matrix[symbol] = np.nan
def build_event_matrix(self, start_date, end_date):
'''
Implement this method a derived class.
:param start_date:
:param end_date:
:return: FIll up the event matrix with 1's in the row/column for which there was an event.
'''
raise NotImplementedError("Please Implement this method in a base class")
class CarsCavcsResult(object):
def __init__(self, num_events,
cars, cars_std_err, cars_t_test, cars_significant, cars_positive, cars_num_stocks_positive,
cars_num_stocks_negative,
cavcs, cavcs_std_err, cavcs_t_test, cavcs_significant, cavcs_positive, cavcs_num_stocks_positive,
cavcs_num_stocks_negative):
"""
:param num_events: the number of events in the matrix
:param cars: time series of Cumulative Abnormal Return
:params cars_std_err: std error of the CARs
:param cars_t_test: t-test statistic that checks whether the CARs of all stock are significantly different from 0
:param cars_significant: True if the CARs of all stocks are significant
:param cars_positive: True if the CAR is positive
:param cars_num_stocks_positive: The number of stocks for which the CAR was significantly positive
:param cars_num_stocks_negative: The number of stocks for which the CAR was significantly negative
:param cavcs: time series of Cumulative Abnormal Volume Changes
:params cavcs_std_err: std error of the CAVCs
:param cavcs_t_test: t-test statistic that checks whether the CAVCs of all stock are significantly different from 0
:param cavcs_significant: True if the CAVCs of all stocks are significant
:param cavcs_positive: True if the CAVC is positive
:param cavcs_num_stocks_positive: The number of stocks for which the CAVC was significantly positive
:param cavcs_num_stocks_negative: The number of stocks for which the CAVC was significantly negative
All of the above t-tests are significant when they are in the 95% confidence levels
"""
self.num_events = num_events
self.cars = cars
self.cars_std_err = cars_std_err
self.cars_t_test = cars_t_test
self.cars_significant = cars_significant
self.cars_positive = cars_positive
self.cars_num_stocks_positive = cars_num_stocks_positive
self.cars_num_stocks_negative = cars_num_stocks_negative
self.cavcs = cavcs
self.cavcs_std_err = cavcs_std_err
self.cavcs_t_test = cavcs_t_test
self.cavcs_significant = cavcs_significant
self.cavcs_positive = cavcs_positive
self.cavcs_num_stocks_positive = cavcs_num_stocks_positive
self.cavcs_num_stocks_negative = cavcs_num_stocks_negative
def results_as_string(self):
result_string = 'Number of events processed: ' + str(self.num_events) + '\n'
result_string += 'CARS Results' + '\n'
result_string += ' Number of stocks with +CARS: ' + str(self.cars_num_stocks_positive) + '\n'
result_string += ' Number of stocks with -CARS: ' + str(self.cars_num_stocks_negative) + '\n'
result_string += ' CARS t-test value: ' + str(self.cars_t_test) + '\n'
result_string += ' CARS significant: ' + str(self.cars_significant) + '\n'
result_string += ' CARS positive: ' + str(self.cars_positive) + '\n'
result_string += 'CAVCS Results' + '\n'
result_string += ' Number of stocks with +CAVCS: ' + str(self.cavcs_num_stocks_positive) + '\n'
result_string += ' Number of stocks with -CAVCS: ' + str(self.cavcs_num_stocks_negative) + '\n'
result_string += ' CAVCS full t-test value: ' + str(self.cavcs_t_test) + '\n'
result_string += ' CAVCS significant: ' + str(self.cavcs_significant) + '\n'
result_string += ' CAVCS positive: ' + str(self.cavcs_positive) + '\n'
return result_string
class Calculator(object):
def __init__(self):
pass
def calculate_using_naive_benchmark(self, event_matrix, stock_data, market_symbol, look_back, look_forward):
"""
:param event_matrix:
:param stock_data:
:param market_symbol:
:param look_back:
:param look_forward:
:return car: time series of Cumulative Abnormal Return
:return std_err: the standard error
:return num_events: the number of events in the matrix
Most of the code was from here:
https://github.com/brettelliot/QuantSoftwareToolkit/blob/master/QSTK/qstkstudy/EventProfiler.py
"""
# Copy the stock prices into a new dataframe which will become filled with the returns
#import pdb;
#pdb.set_trace()
try:
# For IB
daily_returns = stock_data['Close'].copy()
volumes = stock_data['Volume'].copy()
except KeyError:
# For AV
daily_returns = stock_data['adjusted_close'].copy()
volumes = stock_data['volume'].copy()
# Convert prices into daily returns.
# This is the amount that the specific stock increased or decreased in value for one day.
daily_returns = daily_returns.pct_change().fillna(0)
mypct = lambda x: x[-1] - np.mean(x[:-1])
vlm_changes = volumes.rolling(5, 5).apply(mypct).fillna(0)
# Subtract the market returns from all of the stock's returns. The result is the abnormal return.
# beta = get_beta()
beta = 1.0 # deal with beta later
symbols = daily_returns.index.get_level_values(0).unique()
abnormal_returns = daily_returns.copy()
ex_vols = vlm_changes.copy()
#import pdb;
#pdb.set_trace()
for sym in symbols:
abnormal_returns.loc[sym, slice(None)] -= beta * daily_returns.loc[market_symbol, slice(None)].values
ex_vols.loc[sym, slice(None)] -= beta * vlm_changes.loc[market_symbol, slice(None)].values
#import pdb;
#pdb.set_trace()
# remove the market symbol from the returns and event matrix. It's no longer needed.
del daily_returns[market_symbol]
del vlm_changes[market_symbol]
del abnormal_returns[market_symbol]
del ex_vols[market_symbol]
try:
del event_matrix[market_symbol]
except KeyError as e:
pass
starting_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Starting number of events: {}".format(starting_event_num))
# The event matrix has a row for every data in the stock data.
# Zero (NaN) out any events in the rows at the beginning and end that would
# not have data.
event_matrix.values[0:look_back, :] = np.NaN
event_matrix.values[-look_forward:, :] = np.NaN
ending_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Ending number of events: {}".format(ending_event_num))
if (starting_event_num != ending_event_num):
print("{} events were dropped because they require data outside the data range.".format(starting_event_num -
ending_event_num))
# Number of events
i_no_events = int(np.logical_not(np.isnan(event_matrix.values)).sum())
assert i_no_events > 0, "Zero events in the event matrix"
na_all_rets = "False"
na_all_vlms = "False"
# import pdb; pdb.set_trace()
results = pd.DataFrame(index=symbols, columns=['pos', 'neg', 'vpos', 'vneg'])
# Looking for the events and pushing them to a matrix
#print(event_matrix.columns)
#print(symbols)
try:
#for i, s_sym in enumerate(event_matrix.columns):
for s_sym in symbols:
if s_sym == market_symbol:
continue
na_stock_rets = "False"
na_stock_vlms = "False"
for j, dt_date in enumerate(event_matrix.index):
if event_matrix[s_sym][dt_date] == 1:
na_ret = abnormal_returns[s_sym][j - look_back:j + 1 + look_forward]
na_vls = ex_vols[s_sym][j - look_back:j + 1 + look_forward]
if type(na_stock_rets) == type(""):
na_stock_rets = na_ret
na_stock_vlms = na_vls
else:
na_stock_rets = np.vstack((na_stock_rets, na_ret))
na_stock_vlms = np.vstack((na_stock_vlms, na_vls))
# reurns/vols for a particular stock analyze here
# then append to all rets
#import pdb;
#pdb.set_trace()
if type(na_stock_rets) == type("") or type(na_stock_rets) == type(""):
continue
if (np.mean(na_stock_rets) > 0):
results.loc[s_sym, 'pos'] = True
#print(s_sym)
else:
results.loc[s_sym, 'neg'] = True
if (np.mean(na_stock_vlms) > 0):
results.loc[s_sym, 'vpos'] = True
else:
results.loc[s_sym, 'vneg'] = True
if type(na_all_rets) == type(""):
na_all_rets = na_stock_rets
na_all_vlms = na_stock_vlms
else:
na_all_rets = np.vstack((na_all_rets, na_stock_rets))
na_all_vlms = np.vstack((na_all_vlms, na_stock_vlms))
except Exception as e:
#import pdb;
#pdb.set_trace()
#print(e)
raise e
#import pdb;
#pdb.set_trace()
if len(na_all_rets.shape) == 1:
na_all_rets = np.expand_dims(na_all_rets, axis=0)
# Computing daily returns
num_events = len(na_all_rets)
cars = np.mean(na_all_rets, axis=0)
cavs = np.mean(na_all_vlms, axis=0)
cars_std_err = np.std(na_all_rets, axis=0)
cavs_std_err = np.std(na_all_vlms, axis=0)
na_cum_rets = np.cumprod(na_all_rets + 1, axis=1)
na_cum_rets = (na_cum_rets.T / na_cum_rets[:, look_back]).T
na_cum_vlms = np.cumsum(na_all_vlms, axis=1)
cars_cum = np.mean(na_cum_rets, axis=0)
cavs_cum = np.mean(na_cum_vlms, axis=0)
if (cars_cum[-1] - 1) > 0:
cars_positive = True
else:
cars_positive = False
if (cavs_cum[-1]) > 0:
cavs_positive = True
else:
cavs_positive = False
cars_num_stocks_positive = results['pos'].sum()
cars_num_stocks_negative = results['neg'].sum()
cavs_num_stocks_positive = results['vpos'].sum()
cavs_num_stocks_negative = results['vneg'].sum()
std1 = np.std(cars)
cars_t_test = np.mean(cars) / std1 * np.sqrt(len(cars))
std2 = np.std(cavs)
cavs_t_test = np.mean(cavs) / std2 * np.sqrt(len(cavs))
#import pdb;
#pdb.set_trace()
from scipy import stats
# pval1 = 1 - stats.t.cdf(cars_t_test,df=len(cars))
pval1 = 2 * (1 - stats.t.cdf(abs(cars_t_test), df=num_events))
# pvalues = 2*(1-tcdf(abs(t),n-v))
pval2 = 2 * (1 - stats.t.cdf(abs(cavs_t_test), df=num_events))
if (pval1 < .05):
cars_significant = True
else:
cars_significant = False
if (pval2 < .05):
cavs_significant = True
else:
cavs_significant = False
#import pdb;
#pdb.set_trace()
ccr = CarsCavcsResult(num_events,
cars_cum, cars_std_err, cars_t_test, cars_significant,
cars_positive, cars_num_stocks_positive, cars_num_stocks_negative,
cavs_cum, cavs_std_err, cavs_t_test, cavs_significant,
cavs_positive, cavs_num_stocks_positive, cavs_num_stocks_negative)
return ccr
#import pdb;
#pdb.set_trace()
def calculate_using_single_factor_benchmark(self, event_matrix, stock_data, market_symbol, estimation_window=200,
buffer=5,
pre_event_window=10, post_event_window=10):
'''
:param event_matrix:
:param stock_data:
:param market_symbol:
:param estimation_window:
:param buffer:
:param pre_event_window:
:param post_event_window:
:return cars_cavcs_result: An instance of CarsCavcsResult containing the results.
Modeled after http://arno.uvt.nl/show.cgi?fid=129765
'''
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime as dt
# The event matrix has a row for every data in the stock data.
# Zero (NaN) out any events in the rows at the beginning and end that would
# not have data.
starting_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Starting number of events: {}".format(starting_event_num))
event_matrix.values[0:estimation_window + buffer + pre_event_window + post_event_window, :] = np.NaN
event_matrix.values[-estimation_window - buffer - pre_event_window - post_event_window:, :] = np.NaN
ending_event_num = len(event_matrix[(event_matrix == 1.0).any(axis=1)])
print("Ending number of events: {}".format(ending_event_num))
if(starting_event_num != ending_event_num):
print("{} events were dropped because they require data outside the data range.".format(starting_event_num -
ending_event_num))
events = event_matrix[(event_matrix == 1.0).any(axis=1)]
dates = stock_data.loc[market_symbol, slice(None)].index
date1 = events.index[0]
index1 = dates.tolist().index(date1)
wtf = dates[index1]
date11 = dates[index1 - buffer]
date12 = dates[index1 - (buffer + estimation_window)]
#import pdb;
#pdb.set_trace()
# check remove duplicates
stock_data.index.value_counts()
stock_data.drop_duplicates(inplace=True)
# import pdb; pdb.set_trace()
try:
# For IB
closing_prices = stock_data['Close']
volumes = stock_data['Volume']
except KeyError:
# For AV
closing_prices = stock_data['adjusted_close']
volumes = stock_data['volume']
# check for duplicates
closing_prices.index.value_counts()
'''(RGR, 2005-12-30 00:00:00) 2
(SPY, 2000-12-29 00:00:00) 2
(RGR, 2006-12-29 00:00:00) 2'''
# removing duplicates
# now we are ready to do anlaysis
stock_ret = closing_prices.copy()
symbols = stock_data.index.get_level_values(0).unique().tolist()
mypct = lambda x: x[-1] - np.mean(x[:-1])
stock_ret = closing_prices.pct_change().fillna(0)
vlm_changes = volumes.rolling(5, 5).apply(mypct).fillna(0)
# do regeression
pre_stock_returns = stock_ret[
(stock_data.index.get_level_values(1) > date12) & (stock_data.index.get_level_values(1) <= date11)]
pre_stock_vlms = vlm_changes[
(stock_data.index.get_level_values(1) > date12) & (stock_data.index.get_level_values(1) <= date11)]
# **************
# First compute cars ******
# ***************
#import pdb;
#pdb.set_trace()
dates = stock_data.index.get_level_values(1).unique().tolist()
if (market_symbol in symbols):
stocks = [x for x in symbols if x != market_symbol]
else:
raise ValueError('calculate_using_single_factor_benchmark: market_symbol not found in data')
ar1 = ['cars', 'cavs'];
ar2 = ['slope', 'intercept']
from itertools import product
tuples = [(i, j) for i, j in product(ar1, ar2)] # tuples = list(zip(*arrays))
index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
df_regress = pd.DataFrame(0.0, index=index, columns=symbols)
# import pdb; pdb.set_trace()
for stock in stocks:
# set up data
x1 = pre_stock_returns[market_symbol]
y1 = pre_stock_returns[stock]
slope1, intercept1, cars0 = regress_vals(x1, y1)
cars = np.cumprod(cars0 + 1, axis=0)
# plot if you need
#plot_regressvals(x1, y1, slope1, intercept1, cars, stock)
# the same for cvals
x2 = pre_stock_vlms[market_symbol]
y2 = pre_stock_vlms[stock]
# y2.argsort()[::-1][:n]
# import pdb; pdb.set_trace()
slope2, intercept2, cavs0 = regress_vals(x2, y2)
cavs = np.cumsum(cavs0)
#plot_regressvals(x2, y2, slope2, intercept2, cavs, stock)
# store the regresion values
df_regress.loc[('cars', 'slope'), stock] = slope1
df_regress.loc[('cars', 'intercept'), stock] = intercept1
df_regress.loc[('cavs', 'slope'), stock] = slope2
df_regress.loc[('cavs', 'intercept'), stock] = intercept2
# do the same for volumes
# ***************
# now the event cars and cavs computations
ar11 = stocks
ar12 = ['cars', 'cavs']
tuples2 = [(i, j) for i, j in product(ar11, ar12)] # tuples = list(zip(*arrays))
index2 = pd.MultiIndex.from_tuples(tuples2, names=['first', 'second'])
df_results = pd.DataFrame(0.0, index=index2, columns=['positive', 'significant'])
ccarray = []
cvarray = []
# now the big loop
#import pdb;
#pdb.set_trace()
try:
for stock in stocks:
slope1 = df_regress.loc[('cars', 'slope'), stock]
intercept1 = df_regress.loc[('cars', 'intercept'), stock]
slope2 = df_regress.loc[('cavs', 'slope'), stock]
intercept2 = df_regress.loc[('cavs', 'intercept'), stock]
ccr = []
cvr = []
for event in events.iterrows():
dt1 = event[0]
idx1 = dates.index(dt1)
window = dates[idx1 - pre_event_window:idx1 + post_event_window + 1]
dummy_rets = | pd.Series(0.0, index=window) | pandas.Series |
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits import mplot3d
def trajectory_generator(T_final, N, traj=0, show_traj=False):
'''
Generates a circular trajectory given a final time and a sampling time
'''
r = 1 # radius
th = np.linspace(0,6*np.pi,N)
c_x, c_y = [0,0] # center coordinates
## circular trajectory
if traj ==0:
t = np.linspace(0,T_final,N)
x = r * np.cos(th) + c_x
y = r * np.sin(th) + c_y
z = np.ones_like(th)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
ax.set_xlabel("x[m]")
ax.set_ylabel("y[m]")
ax.set_zlabel("z[m]")
plt.show()
## hellical trajectory
if traj ==1:
t = np.linspace(0,T_final,N)
x = r * np.cos(th) + c_x
y = r * np.sin(th) + c_y
z = np.linspace(1,2,N)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
plt.show()
## vertical trajectory
if traj ==2:
t = np.linspace(0,T_final,N)
x = np.ones_like(t)
y = np.zeros_like(t)
z = np.linspace(1,2,N)
if show_traj == True:
plt.figure()
ax = plt.axes(projection = "3d")
plt.title('Reference trajectory')
ax.plot3D(x, y, z)
plt.show()
return t,x,y,z
def trajectory_generator2D( x0: np.array, # initial position of the quadrotor
N_hover: int, # number of time steps in the hovering phase
N_traj: int, # number of time steps in the simulation
N: int, # number of time step in a single time horizon (used to add an additional horizon in order for the closed loop simulation to work)
radius: float, # radius of the circular trajectory
show_traj=False): # boolean to show trajectory before the simulation
'''
Generates a circular trajectory with a hovering time of T_hover at the start,
given a trajectory time and a sampling time.
'''
# hovering trajectory
y_hover = np.ones(N_hover) * x0[0]
z_hover = np.ones(N_hover) * x0[1]
# circular trajectory parameters
theta = np.linspace(0,4*np.pi, N_traj+N)
c_x, c_y = [4,5] # center coordinates
## circular trajectory
y_circle = radius * np.cos(theta) + c_x
z_circle = radius * np.sin(theta) + c_y
# appending the hovering and the circular trajectories
y = np.append(y_hover, y_circle)
z = np.append(z_hover, z_circle)
if show_traj == True:
fig, ax = plt.subplots()
plt.title('Reference trajectory')
ax.plot(y, z)
ax.set_xlabel("y[m]")
ax.set_ylabel("z[m]")
plt.show()
return y,z
# trajectory generation with velocities
def trajectory_generotaor2D_with_vel( x0: np.array, # initial potision of the quadrotor
N_hover: int, # number of time steps in the hovering phase
model: object, # model of the drone (used to check if the maximum )
radius: float, # radius of the circular trajectory
freq: float, # used to control the speed of the trajectory
T_traj: float, # final time of the tre
Tf: float, # control horizon (used to add an additional horizon in order for the closed loop simulation to work)
dt: float):
# hovering trajectory
y_hover = np.ones(N_hover) * x0[0]
z_hover = np.ones(N_hover) * x0[1]
vz_hover = np.zeros(N_hover)
vy_hover = np.zeros(N_hover)
t = np.arange(0,T_traj+Tf,dt)
c_y, c_z = [4,5] # center coordinates
y_circle = radius * np.cos(freq * t) + c_y
z_circle = radius * np.sin(freq * t) + c_z
vy_circle = - radius * freq * np.sin(freq * t)
vz_circle = + radius * freq * np.cos(freq * t)
# appending the hovering and the circular trajectories
y = np.append(y_hover, y_circle)
z = np.append(z_hover, z_circle)
vy = np.append(vy_hover, vy_circle)
vz = np.append(vz_hover, vz_circle)
v = np.sqrt(vy**2 + vz**2)
# maximum velocity in the trajectory
v_max = np.max(v)
if v_max > model.v_max:
sys.exit("The desired trajectory contains velocities that the drone cannot handle.")
else:
return y, z, vy, vz
def readTrajectory(T_hover, N):
# import csv file of measX and simU (noisy measurement)
# ref_traj = pd.read_csv('used_data/matlab/ga4/measX.csv')
# ref_U = pd.read_csv('used_data/matlab/ga4/simU.csv')
# ref_traj = pd.read_csv('used_data/matlab/fmincon/J=u1/measX.csv')
# ref_U = pd.read_csv('used_data/matlab/fmincon/J=u1/simU.csv')
ref_traj = pd.read_csv('used_data/matlab/globalsearch_1/measX.csv')
ref_U = | pd.read_csv('used_data/matlab/globalsearch_1/simU.csv') | pandas.read_csv |
import pandas as pd
import numpy as np
from pathlib import Path
def load(path, dt=False, stats=False):
print("loading data from",path)
dataFrames = {}
dataFrames['gameLogs'] = pd.read_csv(path/'GameLogs.csv', index_col=False)
if dt:
dataFrames['gameLogs']['Date'] = pd.to_datetime(dataFrames['gameLogs']['Date'])
dataFrames['people'] = pd.read_csv(path/'People.csv', index_col=False)
dataFrames['teams'] = pd.read_csv(path/'Teams.csv', index_col=False)
dataFrames['managers'] = pd.read_csv(path/'Managers.csv', index_col=False)
dataFrames['fieldings'] = pd.read_csv(path/'Fielding.csv', index_col=False)
dataFrames['pitchings'] = pd.read_csv(path/'Pitching.csv', index_col=False)
dataFrames['battings'] = pd.read_csv(path/'Batting.csv', index_col=False)
if stats:
dataFrames['stats'] = pd.read_csv(path/'Stats.csv', index_col=False)
print("data loaded")
return dataFrames
def save(path, dataFrames, stats=False):
print("Saving data to",path)
dataFrames['gameLogs'].to_csv(path/'GameLogs.csv', index = False)
dataFrames['people'].to_csv(path/'People.csv', index = False)
dataFrames['teams'].to_csv(path/'Teams.csv', index = False)
dataFrames['managers'].to_csv(path/'Managers.csv', index = False)
dataFrames['fieldings'].to_csv(path/'Fielding.csv', index = False)
dataFrames['pitchings'].to_csv(path/'Pitching.csv', index = False)
dataFrames['battings'].to_csv(path/'Batting.csv', index = False)
if stats:
dataFrames['stats'].to_csv(path/'Stats.csv', index = False)
print("Data saved")
def filter(path, saveState=True):
def filterFrame(frame, columns, renames=None):
frame = frame[columns]
if(renames!=None):
frame = frame.rename(columns=renames)
return frame.reset_index(drop=True)
def filterGameLogs(gameLogs, people):
gameLogs['Date'] = pd.to_datetime(gameLogs['Date'], format="%Y%m%d")
gameLogs['Visiting league AL'] = gameLogs['Visiting league']=="AL"
gameLogs['Home league AL'] = gameLogs['Home league']=="AL"
gameLogs = gameLogs[gameLogs['Forfeit information'].isna()]
gameLogs = gameLogs[gameLogs['Protest information'].isna()]
generalColumns = [
'Date','Visiting: Team','Visiting league AL','Home: Team','Home league AL','Visiting: Score','Home: Score']
visitingStatsColumns = [
'Visiting at-bats','Visiting hits','Visiting doubles','Visiting triples','Visiting homeruns','Visiting RBI','Visiting sacrifice hits','Visiting sacrifice flies',
'Visiting hit-by-pitch','Visiting walks','Visiting intentional walks','Visiting strikeouts','Visiting stolen bases','Visiting caught stealing','Visiting grounded into double plays',
'Visiting left on base','Visiting pitchers used','Visiting individual earned runs','Visiting team earned runs','Visiting wild pitches',
'Visiting balks','Visiting putouts','Visiting assists','Visiting errors','Visiting passed balls','Visiting double plays','Visiting triple plays']
homeStatsColumns = [
'Home at-bats','Home hits','Home doubles','Home triples','Home homeruns','Home RBI','Home sacrifice hits','Home sacrifice flies',
'Home hit-by-pitch','Home walks','Home intentional walks','Home strikeouts','Home stolen bases','Home caught stealing','Home grounded into double plays',
'Home left on base','Home pitchers used','Home individual earned runs','Home team earned runs','Home wild pitches',
'Home balks','Home putouts','Home assists','Home errors','Home passed balls','Home double plays','Home triple plays']
visitingIDColumns = [
'Visiting team manager ID','Visiting starting pitcher ID',
'Visiting starting player 1 ID','Visiting starting player 2 ID','Visiting starting player 3 ID',
'Visiting starting player 4 ID','Visiting starting player 5 ID','Visiting starting player 6 ID',
'Visiting starting player 7 ID','Visiting starting player 8 ID','Visiting starting player 9 ID']
homeIDColumns = [
'Home team manager ID','Home starting pitcher ID',
'Home starting player 1 ID','Home starting player 2 ID','Home starting player 3 ID',
'Home starting player 4 ID','Home starting player 5 ID','Home starting player 6 ID',
'Home starting player 7 ID','Home starting player 8 ID','Home starting player 9 ID']
identifier = people[['playerID','retroID']].drop_duplicates(subset=['retroID']).dropna()
for column in visitingIDColumns+homeIDColumns:
merged = pd.merge(gameLogs[column], identifier, left_on=column, right_on='retroID', how="left")
gameLogs[column] = merged['playerID']
gameLogs = filterFrame(gameLogs, generalColumns+visitingStatsColumns+homeStatsColumns+visitingIDColumns+homeIDColumns)
gameLogs = gameLogs.dropna(subset=generalColumns)
for column in visitingStatsColumns+homeStatsColumns:
gameLogs = gameLogs[(gameLogs[column]>=0) | (gameLogs[column].isna())]
return gameLogs.reset_index(drop=True)
def filterPeople(people):
people['yearID'] = people['birthYear']
people['weight'] = 0.453592*people['weight']
people['height'] = 0.0254*people['height']
people['bats right'] = (people['bats']=="R") | (people['bats']=="B")
people['bats left'] = (people['bats']=="L") | (people['bats']=="B")
people['throws right'] = people['throws']=="R"
people = filterFrame(people, ['yearID','playerID','weight','height','bats right', 'bats left', 'throws right'])
return people.reset_index(drop=True)
def filterTeams(teams):
teams = filterFrame(teams,
['yearID','teamIDretro','divID','Rank','G','W','L','DivWin','LgWin','WSWin','R','AB','H','2B','3B','HR','BB','SO','SB','CS','HBP','SF','RA','ER','ERA','SHO','SV','HA','HRA','BBA','SOA','E','DP','FP'],
{"teamIDretro":"teamID","divID":"Division","G":"Games","W":"Wins","L":"Losses","DivWin":"Division winner","LgWin":"League winner","WSWin":"World series winner","R":"Runs scored","AB":"At bats"
,"H":"Hits by batters","2B":"Doubles","3B":"Triples","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","SB":"Stolen bases","CS":"Cought stealing","HBP":"Batters hit by pitch"
,"SF":"Sacrifice flies","RA":"Opponents runs scored","ER":"Earned runs allowed","ERA":"Earned runs average","SHO":"Shutouts","SV":"Saves","HA":"Hits allowed"
,"HRA":"Homeruns allowed","BBA":"Walks allowed","SOA":"Strikeouts allowed","E":"Errors","DP":"Double plays","FP":"Fielding percentage"})
teams['division C'] = (teams['Division']=="C")
teams['division E'] = (teams['Division']=="E")
teams['division W'] = (teams['Division']=="W")
teams = teams.drop(columns=['Division'])
teams['Division winner'] = (teams['Division winner']=='Y')
teams['League winner'] = (teams['League winner']=='Y')
teams['World series winner']= (teams['World series winner']=='Y')
return teams.reset_index(drop=True)
print("start filtering")
dataFrames = load(path/'Input')
print("filter gameLogs")
dataFrames['gameLogs'] = filterGameLogs(dataFrames['gameLogs'], dataFrames['people'])
print("filter people")
dataFrames['people'] = filterPeople(dataFrames['people'])
print("filter teams")
dataFrames['teams'] = filterTeams(dataFrames['teams'])
print("filter managers")
dataFrames['managers'] = filterFrame(dataFrames['managers'],
['yearID','playerID','G','W','L'],
{"G":"Games","W":"Wins","L":"Losses"})
print("filter fieldings")
dataFrames['fieldings'] = filterFrame(dataFrames['fieldings'],
['yearID','playerID','PO','A','E','DP','PB','WP','SB','CS'],
{"PO":"Putouts","A":"Assists","E":"Error","DP":"Double plays","PB":"Passed Balls","WP":"Wild Pitches","SB":"Opponent Stolen Bases","CS":"Opponents Caught Stealing"})
print("filter pitchings")
dataFrames['pitchings'] = filterFrame(dataFrames['pitchings'],
['yearID','playerID','W','L','G','H','ER','HR','BB','SO','BAOpp','ERA','IBB','WP','HBP','BK','BFP','R','SH','SF','GIDP','SV','SHO'],
{"G":"Games","W":"Wins","L":"Losses","H":"Hits","ER":"Earned Runs","HR":"Homeruns","BB":"Walks","SO":"Strikeouts","BAOpp":"Opponent batting average","ERA":"ERA"
,"IBB":"Intentional walks","WP":"Wild pitches","HBP":"Batters hit by pitch","BK":"Balks","BFP":"Batters faced","R":"Runs allowed","SH":"Batters sacrifices"
,"SF":"Batters sacrifice flies","GIDP":"Grounded into double plays","SV":"Saves","SHO":"Shutouts"})
print("filter battings")
dataFrames['battings'] = filterFrame(dataFrames['battings'],
['yearID','playerID','AB','R','H','2B','3B','HR','RBI','SB','CS','BB','SO','IBB','HBP','SH','SF','GIDP'],
{"AB":"At bats","R":"Runs","H":"Hits","2B":"Doubles","3B":"Triples","HR":"Homeruns","RBI":"Runs batted in","SB":"Stolen bases","CS":"Caught stealing"
,"BB":"Base on balls","SO":"Strikeouts","IBB":"Intentional walks","HBP":"Hit by pitch","SH":"Sacrifice hits","SF":"Sacrifice flies","GIDP":"Grounded into double plays"})
print("data filtered")
if saveState:
save(path/'Filtered', dataFrames)
return dataFrames
def replace(path, dataFrames, default="mean", lastKnownState=True, saveState=True, inpurity=0.5):
def replaceFrame(frame, targets, gameLogs, default, lastKnownState, inpurity):
#define ID column
mID = 'playerID'
for column in frame.columns:
if column=='teamID':
mID = 'teamID'
break
if column=='playerID':
break
#drop inpure columns
nanFrame = frame.isna().sum().reset_index()
nanFrame['inpurity'] = nanFrame[0]/frame.index.size
frame = frame[nanFrame[nanFrame['inpurity']<=inpurity]['index'].tolist()]
#creating frame containing only usefull data
onlyFrame = None
for column in targets:
temp = gameLogs[['Date',column]]
temp['yearID'] = temp['Date'].dt.year-1
temp = temp.rename(columns={column:mID})
onlyFrame = pd.concat([onlyFrame, temp]).drop(columns=['Date']).drop_duplicates().dropna().reset_index(drop=True)
#combining duplicates
aggregators = {}
for column in frame.drop(columns=['yearID',mID]).columns:
if (column.find("average")>-1) or (column.find("percentage")>-1):
aggregators[column] = 'mean'
elif (column.find("winner")>-1) or (column.find("division")>-1) or (column.find("Rank")>-1):
aggregators[column] = 'max'
else:
aggregators[column] = 'sum'
temp = frame[frame.duplicated(keep=False, subset=['yearID',mID])]
temp2 = pd.merge(temp[['yearID',mID]],temp.drop(columns=['yearID',mID]).notna(), left_index=True, right_index=True).groupby(['yearID',mID], as_index=False).sum()
temp = temp.groupby(['yearID',mID], as_index=False).agg(aggregators)
for column in temp.columns:
vec = temp2[column]==0
col = temp[column]
col[vec] = None
temp[column] = col
frame = frame.drop_duplicates(keep=False, subset=['yearID',mID])
frame = pd.concat([frame, temp])
mIDs = np.array(list(dict.fromkeys(frame[mID].unique().tolist()+onlyFrame[mID].unique().tolist())))
years = np.array(list(dict.fromkeys(frame['yearID'].unique().tolist()+onlyFrame['yearID'].unique().tolist())))
fullFrame = pd.DataFrame(np.array(np.meshgrid(years, mIDs)).T.reshape(-1,2), columns=['yearID',mID])
fullFrame['yearID'] = | pd.to_numeric(fullFrame['yearID']) | pandas.to_numeric |
# -*- coding: utf-8 -*-
"""
we test .agg behavior / note that .apply is tested
generally in test_groupby.py
"""
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
from functools import partial
import numpy as np
from numpy import nan
import pandas as pd
from pandas import (date_range, MultiIndex, DataFrame,
Series, Index, bdate_range, concat)
from pandas.util.testing import assert_frame_equal, assert_series_equal
from pandas.core.groupby import SpecificationError, DataError
from pandas.compat import OrderedDict
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
class TestGroupByAggregate(object):
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_agg_api(self):
# GH 6337
# http://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame({'data1': np.random.randn(5),
'data2': np.random.randn(5),
'key1': ['a', 'a', 'b', 'b', 'a'],
'key2': ['one', 'two', 'one', 'two', 'one']})
grouped = df.groupby('key1')
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ['data1', 'data2']
result = grouped.agg(peak_to_peak)
assert_frame_equal(result, expected)
def test_agg_regression1(self):
grouped = self.tsframe.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.mean)
expected = grouped.mean()
assert_frame_equal(result, expected)
def test_agg_datetimes_mixed(self):
data = [[1, '2012-01-01', 1.0], [2, '2012-01-02', 2.0], [3, None, 3.0]]
df1 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
data = [[row[0], datetime.strptime(row[1], '%Y-%m-%d').date() if row[1]
else None, row[2]] for row in data]
df2 = DataFrame({'key': [x[0] for x in data],
'date': [x[1] for x in data],
'value': [x[2] for x in data]})
df1['weights'] = df1['value'] / df1['value'].sum()
gb1 = df1.groupby('date').aggregate(np.sum)
df2['weights'] = df1['value'] / df1['value'].sum()
gb2 = df2.groupby('date').aggregate(np.sum)
assert (len(gb1) == len(gb2))
def test_agg_period_index(self):
from pandas import period_range, PeriodIndex
prng = period_range('2012-1-1', freq='M', periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start='1999-01', periods=5, freq='M')
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
series = [('s1', s1), ('s2', s2)]
df = DataFrame.from_items(series)
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes(self):
# GH 12821
df = DataFrame(
{'class': ['A', 'A', 'B', 'B', 'C', 'C', 'D', 'D'],
'time': date_range('1/1/2011', periods=8, freq='H')})
df.loc[[0, 1, 2, 5], 'time'] = None
# test for `first` function
exp = df.loc[[0, 3, 4, 6]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.first(), exp)
assert_frame_equal(grouped.agg('first'), exp)
assert_frame_equal(grouped.agg({'time': 'first'}), exp)
assert_series_equal(grouped.time.first(), exp['time'])
assert_series_equal(grouped.time.agg('first'), exp['time'])
# test for `last` function
exp = df.loc[[0, 3, 4, 7]].set_index('class')
grouped = df.groupby('class')
assert_frame_equal(grouped.last(), exp)
assert_frame_equal(grouped.agg('last'), exp)
assert_frame_equal(grouped.agg({'time': 'last'}), exp)
assert_series_equal(grouped.time.last(), exp['time'])
assert_series_equal(grouped.time.agg('last'), exp['time'])
# count
exp = pd.Series([2, 2, 2, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.agg(len), exp)
assert_series_equal(grouped.time.size(), exp)
exp = pd.Series([0, 1, 1, 2],
index=Index(list('ABCD'), name='class'),
name='time')
assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes(self):
# similar to GH12821
# xref #11444
u = [datetime(2015, x + 1, 1) for x in range(12)]
v = list('aaabbbbbbccd')
df = pd.DataFrame({'X': v, 'Y': u})
result = df.groupby('X')['Y'].agg(len)
expected = df.groupby('X')['Y'].count()
assert_series_equal(result, expected)
def test_agg_must_agg(self):
grouped = self.df.groupby('A')['C']
pytest.raises(Exception, grouped.agg, lambda x: x.describe())
pytest.raises(Exception, grouped.agg, lambda x: x.index[:2])
def test_agg_ser_multi_key(self):
# TODO(wesm): unused
ser = self.df.C # noqa
f = lambda x: x.sum()
results = self.df.C.groupby([self.df.A, self.df.B]).aggregate(f)
expected = self.df.groupby(['A', 'B']).sum()['C']
assert_series_equal(results, expected)
def test_agg_apply_corner(self):
# nothing to group, all NA
grouped = self.ts.groupby(self.ts * np.nan)
assert self.ts.dtype == np.float64
# groupby float64 values results in Float64Index
exp = Series([], dtype=np.float64, index=pd.Index(
[], dtype=np.float64))
assert_series_equal(grouped.sum(), exp)
assert_series_equal(grouped.agg(np.sum), exp)
assert_series_equal(grouped.apply(np.sum), exp, check_index_type=False)
# DataFrame
grouped = self.tsframe.groupby(self.tsframe['A'] * np.nan)
exp_df = DataFrame(columns=self.tsframe.columns, dtype=float,
index=pd.Index([], dtype=np.float64))
assert_frame_equal(grouped.sum(), exp_df, check_names=False)
assert_frame_equal(grouped.agg(np.sum), exp_df, check_names=False)
assert_frame_equal(grouped.apply(np.sum), exp_df.iloc[:, :0],
check_names=False)
def test_agg_grouping_is_list_tuple(self):
from pandas.core.groupby import Grouping
df = tm.makeTimeDataFrame()
grouped = df.groupby(lambda x: x.year)
grouper = grouped.grouper.groupings[0].grouper
grouped.grouper.groupings[0] = Grouping(self.ts.index, list(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
grouped.grouper.groupings[0] = Grouping(self.ts.index, tuple(grouper))
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_float64_no_int64(self):
# see gh-11199
df = DataFrame({"a": [1, 2, 3, 4, 5],
"b": [1, 2, 2, 4, 5],
"c": [1, 2, 3, 4, 5]})
expected = DataFrame({"a": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a"]].mean()
tm.assert_frame_equal(result, expected)
expected = DataFrame({"a": [1, 2.5, 4, 5],
"c": [1, 2.5, 4, 5]},
index=[1, 2, 4, 5])
expected.index.name = "b"
result = df.groupby("b")[["a", "c"]].mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency(self):
# GH 9052
# make sure that the aggregates via dict
# are consistent
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
grouped = df.groupby(['A', 'B'])
c_mean = grouped['C'].mean()
c_sum = grouped['C'].sum()
d_mean = grouped['D'].mean()
d_sum = grouped['D'].sum()
result = grouped['D'].agg(['sum', 'mean'])
expected = pd.concat([d_sum, d_mean],
axis=1)
expected.columns = ['sum', 'mean']
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg([np.sum, np.mean])
expected = pd.concat([c_sum,
c_mean,
d_sum,
d_mean],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped[['D', 'C']].agg([np.sum, np.mean])
expected = pd.concat([d_sum,
d_mean,
c_sum,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['D', 'C'],
['sum', 'mean']])
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': 'mean', 'D': 'sum'})
expected = pd.concat([d_sum,
c_mean],
axis=1)
assert_frame_equal(result, expected, check_like=True)
result = grouped.agg({'C': ['mean', 'sum'],
'D': ['mean', 'sum']})
expected = pd.concat([c_mean,
c_sum,
d_mean,
d_sum],
axis=1)
expected.columns = MultiIndex.from_product([['C', 'D'],
['mean', 'sum']])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = grouped[['D', 'C']].agg({'r': np.sum,
'r2': np.mean})
expected = pd.concat([d_sum,
c_sum,
d_mean,
c_mean],
axis=1)
expected.columns = MultiIndex.from_product([['r', 'r2'],
['D', 'C']])
assert_frame_equal(result, expected, check_like=True)
def test_agg_dict_renaming_deprecation(self):
# 15931
df = pd.DataFrame({'A': [1, 1, 1, 2, 2],
'B': range(5),
'C': range(5)})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False) as w:
df.groupby('A').agg({'B': {'foo': ['sum', 'max']},
'C': {'bar': ['count', 'min']}})
assert "using a dict with renaming" in str(w[0].message)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
df.groupby('A')[['B', 'C']].agg({'ma': 'max'})
with tm.assert_produces_warning(FutureWarning) as w:
df.groupby('A').B.agg({'foo': 'count'})
assert "using a dict on a Series for aggregation" in str(
w[0].message)
def test_agg_compat(self):
# GH 12334
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = MultiIndex.from_tuples([('C', 'sum'),
('C', 'std')])
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'C': ['sum', 'std']})
assert_frame_equal(result, expected, check_like=True)
expected = pd.concat([g['D'].sum(),
g['D'].std()],
axis=1)
expected.columns = ['C', 'D']
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'C': 'sum', 'D': 'std'})
assert_frame_equal(result, expected, check_like=True)
def test_agg_nested_dicts(self):
# API change for disallowing these types of nested dicts
df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',
'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'two',
'two', 'two', 'one', 'two'],
'C': np.random.randn(8) + 1.0,
'D': np.arange(8)})
g = df.groupby(['A', 'B'])
def f():
g.aggregate({'r1': {'C': ['mean', 'sum']},
'r2': {'D': ['mean', 'sum']}})
pytest.raises(SpecificationError, f)
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g.agg({'C': {'ra': ['mean', 'std']},
'D': {'rb': ['mean', 'std']}})
expected = pd.concat([g['C'].mean(), g['C'].std(), g['D'].mean(),
g['D'].std()], axis=1)
expected.columns = pd.MultiIndex.from_tuples([('ra', 'mean'), (
'ra', 'std'), ('rb', 'mean'), ('rb', 'std')])
assert_frame_equal(result, expected, check_like=True)
# same name as the original column
# GH9052
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
expected = g['D'].agg({'result1': np.sum, 'result2': np.mean})
expected = expected.rename(columns={'result1': 'D'})
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
result = g['D'].agg({'D': np.sum, 'result2': np.mean})
assert_frame_equal(result, expected, check_like=True)
def test_agg_python_multiindex(self):
grouped = self.mframe.groupby(['A', 'B'])
result = grouped.agg(np.mean)
expected = grouped.mean()
tm.assert_frame_equal(result, expected)
def test_aggregate_str_func(self):
def _check_results(grouped):
# single series
result = grouped['A'].agg('std')
expected = grouped['A'].std()
assert_series_equal(result, expected)
# group frame by function name
result = grouped.aggregate('var')
expected = grouped.var()
assert_frame_equal(result, expected)
# group frame by function dict
result = grouped.agg(OrderedDict([['A', 'var'], ['B', 'std'],
['C', 'mean'], ['D', 'sem']]))
expected = DataFrame(OrderedDict([['A', grouped['A'].var(
)], ['B', grouped['B'].std()], ['C', grouped['C'].mean()],
['D', grouped['D'].sem()]]))
assert_frame_equal(result, expected)
by_weekday = self.tsframe.groupby(lambda x: x.weekday())
_check_results(by_weekday)
by_mwkday = self.tsframe.groupby([lambda x: x.month,
lambda x: x.weekday()])
_check_results(by_mwkday)
def test_aggregate_item_by_item(self):
df = self.df.copy()
df['E'] = ['a'] * len(self.df)
grouped = self.df.groupby('A')
# API change in 0.11
# def aggfun(ser):
# return len(ser + 'a')
# result = grouped.agg(aggfun)
# assert len(result.columns) == 1
aggfun = lambda ser: ser.size
result = grouped.agg(aggfun)
foo = (self.df.A == 'foo').sum()
bar = (self.df.A == 'bar').sum()
K = len(result.columns)
# GH5782
# odd comparisons can result here, so cast to make easy
exp = pd.Series(np.array([foo] * K), index=list('BCD'),
dtype=np.float64, name='foo')
tm.assert_series_equal(result.xs('foo'), exp)
exp = pd.Series(np.array([bar] * K), index=list('BCD'),
dtype=np.float64, name='bar')
tm.assert_almost_equal(result.xs('bar'), exp)
def aggfun(ser):
return ser.size
result = DataFrame().groupby(self.df.A).agg(aggfun)
assert isinstance(result, DataFrame)
assert len(result) == 0
def test_agg_item_by_item_raise_typeerror(self):
from numpy.random import randint
df = DataFrame(randint(10, size=(20, 10)))
def raiseException(df):
pprint_thing('----------------------------------------')
pprint_thing(df.to_string())
raise TypeError
pytest.raises(TypeError, df.groupby(0).agg, raiseException)
def test_series_agg_multikey(self):
ts = tm.makeTimeSeries()
grouped = ts.groupby([lambda x: x.year, lambda x: x.month])
result = grouped.agg(np.sum)
expected = grouped.sum()
assert_series_equal(result, expected)
def test_series_agg_multi_pure_python(self):
data = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def bad(x):
assert (len(x.base) > 0)
return 'foo'
result = data.groupby(['A', 'B']).agg(bad)
expected = data.groupby(['A', 'B']).agg(lambda x: 'foo')
assert_frame_equal(result, expected)
def test_cythonized_aggers(self):
data = {'A': [0, 0, 0, 0, 1, 1, 1, 1, 1, 1., nan, nan],
'B': ['A', 'B'] * 6,
'C': np.random.randn(12)}
df = DataFrame(data)
df.loc[2:10:2, 'C'] = nan
def _testit(name):
op = lambda x: getattr(x, name)()
# single column
grouped = df.drop(['B'], axis=1).groupby('A')
exp = {}
for cat, group in grouped:
exp[cat] = op(group['C'])
exp = DataFrame({'C': exp})
exp.index.name = 'A'
result = op(grouped)
assert_frame_equal(result, exp)
# multiple columns
grouped = df.groupby(['A', 'B'])
expd = {}
for (cat1, cat2), group in grouped:
expd.setdefault(cat1, {})[cat2] = op(group['C'])
exp = DataFrame(expd).T.stack(dropna=False)
exp.index.names = ['A', 'B']
exp.name = 'C'
result = op(grouped)['C']
if name in ['sum', 'prod']:
assert_series_equal(result, exp)
_testit('count')
_testit('sum')
_testit('std')
_testit('var')
_testit('sem')
_testit('mean')
_testit('median')
_testit('prod')
_testit('min')
_testit('max')
def test_cython_agg_boolean(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': np.random.randint(0, 2, 50).astype('bool')})
result = frame.groupby('a')['b'].mean()
expected = frame.groupby('a')['b'].agg(np.mean)
assert_series_equal(result, expected)
def test_cython_agg_nothing_to_agg(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
pytest.raises(DataError, frame.groupby('a')['b'].mean)
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25})
pytest.raises(DataError, frame[['b']].groupby(frame['a']).mean)
def test_cython_agg_nothing_to_agg_with_dates(self):
frame = DataFrame({'a': np.random.randint(0, 5, 50),
'b': ['foo', 'bar'] * 25,
'dates': pd.date_range('now', periods=50,
freq='T')})
with tm.assert_raises_regex(DataError,
"No numeric types to aggregate"):
frame.groupby('b').dates.mean()
def test_cython_agg_frame_columns(self):
# #2113
df = DataFrame({'x': [1, 2, 3], 'y': [3, 4, 5]})
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
df.groupby(level=0, axis='columns').mean()
def test_cython_agg_return_dict(self):
# GH 16741
ts = self.df.groupby('A')['B'].agg(
lambda x: x.value_counts().to_dict())
expected = Series([{'two': 1, 'one': 1, 'three': 1},
{'two': 2, 'one': 2, 'three': 1}],
index= | Index(['bar', 'foo'], name='A') | pandas.Index |
import os
import pandas as pd
import module_debug
import importlib as implib
from typing import List, Dict, Tuple, Union
import module_dataset_analysis; implib.reload(module_dataset_analysis)
import module_io; implib.reload(module_io)
p = print
#____________________________________________________________________________________________________________________________________
def rename_hrv(df: pd.DataFrame
) -> pd.DataFrame:
"""
:param df: df with standardized glyco features
:return dataframe with renamed columns
"""
# get column names
column_names = df.columns.to_list()
# rename columns
for i in range(len(column_names)):
column_names[i] = column_names[i].replace('(‰)', '')\
.replace(' Prima', '-1').replace(' Secunda', '-2').replace(' Tertia', '-3')
# assign column names
df.columns = column_names
return df
#____________________________________________________________________________________________________________________________________
def read_test_ecg_lt3c(source_dir: str,
intervals: List[str],
drop_frequency_domain: bool = True,
stop_after: Union[int, float] = float('inf'),
write_dir: str = None) -> Union[Tuple[dict, pd.DataFrame], None]:
"""
NOTE: to be refactored to work with all datasets
Parse files downloaded from test.ecg
:param source_dir: dir of test.ecg files
:param intervals: the intervals for which to parse the files, with corresponding time unit. ex: ['1h', '2h, ... '24h']
:param drop_frequency_domain: if True, don't consider frequency domain parameters
:param stop_after: stop after certain amount of patients if debugging or some other rechecking is needed
:param write_dir: write directory
:return: if no write directory is specified, the parsed dataframes are returned together with the samples and patients info
"""
# initialize paths and files
source = source_dir
patients_classification_path = 'C:\\Users\\ilija\\Pycharm Projects\\GLYCO\\DATA\\Classification_of_Patients\\NewClasses__ND_GD_BD.csv'
patients_classification = pd.read_csv(patients_classification_path)
patients_hba1c_path = 'C:\\Users\\ilija\\Pycharm Projects\\GLYCO\\DATA\\Clinical_Records\\PatientID_Hba1C_06102020.csv'
patients_hba1c = | pd.read_csv(patients_hba1c_path) | pandas.read_csv |
# -*- coding: utf-8 -*-
"""omin.intermine_tools
Provides
--------
Tools for querying the intermine database.
Tools for investigating with fasta and uniprot.
"""
# Copyright 2018 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>.
# ==========
# TO DO LIST
# ==========
# FIXME: Make all of these functions verbose.
# FIXME: ID the brittle functions.
# FIXME: Extent to other intermine APIs.
# ----------------
# EXTERNAL IMPORTS
# ----------------
import re
import pandas as pd
import numpy as np
from urllib.error import HTTPError
from urllib.request import urlopen
# Try to import the intermine package.
try:
from intermine.webservice import Service
except ImportError as err:
print("Cannot import Intermine:", err)
# ==============
# INTERMINETOOLS
# ==============
class IntermineTools(object):
@staticmethod
def mousemine_accession_lookup(accession, verbose=False):
"""Return an intermine query object for a given protien accession number.
The query is design to find Entrez gene IDs for proetiens of a given accession
number. It only considers the cannonical isoform of whole mouse proteins(not fragments)
presnt in the Swiss-Prot or TrEMBL databases.
Parameters
----------
accession: str
Accession numbers in the form: XXXXXX-N, will be shortented to: XXXXXX.
verbose: bool
Defaults to False set to True for printed error messages.
Returns
-------
result: (:obj)
Intermine query object.
See Also
--------
mousemine_query_format
mousemine_accession_lookup_reduce
mousemine_accession_to_entrez
"""
query = None
# Remove the isoform number if present.
if "-" in accession:
accession = accession.split('-')[0]
try:
# Begin intermine call.
service = Service("http://www.mousemine.org/mousemine/service")
query = service.new_query("Gene")
query.add_view("primaryIdentifier",
"ncbiGeneNumber",
"proteins.primaryAccession",
"symbol",
"proteins.synonyms.value",
"proteins.length",
"proteins.isFragment",
"proteins.dataSets.name")
# Declare sort order of results.
query.add_sort_order("Gene.symbol", "ASC")
query.add_sort_order("Gene.proteins.dataSets.name", "ASC")
query.add_sort_order("Gene.proteins.length", "DESC")
# Declare constraints.
query.add_constraint("organism.taxonId", "=", "10090", code="B")
query.add_constraint("proteins.isFragment", "=", "False", code="C")
query.add_constraint("proteins.dataSets.name",
"ONE OF",
["Swiss-Prot data set", "TrEMBL data set"],
code="D")
# Main constraint: find all with accession number X.
query.add_constraint("Gene", "LOOKUP", accession, code="A")
return query
except Exception as err:
if verbose:
print(err)
return query
@staticmethod
def mousemine_query_format(query, format_list=None, verbose=False):
"""Return a DataFrame from an intermine query object.
Parameters
----------
query: (:obj)
Intermine query object.
format_list: list
List of information to include in the result.
Returns
-------
result: pandas.DataFrame
With columns labels based on format list.
"""
result = None
if format_list is None:
format_list = ["primaryIdentifier", "ncbiGeneNumber", "proteins.primaryAccession",
"symbol", "proteins.synonyms.value", "proteins.isFragment"]
# Remove the dots from the terms in format for DataFrame safety.
column_labels = list(map(lambda x: x.replace('.', ''), format_list))
result = list()
try:
for row in query.rows():
row_out = [row[j] for j in format_list]
result.append(row_out)
result = | pd.DataFrame(result) | pandas.DataFrame |
import unittest
import os
from collections import defaultdict
from unittest import mock
import warnings
import pandas as pd
import numpy as np
from dataprofiler.profilers import FloatColumn
from dataprofiler.profilers.profiler_options import FloatOptions
test_root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestFloatColumn(unittest.TestCase):
def test_base_case(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 0)
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
self.assertEqual(profiler.sum, 0)
self.assertEqual(profiler.mean, 0)
self.assertTrue(profiler.median is np.nan)
self.assertEqual([np.nan], profiler.mode)
self.assertTrue(profiler.variance is np.nan)
self.assertTrue(profiler.skewness is np.nan)
self.assertTrue(profiler.kurtosis is np.nan)
self.assertTrue(profiler.stddev is np.nan)
self.assertIsNone(profiler.histogram_selection)
self.assertEqual(len(profiler.quantiles), 999)
self.assertIsNone(profiler.data_type_ratio)
def test_single_data_variance_case(self):
data = pd.Series([1.5]).apply(str)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.match_count, 1.0)
self.assertEqual(profiler.mean, 1.5)
self.assertTrue(profiler.variance is np.nan)
data = pd.Series([2.5]).apply(str)
profiler.update(data)
self.assertEqual(profiler.match_count, 2)
self.assertEqual(profiler.mean, 2.0)
self.assertEqual(profiler.variance, 0.5)
def test_profiled_precision(self):
"""
Checks whether the precision for the profiler is correct.
:return:
"""
df_1 = pd.Series([0.4, 0.3, 0.1, 0.1, 0.1]).apply(str)
df_2 = pd.Series([0.11, 0.11, 0.12, 2.11]).apply(str)
df_3 = pd.Series([4.114, 3.161, 2.512, 2.131]).apply(str)
df_mix = pd.Series([4.1, '3.', 2.52, 2.13143]).apply(str)
float_profiler = FloatColumn("Name")
float_profiler.update(df_3)
self.assertEqual(4, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_2)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler.update(df_1)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(4, float_profiler.precision['max'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_mix)
self.assertEqual(1, float_profiler.precision['min'])
self.assertEqual(6, float_profiler.precision['max'])
# edge cases #
# integer with 0s on right and left side
df_ints = pd.Series(['0013245678', '123456700', '0012345600'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_ints)
self.assertEqual(6, float_profiler.precision['min'])
self.assertEqual(8, float_profiler.precision['max'])
# scientific
df_scientific = pd.Series(['1.23e-3', '2.2344', '1.244e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_scientific)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# plus
df_plus = pd.Series(['+1.3e-3', '+2.244', '+1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_plus)
self.assertEqual(2, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# minus
df_minus = pd.Series(['-1.3234e-3', '-0.244', '-1.3324e4'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_minus)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# spaces around values
df_spaces = pd.Series([' -1.3234e-3 ', ' -0.244 '])
float_profiler = FloatColumn("Name")
float_profiler.update(df_spaces)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
# constant precision
df_constant = pd.Series(['1.34', '+1.23e-4', '00101',
'+100.', '0.234', '-432', '.954',
'+.342', '-123e1', '23.1'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_constant)
self.assertEqual(3, float_profiler.precision['min'])
self.assertEqual(3, float_profiler.precision['max'])
self.assertEqual(3, float_profiler.precision['mean'])
self.assertEqual(10, float_profiler.precision['sample_size'])
self.assertEqual(0, float_profiler.precision['var'])
self.assertEqual(0, float_profiler.precision['std'])
# random precision
df_random = pd.Series(['+ 9', '-.3', '-1e-3', '3.2343', '0',
'1230', '0.33', '4.3', '302.1', '-4.322'])
float_profiler = FloatColumn("Name")
float_profiler.update(df_random)
self.assertEqual(0, float_profiler.precision['min'])
self.assertEqual(5, float_profiler.precision['max'])
self.assertEqual(2.4444, float_profiler.precision['mean'])
self.assertEqual(9, float_profiler.precision['sample_size'])
self.assertEqual(2.7778, float_profiler.precision['var'])
self.assertEqual(1.6667, float_profiler.precision['std'])
# Ensure order doesn't change anything
df_random_order = pd.Series(['1230', '0.33', '4.3', '302.1', '-4.322',
'+ 9', '-.3', '-1e-3', '3.2343', '0'])
float_profiler_order = FloatColumn("Name")
float_profiler_order.update(df_random)
self.assertDictEqual(
float_profiler.precision, float_profiler_order.precision
)
# check to make sure all formats of precision are correctly predicted
samples = [
# value, min expected precision
['10.01', 4],
['.01', 1],
['0.01', 1],
['-0.01', 1],
['+0.01', 1],
[' +0.013', 2],
[' -1.3234e-3 ', 5],
[' 0012345600 ', 6],
[' 0012345600. ', 8],
[' -0012345600. ', 8],
]
for sample in samples:
df_series = pd.Series([sample[0]])
min_expected_precision = sample[1]
precision = FloatColumn._get_float_precision(df_series)
self.assertEqual(min_expected_precision, precision['min'],
msg='Errored for: {}'.format(sample[0]))
def test_profiled_min(self):
# test with multiple values
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[1:])
self.assertEqual(profiler.min, -4)
profiler.update(df)
self.assertEqual(profiler.min, -5)
profiler.update(pd.Series(['-4']))
self.assertEqual(profiler.min, -5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.min, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.min, 0.0)
def test_profiled_max(self):
data = np.linspace(-5, 5, 11)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df[:-1])
self.assertEqual(profiler.max, 4)
profiler.update(df)
self.assertEqual(profiler.max, 5)
profiler.update(pd.Series(['4']))
self.assertEqual(profiler.max, 5)
# empty data
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
self.assertEqual(profiler.max, None)
# data with None value
df = pd.Series([2.0, 3.0, None, np.nan]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 3.0)
# data with one value
df = pd.Series([2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value
df = pd.Series([2.0, 2.0, 2.0, 2.0, 2.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 2.0)
# data with unique value as zero
df = pd.Series([0.0, 0.0, 0.0, 0.0, 0.0]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.max, 0.0)
def test_profiled_mode(self):
# disabled mode
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.mode.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertListEqual([np.nan], profiler.mode)
# same values
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertListEqual([1], profiler.mode)
# multiple modes
df = pd.Series([1.5, 1.5, 2.5, 2.5, 3.5, 3.5, 4.1, 4.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.5, 2.5, 3.5, 4.1], profiler.mode,
decimal=2)
# with different values
df = pd.Series([1.25, 1.25, 1.25, 1.25, 2.9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.25], profiler.mode, decimal=2)
# with negative values
df = pd.Series([-1.1, 1.9, 1.9, 1.9, 2.1, 2.01, 2.01, 2.01]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([1.9, 2.01], profiler.mode,
decimal=2)
# all unique values
df = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
# By default, returns 5 of the possible modes
np.testing.assert_array_almost_equal([1, 2, 3, 4, 5],
profiler.mode, decimal=2)
# Edge case where mode appears later in the dataset
df = pd.Series([1, 2, 3, 4, 5, 6.2, 6.2]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([6.2], profiler.mode, decimal=2)
df = pd.Series([2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7.1, 7.1, 7.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
np.testing.assert_array_almost_equal([7.1], profiler.mode, decimal=2)
def test_top_k_modes(self):
# Default options
options = FloatOptions()
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(5, len(profiler.mode))
# Test if top_k_modes is less than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 2
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertEqual(2, len(profiler.mode))
# Test if top_k_mode is greater than the number of modes
options = FloatOptions()
options.mode.top_k_modes = 8
df = pd.Series([1, 1, 2, 2, 3, 3, 4, 4, 5, 5]).apply(str)
profiler = FloatColumn(df.name, options)
profiler.update(df)
# Only 5 possible modes so return 5
self.assertEqual(5, len(profiler.mode))
def test_profiled_median(self):
# disabled median
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
options = FloatOptions()
options.median.is_enabled = False
profiler = FloatColumn(df.name, options)
profiler.update(df)
self.assertTrue(profiler.median is np.nan)
# same values
df = pd.Series([1, 1, 1, 1, 1, 1, 1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(1, profiler.median)
# median lies between two values (2.5 and 3.5)
df = pd.Series([1.5, 1.5, 2.5, 2.5, 3.5, 3.5, 4.1, 4.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(3, profiler.median, places=2)
# with different values
df = pd.Series([1.25, 1.25, 1.25, 1.25, 2.9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(1.25, profiler.median, places=2)
# with negative values, median lies in between values
df = pd.Series([-1.1, 1.9, 1.9, 1.9, 2.1, 2.1, 2.1, 2.1]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(2, profiler.median, places=2)
# all unique values
df = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertAlmostEqual(5, profiler.median, places=2)
def test_profiled_mean_and_variance(self):
"""
Checks the mean and variance of profiled numerical columns.
:return:
"""
def mean(df):
total = 0
for item in df:
total += item
return total / len(df)
def var(df):
var = 0
mean_df = mean(df)
for item in df:
var += (item - mean_df) ** 2
return var / (len(df) - 1)
def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):
delta = mean_b - mean_a
m_a = var_a * (count_a - 1)
m_b = var_b * (count_b - 1)
M2 = m_a + m_b + delta ** 2 * count_a * count_b / (
count_a + count_b)
return M2 / (count_a + count_b - 1)
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertEqual(mean(df1), num_profiler.mean)
self.assertEqual(var(df1), num_profiler.variance)
self.assertEqual(np.sqrt(var(df1)), num_profiler.stddev)
variance = batch_variance(
mean_a=num_profiler.mean, var_a=num_profiler.variance,
count_a=num_profiler.match_count,
mean_b=mean(df2), var_b=var(df2), count_b=df2.count()
)
num_profiler.update(df2.apply(str))
df = pd.concat([df1, df2])
self.assertEqual(mean(df), num_profiler.mean)
self.assertEqual(variance, num_profiler.variance)
self.assertEqual(np.sqrt(variance), num_profiler.stddev)
variance = batch_variance(
mean_a=num_profiler.mean, var_a=num_profiler.variance,
count_a=num_profiler.match_count,
mean_b=mean(df3), var_b=var(df3), count_b=df3.count()
)
num_profiler.update(df3.apply(str))
df = pd.concat([df1, df2, df3])
self.assertEqual(mean(df), num_profiler.mean)
self.assertEqual(variance, num_profiler.variance)
self.assertEqual(np.sqrt(variance), num_profiler.stddev)
def test_profiled_skewness(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertEqual(0, num_profiler.skewness)
num_profiler.update(df2.apply(str))
self.assertAlmostEqual(np.sqrt(22 * 21) / 20 * 133 / 750, num_profiler.skewness)
num_profiler.update(df3.apply(str))
self.assertAlmostEqual(-0.3109967, num_profiler.skewness)
def test_profiled_kurtosis(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
num_profiler = FloatColumn(df1.name)
num_profiler.update(df1.apply(str))
self.assertAlmostEqual(-6 / 5, num_profiler.kurtosis)
num_profiler.update(df2.apply(str))
self.assertAlmostEqual(-0.390358, num_profiler.kurtosis)
num_profiler.update(df3.apply(str))
self.assertAlmostEqual(0.3311739, num_profiler.kurtosis)
def test_bias_correction_option(self):
# df1 = [-5, -4, ..., 3, 4, 5]
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
# df2 = [-3, -2.5, -2, ..., 1.5, 2]
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
# df3 = [1, 1, ... , 1] (ten '1's)
data = np.full((10,), 1)
df3 = pd.Series(data)
# Disable bias correction
options = FloatOptions(); options.bias_correction.is_enabled = False
num_profiler = FloatColumn(df1.name, options=options)
num_profiler.update(df1.apply(str))
# Test biased values of variance, skewness, kurtosis
self.assertAlmostEqual(10, num_profiler.variance)
self.assertAlmostEqual(0, num_profiler.skewness)
self.assertAlmostEqual(89/50 - 3, num_profiler.kurtosis)
df2_ints = df2[df2 == df2.round()]
num_profiler.update(df2.apply(str))
df = pd.concat([df1, df2_ints])
self.assertAlmostEqual(6.3125, num_profiler.variance)
self.assertAlmostEqual(0.17733336, num_profiler.skewness)
self.assertAlmostEqual(-0.56798353, num_profiler.kurtosis)
df3_ints = df3[df3 == df3.round()]
num_profiler.update(df3.apply(str))
df = pd.concat([df1, df2_ints, df3_ints])
self.assertAlmostEqual(4.6755371, num_profiler.variance)
self.assertAlmostEqual(-0.29622465, num_profiler.skewness)
self.assertAlmostEqual(0.099825352, num_profiler.kurtosis)
def test_bias_correction_merge(self):
data = np.linspace(-5, 5, 11).tolist()
df1 = pd.Series(data)
data = np.linspace(-3, 2, 11).tolist()
df2 = pd.Series(data)
data = np.full((10,), 1)
df3 = pd.Series(data)
# Disable bias correction
options = FloatOptions();
options.bias_correction.is_enabled = False
num_profiler1 = FloatColumn(df1.name, options=options)
num_profiler1.update(df1.apply(str))
self.assertAlmostEqual(10, num_profiler1.variance)
self.assertAlmostEqual(0, num_profiler1.skewness)
self.assertAlmostEqual(89 / 50 - 3, num_profiler1.kurtosis)
num_profiler2 = FloatColumn(df2.name)
num_profiler2.update(df2.apply(str))
num_profiler = num_profiler1 + num_profiler2
self.assertFalse(num_profiler.bias_correction)
self.assertAlmostEqual(6.3125, num_profiler.variance)
self.assertAlmostEqual(0.17733336, num_profiler.skewness)
self.assertAlmostEqual(-0.56798353, num_profiler.kurtosis)
num_profiler3 = FloatColumn(df3.name)
num_profiler3.update(df3.apply(str))
num_profiler = num_profiler1 + num_profiler2 + num_profiler3
self.assertFalse(num_profiler.bias_correction)
self.assertAlmostEqual(4.6755371, num_profiler.variance)
self.assertAlmostEqual(-0.29622465, num_profiler.skewness)
self.assertAlmostEqual(0.099825352, num_profiler.kurtosis)
def test_null_values_for_histogram(self):
data = pd.Series(['-inf', 'inf'])
profiler = FloatColumn(data.name)
profiler.update(data)
profile = profiler.profile
histogram = profile['histogram']
self.assertEqual(histogram['bin_counts'], None)
self.assertEqual(histogram['bin_edges'], None)
data = pd.Series(['-2', '-1', '1', '2', '-inf', 'inf'])
profiler = FloatColumn(data.name)
profiler.update(data)
profile = profiler.profile
histogram = profile['histogram']
expected_histogram = {
'bin_counts': np.array([1, 1, 0, 2]),
'bin_edges': np.array([-2., -1., 0., 1., 2.]),
}
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(expected_histogram['bin_edges'],
histogram['bin_edges'])
def test_profiled_histogram(self):
"""
Checks the histogram of profiled numerical columns.
:return:
"""
list_data_test = []
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
df1 = pd.Series(["1.0", "2.0", "3.0", "4.0"])
expected_histogram1 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df1, expected_histogram1])
# this data has 4 bins, range of 12
# with equal bin size, each bin has the width of 3.0
df2 = pd.Series(["1.0", "5.0", "8.0", "13.0"])
expected_histogram2 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 4.0, 7.0, 10.0, 13.0]),
}
list_data_test.append([df2, expected_histogram2])
# this data has 3 bins, range of 3
# with equal bin size, each bin has the width of 1
df3 = pd.Series(["1.0", "1.0", "3.0", "4.0"])
expected_histogram3 = {
'bin_counts': np.array([2, 0, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df3, expected_histogram3])
# this data has only one unique value, not overflow
df4 = pd.Series([-10.0, -10.0, -10.0]).apply(str)
expected_histogram4 = {
'bin_counts': np.array([3]),
'bin_edges': np.array([-10.0, -10.0]),
}
list_data_test.append([df4, expected_histogram4])
# this data has only one unique value, overflow
df5 = pd.Series([-10.0 ** 20]).apply(str)
expected_histogram5 = {
'bin_counts': np.array([1]),
'bin_edges': np.array([-10.0 ** 20, -10.0 ** 20]),
}
list_data_test.append([df5, expected_histogram5])
for i, (df, expected_histogram) in enumerate(list_data_test):
profiler = FloatColumn(df.name)
profiler.update(df)
profile = profiler.profile
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
if i != 4:
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
else: # for overflow, dont use np.round
self.assertCountEqual(expected_histogram['bin_edges'],
histogram['bin_edges'])
def test_profile_histogram_w_updates(self):
"""
Checks if histogram properly resets the _profiled histogram after
merge or update.
:return:
"""
list_data_test = []
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
df1 = pd.Series(["1.0", "2.0", "3.0", "4.0"])
expected_histogram1 = {
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([1.0, 1.75, 2.5, 3.25, 4.0]),
}
list_data_test.append([df1, expected_histogram1])
# this data will be the second update of the profile.
# this results in the combination of the previous data and this data.
# the range should update to 12 from 3.
df2 = pd.Series(["1.0", "5.0", "8.0", "13.0"])
expected_histogram2 = {
'bin_counts': np.array([4, 1, 1, 1, 0, 1]),
'bin_edges': np.array([1.0, 3.0, 5.0, 7.0, 9.0, 11.0, 13.0]),
}
list_data_test.append([df2, expected_histogram2])
profiler = FloatColumn("test")
for i, (df, expected_histogram) in enumerate(list_data_test):
profiler.update(df)
self.assertIsNone(profiler.histogram_selection)
profile = profiler.profile
self.assertIsNotNone(profiler.histogram_selection)
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
# apply test to merging profiles
expected_histogram = {
'bin_edges': np.array([1., 19/7, 31/7, 43/7, 55/7, 67/7, 79/7,
13.]),
'bin_counts': np.array([6, 4, 2, 0, 2, 0, 2])
}
merged_profiler = profiler + profiler
self.assertIsNone(merged_profiler.histogram_selection)
profile = merged_profiler.profile
self.assertIsNotNone(merged_profiler.histogram_selection)
histogram = profile['histogram']
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
def test_histogram_with_varying_number_of_bin(self):
"""
Checks the histogram with large number of bins
"""
# this data use number of bins less than the max limit
df1 = pd.Series([1, 2, 3, 4]).apply(str)
profiler1 = FloatColumn(df1.name)
profiler1.max_histogram_bin = 50
profiler1.update(df1)
num_bins = len(profiler1.profile['histogram']['bin_counts'])
self.assertEqual(4, num_bins)
# this data uses large number of bins, which will be set to
# the max limit
df2 = pd.Series([3.195103249264023e+18, 9999995.0, 9999999.0,
0.0, -10 ** 10]).apply(str)
profiler2 = FloatColumn(df2.name)
profiler2.max_histogram_bin = 50
profiler2.update(df2)
num_bins = len(profiler2.profile['histogram']['bin_counts'])
self.assertEqual(50, num_bins)
# max number of bin is increased to 10000
profiler2 = FloatColumn(df2.name)
profiler2.max_histogram_bin = 10000
profiler2.update(df2)
num_bins = len(profiler2.profile['histogram']['bin_counts'])
self.assertEqual(10000, num_bins)
def test_estimate_stats_from_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([1, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
expected_mean = (2.0 * 1 + 4.0 * 2 + 6.0 * 1) / 4
expected_var = (1 * (2.0 - expected_mean) ** 2
+ 2 * (4.0 - expected_mean) ** 2
+ 1 * (6.0 - expected_mean) ** 2) / 4
expected_std = np.sqrt(expected_var)
est_var = profiler._estimate_stats_from_histogram()
self.assertEqual(expected_var, est_var)
def test_total_histogram_bin_variance(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([3, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
input_array = np.array([1.1, 1.5, 2.3, 3.5, 4.0, 6.5])
expected_total_var = np.array([1.1, 1.5, 2.3]).var() \
+ np.array([3.5, 4.0]).var() \
+ np.array([6.5]).var()
est_total_var = profiler._total_histogram_bin_variance(input_array)
self.assertEqual(expected_total_var, est_total_var)
def test_histogram_loss(self):
# run time is small
diff_var, avg_diffvar, total_var, avg_totalvar, run_time, avg_runtime =\
0.3, 0.2, 0.1, 0.05, 0.0014, 0.0022
expected_loss = 0.1 / 0.2 + 0.05 / 0.05
est_loss = FloatColumn._histogram_loss(
diff_var, avg_diffvar, total_var, avg_totalvar, run_time,
avg_runtime)
self.assertEqual(expected_loss, est_loss)
# run time is big
diff_var, avg_diffvar, total_var, avg_totalvar, run_time, avg_runtime =\
0.3, 0.2, 0.1, 0.05, 22, 14
expected_loss = 0.1 / 0.2 + 0.05 / 0.05 + 8 / 14
est_loss = FloatColumn._histogram_loss(
diff_var, avg_diffvar, total_var, avg_totalvar, run_time,
avg_runtime)
self.assertEqual(expected_loss, est_loss)
def test_select_method_for_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
list_method = ['auto', 'fd', 'doane', 'scott', 'rice', 'sturges',
'sqrt']
current_exact_var = 0
# sqrt has the least current loss
current_est_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.005])
current_total_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
current_run_time = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
# all methods have the same total loss
list_total_loss = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
for i, method in enumerate(list_method):
profiler.histogram_methods[method]['total_loss'] = \
list_total_loss[i]
selected_method = profiler._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.assertEqual(selected_method, 'sqrt')
# another test
current_exact_var = 0
# sqrt has the least current loss
current_est_var = np.array([0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.029])
current_total_var = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
current_run_time = np.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
# but sturges has the least total loss
list_total_loss = np.array([0.1, 0.1, 0.1, 0.1, 0.1, 0.05, 0.1])
for i, method in enumerate(list_method):
profiler.histogram_methods[method]['total_loss'] = \
list_total_loss[i]
selected_method = profiler._select_method_for_histogram(
current_exact_var, current_est_var,
current_total_var, current_run_time)
self.assertEqual(selected_method, 'sturges')
def test_histogram_to_array(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = \
np.array([3, 2, 1])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0, 7.0])
array_from_histogram = profiler._histogram_to_array()
expected_array = [1.0, 1.0, 1.0, 3.0, 3.0, 7.0]
self.assertEqual(expected_array, array_from_histogram.tolist())
def test_merge_histogram(self):
data = pd.Series([], dtype=object)
profiler = FloatColumn(data.name)
profiler.update(data)
profiler._stored_histogram['histogram']['bin_counts'] = np.array([3, 2])
profiler._stored_histogram['histogram']['bin_edges'] = \
np.array([1.0, 3.0, 5.0])
input_array = [0.5, 1.0, 2.0, 5.0]
profiler._merge_histogram(input_array)
merged_hist = profiler._histogram_for_profile('sqrt')[0]
expected_bin_counts, expected_bin_edges = \
[5, 2, 2], [0.5, 2.0, 3.5, 5.0]
self.assertEqual(expected_bin_counts,
merged_hist['bin_counts'].tolist())
self.assertCountEqual(expected_bin_edges, merged_hist['bin_edges'])
def test_profiled_quantiles(self):
"""
Checks the quantiles of profiled numerical columns.
:return:
"""
# this data has 4 bins, range of 3
# with equal bin size, each bin has the width of 0.75
data = ["1.0", "2.0", "3.0", "4.0"]
df = pd.Series(data)
profiler = FloatColumn(df.name)
profiler.update(df)
profile = profiler.profile
est_quantiles = profile['quantiles']
est_Q1 = est_quantiles[249]
est_Q2 = est_quantiles[499]
est_Q3 = est_quantiles[749]
self.assertEqual(999, len(est_quantiles))
self.assertAlmostEqual(1.000012, est_quantiles[0])
self.assertEqual(est_Q1, 1.003)
self.assertEqual(est_Q2, 2.5)
self.assertEqual(est_Q3, 3.001)
self.assertAlmostEqual(3.999988, est_quantiles[-1])
def test_data_type_ratio(self):
data = np.linspace(-5, 5, 4)
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 1.0)
df = pd.Series(['not a float'])
profiler.update(df)
self.assertEqual(profiler.data_type_ratio, 0.8)
def test_profile(self):
data = [2.5, 12.5, 'not a float', 5, 'not a float']
df = pd.Series(data).apply(str)
profiler = FloatColumn(df.name)
expected_profile = dict(
min=2.5,
max=12.5,
mode=[2.5, 5, 12.5],
median=5,
sum=20.0,
mean=20/3.0,
variance=27 + 1/12.0,
skewness=35/13*np.sqrt(3/13),
kurtosis=np.nan,
num_negatives = 0,
num_zeros = 0,
stddev=np.sqrt(27+1/12.0),
histogram={
'bin_counts': np.array([1, 1, 0, 1]),
'bin_edges': np.array([2.5, 5.0, 7.5, 10.0, 12.5]),
},
quantiles={
0: 2.5075,
1: 5.005 ,
2: 12.4925,
},
times=defaultdict(float, {'histogram_and_quantiles': 1.0,
'precision': 1.0, 'max': 1.0, 'min': 1.0,
'skewness': 1.0,
'kurtosis': 1.0, 'sum': 1.0, 'variance': 1.0,
'num_zeros': 1.0, 'num_negatives': 1.0}),
precision={
'min': 1,
'max': 3,
'mean': 2.0,
'var': 1.0,
'std': 1.0,
'sample_size': 3,
'margin_of_error': 1.9,
'confidence_level': 0.999
}
)
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), profiler.profile['times'])
profiler.update(df)
profile = profiler.profile
# Validate mode
mode = profile.pop('mode')
expected_mode = expected_profile.pop('mode')
np.testing.assert_array_almost_equal(expected_mode, mode, decimal=2)
# pop out the histogram to test separately from the rest of the dict
# as we need comparison with some precision
histogram = profile.pop('histogram')
expected_histogram = expected_profile.pop('histogram')
quantiles = profile.pop('quantiles')
expected_quantiles = expected_profile.pop('quantiles')
median = profile.pop('median')
expected_median = expected_profile.pop('median')
skewness = profile.pop('skewness')
expected_skewness = expected_profile.pop('skewness')
variance = profile.pop('variance')
expected_variance = expected_profile.pop('variance')
self.assertDictEqual(expected_profile, profile)
self.assertDictEqual(expected_profile['precision'], profile['precision'])
self.assertEqual(expected_histogram['bin_counts'].tolist(),
histogram['bin_counts'].tolist())
self.assertCountEqual(np.round(expected_histogram['bin_edges'], 12),
np.round(histogram['bin_edges'], 12))
self.assertAlmostEqual(expected_quantiles[0], quantiles[249])
self.assertAlmostEqual(expected_quantiles[1], quantiles[499])
self.assertAlmostEqual(expected_quantiles[2], quantiles[749])
self.assertAlmostEqual(expected_skewness, skewness)
self.assertAlmostEqual(expected_variance, variance)
self.assertAlmostEqual(expected_median, median, places=2)
# Validate time in datetime class has expected time after second update
profiler.update(df)
expected = defaultdict(float, {'min': 2.0, 'max': 2.0,
'sum': 2.0, 'variance': 2.0,
'precision': 2.0,
'histogram_and_quantiles': 2.0,
'skewness': 2.0, 'kurtosis': 2.0,
'num_negatives': 2.0,
'num_zeros': 2.0,})
self.assertEqual(expected, profiler.profile['times'])
def test_option_precision(self):
data = [1.1, 2.2, 3.3, 4.4]
df = pd.Series(data).apply(str)
# Turn off precision
options = FloatOptions()
options.set({"precision.is_enabled": False})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(None, profiler.precision['sample_size'])
# Turn on precision, check sample_size
options = FloatOptions()
options.set({"precision.is_enabled": True})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(4, profiler.precision['sample_size'])
# Turn on precision, set 0.5 sample_size
options = FloatOptions()
options.set({"precision.sample_ratio": 0.5})
profiler = FloatColumn(df.name, options=options)
profiler.update(df)
self.assertEqual(2, profiler.precision['sample_size'])
def test_option_timing(self):
data = [2.0, 12.5, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
options = FloatOptions()
options.set({"min.is_enabled": False})
profiler = FloatColumn(df.name, options=options)
time_array = [float(i) for i in range(100, 0, -1)]
with mock.patch('time.time', side_effect=lambda: time_array.pop()):
# Validate that the times dictionary is empty
self.assertEqual(defaultdict(float), profiler.profile['times'])
profiler.update(df)
# Validate the time in the datetime class has the expected time.
profile = profiler.profile
expected = defaultdict(float, {'max': 1.0, 'sum': 1.0,
'variance': 1.0,
'precision': 1.0, 'skewness': 1.0,
'kurtosis': 1.0, 'num_negatives': 1.0,
'num_zeros': 1.0,
'histogram_and_quantiles': 15.0})
self.assertCountEqual(expected, profile['times'])
# Validate time in datetime class has expected time after second update
profiler.update(df)
expected = defaultdict(float, {'max': 2.0, 'sum': 2.0,
'variance': 2.0,
'precision': 2.0, 'skewness': 2.0,
'kurtosis': 2.0, 'num_negatives': 2.0,
'num_zeros': 2.0,
'histogram_and_quantiles': 30.0})
self.assertCountEqual(expected, profiler.profile['times'])
def test_profile_merge(self):
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
expected_profile = dict(
min=2.0,
max=15.0,
mode=[2, 6, 10, 15],
sum=33.0,
mean=8.25,
variance=30.916666666666668,
stddev=np.sqrt(30.916),
skewness=918 * np.sqrt(3 / 371) / 371,
kurtosis=-16068/19663,
histogram={
'bin_counts': np.array([1, 1, 1, 1]),
'bin_edges': np.array([2., 5.25, 8.5, 11.75, 15.])
},
)
profiler3 = profiler1 + profiler2
expected_histogram = expected_profile.pop('histogram')
profile3 = profiler3.profile
histogram = profile3.pop('histogram')
expected_mode = expected_profile.pop('mode')
mode = profile3.pop('mode')
np.testing.assert_array_almost_equal(expected_mode, mode, decimal=2)
self.assertTrue(profiler3.bias_correction)
self.assertAlmostEqual(profiler3.stddev,
expected_profile.pop('stddev'), places=3)
self.assertAlmostEqual(profiler3.variance,
expected_profile.pop('variance'), places=3)
self.assertAlmostEqual(profiler3.skewness,
expected_profile.pop('skewness'),places=3)
self.assertAlmostEqual(profiler3.kurtosis,
expected_profile.pop('kurtosis'), places=3)
self.assertEqual(profiler3.mean, expected_profile.pop('mean'))
self.assertEqual(profiler3.histogram_selection, 'doane')
self.assertEqual(profiler3.min, expected_profile.pop('min'))
self.assertEqual(profiler3.max, expected_profile.pop('max'))
self.assertEqual(histogram['bin_counts'].tolist(),
expected_histogram['bin_counts'].tolist())
self.assertCountEqual(histogram['bin_edges'],
expected_histogram['bin_edges'])
def test_profile_merge_for_zeros_and_negatives(self):
data = [2.0, 8.5, 'not an int', 6.0, -3, 0]
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
data2 = [0.0, 3.5, 'not an int', 125.0, 0, -0.1, -88]
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
expected_profile = dict(
num_zeros=3,
num_negatives=3
)
profiler3 = profiler1 + profiler2
self.assertEqual(profiler3.num_zeros,
expected_profile.pop('num_zeros'))
self.assertEqual(profiler3.num_negatives,
expected_profile.pop('num_negatives'))
def test_profile_merge_edge_case(self):
data = [2.0, 'not a float', 6.0, 'not a float']
df = pd.Series(data).apply(str)
profiler1 = FloatColumn("Float")
profiler1.update(df)
profiler1.match_count = 0
data2 = [10.0, 'not a float', 15.0, 'not a float']
df2 = pd.Series(data2).apply(str)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
profiler3 = profiler1 + profiler2
self.assertEqual(profiler3.stddev, profiler2.stddev)
# test merge with empty data
df1 = pd.Series([], dtype=object)
profiler1 = FloatColumn("Float")
profiler1.update(df1)
df2 = pd.Series([], dtype=object)
profiler2 = FloatColumn("Float")
profiler2.update(df2)
profiler = profiler1 + profiler2
self.assertTrue(np.isnan(profiler.skewness))
self.assertTrue(np.isnan(profiler.kurtosis))
self.assertEqual(profiler.min, None)
self.assertEqual(profiler.max, None)
df3 = pd.Series([2.0, 3.0]).apply(str)
profiler3 = FloatColumn("Float")
profiler3.update(df3)
profiler = profiler1 + profiler3
self.assertTrue(np.isnan(profiler.skewness))
self.assertTrue(np.isnan(profiler.kurtosis))
self.assertEqual(profiler.min, 2.0)
self.assertEqual(profiler.max, 3.0)
df4 = pd.Series([4.0, 5.0]).apply(str)
profiler4 = FloatColumn("Float")
profiler4.update(df4)
profiler = profiler3 + profiler4
self.assertEqual(profiler.skewness, 0)
self.assertAlmostEqual(profiler.kurtosis, -1.2)
self.assertEqual(profiler.min, 2.0)
self.assertEqual(profiler.max, 5.0)
self.assertEqual(profiler.num_zeros, 0)
self.assertEqual(profiler.num_negatives, 0)
df5 = pd.Series([0.0, 0.0, -1.1, -1.0]).apply(str)
profiler5 = FloatColumn("Float")
profiler5.update(df5)
profiler = profiler4 + profiler5
self.assertEqual(profiler.min, -1.1)
self.assertEqual(profiler.max, 5)
self.assertEqual(profiler.num_zeros, 2)
self.assertEqual(profiler.num_negatives, 2)
def test_custom_bin_count_merge(self):
options = FloatOptions()
options.histogram_and_quantiles.bin_count_or_method = 10
data = [2.0, 'not a float', 6.0, 'not a float']
df = | pd.Series(data) | pandas.Series |
"""
Functions for preparing various inputs passed to the DataFrame or Series
constructors before passing them to a BlockManager.
"""
from collections import abc
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Tuple, Union
import numpy as np
import numpy.ma as ma
from pandas._libs import lib
from pandas._typing import Axis, DtypeObj, Label, Scalar
from pandas.core.dtypes.cast import (
construct_1d_arraylike_from_scalar,
construct_1d_ndarray_preserving_na,
dict_compat,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_infer_to_datetimelike,
maybe_upcast,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_datetime64tz_dtype,
is_dtype_equal,
is_extension_array_dtype,
is_integer_dtype,
is_list_like,
is_named_tuple,
is_object_dtype,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCDatetimeIndex,
ABCIndex,
ABCSeries,
ABCTimedeltaIndex,
)
from pandas.core import algorithms, common as com
from pandas.core.arrays import Categorical
from pandas.core.construction import extract_array, sanitize_array
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import (
Index,
ensure_index,
get_objs_combined_axis,
union_indexes,
)
from pandas.core.internals.managers import (
create_block_manager_from_arrays,
create_block_manager_from_blocks,
)
if TYPE_CHECKING:
from numpy.ma.mrecords import MaskedRecords
from pandas import Series
# ---------------------------------------------------------------------
# BlockManager Interface
def arrays_to_mgr(
arrays,
arr_names,
index,
columns,
dtype: Optional[DtypeObj] = None,
verify_integrity: bool = True,
):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arr_names = ensure_index(arr_names)
if verify_integrity:
# figure out the index, if necessary
if index is None:
index = extract_index(arrays)
else:
index = ensure_index(index)
# don't force copy because getting jammed in an ndarray anyway
arrays = _homogenize(arrays, index, dtype)
columns = ensure_index(columns)
else:
columns = ensure_index(columns)
index = ensure_index(index)
# from BlockManager perspective
axes = [columns, index]
return create_block_manager_from_arrays(arrays, arr_names, axes)
def masked_rec_array_to_mgr(
data: "MaskedRecords", index, columns, dtype: Optional[DtypeObj], copy: bool
):
"""
Extract from a masked rec array and create the manager.
"""
# essentially process a record array then fill it
fdata = ma.getdata(data)
if index is None:
index = _get_names_from_index(fdata)
if index is None:
index = ibase.default_index(len(data))
index = ensure_index(index)
if columns is not None:
columns = ensure_index(columns)
arrays, arr_columns = to_arrays(fdata, columns)
# fill if needed
new_arrays = []
for col in arr_columns:
arr = data[col]
fv = arr.fill_value
mask = ma.getmaskarray(arr)
if mask.any():
arr, fv = maybe_upcast(arr, fill_value=fv, copy=True)
arr[mask] = fv
new_arrays.append(arr)
# create the manager
arrays, arr_columns = reorder_arrays(new_arrays, arr_columns, columns)
if columns is None:
columns = arr_columns
mgr = arrays_to_mgr(arrays, arr_columns, index, columns, dtype)
if copy:
mgr = mgr.copy()
return mgr
# ---------------------------------------------------------------------
# DataFrame Constructor Interface
def init_ndarray(values, index, columns, dtype: Optional[DtypeObj], copy: bool):
# input must be a ndarray, list, Series, index
if isinstance(values, ABCSeries):
if columns is None:
if values.name is not None:
columns = [values.name]
if index is None:
index = values.index
else:
values = values.reindex(index)
# zero len case (GH #2234)
if not len(values) and columns is not None and len(columns):
values = np.empty((0, 1), dtype=object)
# we could have a categorical type passed or coerced to 'category'
# recast this to an arrays_to_mgr
if is_categorical_dtype(getattr(values, "dtype", None)) or is_categorical_dtype(
dtype
):
if not hasattr(values, "dtype"):
values = _prep_ndarray(values, copy=copy)
values = values.ravel()
elif copy:
values = values.copy()
index, columns = _get_axes(len(values), 1, index, columns)
return arrays_to_mgr([values], columns, index, columns, dtype=dtype)
elif is_extension_array_dtype(values) or is_extension_array_dtype(dtype):
# GH#19157
if isinstance(values, np.ndarray) and values.ndim > 1:
# GH#12513 a EA dtype passed with a 2D array, split into
# multiple EAs that view the values
values = [values[:, n] for n in range(values.shape[1])]
else:
values = [values]
if columns is None:
columns = Index(range(len(values)))
return arrays_to_mgr(values, columns, index, columns, dtype=dtype)
# by definition an array here
# the dtypes will be coerced to a single dtype
values = _prep_ndarray(values, copy=copy)
if dtype is not None and not is_dtype_equal(values.dtype, dtype):
try:
values = construct_1d_ndarray_preserving_na(
values.ravel(), dtype=dtype, copy=False
).reshape(values.shape)
except Exception as orig:
# e.g. ValueError when trying to cast object dtype to float64
raise ValueError(
f"failed to cast to '{dtype}' (Exception was: {orig})"
) from orig
# _prep_ndarray ensures that values.ndim == 2 at this point
index, columns = _get_axes(
values.shape[0], values.shape[1], index=index, columns=columns
)
values = values.T
# if we don't have a dtype specified, then try to convert objects
# on the entire block; this is to convert if we have datetimelike's
# embedded in an object type
if dtype is None and is_object_dtype(values.dtype):
if values.ndim == 2 and values.shape[0] != 1:
# transpose and separate blocks
dvals_list = [maybe_infer_to_datetimelike(row) for row in values]
for n in range(len(dvals_list)):
if isinstance(dvals_list[n], np.ndarray):
dvals_list[n] = dvals_list[n].reshape(1, -1)
from pandas.core.internals.blocks import make_block
# TODO: What about re-joining object columns?
block_values = [
make_block(dvals_list[n], placement=[n], ndim=2)
for n in range(len(dvals_list))
]
else:
datelike_vals = maybe_infer_to_datetimelike(values)
block_values = [datelike_vals]
else:
block_values = [values]
return create_block_manager_from_blocks(block_values, [columns, index])
def init_dict(data: Dict, index, columns, dtype: Optional[DtypeObj] = None):
"""
Segregate Series based on type and coerce into matrices.
Needs to handle a lot of exceptional cases.
"""
arrays: Union[Sequence[Any], "Series"]
if columns is not None:
from pandas.core.series import Series
arrays = Series(data, index=columns, dtype=object)
data_names = arrays.index
missing = arrays.isna()
if index is None:
# GH10856
# raise ValueError if only scalars in dict
index = extract_index(arrays[~missing])
else:
index = ensure_index(index)
# no obvious "empty" int column
if missing.any() and not is_integer_dtype(dtype):
if dtype is None or (
not is_extension_array_dtype(dtype)
and np.issubdtype(dtype, np.flexible)
):
# GH#1783
nan_dtype = np.dtype(object)
else:
nan_dtype = dtype
val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
arrays.loc[missing] = [val] * missing.sum()
else:
keys = list(data.keys())
columns = data_names = Index(keys)
arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
# GH#24096 need copy to be deep for datetime64tz case
# TODO: See if we can avoid these copies
arrays = [arr if not isinstance(arr, ABCIndex) else arr._data for arr in arrays]
arrays = [
arr if not is_datetime64tz_dtype(arr) else arr.copy() for arr in arrays
]
return arrays_to_mgr(arrays, data_names, index, columns, dtype=dtype)
def nested_data_to_arrays(
data: Sequence,
columns: Optional[Index],
index: Optional[Index],
dtype: Optional[DtypeObj],
):
"""
Convert a single sequence of arrays to multiple arrays.
"""
# By the time we get here we have already checked treat_as_nested(data)
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
if index is None:
if isinstance(data[0], ABCSeries):
index = _get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
return arrays, columns, index
def treat_as_nested(data) -> bool:
"""
Check if we should use nested_data_to_arrays.
"""
return len(data) > 0 and is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1
# ---------------------------------------------------------------------
def _prep_ndarray(values, copy: bool = True) -> np.ndarray:
if not isinstance(values, (np.ndarray, ABCSeries, Index)):
if len(values) == 0:
return np.empty((0, 0), dtype=object)
elif isinstance(values, range):
arr = np.arange(values.start, values.stop, values.step, dtype="int64")
return arr[..., np.newaxis]
def convert(v):
return maybe_convert_platform(v)
# we could have a 1-dim or 2-dim list here
# this is equiv of np.asarray, but does object conversion
# and platform dtype preservation
try:
if is_list_like(values[0]) or hasattr(values[0], "len"):
values = np.array([convert(v) for v in values])
elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
# GH#21861
values = np.array([convert(v) for v in values])
else:
values = convert(values)
except (ValueError, TypeError):
values = convert(values)
else:
# drop subclass info, do not copy data
values = np.asarray(values)
if copy:
values = values.copy()
if values.ndim == 1:
values = values.reshape((values.shape[0], 1))
elif values.ndim != 2:
raise ValueError(f"Must pass 2-d input. shape={values.shape}")
return values
def _homogenize(data, index, dtype: Optional[DtypeObj]):
oindex = None
homogenized = []
for val in data:
if isinstance(val, ABCSeries):
if dtype is not None:
val = val.astype(dtype)
if val.index is not index:
# Forces alignment. No need to copy data since we
# are putting it into an ndarray later
val = val.reindex(index, copy=False)
else:
if isinstance(val, dict):
if oindex is None:
oindex = index.astype("O")
if isinstance(index, (ABCDatetimeIndex, ABCTimedeltaIndex)):
val = dict_compat(val)
else:
val = dict(val)
val = lib.fast_multiget(val, oindex._values, default=np.nan)
val = sanitize_array(
val, index, dtype=dtype, copy=False, raise_cast_failure=False
)
homogenized.append(val)
return homogenized
def extract_index(data) -> Index:
"""
Try to infer an Index from the passed data, raise ValueError on failure.
"""
index = None
if len(data) == 0:
index = Index([])
elif len(data) > 0:
raw_lengths = []
indexes: List[Union[List[Label], Index]] = []
have_raw_arrays = False
have_series = False
have_dicts = False
for val in data:
if isinstance(val, ABCSeries):
have_series = True
indexes.append(val.index)
elif isinstance(val, dict):
have_dicts = True
indexes.append(list(val.keys()))
elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
have_raw_arrays = True
raw_lengths.append(len(val))
if not indexes and not raw_lengths:
raise ValueError("If using all scalar values, you must pass an index")
if have_series:
index = union_indexes(indexes)
elif have_dicts:
index = union_indexes(indexes, sort=False)
if have_raw_arrays:
lengths = list(set(raw_lengths))
if len(lengths) > 1:
raise ValueError("All arrays must be of the same length")
if have_dicts:
raise ValueError(
"Mixing dicts with non-Series may lead to ambiguous ordering."
)
if have_series:
assert index is not None # for mypy
if lengths[0] != len(index):
msg = (
f"array length {lengths[0]} does not match index "
f"length {len(index)}"
)
raise ValueError(msg)
else:
index = ibase.default_index(lengths[0])
return ensure_index(index)
def reorder_arrays(arrays, arr_columns, columns):
# reorder according to the columns
if (
columns is not None
and len(columns)
and arr_columns is not None
and len(arr_columns)
):
indexer = ensure_index(arr_columns).get_indexer(columns)
arr_columns = ensure_index([arr_columns[i] for i in indexer])
arrays = [arrays[i] for i in indexer]
return arrays, arr_columns
def _get_names_from_index(data):
has_some_name = any(getattr(s, "name", None) is not None for s in data)
if not has_some_name:
return ibase.default_index(len(data))
index: List[Label] = list(range(len(data)))
count = 0
for i, s in enumerate(data):
n = getattr(s, "name", None)
if n is not None:
index[i] = n
else:
index[i] = f"Unnamed {count}"
count += 1
return index
def _get_axes(N, K, index, columns) -> Tuple[Index, Index]:
# helper to create the axes as indexes
# return axes or defaults
if index is None:
index = ibase.default_index(N)
else:
index = ensure_index(index)
if columns is None:
columns = ibase.default_index(K)
else:
columns = ensure_index(columns)
return index, columns
def dataclasses_to_dicts(data):
"""
Converts a list of dataclass instances to a list of dictionaries.
Parameters
----------
data : List[Type[dataclass]]
Returns
--------
list_dict : List[dict]
Examples
--------
>>> @dataclass
>>> class Point:
... x: int
... y: int
>>> dataclasses_to_dicts([Point(1,2), Point(2,3)])
[{"x":1,"y":2},{"x":2,"y":3}]
"""
from dataclasses import asdict
return list(map(asdict, data))
# ---------------------------------------------------------------------
# Conversion of Inputs to Arrays
def to_arrays(data, columns, dtype: Optional[DtypeObj] = None):
"""
Return list of arrays, columns.
"""
if isinstance(data, ABCDataFrame):
if columns is not None:
arrays = [
data._ixs(i, axis=1).values
for i, col in enumerate(data.columns)
if col in columns
]
else:
columns = data.columns
arrays = [data._ixs(i, axis=1).values for i in range(len(columns))]
return arrays, columns
if not len(data):
if isinstance(data, np.ndarray):
columns = data.dtype.names
if columns is not None:
return [[]] * len(columns), columns
return [], [] # columns if columns is not None else []
elif isinstance(data[0], Categorical):
if columns is None:
columns = ibase.default_index(len(data))
return data, columns
elif isinstance(data, np.ndarray) and data.dtype.names is not None:
# e.g. recarray
columns = list(data.dtype.names)
arrays = [data[k] for k in columns]
return arrays, columns
if isinstance(data[0], (list, tuple)):
content, columns = _list_to_arrays(data, columns)
elif isinstance(data[0], abc.Mapping):
content, columns = _list_of_dict_to_arrays(data, columns)
elif isinstance(data[0], ABCSeries):
content, columns = _list_of_series_to_arrays(data, columns)
else:
# last ditch effort
data = [tuple(x) for x in data]
content, columns = _list_to_arrays(data, columns)
content, columns = _finalize_columns_and_data(content, columns, dtype)
return content, columns
def _list_to_arrays(
data: List[Scalar],
columns: Union[Index, List],
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
# Note: we already check len(data) > 0 before getting hre
if isinstance(data[0], tuple):
content = lib.to_object_array_tuples(data)
else:
# list of lists
content = lib.to_object_array(data)
return content, columns
def _list_of_series_to_arrays(
data: List,
columns: Union[Index, List],
) -> Tuple[List[Scalar], Union[Index, List[Axis]]]:
if columns is None:
# We know pass_data is non-empty because data[0] is a Series
pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
columns = get_objs_combined_axis(pass_data, sort=False)
indexer_cache: Dict[int, Scalar] = {}
aligned_values = []
for s in data:
index = getattr(s, "index", None)
if index is None:
index = ibase.default_index(len(s))
if id(index) in indexer_cache:
indexer = indexer_cache[id(index)]
else:
indexer = indexer_cache[id(index)] = index.get_indexer(columns)
values = | extract_array(s, extract_numpy=True) | pandas.core.construction.extract_array |
"""
This module contains function to plot smooth ROC curves using KFold
Examples:
result, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6)
plot_roc_curve_cv(result)
plt.show()
plot_specificity_cv(result)
plt.show()
plot_specificity_cv(result, invert_x=True, invert_y=True)
plt.show()
print(f"AUC: {np.mean(aucs)} (std:{np.std(aucs)})")
Comparing models:
result_xgb, aucs = roc_curve_cv(xgb.XGBClassifier(), X, y, n_splits=6, n_repeats=4)
result_rf, aucs = roc_curve_cv(RandomForestClassifier(), X, y, n_splits=6, n_repeats=4)
plot_specificity_cv({'XGB': result_xgb, 'RF':result_rf})
plt.show()
Comparing hyperparameters
results = []
for max_depth in (3,10):
for max_features in (0.5, 0.9):
result, _ = roc_curve_cv(
RandomForestClassifier(max_depth=max_depth, max_features=max_features),
x_full, y_full, n_repeats=4,
properties={'max features':max_features, 'max depth':max_depth})
results.append(result)
plot_specificity_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
plot_roc_curve_cv(results, hue='max features', style='max depth', ci=False)
plt.show()
"""
from sklearn.model_selection import StratifiedKFold, RepeatedStratifiedKFold
from numpy import interp
import numpy as np
from sklearn.metrics import roc_curve, auc
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import label_binarize
def roc_curve_simple(model, X, y):
y_pred = model.predict_proba(X)[:,1]
fpr, tpr, thres = roc_curve(y, y_pred)
result_df = pd.DataFrame({'fpr':fpr, 'tpr':tpr, 'threshold':thres}, index=range(len(fpr)))
return result_df, auc(fpr,tpr)
def roc_curve_cv(model, X, y, n_splits=5, n_repeats=1, properties=None):
if n_repeats > 1:
cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
else:
cv = StratifiedKFold(n_splits=n_splits)
auc_list = []
result_df = pd.DataFrame()
for i, (train, test) in enumerate(cv.split(X, y)):
x_train, x_test = X.iloc[train], X.iloc[test]
y_train, y_test = y.iloc[train], y.iloc[test]
model.fit(x_train, y_train)
y_test_pred = model.predict_proba(x_test)[:,1]
fpr, tpr, thres = roc_curve(y_test, y_test_pred)
# x_label = "False Positive Rate"
# y_label = "True Positive Rate"
df = pd.DataFrame({'run':i, 'fpr':fpr, 'tpr':tpr, 'threshold':thres}, index=range(len(fpr)))
result_df = pd.concat([result_df, df])
auc_list.append(auc(fpr,tpr))
if properties is not None:
for key, value, in properties.items():
result_df[key] = value
return result_df, auc_list
def plot_roc_curve_cv(result, n_step=100, title=None, **kwargs):
"""
plot the ROC curve with a confidence interval
"""
fpr_linspace = np.linspace(0,1,n_step)
tpr_df = pd.DataFrame()
x_label = "False Positive Rate"
y_label = "True Positive Rate"
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
result = result.rename(columns={'tpr':y_label, 'fpr':x_label})
group_cols = list(set(result.columns)-{x_label, y_label,'threshold'})
for name, group in result.groupby(group_cols):
df = pd.DataFrame(columns=[y_label, x_label]+group_cols)
df[y_label] = interp(fpr_linspace, group[x_label], group[y_label])
df[x_label] = fpr_linspace
df[group_cols] = name
tpr_df = pd.concat([tpr_df,df])
fig = plt.axes()
sns.lineplot(x=x_label, y =y_label, data=tpr_df, **kwargs)
if title is None:
title = "Roc curve cv"
fig.set_title(title)
return fig
def plot_specificity_cv(result, n_step=100, invert_x=False, invert_y=False, title=None, **kwargs):
"""
plot the curve of the specificity as a function of the sensibility
"""
tpr_linspace = np.linspace(0,1,n_step)
fpr_df = pd.DataFrame()
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
group_cols = list(set(result.columns)-{'fpr','tpr','threshold'})
for name, group in result.groupby(group_cols):
df = pd.DataFrame(columns=['tpr', 'fpr']+group_cols)
df['fpr'] = interp(tpr_linspace, group['tpr'], group['fpr'])[:-1]
df['tpr'] = tpr_linspace[:-1]
df[group_cols]=name
fpr_df = pd.concat([fpr_df,df])
if invert_x:
x_label = 'False Negative Rate'
fpr_df[x_label] = 1-fpr_df['tpr']
else:
x_label = 'Sensitivity'
fpr_df[x_label] = fpr_df['tpr']
if invert_y:
y_label = 'False Positive Rate'
fpr_df[y_label] = fpr_df['fpr']
else:
y_label = 'Specificity'
fpr_df[y_label] = 1-fpr_df['fpr']
fig = plt.axes()
sns.lineplot(x=x_label, y =y_label, data=fpr_df)
if title is None:
title = "Specificity vs Sensitivity"
fig.set_title(title)
return fig
def plot_roc_threshold_cv(result, n_step=101, title=None, tpr=True, fpr=True, tnr=False, fnr=False, **kwargs):
"""
plot the ROC curve with a confidence interval
"""
fpr_linspace = np.linspace(0,1,n_step)
tpr_df = pd.DataFrame()
if isinstance(result, dict):
for key, value in result.items():
value['model'] = key
result = pd.concat(result.values())
kwargs['hue'] = 'model'
elif isinstance(result, list):
result = pd.concat(result)
threshold_dfs = []
group_cols = list(set(result.columns)-{'fpr','tpr','threshold'})
for name, group in result.groupby(group_cols):
group = group.sort_values(by='threshold')
if fpr:
df = | pd.DataFrame(columns=['rate', 'metric','threshold']+group_cols) | pandas.DataFrame |
import random
import csv
import signal
from typing import List, Tuple
import numpy as np
import pandas as pd
from db_env.DatabaseEnvironment import DatabaseEnvironment
from shared_utils.consts import PROJECT_DIR
from shared_utils.utils import create_logger
AGENT_CSV_FILE = f'{PROJECT_DIR}/data/agent_history.csv'
WEIGHTS_FILE = f'{PROJECT_DIR}/data/weights.csv'
class Agent:
def __init__(self, env: DatabaseEnvironment):
random.seed(2)
np.random.seed(2)
self._log = create_logger('agent')
self._env = env
self.exploration_probability = 0.9
self.exploration_probability_discount = 0.9
self.learning_rate = 0.001
self.discount_factor = 0.8
self._experience_memory_max_size = np.inf
self._experience_replay_count = 30
self._weights = np.random.rand(1 + self._env.action_space.n + self._env.observation_space.n)
"""Estimated weights of features with bias for every action."""
self._experience_memory: List[Tuple[List[int], int, float, List[int]]] = []
self.dict_info = {
'episode': int,
'step': int,
'state': List[bool],
'action': int,
'reward': float,
'next_state': List[bool],
'q': float,
'max_a': int,
'max_q': float,
'td_target': float,
'td_error': float,
'total_reward': float,
'exploration_probability': float,
'random_action': bool,
'initial_state_reward': float
}
self._pause_request = False
def signal_handler(sig, frame):
self._log.info('CTRL+C pressed - pausing training requested')
self._pause_request = True
signal.signal(signal.SIGINT, signal_handler)
def train(self, episode_count: int, steps_per_episode: int):
with open(AGENT_CSV_FILE, 'w', newline='') as file:
wr = csv.writer(file)
wr.writerow(self.dict_info.keys())
for episode in range(episode_count):
state = self._env.reset()
total_reward = 0.0
for step in range(steps_per_episode):
self._log.info(f'EPISODE {episode} - STEP {step} '
f'({(episode_count - episode) * steps_per_episode - step - 1} more steps to go)')
action = self._choose_action(state)
next_state, reward, _, info = self._env.step(action)
total_reward += reward
previous_weights = self._weights.copy()
self._update_weights(state, action, reward, next_state, previous_weights)
self._save_agent_information(episode, step, state, next_state, action, reward, total_reward, info)
self._experience_replay(previous_weights)
self._experience_append(state, action, reward, next_state)
self._save_agent_weights()
state = next_state
if self._pause_request:
return
self._reduce_exploration_probability()
def _get_features(self, state, action):
# bias + each action has own feature + state has n columns - n features
# return np.array([0] + [action == a for a in range(self._env.action_space.n)] + state)
l = np.array([0] + [action == a for a in range(self._env.action_space.n)] + state)
return l
def _experience_append(self, state, action, reward, next_state):
self._experience_memory.append((state, action, reward, next_state))
if len(self._experience_memory) > self._experience_memory_max_size:
self._experience_memory.pop()
def _experience_replay(self, previous_weights):
samples_count = min(len(self._experience_memory), self._experience_replay_count)
samples = random.sample(self._experience_memory, k=samples_count)
for state, action, reward, next_state in samples:
self._update_weights(state, action, reward, next_state, previous_weights)
def _update_weights(self, state, action, reward, next_state, weights):
features = self._get_features(state, action)
max_action, max_q = self._get_max_action(next_state)
approx_q = weights @ features
td_target = reward + self.discount_factor * max_q
td_error = td_target - approx_q
# w = w + a(r + y(max q) - w^T * F(s)) * F(s)
self._weights += self.learning_rate * td_error * features
self.dict_info['q'] = approx_q
self.dict_info['max_a'] = max_action
self.dict_info['max_q'] = max_q
self.dict_info['td_target'] = td_target
self.dict_info['td_error'] = td_error
def _get_max_action(self, state):
max_q = float('-inf')
max_action = None
for action in self._possible_actions(state):
q = self._calculate_q_value(state, action)
if max_q < q:
max_q = q
max_action = action
return max_action, max_q
def _calculate_q_value(self, state, action):
""":return dot product of weights and features"""
return self._weights @ self._get_features(state, action)
def _possible_actions(self, state) -> List[int]:
return [i * 2 + (not is_indexed) for i, is_indexed in enumerate(state)]
def _choose_action(self, state):
"""
:param state: current environment state
:return: random action with probability epsilon otherwise best action with probability 1-epsilon
"""
self.dict_info['random_action'] = False
self.dict_info['exploration_probability'] = self.exploration_probability
if random.random() < self.exploration_probability:
self.dict_info['random_action'] = True
return random.choice(self._possible_actions(state))
max_action, _ = self._get_max_action(state)
return max_action
def _reduce_exploration_probability(self):
self.exploration_probability = self.exploration_probability_discount * self.exploration_probability
def _save_agent_information(self, episode, step, state, next_state, action, reward, total_reward, info):
self.dict_info['episode'] = episode
self.dict_info['step'] = step
self.dict_info['state'] = state
self.dict_info['next_state'] = next_state
self.dict_info['action'] = action
self.dict_info['reward'] = reward
self.dict_info['total_reward'] = total_reward
self.dict_info['initial_state_reward'] = info['initial_state_reward']
with open(AGENT_CSV_FILE, 'a', newline='') as file:
wr = csv.writer(file)
wr.writerow(self.dict_info.values())
def _save_agent_weights(self):
| pd.DataFrame(self._weights) | pandas.DataFrame |
import pandas as pd
import numpy as np
import datetime
def clean_dates(df_in):
df = df_in.copy()
df_types = df.dtypes
date_cols = list(df_types.loc[df_types=="datetime64[ns]"].index) + ["covid19_admission_hospital_release","covid19_admission_hospital_date","covid19_date_lab_test",
"covid19_date_suspected_onset","dmt_start_date","edss_date_diagnosis","ms_diagnosis_date","ms_onset_date","covid19_outcome_death_date",
'covid19_self_isolation_date','dmt_end_date','dmt_glucocorticoid_start_date',
'dmt_glucocorticoid_stop_date','dmt_stop_date']
for col in date_cols:
df[col] = | pd.to_datetime(df[col],errors="coerce") | pandas.to_datetime |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 17 18:04:57 2018
Generate manuscript figures.
@author: ben
"""
import os
import time
import warnings
import json
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import tqdm
# NOTE: Add the module path to sys.path if calling from the scripts subdirectory
import pathlib
import sys
# sys.path.insert(1, "/workspaces/DPE")
sys.path.insert(0, str(pathlib.Path(__file__).parent.parent.absolute()))
# print(sys.path)
import dpe
from dpe.utilities import construct_mixture, format_seconds, load_accuracy # get_fpr_tpr,
from dpe.datasets import (load_diabetes_data, load_renal_data,
load_coeliac_data, load_glaucoma_data)
from dpe.plots import (plot_roc, plot_distributions, plot_bootstraps,
plot_characterisation, plot_selected_violins,
get_error_bars)
# from dpe.config import adjust_excess, ci_method, correct_bias
# ---------------------------- Define constants ------------------------------
# TODO: Apply bias correction to other methods e.g. 'bca'?
# TODO: Report or use correct_bias?
FRESH_DATA = False # CAUTION!
seed = 0
sample_seed = 42 # Used for sampling for Renal non-cases
n_boot = 1000 # 10 # 1000
n_mix = 100 # 10 # 100
sample_size = 1000 # -1
n_seeds = 1 # Deprecated
n_construction_seeds = 100
verbose = False
# Set method details
alpha = 0.05 # Alpha for confidence intervals
ci_method = "bca" # "experimental" # "stderr" # "centile" "jeffreys"
correct_bias = False # Flag to use bias correction: corrected = 2 * pe_point - mean(pe_boot)
adjust_excess = False # TODO: Reimplement this or remove?
# KDE_kernel = 'gaussian' # ['gaussian', 'tophat', 'epanechnikov', 'exponential', 'linear', 'cosine']
methods = {method: True for method in dpe._ALL_METHODS_}
# Configure plots
output_diabetes_rocs = False
output_application = {'Diabetes': False, 'Renal': False, 'Coeliac': True, 'Glaucoma': True}
output_application_vary_cases = {'Diabetes': False, 'Renal': False, 'Coeliac': False, 'Glaucoma': True}
application_xlims = {'Diabetes': None, 'Renal': (0, 0.3), 'Coeliac': (0, 0.3), 'Glaucoma': (0, 0.3)}
output_analysis = {'Diabetes': True, 'Renal': False, 'Coeliac': False, 'Glaucoma': False}
output_characterisation = {'Diabetes': False, 'Renal': False, 'Coeliac': False, 'Glaucoma': False}
average = np.mean # NOTE: Used in get_error_bars, calculate_bias (and plot_characterisation). Does not apply when using BCa (which implicitly uses the median).
# deviation = np.std
# Set plotting style
mpl.rc('figure', figsize=(10, 8))
mpl.rc('font', size=14)
mpl.rc('axes', titlesize=14) # fontsize of the axes title
mpl.rc('axes', labelsize=14) # fontsize of the x and y labels
mpl.rc('xtick', labelsize=12) # fontsize of the tick labels
mpl.rc('ytick', labelsize=12) # fontsize of the tick labels
mpl.rc('legend', fontsize=11) # legend fontsize
mpl.rc('figure', titlesize=14) # fontsize of the figure title
mpl.rc('lines', linewidth=2)
mpl.rc('figure', dpi=100)
mpl.rc('savefig', dpi=600)
mpl.rc('mpl_toolkits', legacy_colorbar=False) # Supress MatplotlibDeprecationWarning
# mpl.style.use('seaborn')
# plt.style.use('seaborn-white')
sns.set_style("ticks")
np.seterr(divide='ignore', invalid='ignore')
# ----------------------------------------------------------------------------
if __name__ == "__main__":
if seed is None:
seed = np.random.randint(np.iinfo(np.int32).max)
print(f"Created new RNG seed: {seed}")
assert 0 <= seed < np.iinfo(np.int32).max
# Create output directories
characterisation_dir = os.path.join("results", "characterisation")
out_dir = os.path.join("results", f"n{sample_size}_m{n_mix}_b{n_boot}_s{seed}")
fig_dir = os.path.join(out_dir, "figs")
os.makedirs(out_dir, exist_ok=True)
os.makedirs(fig_dir, exist_ok=True)
if output_diabetes_rocs:
# Plot ROC curves
fig, axes = plt.subplots(2, 3, sharex=False, sharey=False, figsize=(18, 12))
(scores, bins, means, medians, p_C) = load_diabetes_data('T1GRS')
plot_roc(scores, bins, title='Diabetes: T1GRS', ax=axes[0, 0])
plot_distributions(scores, bins, 'Diabetes: T1GRS', norm=True, despine=False, ax=axes[1, 0])
(scores, bins, means, medians, p_C) = load_diabetes_data('T2GRS')
plot_roc(scores, bins, title='Diabetes: T2GRS', ax=axes[0, 1])
plot_distributions(scores, bins, 'Diabetes: T2GRS', norm=True, despine=False, ax=axes[1, 1])
(scores, bins, means, medians, p_C) = load_renal_data()
plot_roc(scores, bins, title='Renal', ax=axes[0, 2])
plot_distributions(scores, bins, 'Renal', norm=True, despine=False, ax=axes[1, 2])
fig.savefig(os.path.join(fig_dir, 'roc_Diabetes.png'))
# exit()
if adjust_excess:
adjustment_factor = 1 / 0.92 # adjusted for fact it underestimates by 8%
else:
adjustment_factor = 1.0
for data_label, data in [("Diabetes", load_diabetes_data('T1GRS')),
("Coeliac", load_coeliac_data()),
("Renal", load_renal_data(seed=sample_seed)),
("Glaucoma", load_glaucoma_data())]:
# Set random seed
# np.random.seed(seed)
# rng = np.random.RandomState(42) ... rng.choie()
# rng = np.random.default_rng(seed)
(scores, bins, means, medians, p_C) = data
if output_application[data_label]:
res_file = os.path.join(out_dir, f"pe_results_{data_label}.pkl")
summary_file = os.path.join(out_dir, f"summary_{data_label}.json")
if FRESH_DATA or not os.path.isfile(res_file):
print(f"Running mixture analysis on {data_label} scores...", flush=True)
t = time.time() # Start timer
summary, df_pe = dpe.analyse_mixture(scores, bins, methods,
n_boot=n_boot, boot_size=-1, n_mix=n_mix, # boot_size=sample_size,
alpha=alpha, ci_method=ci_method,
correct_bias=correct_bias, seed=seed, n_jobs=-1, # Previously correct_bias defaulted to False
true_pC=p_C, logfile=os.path.join(out_dir, f"pe_{data_label}.log"))
elapsed = time.time() - t
print(f'Elapsed time = {elapsed:.3f} seconds\n')
# Save results
df_pe.to_pickle(res_file)
with open(summary_file, "w") as sf:
json.dump(summary, sf, indent=4)
else:
print(f"Loading {data_label} analysis...", flush=True)
df_pe = pd.read_pickle(res_file)
# if os.path.isfile(res_file):
# df_pe = pd.read_pickle(res_file)
# else:
# warnings.warn(f"Missing data file: {res_file}")
# break
with open(summary_file, "r") as sf:
summary = json.load(sf)
# Plot worked examples
print(f"Plotting application with {data_label} scores...", flush=True)
with mpl.rc_context({'axes.labelsize': 11,
'xtick.labelsize': 10,
'ytick.labelsize': 10,
'legend.fontsize': 9}):
fig_ex = plt.figure(figsize=(12, 3.7))
# gs = plt.GridSpec(nrows=1, ncols=2, hspace=0.15, wspace=0.15,
# left=0.08, right=0.95, bottom=0.15, top=0.96)
gs = plt.GridSpec(nrows=1, ncols=3, hspace=0.3, wspace=0.25,
left=0.08, right=0.95, bottom=0.15, top=0.96)
# sns.set_style("ticks")
with sns.axes_style("ticks"):
ax_dists_ex = fig_ex.add_subplot(gs[0, 0])
plot_distributions(scores, bins, data_label, ax=ax_dists_ex)
with sns.axes_style("ticks"):
ax_roc_ex = fig_ex.add_subplot(gs[0, 1])
plot_roc(scores, bins, full_labels=False, ax=ax_roc_ex)
sns.despine(ax=ax_roc_ex, top=True, right=True, trim=True)
ax_roc_ex.set_xlim([0, 1.01]) # Prevent clipping of line
ax_roc_ex.set_ylim([0, 1.01]) # Prevent clipping of line
# with sns.axes_style("whitegrid"):
with sns.axes_style("ticks", {"axes.grid": True, "axes.spines.left": False, 'ytick.left': False}):
ax_ci_ex = fig_ex.add_subplot(gs[0, -1])
plot_bootstraps(df_pe, summary, # for confidence_intervals
# scores=scores, bins=bins, prepared_methods=methods,
correct_bias=correct_bias, p_C=p_C,
ax=ax_ci_ex, limits=application_xlims[data_label],
ci_method=ci_method, initial=False, legend=False,
violins=True, orient='h', average=average)
# if application_xlims[data_label]:
# ax_ci_ex.set_xlim(*application_xlims[data_label])
fig_ex.savefig(os.path.join(fig_dir, f'application_{data_label}.png'))
fig_ex.savefig(os.path.join(fig_dir, f'application_{data_label}.svg'), transparent=True)
if output_application_vary_cases[data_label]:
assert "Mix_C" in scores and "Mix_N" in scores
res_file = os.path.join(out_dir, f"pe_results_vary_cases_{data_label}.pkl")
n_steps = int(round(1 / 0.05)) + 1 # 5% steps including ends
constructed_p_Cs = np.linspace(0, 1, num=n_steps, endpoint=True)
if FRESH_DATA or not os.path.isfile(res_file):
print(f"Running mixture analysis with varying cases on {data_label} scores...", flush=True)
t = time.time() # Start timer
# Seed RNG for permutations, construct_mixture and analyse_mixture
rng = np.random.default_rng(seed)
mix_seeds = rng.integers(0, np.iinfo(np.int32).max, endpoint=False, size=n_steps * n_construction_seeds)
# Maintain the same mixture size for each constructed mixture (sampling with replacement)
size = len(scores["Mix"])
construct_results = []
p_C_bar = tqdm.tqdm(constructed_p_Cs, dynamic_ncols=True)
for p, constructed_p_C in enumerate(p_C_bar):
p_C_bar.set_description(f" p_C = {constructed_p_C:6.2f}")
for mix in tqdm.trange(n_construction_seeds, dynamic_ncols=True, desc=" Mix"):
mix_seed = mix_seeds[p * n_construction_seeds + mix]
# Construct new mixtures
constructed_scores = {"R_C": scores["R_C"], "R_N": scores["R_N"]} # Ensure summary statistics are updated
constructed_scores["Mix"] = construct_mixture(scores['Mix_C'], scores['Mix_N'],
constructed_p_C, size, seed=mix_seed)
# Consider only the point estimates n_boot=0
summary, df_pe = dpe.analyse_mixture(constructed_scores, bins, methods,
n_boot=0, boot_size=-1, n_mix=n_mix, # boot_size=sample_size,
alpha=alpha, ci_method=ci_method,
correct_bias=correct_bias, seed=mix_seed, n_jobs=-1, # Previously correct_bias defaulted to False
verbose=0, true_pC=constructed_p_C,
logfile=os.path.join(out_dir, f"pe_{data_label}_constructed_p_C_{constructed_p_C:3.2f}.log"))
# summary := {"Excess": {"p_C": ...}, ...}
df_construct = df_pe.iloc[[0]].copy()
df_construct["p_C"] = constructed_p_C
df_construct["Mix"] = mix
construct_results.append(df_construct)
df_construct = pd.concat(construct_results, ignore_index=True)
elapsed = time.time() - t
print(f'Elapsed time = {elapsed:.3f} seconds\n')
# Save results
df_construct.to_pickle(res_file)
else:
print(f"Loading {data_label} with varying cases analysis...", flush=True)
df_construct = pd.read_pickle(res_file)
# Plot results
print("Plotting analysis of p_C vs. constructed p_C with {} scores...".format(data_label), flush=True)
with sns.axes_style("whitegrid"):
fig, axes = plt.subplots(nrows=1, ncols=len(methods), sharex=True, sharey=True, figsize=(12, 4))
df_construct_tidy = df_construct.melt(var_name="Method",
id_vars=["p_C", "Mix"],
value_name="Estimate")
for m, method in enumerate(methods):
method_data = df_construct_tidy.query(f"Method == '{method}'")
ax = axes[m]
sns.lineplot(x="p_C", y="Estimate", data=method_data, err_style="bars", ax=ax)
ax.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Perfect Estimator')
ax.set_aspect('equal')
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
ticks = np.linspace(0, 1, 6, endpoint=True)
ax.set_xticks(ticks)
ax.set_yticks(ticks)
if m == 0:
ax.set_ylabel(r"$\hat{p}_C$ (Estimate)")
ax.set_xlabel(r"$p_C$ (Ground Truth)")
ax.set_title(method)
fig.savefig(os.path.join(fig_dir, f'estimation_test_{data_label}.png'))
fig.savefig(os.path.join(fig_dir, f'estimation_test_{data_label}.svg'), transparent=True)
if output_characterisation[data_label]:
# if FRESH_DATA:
# exec(open("./bootstrap.py").read())
# Load bootstraps of accurarcy data
(point_estimates, boots_estimates, proportions, sample_sizes) = load_accuracy(characterisation_dir, data_label)
# Plot point estimates of p1
if bool(point_estimates):
print("Plotting characterisation of {} scores...".format(data_label), flush=True)
fig = plot_characterisation(point_estimates, proportions, sample_sizes, average=average)
fig.savefig(os.path.join(fig_dir, 'point_characterise_{}.png'.format(data_label)))
fig.savefig(os.path.join(fig_dir, 'point_characterise_{}.svg'.format(data_label)), transparent=True)
# Plot bootstrapped estimates of p1
if False: # bool(boots_estimates):
print("Plotting bootstrapped characterisation of {} scores...".format(data_label), flush=True)
fig = plot_characterisation(boots_estimates, proportions, sample_sizes, average=average)
fig.savefig(os.path.join(fig_dir, 'boots_characterise_{}.png'.format(data_label)))
if output_analysis[data_label]:
# Seed RNG for permutations, construct_mixture and analyse_mixture
rng = np.random.default_rng(seed)
# Plot violins for a set of proportions
# p_stars = [0.05, 0.25, 0.50, 0.75, 0.95]
# sizes = [100, 500, 1000, 5000, 10000]
# p_stars = [0.25, 0.50, 0.75]
# p_stars = [0.1, 0.50, 0.75]
p_stars = [0.1, 0.4, 0.8]
sizes = [500, 1000, 5000]
# n_boot = 5
selected_mix = 0
# Generate multiple mixes
point_estimates_res_file = os.path.join(out_dir, f"pe_stack_analysis_point_{data_label}.pkl")
boot_estimates_res_file = os.path.join(out_dir, f"pe_stack_analysis_{data_label}.pkl")
summaries_file = os.path.join(out_dir, f"ma_summaries_{data_label}.json")
if FRESH_DATA: # or True:
print(f"Running mixture analysis with {data_label} scores...", flush=True)
t = time.time() # Start timer
# Split the references distributions to ensure i.i.d. data for
# constructing the mixtures and estimating them.
n_R_C, n_R_N = len(scores['R_C']), len(scores['R_N'])
partition_R_C, partition_R_N = n_R_C // 2, n_R_N // 2
# inds_R_C = np.random.permutation(n_R_C)
# inds_R_N = np.random.permutation(n_R_N)
inds_R_C = rng.permutation(n_R_C)
inds_R_N = rng.permutation(n_R_N)
hold_out_scores = {'R_C': scores['R_C'][inds_R_C[:partition_R_C]],
'R_N': scores['R_N'][inds_R_N[:partition_R_N]]}
violin_scores = {'R_C': scores['R_C'][inds_R_C[partition_R_C:]],
'R_N': scores['R_N'][inds_R_N[partition_R_N:]]}
dfs_point = []
dfs_boot = []
mix_dfs = []
summaries = []
size_bar = tqdm.tqdm(sizes, dynamic_ncols=True)
for s, size in enumerate(size_bar):
size_bar.set_description(f"Size = {size:6,}")
Mixtures = {mix: {} for mix in range(n_seeds)}
mix_dfs.append([])
summaries.append([])
for mix in tqdm.trange(n_seeds, dynamic_ncols=True, desc=" Mix"): # Redundant loop
mix_dist_file = os.path.join(out_dir, f"ma_{data_label}_size_{size:05d}_mix_{mix:03d}.pkl")
summaries[s].append([])
prop_bar = tqdm.tqdm(p_stars, dynamic_ncols=True)
for p, p_star in enumerate(prop_bar):
prop_bar.set_description(f" p_C = {p_star:6.2f}")
violin_scores['Mix'] = construct_mixture(hold_out_scores['R_C'], hold_out_scores['R_N'], p_star, size, seed=rng)
Mixtures[mix][p_star] = violin_scores['Mix']
summary, df_cm = dpe.analyse_mixture(violin_scores, bins, methods,
n_boot=n_boot, boot_size=size,
n_mix=n_mix,
alpha=alpha, true_pC=p_star,
ci_method=ci_method,
correct_bias=correct_bias, # Previously correct_bias defaulted to False
seed=rng.integers(np.iinfo(np.int32).max, dtype=np.int32),
n_jobs=-1, verbose=0,
logfile=os.path.join(out_dir, f"pe_{data_label}_size_{size:05d}_p_C_{p_star:3.2f}.log"))
summaries[s][mix].append(summary)
# summary_file = os.path.join(out_dir, f"ma_{data_label}_size_{size:05d}_mix_{mix:03d}_p_C_{p_star:3.2f}.json")
# with open(summary_file, "w") as sf:
# json.dump(summary, sf)
df_point = df_cm.iloc[[0]].copy()
df_point['Size'] = size
df_point["p_C"] = p_star
df_point['Mix'] = mix
# df_point = df_point.melt(var_name='Method', id_vars=["p_C", 'Size', 'Mix'], value_name='Estimate')
dfs_point.append(df_point)
df_boots = df_cm.iloc[1:, :].copy()
if n_mix > 0:
n_bootstraps_total = n_mix * n_boot
else:
n_bootstraps_total = n_boot
df_boots['Size'] = size * np.ones(n_bootstraps_total, dtype=int)
df_boots["p_C"] = p_star * np.ones(n_bootstraps_total)
df_boots['Mix'] = mix * np.ones(n_bootstraps_total, dtype=int)
df_boots["Boot"] = list(range(n_bootstraps_total))
df_boots = df_boots.melt(var_name='Method',
id_vars=["p_C", 'Size', 'Mix', "Boot"],
value_name='Estimate')
dfs_boot.append(df_boots)
df_size = pd.DataFrame(Mixtures[mix], columns=p_stars)
mix_dfs[s].append(df_size)
df_size.to_pickle(mix_dist_file)
df_point = | pd.concat(dfs_point, ignore_index=True) | pandas.concat |
#!/usr/bin/env python
# coding: utf-8
import os
import pprint
import pandas as pd
from collections import OrderedDict
def get_parameters():
# Read Data
try:
df_363 = pd.read_excel(
io=os.path.join(os.path.dirname(__file__), 'data', 'tab_dd_363.xlsx'),
sheet_name='dd_363',
index_col=0
)
except Exception as e:
#print(e, '\n')
#print('Read table from GitHub')
df_363 = pd.read_excel(
io='https://github.com/gaemapiracicaba/norma_dd_363_11/raw/main/src/normas/data/tab_dd_363.xlsx',
sheet_name='dd_363',
index_col=0
)
# Filter only quality
df_363 = df_363.loc[(df_363['tipo_padrao'] == 'qualidade')]
# Classes
list_classes = list(set(df_363['padrao_qualidade']))
list_classes = [x for x in list_classes if pd.notnull(x)]
list_classes.sort()
return df_363, list_classes
def filter_by_classe(df_363, classe):
# Filter dataframe by Classe
df_363 = df_363.loc[(df_363['padrao_qualidade'] == classe)]
# Parâmetros
list_parametros = list(set(df_363['parametro_descricao']))
list_parametros = [x for x in list_parametros if pd.notnull(x)]
list_parametros.sort()
return df_363, list_parametros
# def filter_by_parameters(df_363, parametro):
# # Filter dataframe by Parametro
# df_363 = df_363.loc[(df_363['parametro_descricao'] == parametro)]
#
# # Check and Get Results
# if len(df_363) == 1:
# dict_363 = df_363.to_dict(orient='records')[0]
# dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
# return dict_363
# else:
# return 'erro'
def filter_by_parameters(df_363, parametro, condicao=None):
# Filter dataframe by Parametro
df_363 = df_363.loc[(df_363['parametro_descricao'] == parametro)]
# Condição
array = df_363['condicao'].values
dict_condicao = dict(enumerate(array.flatten(), 1))
# Check and Get Results
if len(df_363) == 1 and len(array) == 1:
dict_363 = df_363.to_dict(orient='records')[0]
dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
return dict_363
elif len(df_363) > 1 and len(array) > 1 and condicao is not None:
try:
# Filtra a Condição
#condicao = df_357['condicao'].values[condicao]
df_363 = df_363.loc[(df_363['condicao'] == dict_condicao[int(condicao)])]
dict_363 = df_363.to_dict(orient='records')[0]
dict_363 = OrderedDict(sorted(dict_363.items(), key=lambda x: df_363.columns.get_loc(x[0])))
return dict_363
except Exception as e:
#print(e)
print('A condição definida foi "{}".\nAs opções possíveis são:\n'.format(condicao))
print(*('{} - {}'.format(k, v) for k,v in dict_condicao.items()), sep='\n')
else:
print('Parâmetro "{}" tem mais de um registro.\nFaz-se necessário definir condição!\n'.format(parametro))
print(*('{} - {}'.format(k, v) for k,v in dict_condicao.items()), sep='\n')
def set_type_desconformidade(dict_363):
if pd.isnull(dict_363['valor_minimo_permitido']) & pd.notnull(dict_363['valor_maximo_permitido']):
#print('Parâmetro só tem "valor máximo". Caso o valor medido esteja acima, é amostra desconforme!')
tipo_363 = 'acima>desconforme'
elif pd.notnull(dict_363['valor_minimo_permitido']) & pd.isnull(dict_363['valor_maximo_permitido']):
#print('Parâmetro só tem "valor mínimo". Caso o valor medido esteja abaixo, é amostra desconforme!')
tipo_363 = 'abaixo>desconforme'
elif pd.notnull(dict_363['valor_minimo_permitido']) & pd.notnull(dict_363['valor_maximo_permitido']):
#print('Parâmetro tem "valor mínimo" e "valor máximo". Caso o valor medido acima ou abaixo, é amostra desconforme!')
tipo_363 = 'abaixo_acima>desconforme'
elif | pd.isnull(dict_363['valor_minimo_permitido']) | pandas.isnull |
import os
from io import BytesIO
import zipfile
import time
import warnings
import json
from pathlib import Path
import argparse
import requests
import pandas as pd
import geopandas as gpd
import fiona
DATA_DIR = Path(os.path.dirname(__file__), "../data")
RAW_DIR = Path(DATA_DIR, "raw")
PROCESSED_DIR = Path(DATA_DIR, "processed")
def load_gdf(path, epsg=27700):
gdf = gpd.read_file(path)
gdf.to_crs(epsg=epsg, inplace=True)
return gdf
def download_la_shape(lad20cd="E08000021", overwrite=False):
save_path = Path(PROCESSED_DIR, lad20cd, "la_shape", "la.shp")
if os.path.exists(save_path) and not overwrite:
return gpd.read_file(save_path)
os.makedirs(save_path.parent, exist_ok=True)
# From https://geoportal.statistics.gov.uk/datasets/ons::local-authority-districts-december-2020-uk-bgc/about
base = "https://services1.arcgis.com/ESMARspQHYMw9BZ9/arcgis/rest/services/Local_Authority_Districts_December_2020_UK_BGC/FeatureServer/0"
query = (
f"query?where=LAD20CD%20%3D%20%27{lad20cd}%27&outFields=*&outSR=27700&f=json"
)
url = f"{base}/{query}"
la = query_ons_records(url, save_path=None)
la = columns_to_lowercase(la)
la = la[["geometry", "lad20cd", "lad20nm"]]
la.to_file(save_path)
return la
def lad20cd_to_lad11cd(lad20cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20cd == lad20cd]["lad11cd"].unique()
def lad11cd_to_lad20cd(lad11cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad11cd == lad11cd]["lad20cd"].unique()
def lad20nm_to_lad20cd(lad20nm, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20nm == lad20nm]["lad20cd"].iloc[0]
def lad20cd_to_lad20nm(lad20cd, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad20cd == lad20cd]["lad20nm"].iloc[0]
def lad11nm_to_lad11cd(lad11nm, mappings=None):
if mappings is None:
mappings = download_oa_mappings()
return mappings[mappings.lad11nm == lad11nm]["lad11cd"].iloc[0]
def download_oa_shape(lad11cd="E08000021", lad20cd=None, overwrite=False):
if isinstance(lad11cd, str):
lad11cd = [lad11cd]
if lad20cd is None:
lad20cd = lad11cd_to_lad20cd(lad11cd[0])[0]
save_path = Path(PROCESSED_DIR, lad20cd, "oa_shape", "oa.shp")
if os.path.exists(save_path) and not overwrite:
return gpd.read_file(save_path)
os.makedirs(save_path.parent, exist_ok=True)
oa = []
for la in lad11cd:
# From https://geoportal.statistics.gov.uk/datasets/ons::output-areas-december-2011-boundaries-ew-bgc-1/about
url = f"https://ons-inspire.esriuk.com/arcgis/rest/services/Census_Boundaries/Output_Area_December_2011_Boundaries/FeatureServer/2/query?where=lad11cd%20%3D%20'{la}'&outFields=*&outSR=27700&f=json"
oa.append(query_ons_records(url, save_path=None))
oa = pd.concat(oa)
oa = columns_to_lowercase(oa)
oa = oa[["oa11cd", "geometry"]]
oa.to_file(save_path)
return oa
def download_oa_mappings(overwrite=False):
save_path = Path(RAW_DIR, "oa_mappings.csv")
if os.path.exists(save_path) and not overwrite:
return pd.read_csv(save_path, dtype=str)
# 2011
# https://geoportal.statistics.gov.uk/datasets/ons::output-area-to-lower-layer-super-output-area-to-middle-layer-super-output-area-to-local-authority-district-december-2011-lookup-in-england-and-wales/about
url = "https://opendata.arcgis.com/api/v3/datasets/6ecda95a83304543bc8feedbd1a58303_0/downloads/data?format=csv&spatialRefId=4326"
df2011 = pd.read_csv(url)
df2011.drop("ObjectId", axis=1, inplace=True)
# 2020
# https://geoportal.statistics.gov.uk/datasets/ons::output-area-to-lower-layer-super-output-area-to-middle-layer-super-output-area-to-local-authority-district-december-2020-lookup-in-england-and-wales/about
url = "https://opendata.arcgis.com/api/v3/datasets/65664b00231444edb3f6f83c9d40591f_0/downloads/data?format=csv&spatialRefId=4326"
df2020 = | pd.read_csv(url) | pandas.read_csv |
# coding: utf-8
# In[193]:
#Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import plotly
import plotly.plotly as py
import plotly.tools as tls
import plotly.graph_objs as go
import time
import pandas_datareader as web
# Package and modules for importing data;
import datetime
import requests
import json as js
import csv
# In[195]:
# Calling API for Microsoft stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=MSFT&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file1 = api_call.text
file1=js.loads(api_call.text)
# In[197]:
file1['Time Series (Daily)']['2017-07-27']
# In[198]:
# To write into csv
from datetime import datetime
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file1
#f = csv.writer(open("abc.csv", ""))
# Write CSV Header, If you dont need that, remove this line
#f.writerow(["pk", "model", "codename", "name", "content_type"])
temp_data = file1['Time Series (Daily)']
with open('Microsoft_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Microsoft_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[199]:
# Changing time to Day Month Year format
temp_data = file1['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[200]:
Microsoft.dropna(inplace=True)
Microsoft=pd.read_csv('Microsoft_stock.csv', parse_dates=True, index_col=0 )
print(Microsoft.head(5))
# In[201]:
Microsoft.index.values
# In[202]:
#Cleaning the index values. Changing time to Day Month Year format
Address_M='Microsoft_stock.csv'
Microsoft=pd.read_csv(Address_M)
Microsoft['Date'] = pd.to_datetime(Microsoft['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[203]:
Microsoft[['High','Low']].plot()
plt.show()
print()
# In[204]:
a=Microsoft['Date']
b=Microsoft['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Microsoft')
# In[205]:
# Calling API for Apple's stock prices
headers = {
'X-API-KEY': 'Get api key ',
}
API_KEY = 'Get api key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=AAPL&outputsize=ful&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file2 = api_call.text
file2=js.loads(api_call.text)
# In[206]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file2
temp_data = file2['Time Series (Daily)']
with open('Apple_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Apple_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[207]:
# Changing time to Day Month Year format
temp_data = file2['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[208]:
Apple.dropna(inplace=True)
Apple=pd.read_csv('Apple_stock.csv', parse_dates=True, index_col=0 )
# In[209]:
#Cleaning the index values. Changing time to Day Month Year format
Address_A='Apple_stock.csv'
Apple=pd.read_csv(Address_A)
Apple['Date'] = pd.to_datetime(Apple['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[210]:
a=Apple['Date']
b=Apple['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Apple')
# In[211]:
# Calling API for Facebook stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=FB&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file3 = api_call.text
file3=js.loads(api_call.text)
# In[212]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file3
temp_data = file3['Time Series (Daily)']
with open('Facebook_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Facebook_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[213]:
# Changing time to Day Month Year format
temp_data = file3['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[214]:
Facebook = pd.read_csv('Facebook_stock.csv', parse_dates=True, index_col=0 )
Facebook.dropna(inplace=True)
# In[215]:
#Cleaning the index values. Changing time to Day Month Year format
Address_F='Facebook_stock.csv'
Facebook=pd.read_csv(Address_F)
Facebook['Date'] = pd.to_datetime(Facebook['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[216]:
a=Facebook['Date']
b=Facebook['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Facebook')
# In[217]:
# Calling API for Google stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=GOOG&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file4 = api_call.text
file4=js.loads(api_call.text)
a=file4['Time Series (Daily)']
# In[218]:
x = file4
temp_data = file4['Time Series (Daily)']
with open('Google_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Google_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[219]:
# Changing time to Day Month Year format
temp_data = file4['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[220]:
Google = pd.read_csv('Google_stock.csv', parse_dates=True, index_col=0 )
Google.dropna(inplace=True)
# In[221]:
#Cleaning the index values. Changing time to Day Month Year format
Address_G='Google_stock.csv'
Google=pd.read_csv(Address_G)
Google['Date'] = pd.to_datetime(Google['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[222]:
a=Google['Date']
b=Google['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Google')
# In[224]:
# Calling API for Disney stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=DIS&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file5 = api_call.text
file5=js.loads(api_call.text)
# In[225]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file5
temp_data = file5['Time Series (Daily)']
with open('Disney_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Disney_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[226]:
# Changing time to Day Month Year format
temp_data = file5['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[227]:
Disney = pd.read_csv('Disney_stock.csv', parse_dates=True, index_col=0 )
Disney.dropna(inplace=True)
# In[228]:
#Cleaning the index values. Changing time to Day Month Year format
Address_D='Disney_stock.csv'
Disney=pd.read_csv(Address_D)
Disney['Date'] = pd.to_datetime(Disney['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[230]:
a=Disney['Date']
b=Disney['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Disney')
# In[231]:
# Calling API for Netflix stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=NFLX&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file6 = api_call.text
file6=js.loads(api_call.text)
# In[232]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file6
temp_data = file6['Time Series (Daily)']
with open('Netflix_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Netflix_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[233]:
# Changing time to Day Month Year format
temp_data = file6['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[234]:
Netflix = pd.read_csv('Netflix_stock.csv', parse_dates=True, index_col=0 )
Netflix.dropna(inplace=True)
# In[235]:
#Cleaning the index values. Changing time to Day Month Year format
Address_N='Netflix_stock.csv'
Netflix=pd.read_csv(Address_N)
Netflix['Date'] = pd.to_datetime(Netflix['Date']).apply(lambda x: x.strftime('%Y-%m-%d')if not pd.isnull(x) else '')
# In[236]:
a=Netflix['Date']
b=Netflix['High']
trace= go.Scatter(x=a,y=b)
data=[trace]
py.iplot(data, filename='Netflix')
# In[237]:
# Calling API for Amazon stock prices
headers = {
'X-API-KEY': 'Get API key',
}
API_KEY = 'Get API key'
url = 'https://www.alphavantage.co/query?function=TIME_SERIES_DAILY&symbol=AMZN&outputsize=full&apikey='+API_KEY
api_call=requests.get(url, headers=headers)
file7 = api_call.text
file7 =js.loads(api_call.text)
# In[238]:
# To write into csv
csv.writer(open("data.csv", "wb"), dialect="excel")
x = file7
temp_data = file7['Time Series (Daily)']
with open('Amazon_stock.csv','w') as f:
writ = csv.writer(f)
label=('Date','Open','High','Low','Close','Volume')
writ.writerow(label)
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
fields =[datetime_object,
float(temp_data[temp_date]['1. open']),
float(temp_data[temp_date]['2. high']),
float(temp_data[temp_date]['3. low']),
float(temp_data[temp_date]['4. close']),
float(temp_data[temp_date]['5. volume'])]
# print (fields)
with open('Amazon_stock.csv','a') as f:
writ = csv.writer(f)
writ.writerow(fields)
# In[239]:
# Changing time to Day Month Year format
temp_data = file7['Time Series (Daily)']
for temp_date in temp_data:
datetime_object = datetime.strptime(temp_date, '%Y-%m-%d')
(datetime_object)
# In[240]:
Amazon = pd.read_csv('Amazon_stock.csv', parse_dates=True, index_col=0 )
Amazon.dropna(inplace=True)
# In[241]:
#Cleaning the index values. Changing time to Day Month Year format
Address_A='Amazon_stock.csv'
Amazon=pd.read_csv(Address_A)
Amazon['Date'] = | pd.to_datetime(Amazon['Date']) | pandas.to_datetime |
#Rule 1 - Perfect excel format no extra spaces. - No leading and trailing spaces in the column names. No leading and trailing zeros. Please make sure you delete any unwanted spaces in TEXT .
def perfect_excel_format(fle, fleName, target):
import re
import os
import sys
import json
import openpyxl
import pandas as pd
from pandas import ExcelWriter
from pandas import ExcelFile
from dateutil.parser import parse
import validators
file_name="Perfect_Excel_format.py"
configFile = 'https://s3.us-east.cloud-object-storage.appdomain.cloud/sharad-saurav-bucket/Configuration.xlsx'
rule="Perfect_Excel_format"
config=pd.read_excel(configFile)
newdf=config[config['RULE']==rule]
to_check=''
for index,row in newdf.iterrows():
to_check=row['TO_CHECK']
to_check=json.loads(to_check)
files_to_apply=to_check['files_to_apply']
columns_to_match=to_check['columns_to_match']
if(files_to_apply=='ALL' or fleName in files_to_apply):
data=[]
regex = re.compile('[@!#$%^&*()<>?/\|}{~:]')
cols = {}
df = pd.read_excel(fle)
df.index = range(2,df.shape[0]+2)
columns=df.columns
for col in columns:
if(col.startswith(' ')):
entry=[index,fleName,col+' has leading spaces']
print('Column name '+col+' in the file '+fleName+' has leading spaces')
data.append(entry)
if(col.endswith(' ')):
entry=[index,fleName,col+' has trailing spaces']
print('Column name '+col+' in the file '+fleName+' has trailing spaces')
data.append(entry)
if(regex.search(col) != None):
entry=[index,fleName,col+' has special characters']
print('Column name '+col+' in the file '+fleName+' has special characters')
data.append(entry)
if(col.startswith('0')):
entry=[index,fleName,col+' has leading zeros']
print('Column name '+col+' in the file '+fleName+' has leading zeros')
data.append(entry)
if(col.endswith('0')):
entry=[index,fleName,col+' has trailing zeros']
print('Column name '+col+' in the file '+fleName+' has trailing zeros')
data.append(entry)
#Rule - Check if the columns satisfies the data structure of all the data files
for key,value in cols.items():
cols_value=columns_to_match[key]
if(sorted(cols_value)!=sorted(value.to_list())):
entry=[index,fleName,key+' does not match the structure of the data file']
print('The columns of the '+key+' does not match the structure of the data file')
df1 = pd.DataFrame(data, columns = ['ROW_NO', 'FILE_NAME', 'COMMENTS'])
if( | ExcelFile(target) | pandas.ExcelFile |
import os
from io import StringIO
from pathlib import Path
import pandas as pd
import pandas._testing as pt
import pytest
from pyplotutil.datautil import Data, DataSet
csv_dir_path = os.path.join(os.path.dirname(__file__), "data")
test_data = """\
a,b,c,d,e
1,0.01,10.0,3.5,100
2,0.02,20.0,7.5,200
3,0.03,30.0,9.5,300
4,0.04,40.0,11.5,400
"""
test_dataset = """\
tag,a,b,c,d,e
tag01,0,1,2,3,4
tag01,5,6,7,8,9
tag01,10,11,12,13,14
tag01,15,16,17,18,19
tag01,20,21,22,23,24
tag01,25,26,27,28,29
tag02,10,11,12,13,14
tag02,15,16,17,18,19
tag02,110,111,112,113,114
tag02,115,116,117,118,119
tag02,120,121,122,123,124
tag02,125,126,127,128,129
tag03,20,21,22,23,24
tag03,25,26,27,28,29
tag03,210,211,212,213,214
tag03,215,216,217,218,219
tag03,220,221,222,223,224
tag03,225,226,227,228,229
"""
@pytest.mark.parametrize("cls", [str, Path])
def test_data_init_path(cls) -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
path = cls(csv_path)
expected_df = pd.read_csv(csv_path)
data = Data(path)
assert data.datapath == Path(csv_path)
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_StringIO() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
data = Data(StringIO(test_data))
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_init_DataFrame() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path)
if isinstance(expected_df, pd.DataFrame):
data = Data(expected_df)
assert data.datapath is None
pt.assert_frame_equal(data.dataframe, expected_df)
else:
pytest.skip(f"Expected DataFram type: {type(expected_df)}")
def test_data_init_kwds() -> None:
csv_path = os.path.join(csv_dir_path, "test.csv")
expected_df = pd.read_csv(csv_path, usecols=[0, 1])
data = Data(csv_path, usecols=[0, 1])
assert len(data.dataframe.columns) == 2
pt.assert_frame_equal(data.dataframe, expected_df)
def test_data_getitem() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]], columns=["a", "b", "c"])
data = Data(df)
pt.assert_series_equal(data["a"], df.a) # type: ignore
pt.assert_series_equal(data["b"], df.b) # type: ignore
pt.assert_series_equal(data["c"], df.c) # type: ignore
def test_data_getitem_no_header() -> None:
df = pd.DataFrame([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
data = Data(df)
| pt.assert_series_equal(data[0], df[0]) | pandas._testing.assert_series_equal |
##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
Run test:
>> pytest -q python/test/test_aggregate.py
'''
import numpy as np
import pyarrow as pa
import pandas as pd
import pycylon as cn
from pycylon import CylonContext
def test_aggregate():
ctx: CylonContext = CylonContext(config=None, distributed=False)
columns = 2
data1 = np.array([0, 1, 2, 3, 4, 5], dtype=np.int32)
data2 = np.array([10, 11, 12, 13, 14, 15], dtype=np.float32)
nd_array_list = [data1, data2]
ar_array: pa.array = pa.array(nd_array_list)
ar_table: pa.Table = pa.Table.from_arrays(nd_array_list, names=['x0', 'x1'])
ar1 = pa.array([1, 2, 3, 4])
ar2 = pa.array(['a', 'b', 'c', 'd'])
ar_tb2: pa.Table = pa.Table.from_arrays([ar1, ar2], names=['col1', 'col2'])
assert isinstance(ar_tb2, pa.Table)
col_names = ['col1', 'col2']
cn_tb1 = cn.Table.from_numpy(ctx, col_names, nd_array_list)
assert cn_tb1.row_count == data1.shape[0] and cn_tb1.column_count == len(nd_array_list)
data_list = [[1, 2, 3, 4], ['p', 'q', 'r', 's']]
cn_tb2 = cn.Table.from_list(ctx, col_names, data_list)
assert cn_tb2.row_count == len(data_list[0]) and cn_tb2.column_count == len(data_list)
dict1 = {'col1': [1, 2], 'col2': ['a', 'b']}
ar_tb3: pa.Table = pa.Table.from_pydict(dict1)
cn_tb3: cn.Table = cn.Table.from_pydict(ctx, dict1)
assert cn_tb3.row_count == len(dict1['col1']) and cn_tb3.column_count == len(dict1)
pdf = | pd.DataFrame(dict1) | pandas.DataFrame |
import glob
import datetime
import os
import pandas as pd
import numpy as np
import re
from tkinter import filedialog
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
# pyinstaller --onefile --noconsole --icon GetCSV.ico Arca_GetCSVConverter_2-0-0.py
#for MMW 18-6 spreadsheets
probCol = False
#infer desktop
desktopPath = os.path.expanduser("~/Desktop/")
filelist=['']
probRecords = []
probColls = []
#filename = r'arms_modsonly_May9.csv'
col_names = ["IslandoraContentModel","BCRDHSimpleObjectPID",'imageLink','filename','directory','childKey','title', 'alternativeTitle', 'creator1', 'creator2','creator3']
col_names += ['corporateCreator1','corporateCreator2','contributor1','contributor2','corporateContributor1','publisher_original','publisher_location']
col_names += ['dateCreated','description','extent','topicalSubject1','topicalSubject2','topicalSubject3','topicalSubject4','topicalSubject5']
col_names += ['geographicSubject1','coordinates','personalSubject1','personalSubject2','corporateSubject1','corporateSubject2', 'dateIssued_start']
col_names += ['dateIssued_end','dateRange', 'frequency','genre','genreAuthority','type','internetMediaType','language1','language2','notes']
col_names += ['accessIdentifier','localIdentifier','ISBN','classification','URI']
col_names += ['source','rights','creativeCommons_URI','rightsStatement_URI','relatedItem_title','relatedItem_PID','recordCreationDate','recordOrigin']
pattern1 = r'^[A-Z][a-z]{2}-\d{2}$' #%b-%Y date (e.g. Jun-17)
pattern2 = r'^\d{2}-\d{2}-[1-2]\d{3}$'
contentModels = {
r"info:fedora/islandora:sp_large_image_cmodel": "Large Image",
r"info:fedora/islandora:sp_basic_image": "Basic Image",
r"info:fedora/islandora:bookCModel": "Book",
r"info:fedora/islandora:newspaperIssueCModel":"Newspaper - issue",
r"info:fedora/islandora:newspaperPageCModel":"Newspaper",
r"info:fedora/islandora:sp_PDF":"PDF",
r"info:fedora/islandora:sp-audioCModel":"Audio",
r"info:fedora/islandora:sp_videoCModel":"Video",
r"info:fedora/islandora:sp_compoundCModel":"Compound",
r"info:fedora/ir:citationCModel":"Citation"
}
def browse_button():
# Allow user to select a directory and store it in global var
# called folder_path1
lbl1['text'] = ""
csvname = filedialog.askopenfilename(initialdir = desktopPath,title = "Select file",filetypes = (("csv files","*.csv"),("all files","*.*")))
if ".csv" not in csvname:
lbl1['text'] = "**Please choose a file with a .csv extension!"
else:
filelist[0] = csvname
lbl1['text'] = csvname
def splitMultiHdgs(hdgs):
if pd.notna(hdgs):
hdgs = hdgs.replace("\\,",";")
hdgs = hdgs.split(",")
newhdgs = []
for hdg in hdgs:
newhdg = hdg.replace(";", ",")
newhdgs.append(newhdg)
return newhdgs
else:
return None
def getMultiVals(item, string, df, pd):
hdgs = df.filter(like=string).columns
for hdg in hdgs:
vals = df.at[item.Index,hdg]
if pd.notna(vals):
vals = splitMultiHdgs(vals)
return vals
return None
def convert_date(dt_str, letter_date):
"""
Converts an invalid formatted date into a proper date for ARCA Mods
Correct format: Y-m-d
Fixes:
Incorrect format: m-d-Y
Incorrect format (letter date): m-d e.g. Jun-17
:param dt_str: the date string
:param letter_date: whether the string is a letter date. Letter date is something like Jun-17
:return: the correctly formatted date
"""
if letter_date:
rev_date = datetime.datetime.strptime(dt_str, '%b-%y').strftime('%Y-%m') # convert date to yymm string format
rev_date_pts = rev_date.split("-")
year_num = int(rev_date_pts[0])
if year_num > 1999:
year_num = year_num - 100
year_str = str(year_num)
rev_date_pts[0] = year_str
revised = "-".join(rev_date_pts)
else:
revised = datetime.datetime.strptime(dt_str, '%d-%m-%Y').strftime(
'%Y-%m-%d') # convert date to YY-mm string format
return revised
def sortValues(lst):
for item in lst:
if pd.isna(item):
lst.remove(item)
lst = set(lst)
lst = list(lst)
return lst
def dropNullCols(df):
nullcols = []
for col in df.columns:
notNull = df[col].notna().sum()
if notNull < 1:
nullcols.append(col)
return nullcols
def convert():
probCol = False
df2 = pd.DataFrame(columns = col_names)
df2.append(pd.Series(), ignore_index=True)
f=filelist[0]
# if not os.path.exists(savePath): #if folder does not exist
# os.makedirs(savePath)
try:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_7')
except UnicodeDecodeError:
df = pd.read_csv(f,dtype = "string", encoding = 'utf_8')
nullcols = dropNullCols(df)
df.drop(nullcols, axis=1, inplace=True)
i = 1
for item in df.itertuples():
#PID
df2.at[i, 'BCRDHSimpleObjectPID'] = item.PID
if 'mods_subject_name_personal_namePart_ms' in df.columns:
pNames = item.mods_subject_name_personal_namePart_ms
#ContentModel
cModel = item.RELS_EXT_hasModel_uri_s
df2.at[i,"IslandoraContentModel"] =contentModels[cModel]
#Local Identifier
if 'mods_identifier_local_ms' in df.columns:
localID = item.mods_identifier_local_ms
if pd.notna(localID) and localID != "None":
df2.at[i,'localIdentifier'] = localID
#Access Identifer
if 'mods_identifier_access_ms' in df.columns:
accessID = item.mods_identifier_access_ms
if pd.notna(accessID):
df2.at[i,'accessIdentifier'] = accessID
#Image Link
# Link to Image
PIDparts = item.PID.split(":")
repo = PIDparts[0] #repository code
num = PIDparts[1] #auto-generated accession number
imageLink = "https://bcrdh.ca/islandora/object/" + repo + "%3A" + num
df2.at[i, 'imageLink'] = imageLink
#Title
if 'mods_titleInfo_title_ms' in df.columns:
title = item.mods_titleInfo_title_ms
if pd.notna(title):
df2.at[i,'title'] = title.replace("\,",",")
#Alternative Title
if "mods_titleInfo_alternative_title_ms" in df.columns:
altTitle = item.mods_titleInfo_alternative_title_ms
if pd.notna(altTitle):
df2.at[i, 'alternativeTitle'] = altTitle.replace("\,",",")
#Date
if "mods_originInfo_dateIssued_ms" in df.columns:
dt = item.mods_originInfo_dateIssued_ms
if pd.notna(dt):
if (re.match(pattern1, dt)): #letter date, i.e. Jun-17
dt = convert_date(dt, True)
elif (re.match(pattern2, dt)): #reverse date
dt = convert_date(dt, False)
df2.at[i,'dateCreated'] = dt
#Date Issued Start
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms' in df.columns:
startDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_start_dateIssued_ms
if pd.notna(startDt):
df2.at[i,'dateIssued_start'] = startDt
#Date Issued End
if 'mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms' in df.columns:
endDt = item.mods_originInfo_encoding_w3cdtf_keyDate_yes_point_end_dateIssued_ms
if pd.notna(endDt):
df2.at[i,'dateIssued_end'] = startDt
#Publisher
if 'mods_originInfo_publisher_ms' in df.columns:
pub = item.mods_originInfo_publisher_ms
if pd.notna(pub):
df2.at[i, 'publisher_original'] = pub
#Publisher Location
if 'mods_originInfo_place_placeTerm_text_ms' in df.columns:
place = item.mods_originInfo_place_placeTerm_text_ms
if pd.notna(place):
df2.at[i,'publisher_location'] = place
#Frequency (serials only)
if 'mods_originInfo_frequency_ms' in df.columns:
freq = item.mods_originInfo_frequency_ms
if | pd.notna(freq) | pandas.notna |
from pypowerbifix.client import PowerBIClient
from pypowerbifix.activity_logs import ActivityLogs
from datetime import datetime
import pandas as pd
from Credentials import client_id, username, password
# create your powerbi api client
client = PowerBIClient.get_client_with_username_password(client_id=client_id, username=username, password=password)
# When testing, only logs from December 15th, 2019 and later were available. This may change in the future though.
dt = datetime(2019, 12, 16)
logs = client.activity_logs.get_activity_logs(dt)
print(logs)
pandas_installed = True
try:
import pandas as pd
| pd.set_option('display.max_columns', 500) | pandas.set_option |
import itertools
import click
import click_log
from datetime import datetime
from dotenv import load_dotenv
import os
import pandas as pd
from pathlib import Path
from .core import estimate_tray_centroids, extract_tray_carry_events_from_inferred, infer_tray_interactions, fetch_cuwb_data, fetch_cuwb_data_from_datapoints, fetch_motion_features, generate_human_activity_groundtruth, generate_human_activity_model, generate_tray_carry_groundtruth, generate_tray_carry_model, infer_human_activity, infer_tray_carry, pose_data_with_body_centroid
from .utils.io import load_csv, read_generic_pkl, write_cuwb_data_pkl, write_datafile_to_csv, write_generic_pkl
from .utils.log import logger
from .uwb_predict_tray_centroids import validate_tray_centroids_dataframe
now = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
valid_date_formats = list(itertools.chain.from_iterable(
map(lambda d: ["{}".format(d), "{}%z".format(d), "{} %Z".format(d)], ['%Y-%m-%dT%H:%M:%S', '%Y-%m-%d %H:%M:%S'])))
_cli_options_env_start_end = [
click.option("--environment", type=str, required=True),
click.option("--start", type=click.DateTime(formats=valid_date_formats), required=True,
help="Filter is passed to remote query or used to filter --cuwb-data (if --cuwb-data is provided)"),
click.option("--end", type=click.DateTime(formats=valid_date_formats), required=True,
help="Filter is passed to remote query or used to filter --cuwb-data (if --cuwb-data is provided)")
]
_cli_options_uwb_data = [
click.option("--cuwb-data", type=click.Path(exists=True), required=False,
help="Pickle formatted UWB data (create with 'fetch-cuwb-data')")
]
_cli_options_uwb_motion_data = [
click.option("--motion-feature-data", type=click.Path(exists=True), required=False,
help="Pickle formatted UWB motion data object (create with 'fetch-motion-features')")
]
def add_options(options):
def _add_options(func):
for option in reversed(options):
func = option(func)
return func
return _add_options
def _load_model_and_scaler(model_path, feature_scaler_path=None):
model = read_generic_pkl(model_path)
feature_scaler = None
if feature_scaler_path is not None:
feature_scaler = read_generic_pkl(feature_scaler_path)
return model, feature_scaler
def _load_tray_positions_from_csv(tray_positions_csv):
df_tray_centroids = None
if tray_positions_csv is not None:
try:
df_tray_centroids = load_csv(tray_positions_csv)
valid, msg = validate_tray_centroids_dataframe(df_tray_centroids)
if not valid:
logger.error(msg)
return None
except Exception as err:
logger.error(err)
return None
return df_tray_centroids
def _infer_tray_carry(df_tray_features, model, scaler=None):
inferred = infer_tray_carry(model=model, scaler=scaler, df_tray_features=df_tray_features)
df_carry_events = extract_tray_carry_events_from_inferred(inferred)
if df_carry_events is None or len(df_carry_events) == 0:
logger.warn("No carry events inferred")
return None
return df_carry_events
def _infer_human_activity(df_person_features, model, scaler=None):
df_person_features_with_nan = df_person_features[df_person_features.isna().any(axis=1)]
devices_without_acceleration = list(pd.unique(df_person_features_with_nan['device_id']))
if len(devices_without_acceleration) > 0:
logger.info("Devices dropped due to missing acceleration data: {}".format(devices_without_acceleration))
df_person_features.dropna(inplace=True)
return infer_human_activity(model=model, scaler=scaler, df_person_features=df_person_features)
@click.command(name="fetch-cuwb-data", help="Generate a pickled dataframe of CUWB data")
@add_options(_cli_options_env_start_end)
@click.option("--entity-type", type=click.Choice(['tray', 'person', 'all'],
case_sensitive=False), default='all', help="CUWB entity type")
@click.option("--data-type", type=click.Choice(['position', 'accelerometer', 'gyroscope', 'magnetometer', 'all'],
case_sensitive=False), default='all', help="Data to return")
@click.option("--data-source", type=click.Choice(['datapoints', 'imu_tables'],
case_sensitive=False), default='imu_tables', help="Source data resides (datapoints was retired 03/23/2021)")
@click.option("--output", type=click.Path(), default="%s/output" % (os.getcwd()),
help="output folder for CUWB data, data stored in <<output>>/uwb_data/<<file>>.pkl")
def cli_fetch_cuwb_data(environment, start, end, entity_type, data_type, data_source, output):
uwb_output = "{}/uwb_data".format(output)
Path(uwb_output).mkdir(parents=True, exist_ok=True)
if data_source == 'datapoints':
df = fetch_cuwb_data_from_datapoints(environment,
start,
end,
entity_type=entity_type,
data_type=data_type)
else:
df = fetch_cuwb_data(
environment,
start,
end,
entity_type=entity_type,
data_type=data_type
)
if df is None or len(df) == 0:
logger.warning("No CUWB data found")
return
write_cuwb_data_pkl(
df,
filename_prefix='uwb',
environment_name=environment,
start_time=start,
end_time=end,
directory=uwb_output
)
@click.command(name="fetch-motion-features",
help="Generate a pickled dataframe of UWB data converted into motion features")
@add_options(_cli_options_env_start_end)
@add_options(_cli_options_uwb_data)
@click.option("--output", type=click.Path(), default="%s/output" % (os.getcwd()),
help="output folder for cuwb tray features data, features stored in <<output>>/feature_data/<<file>>.pkl")
def cli_fetch_motion_features(environment, start, end, cuwb_data, output):
feature_data_output = "{}/feature_data".format(output)
Path(feature_data_output).mkdir(parents=True, exist_ok=True)
df_uwb_data = None
if cuwb_data is not None:
df_uwb_data = read_generic_pkl(cuwb_data)
df_uwb_data = df_uwb_data.loc[(df_uwb_data.index >= start) & (df_uwb_data.index <= end)]
df_features = fetch_motion_features(
environment,
start,
end,
include_meta_fields=True,
df_uwb_data=df_uwb_data
)
if df_features is None or len(df_features) == 0:
logger.warning("No CUWB data found")
return
write_cuwb_data_pkl(
df_features,
filename_prefix='motion-features',
environment_name=environment,
start_time=start,
end_time=end,
directory=feature_data_output
)
@click.command(name="generate-tray-carry-groundtruth",
help="Generate a pickled dataframe of trainable groundtruth features")
@click.option("--groundtruth-csv", type=click.Path(exists=True),
help="CSV formatted groundtruth data", required=True)
@click.option("--output", type=click.Path(), default="%s/output" % (os.getcwd()),
help="output folder, output includes data features pickle (<<output>>/groundtruth/<<now>>_tray_carry_groundtruth_features.pkl)")
def cli_generate_tray_carry_groundtruth(groundtruth_csv, output):
groundtruth_features_output = "{}/groundtruth".format(output)
Path(groundtruth_features_output).mkdir(parents=True, exist_ok=True)
df_groundtruth_features = generate_tray_carry_groundtruth(groundtruth_csv)
if df_groundtruth_features is None:
logger.warn("Unexpected result, unable to store groundtruth features")
else:
write_generic_pkl(
df_groundtruth_features,
"{}_tray_carry_groundtruth_features".format(now),
groundtruth_features_output)
@click.command(name="generate-human-activity-groundtruth",
help="Generate a pickled dataframe of trainable groundtruth features")
@click.option("--groundtruth-csv", type=click.Path(exists=True),
help="CSV formatted groundtruth data", required=True)
@click.option("--output", type=click.Path(), default="%s/output" % (os.getcwd()),
help="output folder, output includes data features pickle (<<output>>/groundtruth/<<now>>_human_activity_groundtruth_features.pkl)")
def cli_generate_human_activity_groundtruth(groundtruth_csv, output):
groundtruth_features_output = "{}/groundtruth".format(output)
Path(groundtruth_features_output).mkdir(parents=True, exist_ok=True)
df_groundtruth_features = generate_human_activity_groundtruth(groundtruth_csv)
if df_groundtruth_features is None:
logger.warn("Unexpected result, unable to store groundtruth features")
else:
write_generic_pkl(
df_groundtruth_features,
"{}_human_activity_groundtruth_features".format(now),
groundtruth_features_output)
@click.command(name="train-human-activity-model",
help="Train and generate a pickled model and feature scaler given groundtruth features")
@click.option("--groundtruth-features", type=click.Path(exists=True),
help="Pickle formatted groundtruth features data (create with 'generate-human-activity-groundtruth')")
@click.option("--output", type=click.Path(), default="%s/output/models" % (os.getcwd()),
help="output folder, model output includes pickled model (<<output>>/models/<<DATE>>_model.pkl) and pickled scaler (<<output>>/models/<<DATE>>_scaler.pkl)")
def cli_train_human_activity_model(groundtruth_features, output):
models_output = "{}/models".format(output)
Path(models_output).mkdir(parents=True, exist_ok=True)
df_groundtruth_features = | pd.read_pickle(groundtruth_features) | pandas.read_pickle |
# -*- coding: utf-8 -*-
from datetime import timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import (Timedelta,
period_range, Period, PeriodIndex,
_np_version_under1p10)
import pandas.core.indexes.period as period
class TestPeriodIndexArithmetic(object):
def test_pi_add_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = pi + offs
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
unanchored = np.array([pd.offsets.Hour(n=1),
pd.offsets.Minute(n=-2)])
with pytest.raises(period.IncompatibleFrequency):
pi + unanchored
with pytest.raises(TypeError):
unanchored + pi
@pytest.mark.xfail(reason='GH#18824 radd doesnt implement this case')
def test_pi_radd_offset_array(self):
# GH#18849
pi = pd.PeriodIndex([pd.Period('2015Q1'), pd.Period('2016Q2')])
offs = np.array([pd.offsets.QuarterEnd(n=1, startingMonth=12),
pd.offsets.QuarterEnd(n=-2, startingMonth=12)])
res = offs + pi
expected = pd.PeriodIndex([pd.Period('2015Q2'), pd.Period('2015Q4')])
tm.assert_index_equal(res, expected)
def test_add_iadd(self):
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
# previously performed setop union, now raises TypeError (GH14164)
with pytest.raises(TypeError):
rng + other
with pytest.raises(TypeError):
rng += other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h'),
Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm'),
Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00',
freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
def test_pi_add_int(self, one):
# Variants of `one` for #19012
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + one
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += one
tm.assert_index_equal(rng, expected)
@pytest.mark.parametrize('five', [5, np.array(5, dtype=np.int64)])
def test_sub(self, five):
rng = period_range('2007-01', periods=50)
result = rng - five
exp = rng + (-five)
tm.assert_index_equal(result, exp)
def test_sub_isub(self):
# previously performed setop, now raises TypeError (GH14164)
# TODO needs to wait on #13077 for decision on result type
rng = pd.period_range('1/1/2000', freq='D', periods=5)
other = pd.period_range('1/6/2000', freq='D', periods=5)
with pytest.raises(TypeError):
rng - other
with pytest.raises(TypeError):
rng -= other
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = ('Input has different freq(=.+)? '
'from PeriodIndex\\(freq=A-DEC\\)')
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(365, 'D'),
timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=M\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3),
np.timedelta64(3, 'D'), pd.offsets.Hour(72),
timedelta(minutes=60 * 24 * 3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1),
pd.offsets.Minute(), np.timedelta64(4, 'h'),
timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=D\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), pd.offsets.Minute(120),
timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00',
freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00',
freq='H')
msg = 'Input has different freq(=.+)? from PeriodIndex\\(freq=H\\)'
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng + delta
with tm.assert_raises_regex(
period.IncompatibleFrequency, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
class TestPeriodIndexSeriesMethods(object):
""" Test PeriodIndex and Period Series Ops consistency """
def _check(self, values, func, expected):
idx = pd.PeriodIndex(values)
result = func(idx)
if isinstance(expected, pd.Index):
tm.assert_index_equal(result, expected)
else:
# comp op results in bool
tm.assert_numpy_array_equal(result, expected)
s = pd.Series(values)
result = func(s)
exp = pd.Series(expected, name=values.name)
tm.assert_series_equal(result, exp)
def test_pi_ops(self):
idx = PeriodIndex(['2011-01', '2011-02', '2011-03',
'2011-04'], freq='M', name='idx')
expected = PeriodIndex(['2011-03', '2011-04',
'2011-05', '2011-06'], freq='M', name='idx')
self._check(idx, lambda x: x + 2, expected)
self._check(idx, lambda x: 2 + x, expected)
self._check(idx + 2, lambda x: x - 2, idx)
result = idx - Period('2011-01', freq='M')
exp = pd.Index([0, 1, 2, 3], name='idx')
tm.assert_index_equal(result, exp)
result = Period('2011-01', freq='M') - idx
exp = pd.Index([0, -1, -2, -3], name='idx')
| tm.assert_index_equal(result, exp) | pandas.util.testing.assert_index_equal |
import numpy as np
import pandas as pd
import pytest
from etna.datasets import TSDataset
from etna.datasets import generate_ar_df
from etna.datasets import generate_const_df
from etna.datasets import generate_periodic_df
from etna.metrics import R2
from etna.models import LinearPerSegmentModel
from etna.transforms import FilterFeaturesTransform
from etna.transforms.encoders.categorical import LabelEncoderTransform
from etna.transforms.encoders.categorical import OneHotEncoderTransform
@pytest.fixture
def two_df_with_new_values():
d = {
"timestamp": list( | pd.date_range(start="2021-01-01", end="2021-01-03") | pandas.date_range |
import math
import warnings
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import xgboost
from scipy.stats import pearsonr
from sklearn.linear_model import Ridge, LinearRegression, Lasso
from sklearn.metrics import mean_squared_log_error
from sklearn.model_selection import KFold
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer, RobustScaler
from sklearn.tree import DecisionTreeRegressor
from MLE import MultiLabelEncoder
warnings.filterwarnings(action="ignore", module="scipy", message="^internal gelsd")
pd.set_option('display.max_rows', 1000)
trainingFile = "./data/train.csv"
testFile = "./data/test.csv"
trainDf = pd.read_csv(trainingFile, header=0)
testDf = pd.read_csv(testFile, header=0)
trainDf['MoSold'] = trainDf['MoSold'].apply(str)
testDf['MoSold'] = testDf['MoSold'].apply(str)
target = 'SalePrice'
Y = trainDf[target]
def prepare_data():
training = trainDf.drop(['Id'], axis=1)
testing = testDf.drop(['Id'], axis=1)
str_cols = []
str_cols_idx = []
pos = 0
for c in training.columns:
if c != target:
if training[c].dtype == np.object:
str_cols.append(c)
str_cols_idx.append(pos)
pos = pos + 1
print("Number of string columns %d " % len(str_cols))
# treat NaN as a different category
for c in str_cols:
training[c] = training[c].fillna("$NULL")
testing[c] = testing[c].fillna("$NULL")
# training = training.drop(training[(training['GrLivArea']>4000) & (training['SalePrice']<300000)].index)
training = training.drop([target], axis=1)
print(training.dtypes)
print(training.head(10))
enc = MultiLabelEncoder(input_cols=np.array(str_cols_idx))
t_pipe = Pipeline(steps=[
('catencode', enc),
('null_handler', Imputer(missing_values='NaN', strategy='mean', axis=0)),
('rs', RobustScaler())
])
fit_pipeline = t_pipe.fit(pd.concat([training, testing], axis=0))
transformed = fit_pipeline.transform(training)
transformed_test = fit_pipeline.transform(testing)
print("T_TRAIN: ", transformed)
print("T_TEST: ", transformed_test)
return (pd.DataFrame(data=transformed, columns=training.columns),
pd.DataFrame(data=transformed_test, columns=testing.columns))
def correlations(t_df):
correlations = {}
features = t_df.columns
for f in features:
if f != target:
x1 = t_df[f]
key = f + ' vs ' + target
correlations[key] = pearsonr(x1, Y)[0]
data_correlations = pd.DataFrame(correlations, index=['Value']).T
sorted_c = data_correlations.loc[data_correlations['Value'].abs().sort_values(ascending=False).index]
pd.set_option('display.max_rows', None)
print(sorted_c)
def cv(df, pipeline):
iter_rmsle = []
iteration = 0
kf = KFold(n_splits=10, random_state=10)
for train_idx, test_idx in kf.split(df):
print("KFold iteration ", iteration)
x_train, x_test = df.iloc[train_idx], df.iloc[test_idx]
y_train, y_test = Y[train_idx], Y[test_idx]
model = pipeline.fit(x_train, y_train)
y_predict = model.predict(x_test)
mse = mean_squared_log_error(y_test, y_predict)
rmsle = math.sqrt(mse)
print(rmsle)
iter_rmsle.append(rmsle)
iteration += 1
return np.mean(iter_rmsle)
def select_pipeline(t_df, make_pipelines):
c_df = t_df.copy()
rmsles = []
pipelines = []
for pipeline in make_pipelines():
mean = cv(c_df, pipeline)
print("Mean RMSLE: ", mean)
rmsles.append(mean)
pipelines.append(pipeline)
min_index = np.argmin(rmsles)
print('Min RMSLE: ', np.min(rmsles))
print('Min RMSLE index: ', min_index)
best_pipeline = pipelines[min_index]
print('Best pipeline', best_pipeline)
best_model = best_pipeline.fit(c_df, Y)
print("RMSLES : ", rmsles)
return (best_model, rmsles)
def decision_tree_regressor():
pipelines = []
for d in range(2, 20):
est = DecisionTreeRegressor(max_depth=d, random_state=10)
pipelines.append(Pipeline(steps=[('DecisionTreeRegressor', est)]))
return pipelines
def xgb_regressor():
pipelines = []
for l in [0, 0.5, 0.7, 1.0, 2]:
est = xgboost.XGBRegressor(base_score=0.5, colsample_bylevel=1, colsample_bytree=0.8, gamma=0,
learning_rate=0.1, max_delta_step=0, max_depth=5,
min_child_weight=1, missing=None, n_estimators=500, nthread=-1,
objective='reg:linear', reg_alpha=l, reg_lambda=0,
scale_pos_weight=1, seed=10, silent=True, subsample=1)
pipelines.append(Pipeline(steps=[('XGBRegressor', est)]))
return pipelines
def ridge():
pipelines = []
for l in [0, 0.5, 0.7, 1.0, 1.5, 2]:
est = Ridge(alpha=l, copy_X=True, fit_intercept=True, max_iter=None,
normalize=False, random_state=10, solver='auto', tol=0.0001)
pipelines.append(Pipeline(steps=[('Ridge', est)]))
return pipelines
def linear():
pipelines = []
est = LinearRegression(normalize=False)
pipelines.append(Pipeline(steps=[('LinearRegression', est)]))
return pipelines
def lasso():
pipelines = []
for l in [0, 0.5, 0.7, 1.0, 2.0, 3.0, 10, 20, 30, 50, 100, 300, 1000, 2000, 5000]:
est = Lasso(alpha=l, max_iter=10000, tol=0.01)
pipelines.append(Pipeline(steps=[('Lasso', est)]))
return pipelines
def predict(model, testing):
sp_id = testDf['Id']
pred = model.predict(testing)
result = pd.DataFrame({'Id': sp_id, 'SalePrice': pred}, index=None)
print(result.head(10))
result.to_csv('./submission.csv', index=False)
print("Submission file created")
def stacking(training,
y,
test,
pipelines):
kf = KFold(n_splits=5, random_state=10)
validation_body = {
}
test_body = {
}
for p in pipelines:
validation_body['pred_' + p.steps[0][0]] = np.zeros(len(training.index))
test_body['pred_' + p.steps[0][0]] = np.zeros(len(test.index))
valid_df = pd.DataFrame(validation_body)
test_df = | pd.DataFrame(test_body) | pandas.DataFrame |
"""Visualizes burst data."""
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
def to_pandas(ebursts, offsets, svo, unit='s'):
"""Exports burst and offset data to dataframes for a single term.
ebursts is an edgebust dict from the SVO object
offsets is an offsets dict from the SVO object
"""
svos = " | ".join(svo)
bdf = pd.DataFrame(ebursts)
bdf[1] = pd.to_datetime(bdf[1], unit=unit)
bdf[2] = pd.to_datetime(bdf[2], unit=unit)
bdf.columns = ['level', 'start', 'end']
bdf['svo'] = svos
odf = pd.DataFrame()
i = pd.to_datetime(offsets, unit='s')
odf['Date'], odf['Year'], odf['Month'], odf[
'Day'] = i.date, i.year, i.month, i.day
odf = odf.set_index(i)
odf['svo'] = svos
return bdf, odf
def plot_bursts(odf,
bdf,
lowest_level=0,
title=True,
daterange=None,
xrangeoffsets=3,
s=None,
gamma=None):
"""Plots burst and offset data.
odf = an offsets dataframe
bdf = an edgeburst dataframe
lowest_level = subset the burst dataframe with bursts greater than or equal to the specified level
daterange = a tuple with two elements: a start date and end date as *strings*. format is 'year-month-day'
xrangeoffsets = the number of days to add before and after the min and max x dates
"""
svo_title = str(set(bdf['svo']).pop())
fig, (axa, axb) = plt.subplots(2, sharey=False, sharex=True)
fig.set_figwidth(10)
fig.set_figheight(6)
formatter = mdates.DateFormatter("%b %d\n%Y")
axb.xaxis.set_major_formatter(formatter)
# offsets plot
day_freq = odf.resample('D').size()
axa.plot(day_freq, color='#32363A')
axa.xaxis.set_major_formatter(formatter)
axa.xaxis_date()
axa.tick_params(axis='both', which='both', length=0)
axa.set_ylabel('Daily offsets')
if daterange:
axa.set_xlim(pd.Timestamp(daterange[0]), pd.Timestamp(daterange[1]))
# bursts plot
days = [day_freq.index[0]]
levels = [0]
for i in range(1, len(day_freq.index)):
period_start = odf.resample('D').size().index[i - 1]
period_end = odf.resample('D').size().index[i]
max_burst = set()
days.append(period_end)
for j in range(len(bdf)):
burst_start = bdf['start'][j]
burst_end = bdf['end'][j]
level = bdf['level'][j]
if burst_end < period_start or period_end < burst_start :
pass
else:
max_burst.add(level)
levels.append(max(max_burst))
finaldf = pd.DataFrame({"start": days, "level": levels})
if lowest_level > 0:
bdf = bdf[bdf['level'] >= lowest_level]
xmin = min(bdf['start'])
xmax = max(bdf['start'])
if xmin == xmax:
raise Exception("There must be at least two bursts at or above the specified level. Try reducing the `lowest_level` parameter.")
daterange = ((xmin + pd.DateOffset(days=2)).date(), (xmax + pd.DateOffset(days=2)).date())
# bursts plot
axb.bar(finaldf['start'], finaldf['level'], color='#32363A', width=1)
if s != None and gamma != None:
axb.set_ylabel(r'Burst levels (s = {}, $\gamma$ = {})'.format(s, gamma))
else:
axb.set_ylabel('Burst level')
axb.tick_params(axis='both', which='both', length=0)
if daterange:
axb.set_xlim( | pd.Timestamp(daterange[0]) | pandas.Timestamp |
import pandas as pd
import streamlit as st
# import sqlite3
# from sqlite3 import Connection
# from . import excelpage,dashboard
# from .resource import *
# from multipage import MultiPage
import numpy as np
import pandas as pd
from utils.data_helper import load_data
import plotly.express as px
from sklearn.preprocessing import StandardScaler
import plotly.figure_factory as ff
#import excelpage
# @st.cache
#from DataTransfer.src.resource import *
def app():
df = load_data()
outlier_temp = """<div style="background-color:#98AFC7;padding:10px"><h4 style="color:white;text-align:center;">Outlier</h4>
<h6 style="color:white;text-align:center;">In statistics, an outlier is a data point that differs significantly from other observations.
An outlier may be due to variability in the measurement or it may indicate experimental error; the latter are sometimes excluded from the data set.
An outlier can cause serious problems in statistical analyses.</h6></div><br></br>"""
st.markdown(outlier_temp, unsafe_allow_html=True)
st.markdown(
"""<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/css/bootstrap.min.css"
integrity="<KEY>" crossorigin="anonymous">""",
unsafe_allow_html=True,)
query_params = st.experimental_get_query_params()
tabs = ["IQR Test", "Z-Score Test", "Visualize"]
if "tab" in query_params:
active_tab = query_params["tab"][0]
else:
active_tab = "Z-Score Test"
if active_tab not in tabs:
st.experimental_set_query_params(tab="Z-Score Test")
active_tab = "Z-Score Test"
li_items = "".join(
f"""
<li class="nav-item">
<a class="nav-link{' active' if t == active_tab else ''}" href="/?tab={t}">{t}</a>
</li>
"""
for t in tabs)
tabs_html = f"""
<ul class="nav nav-tabs">
{li_items}
</ul>"""
st.markdown(tabs_html, unsafe_allow_html=True)
st.markdown("<br>", unsafe_allow_html=True)
def outlier_detection_iqr(dataframe):
my_dict = {'Features': [], 'IQR': [], 'Q3 + 1.5*IQR': [], 'Q1 - 1.5*IQR': [], 'Upper outlier count': [],
'Lower outlier count': [], 'Total outliers': [], 'Outlier percent': []}
for column in dataframe.select_dtypes(include=np.number).columns:
try:
upper_count = 0
lower_count = 0
q1 = np.percentile(dataframe[column].fillna(dataframe[column].mean()), 25)
q3 = np.percentile(dataframe[column].fillna(dataframe[column].mean()), 75)
IQR = q3 - q1
upper_limit = q3 + (IQR * 1.5)
lower_limit = q1 - (IQR * 1.5)
for element in dataframe[column].fillna(dataframe[column].mean()):
if element > upper_limit:
upper_count += 1
elif element < lower_limit:
lower_count += 1
my_dict['Features'].append(column)
my_dict['IQR'].append(IQR)
my_dict['Q3 + 1.5*IQR'].append(upper_limit)
my_dict['Q1 - 1.5*IQR'].append(lower_limit)
my_dict['Upper outlier count'].append(upper_count)
my_dict['Lower outlier count'].append(lower_count)
my_dict['Total outliers'].append(upper_count + lower_count)
my_dict['Outlier percent'].append(round((upper_count + lower_count) / len(dataframe[column]) * 100, 2))
except Exception as e:
print(e)
return | pd.DataFrame(my_dict) | pandas.DataFrame |
import pandas as pd
import numpy as np
# Sample the given dataframe df to select n_sample number of points.
def stratified_sample_df(df, col, n_samples,sampled='stratified',random_state=1):
if(sampled=='stratified'):
df_=df.groupby(col, group_keys=False).apply(lambda x: x.sample(int(np.rint(n_samples*len(x)/len(df))))).sample(frac=1,random_state=random_state).reset_index(drop=True)
elif(sampled=='equal'):
df_=df.groupby(col, group_keys=False).apply(lambda x: x.sample(int(n_samples/2))).sample(frac=1,random_state=random_state).reset_index(drop=True)
return df_
###### data collection taking all at a time
def data_collector(file_names,params,is_train):
if(params['csv_file']=='*_full.csv'):
index=12
elif(params['csv_file']=='*_translated.csv'):
index=23
sample_ratio=params['sample_ratio']
type_train=params['how_train']
sampled=params['samp_strategy']
take_ratio=params['take_ratio']
language=params['language']
# If the data being loaded is not train, i.e. either val or test, load everything and return
if(is_train!=True):
df_test=[]
for file in file_names:
lang_temp=file.split('/')[-1][:-index]
if(lang_temp==language):
df_test.append(pd.read_csv(file))
df_test=pd.concat(df_test,axis=0)
return df_test
# If train data is being loaded,
else:
# Baseline setting - only target language data is loaded
if(type_train=='baseline'):
df_test=[]
for file in file_names:
lang_temp=file.split('/')[-1][:-index]
print(lang_temp)
if(lang_temp==language):
temp= | pd.read_csv(file) | pandas.read_csv |
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
# -*- coding:utf-8 -*-
from app.classificator import helpers
from sklearn import svm
import pandas as pd
class Models():
def __init__(self):
self.cv = CountVectorizer()
self.svm_model = None
self.dataset = None
self.document_fake = None
self.document_real = None
return
def create_documents(self, dataset):
# Count vectorizer
self.cv.fit_transform(dataset['text'].values)
# Dataset to cv
dataset_real = dataset.loc[dataset['label'] == 0]
dataset_fake = dataset.loc[dataset['label'] == 1]
cv_real = self.cv.transform(dataset_real['text'])
cv_fake = self.cv.transform(dataset_fake['text'])
# Create TF-IDF models
tfidf_model_real = TfidfTransformer(use_idf=True).fit(cv_real)
tfidf_model_fake = TfidfTransformer(use_idf=True).fit(cv_fake)
# Get TF-IDF distribution
tfidf_real = tfidf_model_real.transform(cv_real)
tfidf_fake = tfidf_model_fake.transform(cv_fake)
# Get relevant words
topn = 600
sorted_items= helpers.sort_coo(tfidf_real.tocoo())
feature_names = self.cv.get_feature_names()
results, top_real_words = helpers.extract_topn_from_vector(feature_names, sorted_items, topn)
sorted_items= helpers.sort_coo(tfidf_fake.tocoo())
feature_names = self.cv.get_feature_names()
results, top_fake_words = helpers.extract_topn_from_vector(feature_names, sorted_items, topn)
# Store documents
self.document_real = top_real_words
self.document_fake = top_fake_words
self.dataset = dataset
def compute_similarity_dataset(self):
top_real_words_coded = self.cv.transform([' '.join(self.document_real)])
top_fake_words_coded = self.cv.transform([' '.join(self.document_fake)])
self.dataset['cos_fake'] = self.dataset['label']*0.000000000005
self.dataset['cos_real'] = self.dataset['label']*0.000000000005
for index, row in self.dataset.iterrows():
to_number = self.cv.transform([row['text']])
cosine_sim_fake = helpers.get_cosine_similarity(top_fake_words_coded, to_number)
cosine_sim_real = helpers.get_cosine_similarity(top_real_words_coded, to_number)
self.dataset.at[index,'cos_fake'] = cosine_sim_fake[0]
self.dataset.at[index,'cos_real'] = cosine_sim_real[0]
return self.dataset[['cos_real', 'cos_fake']].values
def update_cv(self, content):
self.cv = CountVectorizer()
all_text = self.document_fake + self.document_real + content
self.cv.fit_transform(all_text)
def predict(self, x, y):
dataset = | pd.DataFrame() | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull)
from pandas.compat import lrange
from pandas import compat
from pandas.util.testing import assert_series_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesApply(TestData, tm.TestCase):
def test_apply(self):
with np.errstate(all='ignore'):
assert_series_equal(self.ts.apply(np.sqrt), np.sqrt(self.ts))
# elementwise-apply
import math
assert_series_equal(self.ts.apply(math.exp), np.exp(self.ts))
# how to handle Series result, #2316
result = self.ts.apply(lambda x: Series(
[x, x ** 2], index=['x', 'x^2']))
expected = DataFrame({'x': self.ts, 'x^2': self.ts ** 2})
tm.assert_frame_equal(result, expected)
# empty series
s = Series(dtype=object, name='foo', index=pd.Index([], name='bar'))
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
# check all metadata (GH 9322)
self.assertIsNot(s, rs)
self.assertIs(s.index, rs.index)
self.assertEqual(s.dtype, rs.dtype)
self.assertEqual(s.name, rs.name)
# index but no data
s = Series(index=[1, 2, 3])
rs = s.apply(lambda x: x)
tm.assert_series_equal(s, rs)
def test_apply_same_length_inference_bug(self):
s = Series([1, 2])
f = lambda x: (x, x + 1)
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
s = Series([1, 2, 3])
result = s.apply(f)
expected = s.map(f)
assert_series_equal(result, expected)
def test_apply_dont_convert_dtype(self):
s = Series(np.random.randn(10))
f = lambda x: x if x > 0 else np.nan
result = s.apply(f, convert_dtype=False)
self.assertEqual(result.dtype, object)
def test_apply_args(self):
s = Series(['foo,bar'])
result = s.apply(str.split, args=(',', ))
self.assertEqual(result[0], ['foo', 'bar'])
tm.assertIsInstance(result[0], list)
def test_apply_box(self):
# ufunc will not be boxed. Same test cases as the test_map_box
vals = [pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns]')
# boxed value must be Timestamp instance
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_None', 'Timestamp_2_None'])
tm.assert_series_equal(res, exp)
vals = [pd.Timestamp('2011-01-01', tz='US/Eastern'),
pd.Timestamp('2011-01-02', tz='US/Eastern')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'datetime64[ns, US/Eastern]')
res = s.apply(lambda x: '{0}_{1}_{2}'.format(x.__class__.__name__,
x.day, x.tz))
exp = pd.Series(['Timestamp_1_US/Eastern', 'Timestamp_2_US/Eastern'])
tm.assert_series_equal(res, exp)
# timedelta
vals = [pd.Timedelta('1 days'), pd.Timedelta('2 days')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'timedelta64[ns]')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__, x.days))
exp = pd.Series(['Timedelta_1', 'Timedelta_2'])
tm.assert_series_equal(res, exp)
# period (object dtype, not boxed)
vals = [pd.Period('2011-01-01', freq='M'),
pd.Period('2011-01-02', freq='M')]
s = pd.Series(vals)
self.assertEqual(s.dtype, 'object')
res = s.apply(lambda x: '{0}_{1}'.format(x.__class__.__name__,
x.freqstr))
exp = pd.Series(['Period_M', 'Period_M'])
tm.assert_series_equal(res, exp)
def test_apply_datetimetz(self):
values = pd.date_range('2011-01-01', '2011-01-02',
freq='H').tz_localize('Asia/Tokyo')
s = pd.Series(values, name='XX')
result = s.apply(lambda x: x + pd.offsets.Day())
exp_values = pd.date_range('2011-01-02', '2011-01-03',
freq='H').tz_localize('Asia/Tokyo')
exp = pd.Series(exp_values, name='XX')
tm.assert_series_equal(result, exp)
# change dtype
# GH 14506 : Returned dtype changed from int32 to int64
result = s.apply(lambda x: x.hour)
exp = pd.Series(list(range(24)) + [0], name='XX', dtype=np.int64)
tm.assert_series_equal(result, exp)
# not vectorized
def f(x):
if not isinstance(x, pd.Timestamp):
raise ValueError
return str(x.tz)
result = s.map(f)
exp = pd.Series(['Asia/Tokyo'] * 25, name='XX')
tm.assert_series_equal(result, exp)
class TestSeriesMap(TestData, tm.TestCase):
def test_map(self):
index, data = tm.getMixedTypeDict()
source = Series(data['B'], index=data['C'])
target = Series(data['C'][:4], index=data['D'][:4])
merged = target.map(source)
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# input could be a dict
merged = target.map(source.to_dict())
for k, v in compat.iteritems(merged):
self.assertEqual(v, source[target[k]])
# function
result = self.ts.map(lambda x: x * 2)
self.assert_series_equal(result, self.ts * 2)
# GH 10324
a = Series([1, 2, 3, 4])
b = Series(["even", "odd", "even", "odd"], dtype="category")
c = Series(["even", "odd", "even", "odd"])
exp = Series(["odd", "even", "odd", np.nan], dtype="category")
self.assert_series_equal(a.map(b), exp)
exp = Series(["odd", "even", "odd", np.nan])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series([1, 2, 3, 4],
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series([1, 2, 3, 4], index=Index(['b', 'c', 'd', 'e']))
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 1, 2, 3])
self.assert_series_equal(a.map(c), exp)
a = Series(['a', 'b', 'c', 'd'])
b = Series(['B', 'C', 'D', 'E'], dtype='category',
index=pd.CategoricalIndex(['b', 'c', 'd', 'e']))
c = Series(['B', 'C', 'D', 'E'], index=Index(['b', 'c', 'd', 'e']))
exp = Series(pd.Categorical([np.nan, 'B', 'C', 'D'],
categories=['B', 'C', 'D', 'E']))
self.assert_series_equal(a.map(b), exp)
exp = Series([np.nan, 'B', 'C', 'D'])
self.assert_series_equal(a.map(c), exp)
def test_map_compat(self):
# related GH 8024
s = Series([True, True, False], index=[1, 2, 3])
result = s.map({True: 'foo', False: 'bar'})
expected = Series(['foo', 'foo', 'bar'], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_map_int(self):
left = Series({'a': 1., 'b': 2., 'c': 3., 'd': 4})
right = Series({1: 11, 2: 22, 3: 33})
self.assertEqual(left.dtype, np.float_)
self.assertTrue(issubclass(right.dtype.type, np.integer))
merged = left.map(right)
self.assertEqual(merged.dtype, np.float_)
self.assertTrue(isnull(merged['d']))
self.assertTrue(not isnull(merged['c']))
def test_map_type_inference(self):
s = Series(lrange(3))
s2 = s.map(lambda x: np.where(x == 0, 0, 1))
self.assertTrue(issubclass(s2.dtype.type, np.integer))
def test_map_decimal(self):
from decimal import Decimal
result = self.series.map(lambda x: Decimal(str(x)))
self.assertEqual(result.dtype, np.object_)
| tm.assertIsInstance(result[0], Decimal) | pandas.util.testing.assertIsInstance |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# This file contains dummy data for the model unit tests
import numpy as np
import pandas as pd
AIR_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 380.6292037661305,
1: 383.26004701147235,
2: 385.8905370924373,
3: 388.52067431512216,
4: 391.1504589893095,
5: 393.7798914284503,
6: 396.4089719496461,
7: 399.0377008736321,
8: 401.66607852475926,
9: 404.2941052309762,
10: 406.9217813238114,
11: 409.54910713835505,
12: 412.1760830132403,
13: 414.80270929062544,
14: 417.42898631617453,
15: 420.0549144390392,
16: 422.68049401183924,
17: 425.3057253906438,
18: 427.93060893495215,
19: 430.555145007674,
20: 433.1793339751107,
21: 435.8031762069345,
22: 438.42667207616984,
23: 441.0498219591729,
24: 443.6726262356114,
25: 446.2950852884452,
26: 448.91719950390507,
27: 451.53896927147304,
28: 454.1603949838614,
29: 456.78147703699216,
},
"fcst_upper": {
0: 565.2596851227581,
1: 567.9432096935082,
2: 570.6270874286351,
3: 573.3113180220422,
4: 575.9959011639468,
5: 578.680836540898,
6: 581.3661238357942,
7: 584.0517627279,
8: 586.7377528928648,
9: 589.4240940027398,
10: 592.1107857259966,
11: 594.797827727545,
12: 597.4852196687516,
13: 600.1729612074585,
14: 602.8610519980012,
15: 605.5494916912286,
16: 608.2382799345206,
17: 610.9274163718079,
18: 613.6169006435915,
19: 616.3067323869615,
20: 618.9969112356168,
21: 621.6874368198849,
22: 624.3783087667415,
23: 627.0695266998305,
24: 629.7610902394838,
25: 632.4529990027421,
26: 635.145252603374,
27: 637.8378506518982,
28: 640.5307927556019,
29: 643.2240785185628,
},
}
)
AIR_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("1961-01-01 00:00:00"),
1: pd.Timestamp("1961-02-01 00:00:00"),
2: pd.Timestamp("1961-03-01 00:00:00"),
3: pd.Timestamp("1961-04-01 00:00:00"),
4: pd.Timestamp("1961-05-01 00:00:00"),
5: pd.Timestamp("1961-06-01 00:00:00"),
6: pd.Timestamp("1961-07-01 00:00:00"),
7: pd.Timestamp("1961-08-01 00:00:00"),
8: pd.Timestamp("1961-09-01 00:00:00"),
9: pd.Timestamp("1961-10-01 00:00:00"),
10: pd.Timestamp("1961-11-01 00:00:00"),
11: pd.Timestamp("1961-12-01 00:00:00"),
12: pd.Timestamp("1962-01-01 00:00:00"),
13: pd.Timestamp("1962-02-01 00:00:00"),
14: pd.Timestamp("1962-03-01 00:00:00"),
15: pd.Timestamp("1962-04-01 00:00:00"),
16: pd.Timestamp("1962-05-01 00:00:00"),
17: pd.Timestamp("1962-06-01 00:00:00"),
18: pd.Timestamp("1962-07-01 00:00:00"),
19: pd.Timestamp("1962-08-01 00:00:00"),
20: pd.Timestamp("1962-09-01 00:00:00"),
21: pd.Timestamp("1962-10-01 00:00:00"),
22: pd.Timestamp("1962-11-01 00:00:00"),
23: pd.Timestamp("1962-12-01 00:00:00"),
24: pd.Timestamp("1963-01-01 00:00:00"),
25: pd.Timestamp("1963-02-01 00:00:00"),
26: pd.Timestamp("1963-03-01 00:00:00"),
27: pd.Timestamp("1963-04-01 00:00:00"),
28: pd.Timestamp("1963-05-01 00:00:00"),
29: pd.Timestamp("1963-06-01 00:00:00"),
},
"fcst": {
0: 472.9444444444443,
1: 475.60162835249025,
2: 478.2588122605362,
3: 480.9159961685822,
4: 483.57318007662815,
5: 486.23036398467417,
6: 488.88754789272014,
7: 491.5447318007661,
8: 494.20191570881207,
9: 496.85909961685803,
10: 499.516283524904,
11: 502.17346743295,
12: 504.830651340996,
13: 507.48783524904195,
14: 510.1450191570879,
15: 512.8022030651339,
16: 515.4593869731799,
17: 518.1165708812258,
18: 520.7737547892718,
19: 523.4309386973177,
20: 526.0881226053638,
21: 528.7453065134097,
22: 531.4024904214557,
23: 534.0596743295017,
24: 536.7168582375476,
25: 539.3740421455936,
26: 542.0312260536396,
27: 544.6884099616856,
28: 547.3455938697316,
29: 550.0027777777775,
},
"fcst_lower": {
0: 351.01805478037915,
1: 353.64044896268456,
2: 356.2623766991775,
3: 358.883838394139,
4: 361.50483445671773,
5: 364.12536530090745,
6: 366.74543134552374,
7: 369.3650330141812,
8: 371.98417073526997,
9: 374.6028449419319,
10: 377.2210560720369,
11: 379.83880456815905,
12: 382.45609087755207,
13: 385.07291545212513,
14: 387.68927874841813,
15: 390.3051812275768,
16: 392.92062335532785,
17: 395.5356056019535,
18: 398.15012844226646,
19: 400.764192355584,
20: 403.37779782570226,
21: 405.99094534087044,
22: 408.60363539376465,
23: 411.2158684814615,
24: 413.82764510541136,
25: 416.4389657714128,
26: 419.04983098958445,
27: 421.66024127433906,
28: 424.2701971443558,
29: 426.8796991225531,
},
"fcst_upper": {
0: 594.8708341085095,
1: 597.562807742296,
2: 600.255247821895,
3: 602.9481539430253,
4: 605.6415256965386,
5: 608.3353626684409,
6: 611.0296644399166,
7: 613.724430587351,
8: 616.4196606823541,
9: 619.1153542917842,
10: 621.8115109777711,
11: 624.508130297741,
12: 627.2052118044398,
13: 629.9027550459588,
14: 632.6007595657577,
15: 635.299224902691,
16: 637.998150591032,
17: 640.6975361604982,
18: 643.3973811362772,
19: 646.0976850390515,
20: 648.7984473850253,
21: 651.4996676859489,
22: 654.2013454491467,
23: 656.903480177542,
24: 659.6060713696838,
25: 662.3091185197744,
26: 665.0126211176946,
27: 667.716578649032,
28: 670.4209905951075,
29: 673.1258564330019,
},
}
)
PEYTON_FCST_LINEAR_95 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 7.055970485245664,
1: 7.056266316358524,
2: 7.056561800026597,
3: 7.056856936297079,
4: 7.057151725217398,
5: 7.05744616683524,
6: 7.057740261198534,
7: 7.058034008355445,
8: 7.058327408354395,
9: 7.058620461244044,
10: 7.0589131670733005,
11: 7.059205525891312,
12: 7.059497537747475,
13: 7.059789202691431,
14: 7.0600805207730595,
15: 7.060371492042489,
16: 7.060662116550093,
17: 7.060952394346479,
18: 7.06124232548251,
19: 7.0615319100092835,
20: 7.061821147978145,
21: 7.062110039440677,
22: 7.062398584448709,
23: 7.062686783054313,
24: 7.0629746353098,
25: 7.063262141267724,
26: 7.063549300980883,
27: 7.063836114502315,
28: 7.0641225818852975,
29: 7.064408703183352,
},
"fcst_upper": {
0: 9.903278969069254,
1: 9.903703030365794,
2: 9.90412743910712,
3: 9.904552195246042,
4: 9.904977298735123,
5: 9.90540274952668,
6: 9.90582854757279,
7: 9.906254692825279,
8: 9.90668118523573,
9: 9.90710802475548,
10: 9.907535211335626,
11: 9.907962744927016,
12: 9.908390625480251,
13: 9.9088188529457,
14: 9.90924742727347,
15: 9.909676348413441,
16: 9.91010561631524,
17: 9.910535230928254,
18: 9.910965192201623,
19: 9.91139550008425,
20: 9.91182615452479,
21: 9.912257155471659,
22: 9.912688502873028,
23: 9.913120196676825,
24: 9.91355223683074,
25: 9.913984623282214,
26: 9.914417355978456,
27: 9.914850434866427,
28: 9.915283859892844,
29: 9.91571763100419,
},
}
)
PEYTON_FCST_LINEAR_99 = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2013-05-01 00:00:00"),
1: pd.Timestamp("2013-05-02 00:00:00"),
2: pd.Timestamp("2013-05-03 00:00:00"),
3: pd.Timestamp("2013-05-04 00:00:00"),
4: pd.Timestamp("2013-05-05 00:00:00"),
5: pd.Timestamp("2013-05-06 00:00:00"),
6: pd.Timestamp("2013-05-07 00:00:00"),
7: pd.Timestamp("2013-05-08 00:00:00"),
8: pd.Timestamp("2013-05-09 00:00:00"),
9: pd.Timestamp("2013-05-10 00:00:00"),
10: pd.Timestamp("2013-05-11 00:00:00"),
11: pd.Timestamp("2013-05-12 00:00:00"),
12: pd.Timestamp("2013-05-13 00:00:00"),
13: pd.Timestamp("2013-05-14 00:00:00"),
14: pd.Timestamp("2013-05-15 00:00:00"),
15: pd.Timestamp("2013-05-16 00:00:00"),
16: pd.Timestamp("2013-05-17 00:00:00"),
17: pd.Timestamp("2013-05-18 00:00:00"),
18: pd.Timestamp("2013-05-19 00:00:00"),
19: pd.Timestamp("2013-05-20 00:00:00"),
20: pd.Timestamp("2013-05-21 00:00:00"),
21: pd.Timestamp("2013-05-22 00:00:00"),
22: pd.Timestamp("2013-05-23 00:00:00"),
23: pd.Timestamp("2013-05-24 00:00:00"),
24: pd.Timestamp("2013-05-25 00:00:00"),
25: pd.Timestamp("2013-05-26 00:00:00"),
26: pd.Timestamp("2013-05-27 00:00:00"),
27: pd.Timestamp("2013-05-28 00:00:00"),
28: pd.Timestamp("2013-05-29 00:00:00"),
29: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.479624727157459,
1: 8.479984673362159,
2: 8.480344619566859,
3: 8.48070456577156,
4: 8.48106451197626,
5: 8.48142445818096,
6: 8.481784404385662,
7: 8.482144350590362,
8: 8.482504296795062,
9: 8.482864242999762,
10: 8.483224189204464,
11: 8.483584135409163,
12: 8.483944081613863,
13: 8.484304027818565,
14: 8.484663974023265,
15: 8.485023920227965,
16: 8.485383866432667,
17: 8.485743812637367,
18: 8.486103758842066,
19: 8.486463705046766,
20: 8.486823651251468,
21: 8.487183597456168,
22: 8.487543543660868,
23: 8.48790348986557,
24: 8.48826343607027,
25: 8.48862338227497,
26: 8.48898332847967,
27: 8.489343274684371,
28: 8.489703220889071,
29: 8.490063167093771,
},
"fcst_lower": {
0: 6.605000045325637,
1: 6.605275566724015,
2: 6.605550630617649,
3: 6.605825237068679,
4: 6.606099386139563,
5: 6.60637307789309,
6: 6.606646312392368,
7: 6.606919089700827,
8: 6.607191409882221,
9: 6.607463273000626,
10: 6.607734679120443,
11: 6.608005628306389,
12: 6.608276120623508,
13: 6.608546156137163,
14: 6.608815734913038,
15: 6.609084857017139,
16: 6.609353522515795,
17: 6.609621731475649,
18: 6.609889483963668,
19: 6.610156780047143,
20: 6.61042361979368,
21: 6.610690003271204,
22: 6.610955930547961,
23: 6.611221401692519,
24: 6.611486416773756,
25: 6.611750975860878,
26: 6.612015079023405,
27: 6.612278726331177,
28: 6.612541917854348,
29: 6.612804653663393,
},
"fcst_upper": {
0: 10.354249408989281,
1: 10.354693780000304,
2: 10.355138608516068,
3: 10.355583894474442,
4: 10.356029637812957,
5: 10.35647583846883,
6: 10.356922496378955,
7: 10.357369611479896,
8: 10.357817183707903,
9: 10.358265212998898,
10: 10.358713699288483,
11: 10.359162642511938,
12: 10.359612042604219,
13: 10.360061899499968,
14: 10.360512213133493,
15: 10.36096298343879,
16: 10.361414210349539,
17: 10.361865893799084,
18: 10.362318033720465,
19: 10.36277063004639,
20: 10.363223682709256,
21: 10.363677191641132,
22: 10.364131156773775,
23: 10.364585578038621,
24: 10.365040455366783,
25: 10.365495788689062,
26: 10.365951577935935,
27: 10.366407823037564,
28: 10.366864523923793,
29: 10.36732168052415,
},
}
)
PEYTON_FCST_LINEAR_INVALID_ZERO = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: pd.Timestamp("2013-05-08 00:00:00"),
372: pd.Timestamp("2013-05-09 00:00:00"),
373: pd.Timestamp("2013-05-10 00:00:00"),
374: pd.Timestamp("2013-05-11 00:00:00"),
375: pd.Timestamp("2013-05-12 00:00:00"),
376: pd.Timestamp("2013-05-13 00:00:00"),
377: pd.Timestamp("2013-05-14 00:00:00"),
378: pd.Timestamp("2013-05-15 00:00:00"),
379: pd.Timestamp("2013-05-16 00:00:00"),
380: pd.Timestamp("2013-05-17 00:00:00"),
381: pd.Timestamp("2013-05-18 00:00:00"),
382: pd.Timestamp("2013-05-19 00:00:00"),
383: pd.Timestamp("2013-05-20 00:00:00"),
384: pd.Timestamp("2013-05-21 00:00:00"),
385: pd.Timestamp("2013-05-22 00:00:00"),
386: pd.Timestamp("2013-05-23 00:00:00"),
387: pd.Timestamp("2013-05-24 00:00:00"),
388: pd.Timestamp("2013-05-25 00:00:00"),
389: pd.Timestamp("2013-05-26 00:00:00"),
390: pd.Timestamp("2013-05-27 00:00:00"),
391: pd.Timestamp("2013-05-28 00:00:00"),
392: pd.Timestamp("2013-05-29 00:00:00"),
393: pd.Timestamp("2013-05-30 00:00:00"),
},
"fcst": {
0: 8.348604308646497,
1: 8.348964254851197,
2: 8.349324201055898,
3: 8.349684147260598,
4: 8.350044093465298,
5: 8.350404039669998,
6: 8.3507639858747,
7: 8.3511239320794,
8: 8.3514838782841,
9: 8.351843824488801,
10: 8.352203770693501,
11: 8.352563716898201,
12: 8.352923663102903,
13: 8.353283609307603,
14: 8.353643555512303,
15: 8.354003501717003,
16: 8.354363447921704,
17: 8.354723394126404,
18: 8.355083340331104,
19: 8.355443286535806,
20: 8.355803232740506,
21: 8.356163178945206,
22: 8.356523125149906,
23: 8.356883071354607,
24: 8.357243017559307,
25: 8.357602963764007,
26: 8.357962909968709,
27: 8.358322856173409,
28: 8.358682802378109,
29: 8.35904274858281,
30: 8.35940269478751,
31: 8.35976264099221,
32: 8.36012258719691,
33: 8.360482533401612,
34: 8.360842479606312,
35: 8.361202425811012,
36: 8.361562372015714,
37: 8.361922318220413,
38: 8.362282264425113,
39: 8.362642210629813,
40: 8.363002156834515,
41: 8.363362103039215,
42: 8.363722049243915,
43: 8.364081995448617,
44: 8.364441941653316,
45: 8.364801887858016,
46: 8.365161834062716,
47: 8.365521780267418,
48: 8.365881726472118,
49: 8.366241672676818,
50: 8.36660161888152,
51: 8.36696156508622,
52: 8.36732151129092,
53: 8.367681457495621,
54: 8.368041403700321,
55: 8.368401349905021,
56: 8.36876129610972,
57: 8.369121242314423,
58: 8.369481188519122,
59: 8.369841134723822,
60: 8.370201080928524,
61: 8.370561027133224,
62: 8.370920973337924,
63: 8.371280919542624,
64: 8.371640865747326,
65: 8.372000811952026,
66: 8.372360758156725,
67: 8.372720704361427,
68: 8.373080650566127,
69: 8.373440596770827,
70: 8.373800542975529,
71: 8.374160489180229,
72: 8.374520435384929,
73: 8.374880381589628,
74: 8.37524032779433,
75: 8.37560027399903,
76: 8.37596022020373,
77: 8.376320166408432,
78: 8.376680112613132,
79: 8.377040058817832,
80: 8.377400005022531,
81: 8.377759951227233,
82: 8.378119897431933,
83: 8.378479843636633,
84: 8.378839789841335,
85: 8.379199736046035,
86: 8.379559682250735,
87: 8.379919628455436,
88: 8.380279574660136,
89: 8.380639520864836,
90: 8.380999467069536,
91: 8.381359413274238,
92: 8.381719359478938,
93: 8.382079305683638,
94: 8.38243925188834,
95: 8.38279919809304,
96: 8.38315914429774,
97: 8.383519090502439,
98: 8.38387903670714,
99: 8.38423898291184,
100: 8.38459892911654,
101: 8.384958875321242,
102: 8.385318821525942,
103: 8.385678767730642,
104: 8.386038713935344,
105: 8.386398660140044,
106: 8.386758606344744,
107: 8.387118552549444,
108: 8.387478498754145,
109: 8.387838444958845,
110: 8.388198391163545,
111: 8.388558337368247,
112: 8.388918283572947,
113: 8.389278229777647,
114: 8.389638175982347,
115: 8.389998122187048,
116: 8.390358068391748,
117: 8.390718014596448,
118: 8.39107796080115,
119: 8.39143790700585,
120: 8.39179785321055,
121: 8.392157799415251,
122: 8.392517745619951,
123: 8.392877691824651,
124: 8.393237638029351,
125: 8.393597584234053,
126: 8.393957530438753,
127: 8.394317476643453,
128: 8.394677422848154,
129: 8.395037369052854,
130: 8.395397315257554,
131: 8.395757261462254,
132: 8.396117207666956,
133: 8.396477153871656,
134: 8.396837100076356,
135: 8.397197046281057,
136: 8.397556992485757,
137: 8.397916938690457,
138: 8.398276884895157,
139: 8.398636831099859,
140: 8.398996777304559,
141: 8.399356723509259,
142: 8.39971666971396,
143: 8.40007661591866,
144: 8.40043656212336,
145: 8.400796508328062,
146: 8.401156454532762,
147: 8.401516400737462,
148: 8.401876346942162,
149: 8.402236293146863,
150: 8.402596239351563,
151: 8.402956185556263,
152: 8.403316131760965,
153: 8.403676077965665,
154: 8.404036024170365,
155: 8.404395970375065,
156: 8.404755916579767,
157: 8.405115862784466,
158: 8.405475808989166,
159: 8.405835755193868,
160: 8.406195701398568,
161: 8.406555647603268,
162: 8.40691559380797,
163: 8.40727554001267,
164: 8.40763548621737,
165: 8.40799543242207,
166: 8.408355378626771,
167: 8.408715324831471,
168: 8.409075271036171,
169: 8.409435217240873,
170: 8.409795163445573,
171: 8.410155109650272,
172: 8.410515055854972,
173: 8.410875002059674,
174: 8.411234948264374,
175: 8.411594894469074,
176: 8.411954840673776,
177: 8.412314786878476,
178: 8.412674733083175,
179: 8.413034679287877,
180: 8.413394625492577,
181: 8.413754571697277,
182: 8.414114517901977,
183: 8.414474464106679,
184: 8.414834410311379,
185: 8.415194356516078,
186: 8.41555430272078,
187: 8.41591424892548,
188: 8.41627419513018,
189: 8.41663414133488,
190: 8.416994087539582,
191: 8.417354033744282,
192: 8.417713979948982,
193: 8.418073926153683,
194: 8.418433872358383,
195: 8.418793818563083,
196: 8.419153764767785,
197: 8.419513710972485,
198: 8.419873657177185,
199: 8.420233603381885,
200: 8.420593549586586,
201: 8.420953495791286,
202: 8.421313441995986,
203: 8.421673388200688,
204: 8.422033334405388,
205: 8.422393280610088,
206: 8.422753226814788,
207: 8.42311317301949,
208: 8.42347311922419,
209: 8.423833065428889,
210: 8.42419301163359,
211: 8.42455295783829,
212: 8.42491290404299,
213: 8.42527285024769,
214: 8.425632796452392,
215: 8.425992742657092,
216: 8.426352688861792,
217: 8.426712635066494,
218: 8.427072581271194,
219: 8.427432527475894,
220: 8.427792473680595,
221: 8.428152419885295,
222: 8.428512366089995,
223: 8.428872312294695,
224: 8.429232258499397,
225: 8.429592204704097,
226: 8.429952150908797,
227: 8.430312097113498,
228: 8.430672043318198,
229: 8.431031989522898,
230: 8.431391935727598,
231: 8.4317518819323,
232: 8.432111828137,
233: 8.4324717743417,
234: 8.432831720546401,
235: 8.433191666751101,
236: 8.433551612955801,
237: 8.433911559160503,
238: 8.434271505365203,
239: 8.434631451569903,
240: 8.434991397774603,
241: 8.435351343979304,
242: 8.435711290184004,
243: 8.436071236388704,
244: 8.436431182593406,
245: 8.436791128798106,
246: 8.437151075002806,
247: 8.437511021207506,
248: 8.437870967412207,
249: 8.438230913616907,
250: 8.438590859821607,
251: 8.438950806026309,
252: 8.439310752231009,
253: 8.439670698435709,
254: 8.44003064464041,
255: 8.44039059084511,
256: 8.44075053704981,
257: 8.44111048325451,
258: 8.441470429459212,
259: 8.441830375663912,
260: 8.442190321868612,
261: 8.442550268073314,
262: 8.442910214278013,
263: 8.443270160482713,
264: 8.443630106687413,
265: 8.443990052892115,
266: 8.444349999096815,
267: 8.444709945301515,
268: 8.445069891506217,
269: 8.445429837710916,
270: 8.445789783915616,
271: 8.446149730120318,
272: 8.446509676325018,
273: 8.446869622529718,
274: 8.447229568734418,
275: 8.44758951493912,
276: 8.44794946114382,
277: 8.44830940734852,
278: 8.448669353553221,
279: 8.449029299757921,
280: 8.449389245962621,
281: 8.449749192167321,
282: 8.450109138372023,
283: 8.450469084576723,
284: 8.450829030781422,
285: 8.451188976986124,
286: 8.451548923190824,
287: 8.451908869395524,
288: 8.452268815600226,
289: 8.452628761804926,
290: 8.452988708009626,
291: 8.453348654214325,
292: 8.453708600419027,
293: 8.454068546623727,
294: 8.454428492828427,
295: 8.454788439033129,
296: 8.455148385237829,
297: 8.455508331442529,
298: 8.455868277647228,
299: 8.45622822385193,
300: 8.45658817005663,
301: 8.45694811626133,
302: 8.457308062466032,
303: 8.457668008670732,
304: 8.458027954875432,
305: 8.458387901080131,
306: 8.458747847284833,
307: 8.459107793489533,
308: 8.459467739694233,
309: 8.459827685898935,
310: 8.460187632103635,
311: 8.460547578308335,
312: 8.460907524513036,
313: 8.461267470717736,
314: 8.461627416922436,
315: 8.461987363127136,
316: 8.462347309331838,
317: 8.462707255536538,
318: 8.463067201741238,
319: 8.46342714794594,
320: 8.46378709415064,
321: 8.46414704035534,
322: 8.464506986560039,
323: 8.46486693276474,
324: 8.46522687896944,
325: 8.46558682517414,
326: 8.465946771378842,
327: 8.466306717583542,
328: 8.466666663788242,
329: 8.467026609992944,
330: 8.467386556197644,
331: 8.467746502402344,
332: 8.468106448607044,
333: 8.468466394811745,
334: 8.468826341016445,
335: 8.469186287221145,
336: 8.469546233425847,
337: 8.469906179630547,
338: 8.470266125835247,
339: 8.470626072039947,
340: 8.470986018244648,
341: 8.471345964449348,
342: 8.471705910654048,
343: 8.47206585685875,
344: 8.47242580306345,
345: 8.47278574926815,
346: 8.473145695472851,
347: 8.473505641677551,
348: 8.473865587882251,
349: 8.474225534086951,
350: 8.474585480291653,
351: 8.474945426496353,
352: 8.475305372701053,
353: 8.475665318905754,
354: 8.476025265110454,
355: 8.476385211315154,
356: 8.476745157519854,
357: 8.477105103724556,
358: 8.477465049929256,
359: 8.477824996133956,
360: 8.478184942338657,
361: 8.478544888543357,
362: 8.478904834748057,
363: 8.479264780952759,
364: 8.479624727157459,
365: 8.479984673362159,
366: 8.480344619566859,
367: 8.48070456577156,
368: 8.48106451197626,
369: 8.48142445818096,
370: 8.481784404385662,
371: 8.482144350590362,
372: 8.482504296795062,
373: 8.482864242999762,
374: 8.483224189204464,
375: 8.483584135409163,
376: 8.483944081613863,
377: 8.484304027818565,
378: 8.484663974023265,
379: 8.485023920227965,
380: 8.485383866432667,
381: 8.485743812637367,
382: 8.486103758842066,
383: 8.486463705046766,
384: 8.486823651251468,
385: 8.487183597456168,
386: 8.487543543660868,
387: 8.48790348986557,
388: 8.48826343607027,
389: 8.48862338227497,
390: 8.48898332847967,
391: 8.489343274684371,
392: 8.489703220889071,
393: 8.490063167093771,
},
"fcst_lower": {
0: -np.inf,
1: -np.inf,
2: -np.inf,
3: -np.inf,
4: -np.inf,
5: -np.inf,
6: -np.inf,
7: -np.inf,
8: -np.inf,
9: -np.inf,
10: -np.inf,
11: -np.inf,
12: -np.inf,
13: -np.inf,
14: -np.inf,
15: -np.inf,
16: -np.inf,
17: -np.inf,
18: -np.inf,
19: -np.inf,
20: -np.inf,
21: -np.inf,
22: -np.inf,
23: -np.inf,
24: -np.inf,
25: -np.inf,
26: -np.inf,
27: -np.inf,
28: -np.inf,
29: -np.inf,
30: -np.inf,
31: -np.inf,
32: -np.inf,
33: -np.inf,
34: -np.inf,
35: -np.inf,
36: -np.inf,
37: -np.inf,
38: -np.inf,
39: -np.inf,
40: -np.inf,
41: -np.inf,
42: -np.inf,
43: -np.inf,
44: -np.inf,
45: -np.inf,
46: -np.inf,
47: -np.inf,
48: -np.inf,
49: -np.inf,
50: -np.inf,
51: -np.inf,
52: -np.inf,
53: -np.inf,
54: -np.inf,
55: -np.inf,
56: -np.inf,
57: -np.inf,
58: -np.inf,
59: -np.inf,
60: -np.inf,
61: -np.inf,
62: -np.inf,
63: -np.inf,
64: -np.inf,
65: -np.inf,
66: -np.inf,
67: -np.inf,
68: -np.inf,
69: -np.inf,
70: -np.inf,
71: -np.inf,
72: -np.inf,
73: -np.inf,
74: -np.inf,
75: -np.inf,
76: -np.inf,
77: -np.inf,
78: -np.inf,
79: -np.inf,
80: -np.inf,
81: -np.inf,
82: -np.inf,
83: -np.inf,
84: -np.inf,
85: -np.inf,
86: -np.inf,
87: -np.inf,
88: -np.inf,
89: -np.inf,
90: -np.inf,
91: -np.inf,
92: -np.inf,
93: -np.inf,
94: -np.inf,
95: -np.inf,
96: -np.inf,
97: -np.inf,
98: -np.inf,
99: -np.inf,
100: -np.inf,
101: -np.inf,
102: -np.inf,
103: -np.inf,
104: -np.inf,
105: -np.inf,
106: -np.inf,
107: -np.inf,
108: -np.inf,
109: -np.inf,
110: -np.inf,
111: -np.inf,
112: -np.inf,
113: -np.inf,
114: -np.inf,
115: -np.inf,
116: -np.inf,
117: -np.inf,
118: -np.inf,
119: -np.inf,
120: -np.inf,
121: -np.inf,
122: -np.inf,
123: -np.inf,
124: -np.inf,
125: -np.inf,
126: -np.inf,
127: -np.inf,
128: -np.inf,
129: -np.inf,
130: -np.inf,
131: -np.inf,
132: -np.inf,
133: -np.inf,
134: -np.inf,
135: -np.inf,
136: -np.inf,
137: -np.inf,
138: -np.inf,
139: -np.inf,
140: -np.inf,
141: -np.inf,
142: -np.inf,
143: -np.inf,
144: -np.inf,
145: -np.inf,
146: -np.inf,
147: -np.inf,
148: -np.inf,
149: -np.inf,
150: -np.inf,
151: -np.inf,
152: -np.inf,
153: -np.inf,
154: -np.inf,
155: -np.inf,
156: -np.inf,
157: -np.inf,
158: -np.inf,
159: -np.inf,
160: -np.inf,
161: -np.inf,
162: -np.inf,
163: -np.inf,
164: -np.inf,
165: -np.inf,
166: -np.inf,
167: -np.inf,
168: -np.inf,
169: -np.inf,
170: -np.inf,
171: -np.inf,
172: -np.inf,
173: -np.inf,
174: -np.inf,
175: -np.inf,
176: -np.inf,
177: -np.inf,
178: -np.inf,
179: -np.inf,
180: -np.inf,
181: -np.inf,
182: -np.inf,
183: -np.inf,
184: -np.inf,
185: -np.inf,
186: -np.inf,
187: -np.inf,
188: -np.inf,
189: -np.inf,
190: -np.inf,
191: -np.inf,
192: -np.inf,
193: -np.inf,
194: -np.inf,
195: -np.inf,
196: -np.inf,
197: -np.inf,
198: -np.inf,
199: -np.inf,
200: -np.inf,
201: -np.inf,
202: -np.inf,
203: -np.inf,
204: -np.inf,
205: -np.inf,
206: -np.inf,
207: -np.inf,
208: -np.inf,
209: -np.inf,
210: -np.inf,
211: -np.inf,
212: -np.inf,
213: -np.inf,
214: -np.inf,
215: -np.inf,
216: -np.inf,
217: -np.inf,
218: -np.inf,
219: -np.inf,
220: -np.inf,
221: -np.inf,
222: -np.inf,
223: -np.inf,
224: -np.inf,
225: -np.inf,
226: -np.inf,
227: -np.inf,
228: -np.inf,
229: -np.inf,
230: -np.inf,
231: -np.inf,
232: -np.inf,
233: -np.inf,
234: -np.inf,
235: -np.inf,
236: -np.inf,
237: -np.inf,
238: -np.inf,
239: -np.inf,
240: -np.inf,
241: -np.inf,
242: -np.inf,
243: -np.inf,
244: -np.inf,
245: -np.inf,
246: -np.inf,
247: -np.inf,
248: -np.inf,
249: -np.inf,
250: -np.inf,
251: -np.inf,
252: -np.inf,
253: -np.inf,
254: -np.inf,
255: -np.inf,
256: -np.inf,
257: -np.inf,
258: -np.inf,
259: -np.inf,
260: -np.inf,
261: -np.inf,
262: -np.inf,
263: -np.inf,
264: -np.inf,
265: -np.inf,
266: -np.inf,
267: -np.inf,
268: -np.inf,
269: -np.inf,
270: -np.inf,
271: -np.inf,
272: -np.inf,
273: -np.inf,
274: -np.inf,
275: -np.inf,
276: -np.inf,
277: -np.inf,
278: -np.inf,
279: -np.inf,
280: -np.inf,
281: -np.inf,
282: -np.inf,
283: -np.inf,
284: -np.inf,
285: -np.inf,
286: -np.inf,
287: -np.inf,
288: -np.inf,
289: -np.inf,
290: -np.inf,
291: -np.inf,
292: -np.inf,
293: -np.inf,
294: -np.inf,
295: -np.inf,
296: -np.inf,
297: -np.inf,
298: -np.inf,
299: -np.inf,
300: -np.inf,
301: -np.inf,
302: -np.inf,
303: -np.inf,
304: -np.inf,
305: -np.inf,
306: -np.inf,
307: -np.inf,
308: -np.inf,
309: -np.inf,
310: -np.inf,
311: -np.inf,
312: -np.inf,
313: -np.inf,
314: -np.inf,
315: -np.inf,
316: -np.inf,
317: -np.inf,
318: -np.inf,
319: -np.inf,
320: -np.inf,
321: -np.inf,
322: -np.inf,
323: -np.inf,
324: -np.inf,
325: -np.inf,
326: -np.inf,
327: -np.inf,
328: -np.inf,
329: -np.inf,
330: -np.inf,
331: -np.inf,
332: -np.inf,
333: -np.inf,
334: -np.inf,
335: -np.inf,
336: -np.inf,
337: -np.inf,
338: -np.inf,
339: -np.inf,
340: -np.inf,
341: -np.inf,
342: -np.inf,
343: -np.inf,
344: -np.inf,
345: -np.inf,
346: -np.inf,
347: -np.inf,
348: -np.inf,
349: -np.inf,
350: -np.inf,
351: -np.inf,
352: -np.inf,
353: -np.inf,
354: -np.inf,
355: -np.inf,
356: -np.inf,
357: -np.inf,
358: -np.inf,
359: -np.inf,
360: -np.inf,
361: -np.inf,
362: -np.inf,
363: -np.inf,
364: -np.inf,
365: -np.inf,
366: -np.inf,
367: -np.inf,
368: -np.inf,
369: -np.inf,
370: -np.inf,
371: -np.inf,
372: -np.inf,
373: -np.inf,
374: -np.inf,
375: -np.inf,
376: -np.inf,
377: -np.inf,
378: -np.inf,
379: -np.inf,
380: -np.inf,
381: -np.inf,
382: -np.inf,
383: -np.inf,
384: -np.inf,
385: -np.inf,
386: -np.inf,
387: -np.inf,
388: -np.inf,
389: -np.inf,
390: -np.inf,
391: -np.inf,
392: -np.inf,
393: -np.inf,
},
"fcst_upper": {
0: np.inf,
1: np.inf,
2: np.inf,
3: np.inf,
4: np.inf,
5: np.inf,
6: np.inf,
7: np.inf,
8: np.inf,
9: np.inf,
10: np.inf,
11: np.inf,
12: np.inf,
13: np.inf,
14: np.inf,
15: np.inf,
16: np.inf,
17: np.inf,
18: np.inf,
19: np.inf,
20: np.inf,
21: np.inf,
22: np.inf,
23: np.inf,
24: np.inf,
25: np.inf,
26: np.inf,
27: np.inf,
28: np.inf,
29: np.inf,
30: np.inf,
31: np.inf,
32: np.inf,
33: np.inf,
34: np.inf,
35: np.inf,
36: np.inf,
37: np.inf,
38: np.inf,
39: np.inf,
40: np.inf,
41: np.inf,
42: np.inf,
43: np.inf,
44: np.inf,
45: np.inf,
46: np.inf,
47: np.inf,
48: np.inf,
49: np.inf,
50: np.inf,
51: np.inf,
52: np.inf,
53: np.inf,
54: np.inf,
55: np.inf,
56: np.inf,
57: np.inf,
58: np.inf,
59: np.inf,
60: np.inf,
61: np.inf,
62: np.inf,
63: np.inf,
64: np.inf,
65: np.inf,
66: np.inf,
67: np.inf,
68: np.inf,
69: np.inf,
70: np.inf,
71: np.inf,
72: np.inf,
73: np.inf,
74: np.inf,
75: np.inf,
76: np.inf,
77: np.inf,
78: np.inf,
79: np.inf,
80: np.inf,
81: np.inf,
82: np.inf,
83: np.inf,
84: np.inf,
85: np.inf,
86: np.inf,
87: np.inf,
88: np.inf,
89: np.inf,
90: np.inf,
91: np.inf,
92: np.inf,
93: np.inf,
94: np.inf,
95: np.inf,
96: np.inf,
97: np.inf,
98: np.inf,
99: np.inf,
100: np.inf,
101: np.inf,
102: np.inf,
103: np.inf,
104: np.inf,
105: np.inf,
106: np.inf,
107: np.inf,
108: np.inf,
109: np.inf,
110: np.inf,
111: np.inf,
112: np.inf,
113: np.inf,
114: np.inf,
115: np.inf,
116: np.inf,
117: np.inf,
118: np.inf,
119: np.inf,
120: np.inf,
121: np.inf,
122: np.inf,
123: np.inf,
124: np.inf,
125: np.inf,
126: np.inf,
127: np.inf,
128: np.inf,
129: np.inf,
130: np.inf,
131: np.inf,
132: np.inf,
133: np.inf,
134: np.inf,
135: np.inf,
136: np.inf,
137: np.inf,
138: np.inf,
139: np.inf,
140: np.inf,
141: np.inf,
142: np.inf,
143: np.inf,
144: np.inf,
145: np.inf,
146: np.inf,
147: np.inf,
148: np.inf,
149: np.inf,
150: np.inf,
151: np.inf,
152: np.inf,
153: np.inf,
154: np.inf,
155: np.inf,
156: np.inf,
157: np.inf,
158: np.inf,
159: np.inf,
160: np.inf,
161: np.inf,
162: np.inf,
163: np.inf,
164: np.inf,
165: np.inf,
166: np.inf,
167: np.inf,
168: np.inf,
169: np.inf,
170: np.inf,
171: np.inf,
172: np.inf,
173: np.inf,
174: np.inf,
175: np.inf,
176: np.inf,
177: np.inf,
178: np.inf,
179: np.inf,
180: np.inf,
181: np.inf,
182: np.inf,
183: np.inf,
184: np.inf,
185: np.inf,
186: np.inf,
187: np.inf,
188: np.inf,
189: np.inf,
190: np.inf,
191: np.inf,
192: np.inf,
193: np.inf,
194: np.inf,
195: np.inf,
196: np.inf,
197: np.inf,
198: np.inf,
199: np.inf,
200: np.inf,
201: np.inf,
202: np.inf,
203: np.inf,
204: np.inf,
205: np.inf,
206: np.inf,
207: np.inf,
208: np.inf,
209: np.inf,
210: np.inf,
211: np.inf,
212: np.inf,
213: np.inf,
214: np.inf,
215: np.inf,
216: np.inf,
217: np.inf,
218: np.inf,
219: np.inf,
220: np.inf,
221: np.inf,
222: np.inf,
223: np.inf,
224: np.inf,
225: np.inf,
226: np.inf,
227: np.inf,
228: np.inf,
229: np.inf,
230: np.inf,
231: np.inf,
232: np.inf,
233: np.inf,
234: np.inf,
235: np.inf,
236: np.inf,
237: np.inf,
238: np.inf,
239: np.inf,
240: np.inf,
241: np.inf,
242: np.inf,
243: np.inf,
244: np.inf,
245: np.inf,
246: np.inf,
247: np.inf,
248: np.inf,
249: np.inf,
250: np.inf,
251: np.inf,
252: np.inf,
253: np.inf,
254: np.inf,
255: np.inf,
256: np.inf,
257: np.inf,
258: np.inf,
259: np.inf,
260: np.inf,
261: np.inf,
262: np.inf,
263: np.inf,
264: np.inf,
265: np.inf,
266: np.inf,
267: np.inf,
268: np.inf,
269: np.inf,
270: np.inf,
271: np.inf,
272: np.inf,
273: np.inf,
274: np.inf,
275: np.inf,
276: np.inf,
277: np.inf,
278: np.inf,
279: np.inf,
280: np.inf,
281: np.inf,
282: np.inf,
283: np.inf,
284: np.inf,
285: np.inf,
286: np.inf,
287: np.inf,
288: np.inf,
289: np.inf,
290: np.inf,
291: np.inf,
292: np.inf,
293: np.inf,
294: np.inf,
295: np.inf,
296: np.inf,
297: np.inf,
298: np.inf,
299: np.inf,
300: np.inf,
301: np.inf,
302: np.inf,
303: np.inf,
304: np.inf,
305: np.inf,
306: np.inf,
307: np.inf,
308: np.inf,
309: np.inf,
310: np.inf,
311: np.inf,
312: np.inf,
313: np.inf,
314: np.inf,
315: np.inf,
316: np.inf,
317: np.inf,
318: np.inf,
319: np.inf,
320: np.inf,
321: np.inf,
322: np.inf,
323: np.inf,
324: np.inf,
325: np.inf,
326: np.inf,
327: np.inf,
328: np.inf,
329: np.inf,
330: np.inf,
331: np.inf,
332: np.inf,
333: np.inf,
334: np.inf,
335: np.inf,
336: np.inf,
337: np.inf,
338: np.inf,
339: np.inf,
340: np.inf,
341: np.inf,
342: np.inf,
343: np.inf,
344: np.inf,
345: np.inf,
346: np.inf,
347: np.inf,
348: np.inf,
349: np.inf,
350: np.inf,
351: np.inf,
352: np.inf,
353: np.inf,
354: np.inf,
355: np.inf,
356: np.inf,
357: np.inf,
358: np.inf,
359: np.inf,
360: np.inf,
361: np.inf,
362: np.inf,
363: np.inf,
364: np.inf,
365: np.inf,
366: np.inf,
367: np.inf,
368: np.inf,
369: np.inf,
370: np.inf,
371: np.inf,
372: np.inf,
373: np.inf,
374: np.inf,
375: np.inf,
376: np.inf,
377: np.inf,
378: np.inf,
379: np.inf,
380: np.inf,
381: np.inf,
382: np.inf,
383: np.inf,
384: np.inf,
385: np.inf,
386: np.inf,
387: np.inf,
388: np.inf,
389: np.inf,
390: np.inf,
391: np.inf,
392: np.inf,
393: np.inf,
},
}
)
PEYTON_FCST_LINEAR_INVALID_NEG_ONE = pd.DataFrame(
{
"time": {
0: pd.Timestamp("2012-05-02 00:00:00"),
1: pd.Timestamp("2012-05-03 00:00:00"),
2: pd.Timestamp("2012-05-04 00:00:00"),
3: pd.Timestamp("2012-05-05 00:00:00"),
4: pd.Timestamp("2012-05-06 00:00:00"),
5: pd.Timestamp("2012-05-07 00:00:00"),
6: pd.Timestamp("2012-05-08 00:00:00"),
7: pd.Timestamp("2012-05-09 00:00:00"),
8: pd.Timestamp("2012-05-10 00:00:00"),
9: pd.Timestamp("2012-05-11 00:00:00"),
10: pd.Timestamp("2012-05-12 00:00:00"),
11: pd.Timestamp("2012-05-13 00:00:00"),
12: pd.Timestamp("2012-05-14 00:00:00"),
13: pd.Timestamp("2012-05-15 00:00:00"),
14: pd.Timestamp("2012-05-16 00:00:00"),
15: pd.Timestamp("2012-05-17 00:00:00"),
16: pd.Timestamp("2012-05-18 00:00:00"),
17: pd.Timestamp("2012-05-19 00:00:00"),
18: pd.Timestamp("2012-05-20 00:00:00"),
19: pd.Timestamp("2012-05-21 00:00:00"),
20: pd.Timestamp("2012-05-22 00:00:00"),
21: pd.Timestamp("2012-05-23 00:00:00"),
22: pd.Timestamp("2012-05-24 00:00:00"),
23: pd.Timestamp("2012-05-25 00:00:00"),
24: pd.Timestamp("2012-05-26 00:00:00"),
25: pd.Timestamp("2012-05-27 00:00:00"),
26: pd.Timestamp("2012-05-28 00:00:00"),
27: pd.Timestamp("2012-05-29 00:00:00"),
28: pd.Timestamp("2012-05-30 00:00:00"),
29: pd.Timestamp("2012-05-31 00:00:00"),
30: pd.Timestamp("2012-06-01 00:00:00"),
31: pd.Timestamp("2012-06-02 00:00:00"),
32: pd.Timestamp("2012-06-03 00:00:00"),
33: pd.Timestamp("2012-06-04 00:00:00"),
34: pd.Timestamp("2012-06-05 00:00:00"),
35: pd.Timestamp("2012-06-06 00:00:00"),
36: pd.Timestamp("2012-06-07 00:00:00"),
37: pd.Timestamp("2012-06-08 00:00:00"),
38: pd.Timestamp("2012-06-09 00:00:00"),
39: pd.Timestamp("2012-06-10 00:00:00"),
40: pd.Timestamp("2012-06-11 00:00:00"),
41: pd.Timestamp("2012-06-12 00:00:00"),
42: pd.Timestamp("2012-06-13 00:00:00"),
43: pd.Timestamp("2012-06-14 00:00:00"),
44: pd.Timestamp("2012-06-15 00:00:00"),
45: pd.Timestamp("2012-06-16 00:00:00"),
46: pd.Timestamp("2012-06-17 00:00:00"),
47: pd.Timestamp("2012-06-18 00:00:00"),
48: pd.Timestamp("2012-06-19 00:00:00"),
49: pd.Timestamp("2012-06-20 00:00:00"),
50: pd.Timestamp("2012-06-21 00:00:00"),
51: pd.Timestamp("2012-06-22 00:00:00"),
52: pd.Timestamp("2012-06-23 00:00:00"),
53: pd.Timestamp("2012-06-24 00:00:00"),
54: pd.Timestamp("2012-06-25 00:00:00"),
55: pd.Timestamp("2012-06-26 00:00:00"),
56: pd.Timestamp("2012-06-27 00:00:00"),
57: pd.Timestamp("2012-06-28 00:00:00"),
58: pd.Timestamp("2012-06-29 00:00:00"),
59: pd.Timestamp("2012-06-30 00:00:00"),
60: pd.Timestamp("2012-07-01 00:00:00"),
61: pd.Timestamp("2012-07-02 00:00:00"),
62: pd.Timestamp("2012-07-03 00:00:00"),
63: pd.Timestamp("2012-07-04 00:00:00"),
64: pd.Timestamp("2012-07-05 00:00:00"),
65: pd.Timestamp("2012-07-06 00:00:00"),
66: pd.Timestamp("2012-07-07 00:00:00"),
67: pd.Timestamp("2012-07-08 00:00:00"),
68: pd.Timestamp("2012-07-09 00:00:00"),
69: pd.Timestamp("2012-07-10 00:00:00"),
70: pd.Timestamp("2012-07-11 00:00:00"),
71: pd.Timestamp("2012-07-12 00:00:00"),
72: pd.Timestamp("2012-07-13 00:00:00"),
73: pd.Timestamp("2012-07-14 00:00:00"),
74: pd.Timestamp("2012-07-15 00:00:00"),
75: pd.Timestamp("2012-07-16 00:00:00"),
76: pd.Timestamp("2012-07-17 00:00:00"),
77: pd.Timestamp("2012-07-18 00:00:00"),
78: pd.Timestamp("2012-07-19 00:00:00"),
79: pd.Timestamp("2012-07-20 00:00:00"),
80: pd.Timestamp("2012-07-21 00:00:00"),
81: pd.Timestamp("2012-07-22 00:00:00"),
82: pd.Timestamp("2012-07-23 00:00:00"),
83: pd.Timestamp("2012-07-24 00:00:00"),
84: pd.Timestamp("2012-07-25 00:00:00"),
85: pd.Timestamp("2012-07-26 00:00:00"),
86: pd.Timestamp("2012-07-27 00:00:00"),
87: pd.Timestamp("2012-07-28 00:00:00"),
88: pd.Timestamp("2012-07-29 00:00:00"),
89: pd.Timestamp("2012-07-30 00:00:00"),
90: pd.Timestamp("2012-07-31 00:00:00"),
91: pd.Timestamp("2012-08-01 00:00:00"),
92: pd.Timestamp("2012-08-02 00:00:00"),
93: pd.Timestamp("2012-08-03 00:00:00"),
94: pd.Timestamp("2012-08-04 00:00:00"),
95: pd.Timestamp("2012-08-05 00:00:00"),
96: pd.Timestamp("2012-08-06 00:00:00"),
97: pd.Timestamp("2012-08-07 00:00:00"),
98: pd.Timestamp("2012-08-08 00:00:00"),
99: pd.Timestamp("2012-08-09 00:00:00"),
100: pd.Timestamp("2012-08-10 00:00:00"),
101: pd.Timestamp("2012-08-11 00:00:00"),
102: pd.Timestamp("2012-08-12 00:00:00"),
103: pd.Timestamp("2012-08-13 00:00:00"),
104: pd.Timestamp("2012-08-14 00:00:00"),
105: pd.Timestamp("2012-08-15 00:00:00"),
106: pd.Timestamp("2012-08-16 00:00:00"),
107: pd.Timestamp("2012-08-17 00:00:00"),
108: pd.Timestamp("2012-08-18 00:00:00"),
109: pd.Timestamp("2012-08-19 00:00:00"),
110: pd.Timestamp("2012-08-20 00:00:00"),
111: pd.Timestamp("2012-08-21 00:00:00"),
112: pd.Timestamp("2012-08-22 00:00:00"),
113: pd.Timestamp("2012-08-23 00:00:00"),
114: pd.Timestamp("2012-08-24 00:00:00"),
115: pd.Timestamp("2012-08-25 00:00:00"),
116: pd.Timestamp("2012-08-26 00:00:00"),
117: pd.Timestamp("2012-08-27 00:00:00"),
118: pd.Timestamp("2012-08-28 00:00:00"),
119: pd.Timestamp("2012-08-29 00:00:00"),
120: pd.Timestamp("2012-08-30 00:00:00"),
121: pd.Timestamp("2012-08-31 00:00:00"),
122: pd.Timestamp("2012-09-01 00:00:00"),
123: pd.Timestamp("2012-09-02 00:00:00"),
124: pd.Timestamp("2012-09-03 00:00:00"),
125: pd.Timestamp("2012-09-04 00:00:00"),
126: pd.Timestamp("2012-09-05 00:00:00"),
127: pd.Timestamp("2012-09-06 00:00:00"),
128: pd.Timestamp("2012-09-07 00:00:00"),
129: pd.Timestamp("2012-09-08 00:00:00"),
130: pd.Timestamp("2012-09-09 00:00:00"),
131: pd.Timestamp("2012-09-10 00:00:00"),
132: pd.Timestamp("2012-09-11 00:00:00"),
133: pd.Timestamp("2012-09-12 00:00:00"),
134: pd.Timestamp("2012-09-13 00:00:00"),
135: pd.Timestamp("2012-09-14 00:00:00"),
136: pd.Timestamp("2012-09-15 00:00:00"),
137: pd.Timestamp("2012-09-16 00:00:00"),
138: pd.Timestamp("2012-09-17 00:00:00"),
139: pd.Timestamp("2012-09-18 00:00:00"),
140: pd.Timestamp("2012-09-19 00:00:00"),
141: pd.Timestamp("2012-09-20 00:00:00"),
142: pd.Timestamp("2012-09-21 00:00:00"),
143: pd.Timestamp("2012-09-22 00:00:00"),
144: pd.Timestamp("2012-09-23 00:00:00"),
145: pd.Timestamp("2012-09-24 00:00:00"),
146: pd.Timestamp("2012-09-25 00:00:00"),
147: pd.Timestamp("2012-09-26 00:00:00"),
148: pd.Timestamp("2012-09-27 00:00:00"),
149: pd.Timestamp("2012-09-28 00:00:00"),
150: pd.Timestamp("2012-09-29 00:00:00"),
151: pd.Timestamp("2012-09-30 00:00:00"),
152: pd.Timestamp("2012-10-01 00:00:00"),
153: pd.Timestamp("2012-10-02 00:00:00"),
154: pd.Timestamp("2012-10-03 00:00:00"),
155: pd.Timestamp("2012-10-04 00:00:00"),
156: pd.Timestamp("2012-10-05 00:00:00"),
157: pd.Timestamp("2012-10-06 00:00:00"),
158: pd.Timestamp("2012-10-07 00:00:00"),
159: pd.Timestamp("2012-10-08 00:00:00"),
160: pd.Timestamp("2012-10-09 00:00:00"),
161: pd.Timestamp("2012-10-10 00:00:00"),
162: pd.Timestamp("2012-10-11 00:00:00"),
163: pd.Timestamp("2012-10-12 00:00:00"),
164: pd.Timestamp("2012-10-13 00:00:00"),
165: pd.Timestamp("2012-10-14 00:00:00"),
166: pd.Timestamp("2012-10-15 00:00:00"),
167: pd.Timestamp("2012-10-16 00:00:00"),
168: pd.Timestamp("2012-10-17 00:00:00"),
169: pd.Timestamp("2012-10-18 00:00:00"),
170: pd.Timestamp("2012-10-19 00:00:00"),
171: pd.Timestamp("2012-10-20 00:00:00"),
172: pd.Timestamp("2012-10-21 00:00:00"),
173: pd.Timestamp("2012-10-22 00:00:00"),
174: pd.Timestamp("2012-10-23 00:00:00"),
175: pd.Timestamp("2012-10-24 00:00:00"),
176: pd.Timestamp("2012-10-25 00:00:00"),
177: pd.Timestamp("2012-10-26 00:00:00"),
178: pd.Timestamp("2012-10-27 00:00:00"),
179: pd.Timestamp("2012-10-28 00:00:00"),
180: pd.Timestamp("2012-10-29 00:00:00"),
181: pd.Timestamp("2012-10-30 00:00:00"),
182: pd.Timestamp("2012-10-31 00:00:00"),
183: pd.Timestamp("2012-11-01 00:00:00"),
184: pd.Timestamp("2012-11-02 00:00:00"),
185: pd.Timestamp("2012-11-03 00:00:00"),
186: pd.Timestamp("2012-11-04 00:00:00"),
187: pd.Timestamp("2012-11-05 00:00:00"),
188: pd.Timestamp("2012-11-06 00:00:00"),
189: pd.Timestamp("2012-11-07 00:00:00"),
190: pd.Timestamp("2012-11-08 00:00:00"),
191: pd.Timestamp("2012-11-09 00:00:00"),
192: pd.Timestamp("2012-11-10 00:00:00"),
193: pd.Timestamp("2012-11-11 00:00:00"),
194: pd.Timestamp("2012-11-12 00:00:00"),
195: pd.Timestamp("2012-11-13 00:00:00"),
196: pd.Timestamp("2012-11-14 00:00:00"),
197: pd.Timestamp("2012-11-15 00:00:00"),
198: pd.Timestamp("2012-11-16 00:00:00"),
199: pd.Timestamp("2012-11-17 00:00:00"),
200: pd.Timestamp("2012-11-18 00:00:00"),
201: pd.Timestamp("2012-11-19 00:00:00"),
202: pd.Timestamp("2012-11-20 00:00:00"),
203: pd.Timestamp("2012-11-21 00:00:00"),
204: pd.Timestamp("2012-11-22 00:00:00"),
205: pd.Timestamp("2012-11-23 00:00:00"),
206: pd.Timestamp("2012-11-24 00:00:00"),
207: pd.Timestamp("2012-11-25 00:00:00"),
208: pd.Timestamp("2012-11-26 00:00:00"),
209: pd.Timestamp("2012-11-27 00:00:00"),
210: pd.Timestamp("2012-11-28 00:00:00"),
211: pd.Timestamp("2012-11-29 00:00:00"),
212: pd.Timestamp("2012-11-30 00:00:00"),
213: pd.Timestamp("2012-12-01 00:00:00"),
214: pd.Timestamp("2012-12-02 00:00:00"),
215: pd.Timestamp("2012-12-03 00:00:00"),
216: pd.Timestamp("2012-12-04 00:00:00"),
217: pd.Timestamp("2012-12-05 00:00:00"),
218: pd.Timestamp("2012-12-06 00:00:00"),
219: pd.Timestamp("2012-12-07 00:00:00"),
220: pd.Timestamp("2012-12-08 00:00:00"),
221: pd.Timestamp("2012-12-09 00:00:00"),
222: pd.Timestamp("2012-12-10 00:00:00"),
223: pd.Timestamp("2012-12-11 00:00:00"),
224: pd.Timestamp("2012-12-12 00:00:00"),
225: pd.Timestamp("2012-12-13 00:00:00"),
226: pd.Timestamp("2012-12-14 00:00:00"),
227: pd.Timestamp("2012-12-15 00:00:00"),
228: pd.Timestamp("2012-12-16 00:00:00"),
229: pd.Timestamp("2012-12-17 00:00:00"),
230: pd.Timestamp("2012-12-18 00:00:00"),
231: pd.Timestamp("2012-12-19 00:00:00"),
232: pd.Timestamp("2012-12-20 00:00:00"),
233: pd.Timestamp("2012-12-21 00:00:00"),
234: pd.Timestamp("2012-12-22 00:00:00"),
235: pd.Timestamp("2012-12-23 00:00:00"),
236: pd.Timestamp("2012-12-24 00:00:00"),
237: pd.Timestamp("2012-12-25 00:00:00"),
238: pd.Timestamp("2012-12-26 00:00:00"),
239: pd.Timestamp("2012-12-27 00:00:00"),
240: pd.Timestamp("2012-12-28 00:00:00"),
241: pd.Timestamp("2012-12-29 00:00:00"),
242: pd.Timestamp("2012-12-30 00:00:00"),
243: pd.Timestamp("2012-12-31 00:00:00"),
244: pd.Timestamp("2013-01-01 00:00:00"),
245: pd.Timestamp("2013-01-02 00:00:00"),
246: pd.Timestamp("2013-01-03 00:00:00"),
247: pd.Timestamp("2013-01-04 00:00:00"),
248: pd.Timestamp("2013-01-05 00:00:00"),
249: pd.Timestamp("2013-01-06 00:00:00"),
250: pd.Timestamp("2013-01-07 00:00:00"),
251: pd.Timestamp("2013-01-08 00:00:00"),
252: pd.Timestamp("2013-01-09 00:00:00"),
253: pd.Timestamp("2013-01-10 00:00:00"),
254: pd.Timestamp("2013-01-11 00:00:00"),
255: pd.Timestamp("2013-01-12 00:00:00"),
256: pd.Timestamp("2013-01-13 00:00:00"),
257: pd.Timestamp("2013-01-14 00:00:00"),
258: pd.Timestamp("2013-01-15 00:00:00"),
259: pd.Timestamp("2013-01-16 00:00:00"),
260: pd.Timestamp("2013-01-17 00:00:00"),
261: pd.Timestamp("2013-01-18 00:00:00"),
262: pd.Timestamp("2013-01-19 00:00:00"),
263: pd.Timestamp("2013-01-20 00:00:00"),
264: pd.Timestamp("2013-01-21 00:00:00"),
265: pd.Timestamp("2013-01-22 00:00:00"),
266: pd.Timestamp("2013-01-23 00:00:00"),
267: pd.Timestamp("2013-01-24 00:00:00"),
268: pd.Timestamp("2013-01-25 00:00:00"),
269: pd.Timestamp("2013-01-26 00:00:00"),
270: pd.Timestamp("2013-01-27 00:00:00"),
271: pd.Timestamp("2013-01-28 00:00:00"),
272: pd.Timestamp("2013-01-29 00:00:00"),
273: pd.Timestamp("2013-01-30 00:00:00"),
274: pd.Timestamp("2013-01-31 00:00:00"),
275: pd.Timestamp("2013-02-01 00:00:00"),
276: pd.Timestamp("2013-02-02 00:00:00"),
277: pd.Timestamp("2013-02-03 00:00:00"),
278: pd.Timestamp("2013-02-04 00:00:00"),
279: pd.Timestamp("2013-02-05 00:00:00"),
280: pd.Timestamp("2013-02-06 00:00:00"),
281: pd.Timestamp("2013-02-07 00:00:00"),
282: pd.Timestamp("2013-02-08 00:00:00"),
283: pd.Timestamp("2013-02-09 00:00:00"),
284: pd.Timestamp("2013-02-10 00:00:00"),
285: pd.Timestamp("2013-02-11 00:00:00"),
286: pd.Timestamp("2013-02-12 00:00:00"),
287: pd.Timestamp("2013-02-13 00:00:00"),
288: pd.Timestamp("2013-02-14 00:00:00"),
289: pd.Timestamp("2013-02-15 00:00:00"),
290: pd.Timestamp("2013-02-16 00:00:00"),
291: pd.Timestamp("2013-02-17 00:00:00"),
292: pd.Timestamp("2013-02-18 00:00:00"),
293: pd.Timestamp("2013-02-19 00:00:00"),
294: pd.Timestamp("2013-02-20 00:00:00"),
295: pd.Timestamp("2013-02-21 00:00:00"),
296: pd.Timestamp("2013-02-22 00:00:00"),
297: pd.Timestamp("2013-02-23 00:00:00"),
298: pd.Timestamp("2013-02-24 00:00:00"),
299: pd.Timestamp("2013-02-25 00:00:00"),
300: pd.Timestamp("2013-02-26 00:00:00"),
301: pd.Timestamp("2013-02-27 00:00:00"),
302: pd.Timestamp("2013-02-28 00:00:00"),
303: pd.Timestamp("2013-03-01 00:00:00"),
304: pd.Timestamp("2013-03-02 00:00:00"),
305: pd.Timestamp("2013-03-03 00:00:00"),
306: pd.Timestamp("2013-03-04 00:00:00"),
307: pd.Timestamp("2013-03-05 00:00:00"),
308: pd.Timestamp("2013-03-06 00:00:00"),
309: pd.Timestamp("2013-03-07 00:00:00"),
310: pd.Timestamp("2013-03-08 00:00:00"),
311: pd.Timestamp("2013-03-09 00:00:00"),
312: pd.Timestamp("2013-03-10 00:00:00"),
313: pd.Timestamp("2013-03-11 00:00:00"),
314: pd.Timestamp("2013-03-12 00:00:00"),
315: pd.Timestamp("2013-03-13 00:00:00"),
316: pd.Timestamp("2013-03-14 00:00:00"),
317: pd.Timestamp("2013-03-15 00:00:00"),
318: pd.Timestamp("2013-03-16 00:00:00"),
319: pd.Timestamp("2013-03-17 00:00:00"),
320: pd.Timestamp("2013-03-18 00:00:00"),
321: pd.Timestamp("2013-03-19 00:00:00"),
322: pd.Timestamp("2013-03-20 00:00:00"),
323: pd.Timestamp("2013-03-21 00:00:00"),
324: pd.Timestamp("2013-03-22 00:00:00"),
325: pd.Timestamp("2013-03-23 00:00:00"),
326: pd.Timestamp("2013-03-24 00:00:00"),
327: pd.Timestamp("2013-03-25 00:00:00"),
328: pd.Timestamp("2013-03-26 00:00:00"),
329: pd.Timestamp("2013-03-27 00:00:00"),
330: pd.Timestamp("2013-03-28 00:00:00"),
331: pd.Timestamp("2013-03-29 00:00:00"),
332: pd.Timestamp("2013-03-30 00:00:00"),
333: pd.Timestamp("2013-03-31 00:00:00"),
334: pd.Timestamp("2013-04-01 00:00:00"),
335: pd.Timestamp("2013-04-02 00:00:00"),
336: pd.Timestamp("2013-04-03 00:00:00"),
337: pd.Timestamp("2013-04-04 00:00:00"),
338: pd.Timestamp("2013-04-05 00:00:00"),
339: pd.Timestamp("2013-04-06 00:00:00"),
340: pd.Timestamp("2013-04-07 00:00:00"),
341: pd.Timestamp("2013-04-08 00:00:00"),
342: pd.Timestamp("2013-04-09 00:00:00"),
343: pd.Timestamp("2013-04-10 00:00:00"),
344: pd.Timestamp("2013-04-11 00:00:00"),
345: pd.Timestamp("2013-04-12 00:00:00"),
346: pd.Timestamp("2013-04-13 00:00:00"),
347: pd.Timestamp("2013-04-14 00:00:00"),
348: pd.Timestamp("2013-04-15 00:00:00"),
349: pd.Timestamp("2013-04-16 00:00:00"),
350: pd.Timestamp("2013-04-17 00:00:00"),
351: pd.Timestamp("2013-04-18 00:00:00"),
352: pd.Timestamp("2013-04-19 00:00:00"),
353: pd.Timestamp("2013-04-20 00:00:00"),
354: pd.Timestamp("2013-04-21 00:00:00"),
355: pd.Timestamp("2013-04-22 00:00:00"),
356: pd.Timestamp("2013-04-23 00:00:00"),
357: pd.Timestamp("2013-04-24 00:00:00"),
358: pd.Timestamp("2013-04-25 00:00:00"),
359: pd.Timestamp("2013-04-26 00:00:00"),
360: pd.Timestamp("2013-04-27 00:00:00"),
361: pd.Timestamp("2013-04-28 00:00:00"),
362: pd.Timestamp("2013-04-29 00:00:00"),
363: pd.Timestamp("2013-04-30 00:00:00"),
364: pd.Timestamp("2013-05-01 00:00:00"),
365: pd.Timestamp("2013-05-02 00:00:00"),
366: pd.Timestamp("2013-05-03 00:00:00"),
367: pd.Timestamp("2013-05-04 00:00:00"),
368: pd.Timestamp("2013-05-05 00:00:00"),
369: pd.Timestamp("2013-05-06 00:00:00"),
370: pd.Timestamp("2013-05-07 00:00:00"),
371: | pd.Timestamp("2013-05-08 00:00:00") | pandas.Timestamp |
import os
import requests
import json
import pandas as pd
import time
from datetime import datetime
# Make sure the environment variable is already set up
bearer_token = os.environ.get("BEARER_TOKEN")
def extract_public_metrics(df_tweets):
'''
Pulls out the `public_metrics` object and appends this to the Pandas dataframe as separate columns.
'''
if 'public_metrics' in df_tweets.columns:
public_metric_columns = ['retweet_count', 'reply_count', 'like_count', 'quote_count']
for public_metric in public_metric_columns:
df_tweets[public_metric] = df_tweets['public_metrics'].apply(lambda x: x[public_metric])
df_tweets = df_tweets.drop(columns=['public_metrics'])
return df_tweets
def extract_referenced_tweets(df_tweets):
'''
Pulls out the `referenced_tweets` object and appends this to the Pandas dataframe as separate columns.
'''
if 'referenced_tweets' in df_tweets.columns:
df_tweets['type'] = df_tweets['referenced_tweets'].apply(lambda x: x[0]['type'] if isinstance(x, list) else None)
df_tweets['referenced_tweet_id'] = df_tweets['referenced_tweets'].apply(lambda x: x[0]['id'] if isinstance(x, list) else None)
df_tweets = df_tweets.drop(columns=['referenced_tweets'])
return df_tweets
def clean_tweets_dataframe(df_tweets):
'''
Clean up dataframe object obtained from REST API JSON
'''
df_tweets = extract_public_metrics(df_tweets)
df_tweets = extract_referenced_tweets(df_tweets)
return df_tweets
def tweets_url(ids:list):
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets?ids={','.join(ids)}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def tweet_url(tweet_id:str):
'''
Pulls data for an individual tweet. You can adjust ids to include a single Tweets
or add to up to 100 comma-separated IDs
'''
tweet_fields = "tweet.fields=lang,author_id"
ids = "ids="+tweet_id
url = "https://api.twitter.com/2/tweets?{}&{}".format(ids, tweet_fields)
return url
def search_url(query:str, max_results:int=100, start_time=None, end_time=None) -> str:
'''
Generates endpoint for Twitter REST API: GET /2/tweets/search/recent
Time format must be in RFC 3339 UTC timestamp eg `2022-01-04T00:00:00.000Z`
'''
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent"+\
f"?query={query} -is:reply -is:quote"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
if start_time is not None:
url+=f"&start_time={start_time}"
if start_time is not None:
url+=f"&end_time={end_time}"
return url
def replies_to_user_url(user_id:str, max_results:int=100) -> str:
'''
Generates endpoint for Twitter REST API: GET /2/tweets/search/recent
Gets all replies to an individual user_id
'''
tweet_fields = 'id,author_id,public_metrics'
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent?query=to%3A{user_id}%20OR%20retweets_of%3A{user_id}"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def liking_users_url(tweet_id):
'''
'''
url = f"https://api.twitter.com/2/tweets/{tweet_id}/liking_users"
return url
def retweeting_users_url(tweet_id):
url = f"https://api.twitter.com/2/tweets/{tweet_id}/retweeted_by"
return url
def user_url(user_id):
url = f"https://api.twitter.com/2/users/{user_id}"
return url
def get_conversation_url(conversation_id, max_results=100):
'''
Get all comments and replies related to this tweet
'''
tweet_fields = 'id,author_id,public_metrics,conversation_id,created_at' #,in_reply_to_user_id #,entities
user_fields = 'name,username,profile_image_url'
expansions = 'author_id,referenced_tweets.id,in_reply_to_user_id,referenced_tweets.id.author_id'
url = f"https://api.twitter.com/2/tweets/search/recent"+\
f"?query=conversation_id:{conversation_id}"+\
f"&max_results={max_results}"+\
f"&tweet.fields={tweet_fields}"+\
f"&user.fields={user_fields}"+\
f"&expansions={expansions}"
return url
def bearer_oauth(r):
'''
Method required by bearer token authentication.
'''
r.headers['Authorization'] = f"Bearer {bearer_token}"
return r
def connect_to_endpoint(url, wait_on_timeout=True):
response = requests.request("GET", url, auth=bearer_oauth)
epochtime = response.headers['x-rate-limit-reset']
rate_limit_reset_time = datetime.fromtimestamp(int(epochtime))
rate_limit_remaining = response.headers['x-rate-limit-remaining']
print(f"{response.status_code}\tx-rate-limit-remaining: {rate_limit_remaining}\tx-rate-limit-reset: {rate_limit_reset_time}")
# If the REST API limit is reached, we can sleep until the limit is reset and then continue
if response.status_code == 429 and wait_on_timeout == True:
rate_limit_reset_time = datetime.fromtimestamp(int(epochtime))
time_difference = rate_limit_reset_time-datetime.now()
print(f"Rate limit resets at {rate_limit_reset_time}. Sleeping for {time_difference.seconds} seconds...")
time.sleep(time_difference.seconds+10)
print(datetime.now())
response = requests.request("GET", url, auth=bearer_oauth)
if response.status_code != 200:
raise Exception(
"Request returned an error: {} {}".format(
response.status_code, response.text
)
)
return response.json()
def day_to_time(day:str) -> str:
'''
Convert `dd-mon-yyyy` format to UTC timestamp. Used to generate Twitter API url.
'''
if day is not None:
time = datetime.strptime(day,'%d-%b-%Y').isoformat() + "Z"
else:
time = None
return time
def search_and_paginate(query:str, num_results:int=100, wait_on_timeout=True, start_day=None, end_day=None):
'''
Calls the Twitter REST API /2/tweets/search/recent and paginates results
:return: Tweets as a Pandas dataframe.
'''
results_each_call = 100
# Must be RFC 3339 UTC timestamp
start_time = day_to_time(start_day)
end_time = day_to_time(end_day)
url = search_url(query, results_each_call,start_time,end_time)
json_response = connect_to_endpoint(url, wait_on_timeout)
df_tweets = pd.DataFrame(json_response['data'])
df_users = pd.DataFrame(json_response['includes']['users'])
# As maximum results for each call is 100, more REST API calls may need to be made to collect
# more results.
while 'next_token' in json_response['meta'] and len(df_tweets) < num_results:
pagination_token = json_response['meta']['next_token']
json_response = connect_to_endpoint(f'{url}&next_token={pagination_token}', wait_on_timeout)
df_tweets = df_tweets.append(pd.DataFrame(json_response['data']),ignore_index=True)
df_users = df_users.append(pd.DataFrame(json_response['includes']['users']),ignore_index=True)
df_tweets = clean_tweets_dataframe(df_tweets)
return df_tweets, df_users
def get_original_tweets(df_tweets, df_users):
'''
If the retweeted tweet is not in the original list, grab this as well and append it to the Pandas dataframe.
Can probably do one call for multiple `conversation_id`s
'''
df_referenced_tweets = | pd.DataFrame() | pandas.DataFrame |
r"""
Tests for news results
Author: <NAME>
License: BSD-3
"""
from statsmodels.compat.pandas import NumericIndex
from statsmodels.compat.pandas import (
assert_frame_equal,
assert_series_equal,
)
import numpy as np
from numpy.testing import assert_, assert_allclose, assert_equal
import pandas as pd
import pytest
from statsmodels import datasets
from statsmodels.tsa.statespace import (
dynamic_factor,
sarimax,
structural,
varmax,
)
dta = datasets.macrodata.load_pandas().data
dta.index = pd.period_range(start='1959Q1', end='2009Q3', freq='Q')
def check_impact_indices(news, impact_dates, impacted_variables):
# Note: the index for impacts is only a time index, because we compute
# impacts for all variables during these time periods.
for attr in ['total_impacts', 'update_impacts', 'revision_impacts',
'post_impacted_forecasts', 'prev_impacted_forecasts']:
val = getattr(news, attr)
assert_(val.index.equals(impact_dates))
assert_equal(val.columns.tolist(), impacted_variables)
def check_revision_indices(news, revisions_index):
# Note: revision indices must be a MultiIndex, because for each time
# period, not all variables necessarily revised.
# If there are no revisions, we just check the length is zero
if news.news_results.revision_impacts is None:
assert_equal(len(news.revisions_iloc['revision date']), 0)
assert_equal(len(news.revisions_iloc['revised variable']), 0)
assert_equal(len(news.revisions_ix['revision date']), 0)
assert_equal(len(news.revisions_ix['revised variable']), 0)
# Otherwise, check that the revision indexes are correct
else:
# Get the integer locations of the given indexes
dates = news.previous.model._index
endog_names = news.previous.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
desired_ix = revisions_index.to_frame().reset_index(drop=True)
desired_iloc = desired_ix.copy()
desired_iloc['revision date'] = [
dates.get_loc(date) for date in desired_ix['revision date']]
desired_iloc['revised variable'] = [
endog_names.index(name)
for name in desired_ix['revised variable']]
assert_(news.revisions_iloc.equals(desired_iloc))
assert_(news.revisions_ix.equals(desired_ix))
def check_update_indices(news, updates_index):
# Note: update indices are also a MultiIndex, for the same reason as the
# revision indices.
# If there are no updates, we just check the length is zero
if news.news_results.update_impacts is None:
assert_equal(len(news.updates_iloc['update date']), 0)
assert_equal(len(news.updates_iloc['updated variable']), 0)
assert_equal(len(news.updates_ix['update date']), 0)
assert_equal(len(news.updates_ix['updated variable']), 0)
# Otherwise, check that the revision indexes are correct
else:
# Get the integer locations of the given indexes
dates = news.updated.model._index
endog_names = news.updated.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
desired_ix = updates_index.to_frame().reset_index(drop=True)
desired_iloc = desired_ix.copy()
desired_iloc['update date'] = [
dates.get_loc(date) for date in desired_ix['update date']]
desired_iloc['updated variable'] = [
endog_names.index(name)
for name in desired_ix['updated variable']]
assert_(news.updates_iloc.equals(desired_iloc))
assert_(news.updates_ix.equals(desired_ix))
def check_news_indices(news, updates_index, impact_dates):
# News are computed only from updates, so the news indices are the same as
# the update indices
if len(updates_index):
news_index = updates_index
else:
news_index = pd.MultiIndex.from_product(
[[], []], names=['update date', 'updated variable'])
endog_names = news.previous.model.endog_names
if isinstance(endog_names, str):
endog_names = [endog_names]
assert_(news.news.index.equals(news_index))
assert_(news.update_forecasts.index.equals(news_index))
assert_(news.update_realized.index.equals(news_index))
assert_(news.weights.index.equals(news_index))
weights_columns = pd.MultiIndex.from_product([impact_dates, endog_names])
assert_(news.weights.columns.equals(weights_columns))
def check_news(news, revisions, updates, impact_dates, impacted_variables,
revisions_index, updates_index,
revision_impacts, update_impacts,
prev_impacted_forecasts, post_impacted_forecasts,
update_forecasts, update_realized, news_desired, weights):
# Note: we use atol=1e-12 to handle cases where impacts, etc. are equal to
# zero, but numerical precision of the Kalman filter procedures gives an
# answer of e.g. 1e-16.
# Note: Here we set the tolerance to be slightly negative, since some of
# the tests have weights or impacts exactly equal to zero, while we still
# want to include those in tests.
news.tolerance = -1e-10
# - Indexes --------------------------------------------------------------
# Index of impacts
check_impact_indices(news, impact_dates, impacted_variables)
# Reivision indices
check_revision_indices(news, revisions_index)
# Update indices
check_update_indices(news, updates_index)
# News indices
check_news_indices(news, updates_index, impact_dates)
# - Impacts --------------------------------------------------------------
if updates:
assert_allclose(news.update_impacts, update_impacts, atol=1e-12)
else:
assert_(np.all(news.update_impacts.isnull()))
# Impacts from revisions
if revisions:
assert_allclose(news.revision_impacts, revision_impacts, atol=1e-12)
else:
assert_(news.news_results.revision_impacts is None)
assert_(np.all(news.revision_impacts.isnull()))
# Total impacts
total_impacts = (news.revision_impacts.fillna(0) +
news.update_impacts.fillna(0))
assert_allclose(news.total_impacts, total_impacts, atol=1e-12)
# - Impacted variable forecasts ------------------------------------------
assert_allclose(news.prev_impacted_forecasts, prev_impacted_forecasts,
atol=1e-12)
assert_allclose(news.post_impacted_forecasts, post_impacted_forecasts,
atol=1e-12)
# - News -----------------------------------------------------------------
assert_allclose(news.update_forecasts, update_forecasts, atol=1e-12)
assert_allclose(news.update_realized, update_realized, atol=1e-12)
# The "news" is simply the forecast error
assert_allclose(news.news, news_desired, atol=1e-12)
# The weight is zero on previously known data, and is geometrically
# declining (according to the AR parameter) in the forecast period
assert_allclose(news.weights, weights, atol=1e-12)
# - Table: data revisions ------------------------------------------------
assert_equal(news.data_revisions.columns.tolist(),
['observed (prev)', 'revised'])
assert_equal(news.data_revisions.index.names,
['revision date', 'revised variable'])
assert_(news.data_revisions.index.equals(revisions_index))
# - Table: data updates --------------------------------------------------
assert_equal(news.data_updates.columns.tolist(),
['observed', 'forecast (prev)'])
assert_equal(news.data_updates.index.names,
['update date', 'updated variable'])
assert_(news.data_updates.index.equals(news.news.index))
assert_allclose(news.data_updates['forecast (prev)'],
news.update_forecasts, atol=1e-12)
assert_allclose(news.data_updates['observed'], news.update_realized,
atol=1e-12)
# - Table: details_by_impact ---------------------------------------------
details_by_impact = news.details_by_impact
desired = ['observed', 'forecast (prev)', 'news', 'weight', 'impact']
assert_equal(details_by_impact.columns.tolist(), desired)
desired = ['impact date', 'impacted variable',
'update date', 'updated variable']
assert_equal(details_by_impact.index.names, desired)
if updates:
actual = (news.details_by_impact['forecast (prev)']
.drop_duplicates()
.reset_index([0, 1])['forecast (prev)'])
assert_allclose(actual, news.update_forecasts, atol=1e-12)
actual = (news.details_by_impact['observed']
.drop_duplicates().reset_index([0, 1])['observed'])
assert_allclose(actual, news.update_realized, atol=1e-12)
actual = (news.details_by_impact['news']
.drop_duplicates().reset_index([0, 1])['news'])
assert_allclose(actual, news.news, atol=1e-12)
# Weights
assert_allclose(details_by_impact['weight'].unstack([0, 1]),
news.weights, atol=1e-12)
# Impact of news
actual = (news.details_by_impact['impact']
.unstack([2, 3]).sum(axis=1).unstack(1))
assert_allclose(actual, news.update_impacts, atol=1e-12)
# - Table: details_by_update ---------------------------------------------
details_by_update = news.details_by_update
desired = ['news', 'weight', 'impact']
assert_equal(details_by_update.columns.tolist(), desired)
desired = ['update date', 'updated variable', 'observed',
'forecast (prev)', 'impact date', 'impacted variable']
assert_equal(details_by_update.index.names, desired)
if updates:
# News
# Special case for Pandas = 0.23, see above
actual = (news.details_by_update['news']
.drop_duplicates().reset_index([2, 3, 4, 5])['news'])
assert_allclose(actual, news.news, atol=1e-12)
# Weights
assert_allclose(news.details_by_update['weight'].unstack([4, 5]),
news.weights, atol=1e-12)
# Impact of news
actual = (news.details_by_update['impact']
.unstack([4, 5]).sum(axis=0).unstack(1))
assert_allclose(actual, news.update_impacts, atol=1e-12)
# - Table: impacts -------------------------------------------------------
impacts = news.impacts
desired = ['estimate (prev)', 'impact of revisions', 'impact of news',
'total impact', 'estimate (new)']
assert_equal(impacts.columns.tolist(), desired)
desired = ['impact date', 'impacted variable']
assert_equal(impacts.index.names, desired)
assert_allclose(impacts.loc[:, 'estimate (prev)'],
news.prev_impacted_forecasts.stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'impact of revisions'],
news.revision_impacts.fillna(0).stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'impact of news'],
news.update_impacts.fillna(0).stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'total impact'],
news.total_impacts.stack(), atol=1e-12)
assert_allclose(impacts.loc[:, 'estimate (new)'],
news.post_impacted_forecasts.stack(), atol=1e-12)
@pytest.mark.parametrize('revisions', [True, False])
@pytest.mark.parametrize('updates', [True, False])
def test_sarimax_time_invariant(revisions, updates):
# Construct previous and updated datasets
endog = dta['infl'].copy()
comparison_type = None
if updates:
endog1 = endog.loc[:'2009Q2'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
else:
endog1 = endog.loc[:'2009Q3'].copy()
endog2 = endog.loc[:'2009Q3'].copy()
# Without updates and without NaN values, we need to specify that
# the type of the comparison object that we're passing is "updated"
comparison_type = 'updated'
if revisions:
endog1.iloc[-1] = 0.
# Get the previous results object and compute the news
mod = sarimax.SARIMAX(endog1)
res = mod.smooth([0.5, 1.0])
news = res.news(endog2, start='2009Q2', end='2010Q1',
comparison_type=comparison_type)
# Compute the true values for each combination of (revsions, updates)
impact_dates = pd.period_range(start='2009Q2', end='2010Q1', freq='Q')
impacted_variables = ['infl']
# Revisions
if revisions and updates:
revisions_index = pd.MultiIndex.from_arrays(
[endog1.index[-1:], ['infl']],
names=['revision date', 'revised variable'])
# If we have updates, the revision is to 2009Q2
revision_impacts = endog2.iloc[-2] * 0.5**np.arange(4).reshape(4, 1)
elif revisions:
revisions_index = pd.MultiIndex.from_arrays(
[endog1.index[-1:], ['infl']],
names=['revision date', 'revised variable'])
# With no updates, the revision is to 2009Q3
revision_impacts = np.r_[
0, endog2.iloc[-1] * 0.5**np.arange(3)].reshape(4, 1)
else:
revisions_index = pd.MultiIndex.from_arrays(
[[], []], names=['revision date', 'revised variable'])
revision_impacts = None
# Updates
if updates:
updates_index = pd.MultiIndex.from_arrays(
[pd.period_range(start='2009Q3', periods=1, freq='Q'), ['infl']],
names=['update date', 'updated variable'])
update_impacts = np.array([[
0, endog.loc['2009Q3'] - 0.5 * endog.loc['2009Q2'],
0.5 * endog.loc['2009Q3'] - 0.5**2 * endog.loc['2009Q2'],
0.5**2 * endog.loc['2009Q3'] - 0.5**3 * endog.loc['2009Q2']]]).T
else:
updates_index = pd.MultiIndex.from_arrays(
[[], []], names=['update date', 'updated variable'])
update_impacts = None
# Impact forecasts
if updates:
prev_impacted_forecasts = np.r_[
endog1.iloc[-1] * 0.5**np.arange(4)].reshape(4, 1)
else:
prev_impacted_forecasts = np.r_[
endog1.iloc[-2], endog1.iloc[-1] * 0.5**np.arange(3)].reshape(4, 1)
post_impacted_forecasts = np.r_[
endog2.iloc[-2], 0.5 ** np.arange(3) * endog2.iloc[-1]].reshape(4, 1)
# News
if updates:
# Note: update_forecasts is created using the endog2 dataset even if
# there were revisions, because it should be computed after revisions
# have already been taken into account
update_forecasts = [0.5 * endog2.loc['2009Q2']]
update_realized = [endog2.loc['2009Q3']]
news_desired = [update_realized[i] - update_forecasts[i]
for i in range(len(update_forecasts))]
weights = pd.DataFrame(np.r_[0, 0.5**np.arange(3)]).T
else:
update_forecasts = pd.Series([], dtype=np.float64)
update_realized = pd.Series([], dtype=np.float64)
news_desired = | pd.Series([], dtype=np.float64) | pandas.Series |
import logging
import itertools
import numpy
import pandas
def trade_pair(pair_code, bid, ask, volume):
"""
Computes the balance after the operation takes place.
Example:
XXLMXXBT 38092.21 0.000008210 0.000008340 121.618 --> With a volume of 1 we go long 0.000008210 XXBT and short 1 XXLM
:param pair_code:
:param bid:
:param ask:
:param volume:
:return:
"""
currency_first = pair_code[:4]
currency_second = pair_code[4:]
balance = {currency_first: 0, currency_second: 0}
trade = None
if volume > 0:
allowed_volume = min(volume, bid['volume'])
capped = numpy.NaN
if allowed_volume < volume:
capped = allowed_volume
balance = {currency_first: allowed_volume * -1, currency_second: allowed_volume * bid['price']}
trade = {'direction': 'buy', 'pair': pair_code, 'quantity': allowed_volume, 'price': bid['price'],
'capped': capped}
elif volume < 0:
allowed_volume = min(abs(volume), ask['volume'])
capped = numpy.NaN
if allowed_volume < abs(volume):
capped = allowed_volume
balance = {currency_first: allowed_volume, currency_second: allowed_volume * ask['price'] * -1}
trade = {'direction': 'sell', 'pair': pair_code, 'quantity': allowed_volume, 'price': ask['price'],
'capped': capped}
return balance, trade
def buy_currency_using_pair(currency, volume, pair_code, bid, ask):
"""
:param currency:
:param volume: amount to buy denominated in currency
:param pair_code:
:param bid:
:param ask:
:return:
"""
logging.info('buying {} {} using {}'.format(volume, currency, pair_code))
if pair_code[4:] == currency:
# Direct quotation
logging.debug('direct quotation')
target_volume = volume / bid['price']
balance, performed_trade = trade_pair(pair_code, bid, ask, round(target_volume, 10))
else:
# Indirect quotation
logging.debug('indirect quotation')
balance, performed_trade = trade_pair(pair_code, bid, ask, volume * -1)
return balance, performed_trade
def sell_currency_using_pair(currency, volume, pair_code, bid, ask):
"""
:param currency:
:param volume: amount to buy denominated in currency
:param pair_code:
:param bid:
:param ask:
:return:
"""
logging.info('selling {} {} using {}'.format(volume, currency, pair_code))
if pair_code[4:] == currency:
# Direct quotation
logging.debug('direct quotation')
target_volume = -1 * volume / ask['price']
balance, performed_trade = trade_pair(pair_code, bid, ask, round(target_volume, 10))
else:
# Indirect quotation
logging.debug('indirect quotation')
balance, performed_trade = trade_pair(pair_code, bid, ask, volume)
return balance, performed_trade
def calculate_arbitrage_opportunity(pair_1, pair_bid_1, pair_ask_1, pair_2, pair_bid_2, pair_ask_2, pair_3, pair_bid_3,
pair_ask_3, skip_capped=True):
"""
:param pair_1:
:param pair_bid_1:
:param pair_ask_1:
:param pair_2:
:param pair_bid_2:
:param pair_ask_2:
:param pair_3:
:param pair_bid_3:
:param pair_ask_3:
:param skip_capped:
:return: (trades, balances)
"""
pairs = [pair_1, pair_2, pair_3]
pair_bids = [pair_bid_1, pair_bid_2, pair_bid_3]
pair_asks = [pair_ask_1, pair_ask_2, pair_ask_3]
results = list()
for first, second, third in itertools.permutations([0, 1, 2]):
currency_initial = pairs[first][4:]
initial_bid = pair_bids[first]
initial_ask = pair_asks[first]
if currency_initial in pairs[second]:
next_pair = pairs[second]
next_bid = pair_bids[second]
next_ask = pair_asks[second]
final_pair = pairs[third]
final_bid = pair_bids[third]
final_ask = pair_asks[third]
else:
next_pair = pairs[third]
next_bid = pair_bids[third]
next_ask = pair_asks[third]
final_pair = pairs[second]
final_bid = pair_bids[second]
final_ask = pair_asks[second]
if next_pair[:4] != currency_initial:
currency_next = next_pair[:4]
else:
currency_next = next_pair[4:]
balance_initial, trade_initial = buy_currency_using_pair(currency_initial, 1, pairs[first], initial_bid,
initial_ask)
balance_next, trade_next = sell_currency_using_pair(currency_initial, balance_initial[currency_initial],
next_pair, next_bid, next_ask)
balance_final, trade_final = sell_currency_using_pair(currency_next, balance_next[currency_next], final_pair,
final_bid, final_ask)
balance1_series = pandas.Series(balance_initial, name='initial')
balance2_series = pandas.Series(balance_next, name='next')
balance3_series = pandas.Series(balance_final, name='final')
balances = pandas.concat([balance1_series, balance2_series, balance3_series], axis=1)
trades_df = | pandas.DataFrame([trade_initial, trade_next, trade_final]) | pandas.DataFrame |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.